1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Memory Migration functionality - linux/mm/migrate.c
4 *
5 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6 *
7 * Page migration was first developed in the context of the memory hotplug
8 * project. The main authors of the migration code are:
9 *
10 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11 * Hirokazu Takahashi <taka@valinux.co.jp>
12 * Dave Hansen <haveblue@us.ibm.com>
13 * Christoph Lameter
14 */
15
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/nsproxy.h>
24 #include <linux/ksm.h>
25 #include <linux/rmap.h>
26 #include <linux/topology.h>
27 #include <linux/cpu.h>
28 #include <linux/cpuset.h>
29 #include <linux/writeback.h>
30 #include <linux/mempolicy.h>
31 #include <linux/vmalloc.h>
32 #include <linux/security.h>
33 #include <linux/backing-dev.h>
34 #include <linux/compaction.h>
35 #include <linux/syscalls.h>
36 #include <linux/compat.h>
37 #include <linux/hugetlb.h>
38 #include <linux/hugetlb_cgroup.h>
39 #include <linux/gfp.h>
40 #include <linux/pfn_t.h>
41 #include <linux/memremap.h>
42 #include <linux/userfaultfd_k.h>
43 #include <linux/balloon_compaction.h>
44 #include <linux/page_idle.h>
45 #include <linux/page_owner.h>
46 #include <linux/sched/mm.h>
47 #include <linux/ptrace.h>
48 #include <linux/oom.h>
49 #include <linux/memory.h>
50 #include <linux/random.h>
51 #include <linux/sched/sysctl.h>
52 #include <linux/memory-tiers.h>
53
54 #include <asm/tlbflush.h>
55
56 #include <trace/events/migrate.h>
57
58 #undef CREATE_TRACE_POINTS
59 #include <trace/hooks/mm.h>
60 #include <trace/hooks/vmscan.h>
61
62 #include "internal.h"
63
64 bool isolate_movable_page(struct page *page, isolate_mode_t mode)
65 {
66 struct folio *folio = folio_get_nontail_page(page);
67 const struct movable_operations *mops;
68
69 /*
70 * Avoid burning cycles on pages that are still under __free_pages(),
71 * or that just got freed under us.
72 *
73 * In case we 'win' a race for a movable page being freed under us and
74 * raise its refcount, preventing __free_pages() from doing its job,
75 * the folio_put() at the end of this block will take care of
76 * releasing the page, thus avoiding a nasty leak.
77 */
78 if (!folio)
79 goto out;
80
81 if (unlikely(folio_test_slab(folio)))
82 goto out_putfolio;
83 /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
84 smp_rmb();
85 /*
86 * Check movable flag before taking the page lock because
87 * we use non-atomic bitops on newly allocated page flags so
88 * unconditionally grabbing the lock ruins page's owner side.
89 */
90 if (unlikely(!__folio_test_movable(folio)))
91 goto out_putfolio;
92 /* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
93 smp_rmb();
94 if (unlikely(folio_test_slab(folio)))
95 goto out_putfolio;
96
97 /*
98 * As movable pages are not isolated from LRU lists, concurrent
99 * compaction threads can race against page migration functions
100 * as well as race against the release of a page.
101 *
102 * In order to avoid having an already isolated movable page
103 * being (wrongly) re-isolated while it is under migration,
104 * or to avoid attempting to isolate pages being released,
105 * let's be sure we have the page lock
106 * before proceeding with the movable page isolation steps.
107 */
108 if (unlikely(!folio_trylock(folio)))
109 goto out_putfolio;
110
111 if (!folio_test_movable(folio) || folio_test_isolated(folio))
112 goto out_no_isolated;
113
114 mops = folio_movable_ops(folio);
115 VM_BUG_ON_FOLIO(!mops, folio);
116
117 if (!mops->isolate_page(&folio->page, mode))
118 goto out_no_isolated;
119
120 /* Driver shouldn't use PG_isolated bit of page->flags */
121 WARN_ON_ONCE(folio_test_isolated(folio));
122 folio_set_isolated(folio);
123 folio_unlock(folio);
124
125 return true;
126
127 out_no_isolated:
128 folio_unlock(folio);
129 out_putfolio:
130 folio_put(folio);
131 out:
132 return false;
133 }
134
135 static void putback_movable_folio(struct folio *folio)
136 {
137 const struct movable_operations *mops = folio_movable_ops(folio);
138
139 mops->putback_page(&folio->page);
140 folio_clear_isolated(folio);
141 }
142
143 /*
144 * Put previously isolated pages back onto the appropriate lists
145 * from where they were once taken off for compaction/migration.
146 *
147 * This function shall be used whenever the isolated pageset has been
148 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
149 * and isolate_hugetlb().
150 */
151 void putback_movable_pages(struct list_head *l)
152 {
153 struct folio *folio;
154 struct folio *folio2;
155
156 list_for_each_entry_safe(folio, folio2, l, lru) {
157 if (unlikely(folio_test_hugetlb(folio))) {
158 folio_putback_active_hugetlb(folio);
159 continue;
160 }
161 list_del(&folio->lru);
162 /*
163 * We isolated a non-lru movable folio, so here we can use
164 * __folio_test_movable because an LRU folio's mapping cannot
165 * have PAGE_MAPPING_MOVABLE.
166 */
167 if (unlikely(__folio_test_movable(folio))) {
168 VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
169 folio_lock(folio);
170 if (folio_test_movable(folio))
171 putback_movable_folio(folio);
172 else
173 folio_clear_isolated(folio);
174 folio_unlock(folio);
175 folio_put(folio);
176 } else {
177 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
178 folio_is_file_lru(folio), -folio_nr_pages(folio));
179 folio_putback_lru(folio);
180 }
181 }
182 }
183 EXPORT_SYMBOL_GPL(putback_movable_pages);
184
185 /*
186 * Restore a potential migration pte to a working pte entry
187 */
188 static bool remove_migration_pte(struct folio *dst,
189 struct vm_area_struct *vma, unsigned long addr, void *arg)
190 {
191 struct folio *src = arg;
192 DEFINE_FOLIO_VMA_WALK(pvmw, src, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
193
194 while (page_vma_mapped_walk(&pvmw)) {
195 rmap_t rmap_flags = RMAP_NONE;
196 pte_t old_pte;
197 pte_t pte;
198 swp_entry_t entry;
199 struct page *page;
200 struct folio *folio;
201 unsigned long idx = 0;
202
203 /* pgoff is invalid for ksm pages, but they are never large */
204 if (folio_test_large(dst) && !folio_test_hugetlb(dst))
205 idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
206 page = folio_page(dst, idx);
207
208 if (src == dst) {
209 if (can_discard_src(page)) {
210 VM_WARN_ON_ONCE_FOLIO(!folio_test_anon(src), src);
211
212 pte_clear_not_present_full(pvmw.vma->vm_mm, pvmw.address,
213 pvmw.pte, false);
214 dec_mm_counter(pvmw.vma->vm_mm, MM_ANONPAGES);
215 continue;
216 }
217 page = folio_dst_page(src, idx);
218 }
219
220 folio = page_folio(page);
221
222 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
223 /* PMD-mapped THP migration entry */
224 if (!pvmw.pte) {
225 VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
226 !folio_test_pmd_mappable(folio), folio);
227 remove_migration_pmd(&pvmw, page);
228 continue;
229 }
230 #endif
231
232 folio_get(folio);
233 pte = mk_pte(page, READ_ONCE(vma->vm_page_prot));
234 old_pte = ptep_get(pvmw.pte);
235 if (pte_swp_soft_dirty(old_pte))
236 pte = pte_mksoft_dirty(pte);
237
238 entry = pte_to_swp_entry(old_pte);
239 if (!is_migration_entry_young(entry))
240 pte = pte_mkold(pte);
241 if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
242 pte = pte_mkdirty(pte);
243 if (is_writable_migration_entry(entry))
244 pte = pte_mkwrite(pte, vma);
245 else if (pte_swp_uffd_wp(old_pte))
246 pte = pte_mkuffd_wp(pte);
247
248 if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
249 rmap_flags |= RMAP_EXCLUSIVE;
250
251 if (unlikely(is_device_private_page(page))) {
252 if (pte_write(pte))
253 entry = make_writable_device_private_entry(
254 page_to_pfn(page));
255 else
256 entry = make_readable_device_private_entry(
257 page_to_pfn(page));
258 pte = swp_entry_to_pte(entry);
259 if (pte_swp_soft_dirty(old_pte))
260 pte = pte_swp_mksoft_dirty(pte);
261 if (pte_swp_uffd_wp(old_pte))
262 pte = pte_swp_mkuffd_wp(pte);
263 }
264
265 #ifdef CONFIG_HUGETLB_PAGE
266 if (folio_test_hugetlb(folio)) {
267 struct hstate *h = hstate_vma(vma);
268 unsigned int shift = huge_page_shift(h);
269 unsigned long psize = huge_page_size(h);
270
271 pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
272 if (folio_test_anon(folio))
273 hugetlb_add_anon_rmap(folio, vma, pvmw.address,
274 rmap_flags);
275 else
276 hugetlb_add_file_rmap(folio);
277 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
278 psize);
279 } else
280 #endif
281 {
282 if (folio_test_anon(folio))
283 folio_add_anon_rmap_pte(folio, page, vma,
284 pvmw.address, rmap_flags);
285 else
286 folio_add_file_rmap_pte(folio, page, vma);
287 set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
288 }
289 if (vma->vm_flags & VM_LOCKED)
290 mlock_drain_local();
291
292 trace_remove_migration_pte(pvmw.address, pte_val(pte),
293 compound_order(page));
294
295 /* No need to invalidate - it was non-present before */
296 update_mmu_cache(vma, pvmw.address, pvmw.pte);
297 }
298
299 return true;
300 }
301
302 /*
303 * Get rid of all migration entries and replace them by
304 * references to the indicated page.
305 */
306 void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
307 {
308 struct rmap_walk_control rwc = {
309 .rmap_one = remove_migration_pte,
310 .arg = src,
311 };
312
313 if (locked)
314 rmap_walk_locked(dst, &rwc);
315 else
316 rmap_walk(dst, &rwc);
317 }
318
319 /*
320 * Something used the pte of a page under migration. We need to
321 * get to the page and wait until migration is finished.
322 * When we return from this function the fault will be retried.
323 */
324 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
325 unsigned long address)
326 {
327 spinlock_t *ptl;
328 pte_t *ptep;
329 pte_t pte;
330 swp_entry_t entry;
331
332 ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
333 if (!ptep)
334 return;
335
336 pte = ptep_get(ptep);
337 pte_unmap(ptep);
338
339 if (!is_swap_pte(pte))
340 goto out;
341
342 entry = pte_to_swp_entry(pte);
343 if (!is_migration_entry(entry))
344 goto out;
345
346 migration_entry_wait_on_locked(entry, ptl);
347 return;
348 out:
349 spin_unlock(ptl);
350 }
351
352 #ifdef CONFIG_HUGETLB_PAGE
353 /*
354 * The vma read lock must be held upon entry. Holding that lock prevents either
355 * the pte or the ptl from being freed.
356 *
357 * This function will release the vma lock before returning.
358 */
359 void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep)
360 {
361 spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
362 pte_t pte;
363
364 hugetlb_vma_assert_locked(vma);
365 spin_lock(ptl);
366 pte = huge_ptep_get(ptep);
367
368 if (unlikely(!is_hugetlb_entry_migration(pte))) {
369 spin_unlock(ptl);
370 hugetlb_vma_unlock_read(vma);
371 } else {
372 /*
373 * If a migration entry exists, it is safe to release the vma
374 * lock here because the pgtable page won't be freed until the
375 * pgtable lock is released. See the comment right above the
376 * pgtable lock release in migration_entry_wait_on_locked().
377 */
378 hugetlb_vma_unlock_read(vma);
379 migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
380 }
381 }
382 #endif
383
384 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
385 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
386 {
387 spinlock_t *ptl;
388
389 ptl = pmd_lock(mm, pmd);
390 if (!is_pmd_migration_entry(*pmd))
391 goto unlock;
392 migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
393 return;
394 unlock:
395 spin_unlock(ptl);
396 }
397 #endif
398
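/*
 * Number of references the migration code expects to find on a folio it is
 * about to migrate: one held by the isolating caller, plus (for folios in a
 * mapping) one per base page held by the page cache or swap cache, plus one
 * more when fs-private data such as buffer heads is attached.
 */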
399 static int folio_expected_refs(struct address_space *mapping,
400 struct folio *folio)
401 {
402 int refs = 1;
403 if (!mapping)
404 return refs;
405
406 refs += folio_nr_pages(folio);
407 if (folio_test_private(folio))
408 refs++;
409
410 return refs;
411 }
412
413 /*
414 * Replace the page in the mapping.
415 *
416 * The number of remaining references must be:
417 * 1 for anonymous pages without a mapping
418 * 2 for pages with a mapping
419 * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
420 */
421 int folio_migrate_mapping(struct address_space *mapping,
422 struct folio *newfolio, struct folio *folio, int extra_count)
423 {
424 XA_STATE(xas, &mapping->i_pages, folio_index(folio));
425 struct zone *oldzone, *newzone;
426 int dirty;
427 int expected_count = folio_expected_refs(mapping, folio) + extra_count;
428 long nr = folio_nr_pages(folio);
429 long entries, i;
430
431 if (!mapping) {
432 /* Anonymous page without mapping */
433 if (folio_ref_count(folio) != expected_count)
434 return -EAGAIN;
435
436 /* No turning back from here */
437 newfolio->index = folio->index;
438 newfolio->mapping = folio->mapping;
439 if (folio_test_swapbacked(folio))
440 __folio_set_swapbacked(newfolio);
441
442 return MIGRATEPAGE_SUCCESS;
443 }
444
445 oldzone = folio_zone(folio);
446 newzone = folio_zone(newfolio);
447
448 xas_lock_irq(&xas);
449 if (!folio_ref_freeze(folio, expected_count)) {
450 xas_unlock_irq(&xas);
451 return -EAGAIN;
452 }
453
454 /*
455 * Now we know that no one else is looking at the folio:
456 * no turning back from here.
457 */
458 newfolio->index = folio->index;
459 newfolio->mapping = folio->mapping;
460 folio_ref_add(newfolio, nr); /* add cache reference */
461 if (folio_test_swapbacked(folio)) {
462 __folio_set_swapbacked(newfolio);
463 if (folio_test_swapcache(folio)) {
464 folio_set_swapcache(newfolio);
465 newfolio->private = folio_get_private(folio);
466 }
467 entries = nr;
468 } else {
469 VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
470 entries = 1;
471 }
472
473 /* Move dirty while page refs frozen and newpage not yet exposed */
474 dirty = folio_test_dirty(folio);
475 if (dirty) {
476 folio_clear_dirty(folio);
477 folio_set_dirty(newfolio);
478 }
479
480 /* Swap cache still stores N entries instead of a high-order entry */
481 for (i = 0; i < entries; i++) {
482 xas_store(&xas, newfolio);
483 xas_next(&xas);
484 }
485
486 /*
487 * Drop the cache references from the old folio by unfreezing
488 * to nr fewer references.
489 * We know these aren't the last references.
490 */
491 folio_ref_unfreeze(folio, expected_count - nr);
492
493 xas_unlock(&xas);
494 /* Leave irq disabled to prevent preemption while updating stats */
495
496 /*
497 * If moved to a different zone then also account
498 * the page for that zone. Other VM counters will be
499 * taken care of when we establish references to the
500 * new page and drop references to the old page.
501 *
502 * Note that anonymous pages are accounted for
503 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
504 * are mapped to swap space.
505 */
506 if (newzone != oldzone) {
507 struct lruvec *old_lruvec, *new_lruvec;
508 struct mem_cgroup *memcg;
509
510 memcg = folio_memcg(folio);
511 old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
512 new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
513
514 __mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
515 __mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
516 if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
517 __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
518 __mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
519
520 if (folio_test_pmd_mappable(folio)) {
521 __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
522 __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
523 }
524 }
525 #ifdef CONFIG_SWAP
526 if (folio_test_swapcache(folio)) {
527 __mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
528 __mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
529 }
530 #endif
531 if (dirty && mapping_can_writeback(mapping)) {
532 __mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
533 __mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
534 __mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
535 __mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
536 }
537 }
538 local_irq_enable();
539
540 return MIGRATEPAGE_SUCCESS;
541 }
542 EXPORT_SYMBOL(folio_migrate_mapping);
543
544 /*
545 * The expected number of remaining references is the same as that
546 * of folio_migrate_mapping().
547 */
548 int migrate_huge_page_move_mapping(struct address_space *mapping,
549 struct folio *dst, struct folio *src)
550 {
551 XA_STATE(xas, &mapping->i_pages, folio_index(src));
552 int expected_count;
553
554 xas_lock_irq(&xas);
555 expected_count = 2 + folio_has_private(src);
556 if (!folio_ref_freeze(src, expected_count)) {
557 xas_unlock_irq(&xas);
558 return -EAGAIN;
559 }
560
561 dst->index = src->index;
562 dst->mapping = src->mapping;
563
564 folio_get(dst);
565
566 xas_store(&xas, dst);
567
568 folio_ref_unfreeze(src, expected_count - 1);
569
570 xas_unlock_irq(&xas);
571
572 return MIGRATEPAGE_SUCCESS;
573 }
574
575 /*
576 * Copy the flags and some other ancillary information
577 */
578 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
579 {
580 int cpupid;
581
582 if (folio_test_error(folio))
583 folio_set_error(newfolio);
584 if (folio_test_referenced(folio))
585 folio_set_referenced(newfolio);
586 if (folio_test_uptodate(folio))
587 folio_mark_uptodate(newfolio);
588 if (folio_test_clear_active(folio)) {
589 VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
590 folio_set_active(newfolio);
591 } else if (folio_test_clear_unevictable(folio))
592 folio_set_unevictable(newfolio);
593 if (folio_test_workingset(folio))
594 folio_set_workingset(newfolio);
595 if (folio_test_checked(folio))
596 folio_set_checked(newfolio);
597 /*
598 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
599 * migration entries. We can still have PG_anon_exclusive set on an
600 * effectively unmapped and unreferenced first sub-page of an
601 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
602 */
603 if (folio_test_mappedtodisk(folio))
604 folio_set_mappedtodisk(newfolio);
605
606 trace_android_vh_look_around_migrate_folio(folio, newfolio);
607
608 /* Move dirty on pages not done by folio_migrate_mapping() */
609 if (folio_test_dirty(folio))
610 folio_set_dirty(newfolio);
611
612 if (folio_test_young(folio))
613 folio_set_young(newfolio);
614 if (folio_test_idle(folio))
615 folio_set_idle(newfolio);
616
617 /*
618 * Copy NUMA information to the new page, to prevent over-eager
619 * future migrations of this same page.
620 */
621 cpupid = page_cpupid_xchg_last(&folio->page, -1);
622 /*
623 * In memory tiering mode, when migrating between slow and fast
624 * memory nodes, reset cpupid, because it is used to record the
625 * page access time on slow memory nodes.
626 */
627 if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
628 bool f_toptier = node_is_toptier(page_to_nid(&folio->page));
629 bool t_toptier = node_is_toptier(page_to_nid(&newfolio->page));
630
631 if (f_toptier != t_toptier)
632 cpupid = -1;
633 }
634 page_cpupid_xchg_last(&newfolio->page, cpupid);
635
636 folio_migrate_ksm(newfolio, folio);
637 /*
638 * Please do not reorder this without considering how mm/ksm.c's
639 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
640 */
641 if (folio_test_swapcache(folio))
642 folio_clear_swapcache(folio);
643 folio_clear_private(folio);
644
645 /* page->private contains hugetlb specific flags */
646 if (!folio_test_hugetlb(folio))
647 folio->private = NULL;
648
649 /*
650 * If any waiters have accumulated on the new page then
651 * wake them up.
652 */
653 if (folio_test_writeback(newfolio))
654 folio_end_writeback(newfolio);
655
656 /*
657 * PG_readahead shares the same bit with PG_reclaim. The
658 * folio_end_writeback() above may clear PG_readahead mistakenly,
659 * so set the bit after that.
660 */
661 if (folio_test_readahead(folio))
662 folio_set_readahead(newfolio);
663
664 folio_copy_owner(newfolio, folio);
665
666 if (!folio_test_hugetlb(folio))
667 mem_cgroup_migrate(folio, newfolio);
668 }
669 EXPORT_SYMBOL(folio_migrate_flags);
670
671 void folio_migrate_copy(struct folio *newfolio, struct folio *folio)
672 {
673 folio_copy(newfolio, folio);
674 folio_migrate_flags(newfolio, folio);
675 }
676 EXPORT_SYMBOL(folio_migrate_copy);
677
678 /************************************************************
679 * Migration functions
680 ***********************************************************/
681
682 int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
683 struct folio *src, enum migrate_mode mode, int extra_count)
684 {
685 int rc;
686
687 BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
688
689 rc = folio_migrate_mapping(mapping, dst, src, extra_count);
690
691 if (rc != MIGRATEPAGE_SUCCESS)
692 return rc;
693
694 if (mode != MIGRATE_SYNC_NO_COPY)
695 folio_migrate_copy(dst, src);
696 else
697 folio_migrate_flags(dst, src);
698 return MIGRATEPAGE_SUCCESS;
699 }
700
701 /**
702 * migrate_folio() - Simple folio migration.
703 * @mapping: The address_space containing the folio.
704 * @dst: The folio to migrate the data to.
705 * @src: The folio containing the current data.
706 * @mode: How to migrate the page.
707 *
708 * Common logic to directly migrate a single LRU folio suitable for
709 * folios that do not use PagePrivate/PagePrivate2.
710 *
711 * Folios are locked upon entry and exit.
712 */
713 int migrate_folio(struct address_space *mapping, struct folio *dst,
714 struct folio *src, enum migrate_mode mode)
715 {
716 return migrate_folio_extra(mapping, dst, src, mode, 0);
717 }
718 EXPORT_SYMBOL(migrate_folio);
719
720 #ifdef CONFIG_BUFFER_HEAD
721 /* Returns true if all buffers are successfully locked */
722 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
723 enum migrate_mode mode)
724 {
725 struct buffer_head *bh = head;
726 struct buffer_head *failed_bh;
727
728 do {
729 if (!trylock_buffer(bh)) {
730 if (mode == MIGRATE_ASYNC)
731 goto unlock;
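/*
 * MIGRATE_SYNC_LIGHT is willing to wait for a transiently locked
 * buffer, but not for one that is still under I/O (not yet uptodate).
 */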
732 if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
733 goto unlock;
734 lock_buffer(bh);
735 }
736
737 bh = bh->b_this_page;
738 } while (bh != head);
739
740 return true;
741
742 unlock:
743 /* We failed to lock the buffer and cannot stall. */
744 failed_bh = bh;
745 bh = head;
746 while (bh != failed_bh) {
747 unlock_buffer(bh);
748 bh = bh->b_this_page;
749 }
750
751 return false;
752 }
753
754 static int __buffer_migrate_folio(struct address_space *mapping,
755 struct folio *dst, struct folio *src, enum migrate_mode mode,
756 bool check_refs)
757 {
758 struct buffer_head *bh, *head;
759 int rc;
760 int expected_count;
761
762 head = folio_buffers(src);
763 if (!head)
764 return migrate_folio(mapping, dst, src, mode);
765
766 /* Check whether page does not have extra refs before we do more work */
767 expected_count = folio_expected_refs(mapping, src);
768 if (folio_ref_count(src) != expected_count)
769 return -EAGAIN;
770
771 if (!buffer_migrate_lock_buffers(head, mode))
772 return -EAGAIN;
773
774 if (check_refs) {
775 bool busy;
776 bool invalidated = false;
777
778 recheck_buffers:
779 busy = false;
780 spin_lock(&mapping->private_lock);
781 bh = head;
782 do {
783 if (atomic_read(&bh->b_count)) {
784 busy = true;
785 break;
786 }
787 bh = bh->b_this_page;
788 } while (bh != head);
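/*
 * An elevated b_count may just be a reference held by the per-CPU
 * buffer_head LRUs; flush them once and re-check before giving up.
 */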
789 if (busy) {
790 if (invalidated) {
791 rc = -EAGAIN;
792 goto unlock_buffers;
793 }
794 spin_unlock(&mapping->private_lock);
795 invalidate_bh_lrus();
796 invalidated = true;
797 goto recheck_buffers;
798 }
799 }
800
801 rc = folio_migrate_mapping(mapping, dst, src, 0);
802 if (rc != MIGRATEPAGE_SUCCESS)
803 goto unlock_buffers;
804
805 folio_attach_private(dst, folio_detach_private(src));
806
807 bh = head;
808 do {
809 folio_set_bh(bh, dst, bh_offset(bh));
810 bh = bh->b_this_page;
811 } while (bh != head);
812
813 if (mode != MIGRATE_SYNC_NO_COPY)
814 folio_migrate_copy(dst, src);
815 else
816 folio_migrate_flags(dst, src);
817
818 rc = MIGRATEPAGE_SUCCESS;
819 unlock_buffers:
820 if (check_refs)
821 spin_unlock(&mapping->private_lock);
822 bh = head;
823 do {
824 unlock_buffer(bh);
825 bh = bh->b_this_page;
826 } while (bh != head);
827
828 return rc;
829 }
830
831 /**
832 * buffer_migrate_folio() - Migration function for folios with buffers.
833 * @mapping: The address space containing @src.
834 * @dst: The folio to migrate to.
835 * @src: The folio to migrate from.
836 * @mode: How to migrate the folio.
837 *
838 * This function can only be used if the underlying filesystem guarantees
839 * that no other references to @src exist. For example attached buffer
840 * heads are accessed only under the folio lock. If your filesystem cannot
841 * provide this guarantee, buffer_migrate_folio_norefs() may be more
842 * appropriate.
843 *
844 * Return: 0 on success or a negative errno on failure.
845 */
846 int buffer_migrate_folio(struct address_space *mapping,
847 struct folio *dst, struct folio *src, enum migrate_mode mode)
848 {
849 return __buffer_migrate_folio(mapping, dst, src, mode, false);
850 }
851 EXPORT_SYMBOL(buffer_migrate_folio);
852
853 /**
854 * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
855 * @mapping: The address space containing @src.
856 * @dst: The folio to migrate to.
857 * @src: The folio to migrate from.
858 * @mode: How to migrate the folio.
859 *
860 * Like buffer_migrate_folio() except that this variant is more careful
861 * and checks that there are also no buffer head references. This function
862 * is the right one for mappings where buffer heads are directly looked
863 * up and referenced (such as block device mappings).
864 *
865 * Return: 0 on success or a negative errno on failure.
866 */
867 int buffer_migrate_folio_norefs(struct address_space *mapping,
868 struct folio *dst, struct folio *src, enum migrate_mode mode)
869 {
870 return __buffer_migrate_folio(mapping, dst, src, mode, true);
871 }
872 EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
873 #endif /* CONFIG_BUFFER_HEAD */
874
875 int filemap_migrate_folio(struct address_space *mapping,
876 struct folio *dst, struct folio *src, enum migrate_mode mode)
877 {
878 int ret;
879
880 ret = folio_migrate_mapping(mapping, dst, src, 0);
881 if (ret != MIGRATEPAGE_SUCCESS)
882 return ret;
883
884 if (folio_get_private(src))
885 folio_attach_private(dst, folio_detach_private(src));
886
887 if (mode != MIGRATE_SYNC_NO_COPY)
888 folio_migrate_copy(dst, src);
889 else
890 folio_migrate_flags(dst, src);
891 return MIGRATEPAGE_SUCCESS;
892 }
893 EXPORT_SYMBOL_GPL(filemap_migrate_folio);
894
895 /*
896 * Writeback a folio to clean the dirty state
897 */
898 static int writeout(struct address_space *mapping, struct folio *folio)
899 {
900 struct writeback_control wbc = {
901 .sync_mode = WB_SYNC_NONE,
902 .nr_to_write = 1,
903 .range_start = 0,
904 .range_end = LLONG_MAX,
905 .for_reclaim = 1
906 };
907 int rc;
908
909 if (!mapping->a_ops->writepage)
910 /* No write method for the address space */
911 return -EINVAL;
912
913 if (!folio_clear_dirty_for_io(folio))
914 /* Someone else already triggered a write */
915 return -EAGAIN;
916
917 /*
918 * A dirty folio may imply that the underlying filesystem has
919 * the folio on some queue. So the folio must be clean for
920 * migration. Writeout may mean we lose the lock and the
921 * folio state is no longer what we checked for earlier.
922 * At this point we know that the migration attempt cannot
923 * be successful.
924 */
925 remove_migration_ptes(folio, folio, false);
926
927 rc = mapping->a_ops->writepage(&folio->page, &wbc);
928
929 if (rc != AOP_WRITEPAGE_ACTIVATE)
930 /* unlocked. Relock */
931 folio_lock(folio);
932
933 return (rc < 0) ? -EIO : -EAGAIN;
934 }
935
936 /*
937 * Default handling if a filesystem does not provide a migration function.
938 */
939 static int fallback_migrate_folio(struct address_space *mapping,
940 struct folio *dst, struct folio *src, enum migrate_mode mode)
941 {
942 if (folio_test_dirty(src)) {
943 /* Only writeback folios in full synchronous migration */
944 switch (mode) {
945 case MIGRATE_SYNC:
946 case MIGRATE_SYNC_NO_COPY:
947 break;
948 default:
949 return -EBUSY;
950 }
951 return writeout(mapping, src);
952 }
953
954 /*
955 * Buffers may be managed in a filesystem specific way.
956 * We must have no buffers or drop them.
957 */
958 if (!filemap_release_folio(src, GFP_KERNEL))
959 return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
960
961 return migrate_folio(mapping, dst, src, mode);
962 }
963
964 /*
965 * Move a page to a newly allocated page
966 * The page is locked and all ptes have been successfully removed.
967 *
968 * The new page will have replaced the old page if this function
969 * is successful.
970 *
971 * Return value:
972 * < 0 - error code
973 * MIGRATEPAGE_SUCCESS - success
974 */
975 static int move_to_new_folio(struct folio *dst, struct folio *src,
976 enum migrate_mode mode)
977 {
978 int rc = -EAGAIN;
979 bool is_lru = !__folio_test_movable(src);
980
981 VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
982 VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
983
984 if (likely(is_lru)) {
985 struct address_space *mapping = folio_mapping(src);
986
987 if (!mapping)
988 rc = migrate_folio(mapping, dst, src, mode);
989 else if (mapping->a_ops->migrate_folio)
990 /*
991 * Most folios have a mapping and most filesystems
992 * provide a migrate_folio callback. Anonymous folios
993 * are part of swap space which also has its own
994 * migrate_folio callback. This is the most common path
995 * for page migration.
996 */
997 rc = mapping->a_ops->migrate_folio(mapping, dst, src,
998 mode);
999 else
1000 rc = fallback_migrate_folio(mapping, dst, src, mode);
1001 } else {
1002 const struct movable_operations *mops;
1003
1004 /*
1005 * In case of non-lru page, it could be released after
1006 * isolation step. In that case, we shouldn't try migration.
1007 */
1008 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1009 if (!folio_test_movable(src)) {
1010 rc = MIGRATEPAGE_SUCCESS;
1011 folio_clear_isolated(src);
1012 goto out;
1013 }
1014
1015 mops = folio_movable_ops(src);
1016 rc = mops->migrate_page(&dst->page, &src->page, mode);
1017 WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
1018 !folio_test_isolated(src));
1019 }
1020
1021 /*
1022 * When successful, old pagecache src->mapping must be cleared before
1023 * src is freed; but stats require that PageAnon be left as PageAnon.
1024 */
1025 if (rc == MIGRATEPAGE_SUCCESS) {
1026 if (__folio_test_movable(src)) {
1027 VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1028
1029 /*
1030 * We clear PG_movable under page_lock so any compactor
1031 * cannot try to migrate this page.
1032 */
1033 folio_clear_isolated(src);
1034 }
1035
1036 /*
1037 * Anonymous and movable src->mapping will be cleared by
1038 * free_pages_prepare(), so don't reset it here: keeping it lets
1039 * checks such as PageAnon still work.
1040 */
1041 if (!folio_mapping_flags(src))
1042 src->mapping = NULL;
1043
1044 if (likely(!folio_is_zone_device(dst)))
1045 flush_dcache_folio(dst);
1046 }
1047 out:
1048 return rc;
1049 }
1050
1051 /*
1052 * To record some information during migration, we use unused private
1053 * field of struct folio of the newly allocated destination folio.
1054 * This is safe because nobody is using it except us.
1055 */
1056 enum {
1057 PAGE_WAS_MAPPED = BIT(0),
1058 PAGE_WAS_MLOCKED = BIT(1),
1059 PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
1060 };
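/*
 * These state bits are stashed in the low bits of the anon_vma pointer
 * saved in dst->private; this relies on struct anon_vma pointers being
 * at least 4-byte aligned, so the PAGE_OLD_STATES bits are otherwise zero.
 */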
1061
1062 static void __migrate_folio_record(struct folio *dst,
1063 int old_page_state,
1064 struct anon_vma *anon_vma)
1065 {
1066 dst->private = (void *)anon_vma + old_page_state;
1067 }
1068
1069 static void __migrate_folio_extract(struct folio *dst,
1070 int *old_page_state,
1071 struct anon_vma **anon_vmap)
1072 {
1073 unsigned long private = (unsigned long)dst->private;
1074
1075 *anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
1076 *old_page_state = private & PAGE_OLD_STATES;
1077 dst->private = NULL;
1078 }
1079
1080 /* Restore the source folio to the original state upon failure */
1081 static void migrate_folio_undo_src(struct folio *src,
1082 int page_was_mapped,
1083 struct anon_vma *anon_vma,
1084 bool locked,
1085 struct list_head *ret)
1086 {
1087 if (page_was_mapped)
1088 remove_migration_ptes(src, src, false);
1089 /* Drop an anon_vma reference if we took one */
1090 if (anon_vma)
1091 put_anon_vma(anon_vma);
1092 if (locked)
1093 folio_unlock(src);
1094 if (ret)
1095 list_move_tail(&src->lru, ret);
1096 }
1097
1098 /* Restore the destination folio to the original state upon failure */
1099 static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1100 free_folio_t put_new_folio, unsigned long private)
1101 {
1102 if (locked)
1103 folio_unlock(dst);
1104 if (put_new_folio)
1105 put_new_folio(dst, private);
1106 else
1107 folio_put(dst);
1108 }
1109
1110 /* Cleanup src folio upon migration success */
1111 static void migrate_folio_done(struct folio *src,
1112 enum migrate_reason reason)
1113 {
1114 /*
1115 * Compaction can also migrate non-LRU pages, which are not
1116 * accounted to NR_ISOLATED_*. They can be recognized via
1117 * __folio_test_movable().
1118 */
1119 if (likely(!__folio_test_movable(src)))
1120 mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1121 folio_is_file_lru(src), -folio_nr_pages(src));
1122
1123 if (reason != MR_MEMORY_FAILURE)
1124 /* We release the page in page_handle_poison. */
1125 folio_put(src);
1126 }
1127
1128 /* Obtain the lock on page, remove all ptes. */
1129 static int migrate_folio_unmap(new_folio_t get_new_folio,
1130 free_folio_t put_new_folio, unsigned long private,
1131 struct folio *src, struct folio **dstp, enum migrate_mode mode,
1132 enum migrate_reason reason, struct list_head *ret)
1133 {
1134 struct folio *dst;
1135 int rc = -EAGAIN;
1136 int old_page_state = 0;
1137 struct anon_vma *anon_vma = NULL;
1138 bool is_lru = !__folio_test_movable(src);
1139 bool locked = false;
1140 bool dst_locked = false;
1141
1142 if (folio_ref_count(src) == 1) {
1143 /* Folio was freed from under us. So we are done. */
1144 folio_clear_active(src);
1145 folio_clear_unevictable(src);
1146 /* free_pages_prepare() will clear PG_isolated. */
1147 list_del(&src->lru);
1148 migrate_folio_done(src, reason);
1149 return MIGRATEPAGE_SUCCESS;
1150 }
1151
1152 dst = get_new_folio(src, private);
1153 if (!dst)
1154 return -ENOMEM;
1155 *dstp = dst;
1156
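/*
 * Clear any stale private data; dst->private is reused below to
 * record the old folio state and anon_vma for the move phase.
 */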
1157 dst->private = NULL;
1158
1159 if (!folio_trylock(src)) {
1160 if (mode == MIGRATE_ASYNC)
1161 goto out;
1162
1163 /*
1164 * It's not safe for direct compaction to call lock_page.
1165 * For example, during page readahead pages are added locked
1166 * to the LRU. Later, when the IO completes the pages are
1167 * marked uptodate and unlocked. However, the queueing
1168 * could be merging multiple pages for one bio (e.g.
1169 * mpage_readahead). If an allocation happens for the
1170 * second or third page, the process can end up locking
1171 * the same page twice and deadlocking. Rather than
1172 * trying to be clever about what pages can be locked,
1173 * avoid the use of lock_page for direct compaction
1174 * altogether.
1175 */
1176 if (current->flags & PF_MEMALLOC)
1177 goto out;
1178
1179 /*
1180 * In "light" mode, we can wait for transient locks (eg
1181 * inserting a page into the page table), but it's not
1182 * worth waiting for I/O.
1183 */
1184 if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1185 goto out;
1186
1187 folio_lock(src);
1188 }
1189 locked = true;
1190 if (folio_test_mlocked(src))
1191 old_page_state |= PAGE_WAS_MLOCKED;
1192
1193 if (folio_test_writeback(src)) {
1194 /*
1195 * Only in the case of a full synchronous migration is it
1196 * necessary to wait for PageWriteback. In the async case,
1197 * the retry loop is too short and in the sync-light case,
1198 * the overhead of stalling is too much
1199 */
1200 switch (mode) {
1201 case MIGRATE_SYNC:
1202 case MIGRATE_SYNC_NO_COPY:
1203 break;
1204 default:
1205 rc = -EBUSY;
1206 goto out;
1207 }
1208 folio_wait_writeback(src);
1209 }
1210
1211 /*
1212 * By try_to_migrate(), src->mapcount goes down to 0 here. In that case
1213 * we cannot notice that the anon_vma is freed while we migrate the page.
1214 * This folio_get_anon_vma() delays freeing the anon_vma until the end
1215 * of migration. File cache pages are no problem because of page_lock():
1216 * file caches use write_page() or lock_page() during migration, so
1217 * only anon pages need this care here.
1218 *
1219 * Only folio_get_anon_vma() understands the subtleties of
1220 * getting a hold on an anon_vma from outside one of its mms.
1221 * But if we cannot get anon_vma, then we won't need it anyway,
1222 * because that implies that the anon page is no longer mapped
1223 * (and cannot be remapped so long as we hold the page lock).
1224 */
1225 if (folio_test_anon(src) && !folio_test_ksm(src))
1226 anon_vma = folio_get_anon_vma(src);
1227
1228 /*
1229 * Block others from accessing the new page when we get around to
1230 * establishing additional references. We are usually the only one
1231 * holding a reference to dst at this point. We used to have a BUG
1232 * here if folio_trylock(dst) fails, but would like to allow for
1233 * cases where there might be a race with the previous use of dst.
1234 * This is much like races on refcount of oldpage: just don't BUG().
1235 */
1236 if (unlikely(!folio_trylock(dst)))
1237 goto out;
1238 dst_locked = true;
1239
1240 if (unlikely(!is_lru)) {
1241 __migrate_folio_record(dst, old_page_state, anon_vma);
1242 return MIGRATEPAGE_UNMAP;
1243 }
1244
1245 /*
1246 * Corner case handling:
1247 * 1. When a new swap-cache page is read in, it is added to the LRU
1248 * and treated as swapcache but it has no rmap yet.
1249 * Calling try_to_unmap() against a src->mapping==NULL page will
1250 * trigger a BUG. So handle it here.
1251 * 2. An orphaned page (see truncate_cleanup_page) might have
1252 * fs-private metadata. The page can be picked up due to memory
1253 * offlining. Everywhere else except page reclaim, the page is
1254 * invisible to the vm, so the page can not be migrated. So try to
1255 * free the metadata, so the page can be freed.
1256 */
1257 if (!src->mapping) {
1258 if (folio_test_private(src)) {
1259 try_to_free_buffers(src);
1260 goto out;
1261 }
1262 } else if (folio_mapped(src)) {
1263 /* Establish migration ptes */
1264 VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1265 !folio_test_ksm(src) && !anon_vma, src);
1266 try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1267 old_page_state |= PAGE_WAS_MAPPED;
1268 }
1269
1270 if (!folio_mapped(src)) {
1271 __migrate_folio_record(dst, old_page_state, anon_vma);
1272 return MIGRATEPAGE_UNMAP;
1273 }
1274
1275 out:
1276 /*
1277 * A folio that has not been unmapped will be restored to the
1278 * right list unless we want to retry.
1279 */
1280 if (rc == -EAGAIN)
1281 ret = NULL;
1282
1283 migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1284 anon_vma, locked, ret);
1285 migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1286
1287 return rc;
1288 }
1289
1290 /* Migrate the folio to the newly allocated folio in dst. */
1291 static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1292 struct folio *src, struct folio *dst,
1293 enum migrate_mode mode, enum migrate_reason reason,
1294 struct list_head *ret)
1295 {
1296 int rc;
1297 int old_page_state = 0;
1298 struct anon_vma *anon_vma = NULL;
1299 bool is_lru = !__folio_test_movable(src);
1300 struct list_head *prev;
1301
1302 __migrate_folio_extract(dst, &old_page_state, &anon_vma);
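/*
 * Remember dst's position in the list so it can be put back there
 * if the move fails with -EAGAIN and must be retried.
 */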
1303 prev = dst->lru.prev;
1304 list_del(&dst->lru);
1305
1306 rc = move_to_new_folio(dst, src, mode);
1307 if (rc)
1308 goto out;
1309
1310 if (unlikely(!is_lru))
1311 goto out_unlock_both;
1312
1313 /*
1314 * When successful, push dst to LRU immediately: so that if it
1315 * turns out to be an mlocked page, remove_migration_ptes() will
1316 * automatically build up the correct dst->mlock_count for it.
1317 *
1318 * We would like to do something similar for the old page, when
1319 * unsuccessful, and other cases when a page has been temporarily
1320 * isolated from the unevictable LRU: but this case is the easiest.
1321 */
1322 folio_add_lru(dst);
1323 if (old_page_state & PAGE_WAS_MLOCKED)
1324 lru_add_drain();
1325
1326 if (old_page_state & PAGE_WAS_MAPPED)
1327 remove_migration_ptes(src, dst, false);
1328
1329 out_unlock_both:
1330 folio_unlock(dst);
1331 set_page_owner_migrate_reason(&dst->page, reason);
1332 /*
1333 * If migration was successful, decrease the refcount of dst;
1334 * this will not free the page because the new page owner
1335 * increased the refcount.
1336 */
1337 folio_put(dst);
1338
1339 /*
1340 * A folio that has been migrated has all references removed
1341 * and will be freed.
1342 */
1343 list_del(&src->lru);
1344 /* Drop an anon_vma reference if we took one */
1345 if (anon_vma)
1346 put_anon_vma(anon_vma);
1347 folio_unlock(src);
1348 migrate_folio_done(src, reason);
1349
1350 return rc;
1351 out:
1352 /*
1353 * A folio that has not been migrated will be restored to the
1354 * right list unless we want to retry.
1355 */
1356 if (rc == -EAGAIN) {
1357 list_add(&dst->lru, prev);
1358 __migrate_folio_record(dst, old_page_state, anon_vma);
1359 return rc;
1360 }
1361
1362 migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1363 anon_vma, true, ret);
1364 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1365
1366 return rc;
1367 }
1368
1369 /*
1370 * Counterpart of the unmap-and-move path above for hugepage migration.
1371 *
1372 * This function doesn't wait for the completion of hugepage I/O
1373 * because there is no race between I/O and migration for hugepages.
1374 * Note that currently hugepage I/O occurs only in direct I/O
1375 * where no lock is held and PG_writeback is irrelevant,
1376 * and the writeback status of all subpages is counted in the reference
1377 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1378 * under direct I/O, the reference count of the head page is 512 and a bit more.)
1379 * This means that when we try to migrate a hugepage whose subpages are
1380 * doing direct I/O, some references remain after try_to_unmap() and
1381 * hugepage migration fails without data corruption.
1382 *
1383 * There is also no race when direct I/O is issued on a page under migration,
1384 * because then the pte is replaced with a migration swap entry and the
1385 * direct I/O code will wait in the page fault for migration to complete.
1386 */
1387 static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1388 free_folio_t put_new_folio, unsigned long private,
1389 struct folio *src, int force, enum migrate_mode mode,
1390 int reason, struct list_head *ret)
1391 {
1392 struct folio *dst;
1393 int rc = -EAGAIN;
1394 int page_was_mapped = 0;
1395 struct anon_vma *anon_vma = NULL;
1396 struct address_space *mapping = NULL;
1397
1398 if (folio_ref_count(src) == 1) {
1399 /* page was freed from under us. So we are done. */
1400 folio_putback_active_hugetlb(src);
1401 return MIGRATEPAGE_SUCCESS;
1402 }
1403
1404 dst = get_new_folio(src, private);
1405 if (!dst)
1406 return -ENOMEM;
1407
1408 if (!folio_trylock(src)) {
1409 if (!force)
1410 goto out;
1411 switch (mode) {
1412 case MIGRATE_SYNC:
1413 case MIGRATE_SYNC_NO_COPY:
1414 break;
1415 default:
1416 goto out;
1417 }
1418 folio_lock(src);
1419 }
1420
1421 /*
1422 * Check for pages which are in the process of being freed. Without
1423 * folio_mapping() set, hugetlbfs specific move page routine will not
1424 * be called and we could leak usage counts for subpools.
1425 */
1426 if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1427 rc = -EBUSY;
1428 goto out_unlock;
1429 }
1430
1431 if (folio_test_anon(src))
1432 anon_vma = folio_get_anon_vma(src);
1433
1434 if (unlikely(!folio_trylock(dst)))
1435 goto put_anon;
1436
1437 if (folio_mapped(src)) {
1438 enum ttu_flags ttu = 0;
1439
1440 if (!folio_test_anon(src)) {
1441 /*
1442 * In shared mappings, try_to_unmap could potentially
1443 * call huge_pmd_unshare. Because of this, take
1444 * semaphore in write mode here and set TTU_RMAP_LOCKED
1445 * to let lower levels know we have taken the lock.
1446 */
1447 mapping = hugetlb_page_mapping_lock_write(&src->page);
1448 if (unlikely(!mapping))
1449 goto unlock_put_anon;
1450
1451 ttu = TTU_RMAP_LOCKED;
1452 }
1453
1454 try_to_migrate(src, ttu);
1455 page_was_mapped = 1;
1456
1457 if (ttu & TTU_RMAP_LOCKED)
1458 i_mmap_unlock_write(mapping);
1459 }
1460
1461 if (!folio_mapped(src))
1462 rc = move_to_new_folio(dst, src, mode);
1463
1464 if (page_was_mapped)
1465 remove_migration_ptes(src,
1466 rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
1467
1468 unlock_put_anon:
1469 folio_unlock(dst);
1470
1471 put_anon:
1472 if (anon_vma)
1473 put_anon_vma(anon_vma);
1474
1475 if (rc == MIGRATEPAGE_SUCCESS) {
1476 move_hugetlb_state(src, dst, reason);
1477 put_new_folio = NULL;
1478 }
1479
1480 out_unlock:
1481 folio_unlock(src);
1482 out:
1483 if (rc == MIGRATEPAGE_SUCCESS)
1484 folio_putback_active_hugetlb(src);
1485 else if (rc != -EAGAIN)
1486 list_move_tail(&src->lru, ret);
1487
1488 /*
1489 * If migration was not successful and there's a freeing callback, use
1490 * it. Otherwise, put_page() will drop the reference grabbed during
1491 * isolation.
1492 */
1493 if (put_new_folio)
1494 put_new_folio(dst, private);
1495 else
1496 folio_putback_active_hugetlb(dst);
1497
1498 return rc;
1499 }
1500
1501 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios,
1502 int reason)
1503 {
1504 int rc;
1505
1506 if (!folio_can_split(folio)) {
1507 LIST_HEAD(head);
1508
1509 if (reason != MR_CONTIG_RANGE)
1510 return -EBUSY;
1511
1512 folio_lock(folio);
1513 rc = split_folio_to_list(folio, &head);
1514 folio_unlock(folio);
1515
1516 if (rc > 0) {
1517 putback_movable_pages(&head);
1518 return 0;
1519 }
1520
1521 VM_WARN_ON_ONCE_FOLIO(!rc, folio);
1522 return rc;
1523 }
1524
1525 folio_lock(folio);
1526 rc = split_folio_to_list(folio, split_folios);
1527 folio_unlock(folio);
1528 if (!rc)
1529 list_move_tail(&folio->lru, split_folios);
1530
1531 return rc;
1532 }
1533
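/*
 * Upper bound on the size of one migration batch, in base pages: one
 * PMD-sized folio's worth when THP is enabled, 512 otherwise.
 */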
1534 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1535 #define NR_MAX_BATCHED_MIGRATION HPAGE_PMD_NR
1536 #else
1537 #define NR_MAX_BATCHED_MIGRATION 512
1538 #endif
1539 #define NR_MAX_MIGRATE_PAGES_RETRY 10
1540 #define NR_MAX_MIGRATE_ASYNC_RETRY 3
1541 #define NR_MAX_MIGRATE_SYNC_RETRY \
1542 (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
1543
1544 struct migrate_pages_stats {
1545 int nr_succeeded; /* Normal and large folios migrated successfully, in
1546 units of base pages */
1547 int nr_failed_pages; /* Normal and large folios failed to be migrated, in
1548 units of base pages. Untried folios aren't counted */
1549 int nr_thp_succeeded; /* THP migrated successfully */
1550 int nr_thp_failed; /* THP failed to be migrated */
1551 int nr_thp_split; /* THP split before migrating */
1552 };
1553
1554 /*
1555 * Returns the number of hugetlb folios that were not migrated, or an error
1556 * code, after NR_MAX_MIGRATE_PAGES_RETRY attempts or once no retryable
1557 * hugetlb folios remain (either the list has become empty or no retryable
1558 * hugetlb folios exist any more). It is the caller's responsibility to call
1559 * putback_movable_pages() only if ret != 0.
1560 */
1561 static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1562 free_folio_t put_new_folio, unsigned long private,
1563 enum migrate_mode mode, int reason,
1564 struct migrate_pages_stats *stats,
1565 struct list_head *ret_folios)
1566 {
1567 int retry = 1;
1568 int nr_failed = 0;
1569 int nr_retry_pages = 0;
1570 int pass = 0;
1571 struct folio *folio, *folio2;
1572 int rc, nr_pages;
1573
1574 for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1575 retry = 0;
1576 nr_retry_pages = 0;
1577
1578 list_for_each_entry_safe(folio, folio2, from, lru) {
1579 if (!folio_test_hugetlb(folio))
1580 continue;
1581
1582 nr_pages = folio_nr_pages(folio);
1583
1584 cond_resched();
1585
1586 /*
1587 * Migratability of hugepages depends on the architecture and
1588 * the hugepage size. This check is necessary because some
1589 * callers of hugepage migration, like soft offline and memory
1590 * hotremove, don't walk through page tables or check whether
1591 * the hugepage is pmd-based or not before kicking off migration.
1592 */
1593 if (!hugepage_migration_supported(folio_hstate(folio))) {
1594 nr_failed++;
1595 stats->nr_failed_pages += nr_pages;
1596 list_move_tail(&folio->lru, ret_folios);
1597 continue;
1598 }
1599
1600 rc = unmap_and_move_huge_page(get_new_folio,
1601 put_new_folio, private,
1602 folio, pass > 2, mode,
1603 reason, ret_folios);
1604 /*
1605 * The rules are:
1606 * Success: hugetlb folio will be put back
1607 * -EAGAIN: stay on the from list
1608 * -ENOMEM: stay on the from list
1609 * Other errno: put on ret_folios list
1610 */
1611 switch(rc) {
1612 case -ENOMEM:
1613 /*
1614 * When memory is low, don't bother to try to migrate
1615 * other folios, just exit.
1616 */
1617 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1618 return -ENOMEM;
1619 case -EAGAIN:
1620 retry++;
1621 nr_retry_pages += nr_pages;
1622 break;
1623 case MIGRATEPAGE_SUCCESS:
1624 stats->nr_succeeded += nr_pages;
1625 break;
1626 default:
1627 /*
1628 * Permanent failure (-EBUSY, etc.):
1629 * unlike -EAGAIN case, the failed folio is
1630 * removed from migration folio list and not
1631 * retried in the next outer loop.
1632 */
1633 nr_failed++;
1634 stats->nr_failed_pages += nr_pages;
1635 break;
1636 }
1637 }
1638 }
1639 /*
1640 * nr_failed is number of hugetlb folios failed to be migrated. After
1641 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1642 * folios as failed.
1643 */
1644 nr_failed += retry;
1645 stats->nr_failed_pages += nr_retry_pages;
1646
1647 return nr_failed;
1648 }
1649
1650 /*
1651 * migrate_pages_batch() first unmaps as many folios from the from list as
1652 * possible, then moves the unmapped folios.
1653 *
1654 * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting on a
1655 * lock or bit while we have locked more than one folio, which may cause a
1656 * deadlock (e.g., for the loop device). So, if mode != MIGRATE_ASYNC, the
1657 * length of the from list must be <= 1.
1658 */
1659 static int migrate_pages_batch(struct list_head *from,
1660 new_folio_t get_new_folio, free_folio_t put_new_folio,
1661 unsigned long private, enum migrate_mode mode, int reason,
1662 struct list_head *ret_folios, struct list_head *split_folios,
1663 struct migrate_pages_stats *stats, int nr_pass)
1664 {
1665 int retry = 1;
1666 int thp_retry = 1;
1667 int nr_failed = 0;
1668 int nr_retry_pages = 0;
1669 int pass = 0;
1670 bool is_thp = false;
1671 struct folio *folio, *folio2, *dst = NULL, *dst2;
1672 int rc, rc_saved = 0, nr_pages;
1673 LIST_HEAD(unmap_folios);
1674 LIST_HEAD(dst_folios);
1675 bool nosplit = (reason == MR_NUMA_MISPLACED);
1676
1677 VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1678 !list_empty(from) && !list_is_singular(from));
1679
1680 for (pass = 0; pass < nr_pass && retry; pass++) {
1681 retry = 0;
1682 thp_retry = 0;
1683 nr_retry_pages = 0;
1684
1685 list_for_each_entry_safe(folio, folio2, from, lru) {
1686 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1687 nr_pages = folio_nr_pages(folio);
1688
1689 cond_resched();
1690
1691 /*
1692 * Large folio migration might be unsupported or
1693 * the allocation might fail, so we should retry
1694 * on the same folio with the large folio split
1695 * into normal folios.
1696 *
1697 * Split folios are put in split_folios, and
1698 * we will migrate them after the rest of the
1699 * list is processed.
1700 */
1701 if (!thp_migration_supported() && is_thp) {
1702 nr_failed++;
1703 stats->nr_thp_failed++;
1704 if (!try_split_folio(folio, split_folios, reason)) {
1705 stats->nr_thp_split++;
1706 continue;
1707 }
1708 stats->nr_failed_pages += nr_pages;
1709 list_move_tail(&folio->lru, ret_folios);
1710 continue;
1711 }
1712
1713 rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1714 private, folio, &dst, mode, reason,
1715 ret_folios);
1716 /*
1717 * The rules are:
1718 * Success: folio will be freed
1719 * Unmap: folio will be put on unmap_folios list,
1720 * dst folio put on dst_folios list
1721 * -EAGAIN: stay on the from list
1722 * -ENOMEM: stay on the from list
1723 * Other errno: put on ret_folios list
1724 */
1725 switch(rc) {
1726 case -ENOMEM:
1727 /*
1728 * When memory is low, don't bother to try to migrate
1729 * other folios, move unmapped folios, then exit.
1730 */
1731 nr_failed++;
1732 stats->nr_thp_failed += is_thp;
1733 /* Large folio NUMA faulting doesn't split to retry. */
1734 if (folio_test_large(folio) && !nosplit) {
1735 int ret = try_split_folio(folio, split_folios, reason);
1736
1737 if (!ret) {
1738 stats->nr_thp_split += is_thp;
1739 break;
1740 } else if (reason == MR_LONGTERM_PIN &&
1741 ret == -EAGAIN) {
1742 /*
1743 * Try again to split large folio to
1744 * mitigate the failure of longterm pinning.
1745 */
1746 retry++;
1747 thp_retry += is_thp;
1748 nr_retry_pages += nr_pages;
1749 /* Undo duplicated failure counting. */
1750 nr_failed--;
1751 stats->nr_thp_failed -= is_thp;
1752 break;
1753 }
1754 }
1755
1756 stats->nr_failed_pages += nr_pages + nr_retry_pages;
1757 /* nr_failed isn't updated for the folios that were never tried */
1758 stats->nr_thp_failed += thp_retry;
1759 rc_saved = rc;
1760 if (list_empty(&unmap_folios))
1761 goto out;
1762 else
1763 goto move;
1764 case -EAGAIN:
1765 retry++;
1766 thp_retry += is_thp;
1767 nr_retry_pages += nr_pages;
1768 break;
1769 case MIGRATEPAGE_SUCCESS:
1770 stats->nr_succeeded += nr_pages;
1771 stats->nr_thp_succeeded += is_thp;
1772 break;
1773 case MIGRATEPAGE_UNMAP:
1774 list_move_tail(&folio->lru, &unmap_folios);
1775 list_add_tail(&dst->lru, &dst_folios);
1776 break;
1777 default:
1778 /*
1779 * Permanent failure (-EBUSY, etc.):
1780 * unlike -EAGAIN case, the failed folio is
1781 * removed from migration folio list and not
1782 * retried in the next outer loop.
1783 */
1784 nr_failed++;
1785 stats->nr_thp_failed += is_thp;
1786 stats->nr_failed_pages += nr_pages;
1787 break;
1788 }
1789 }
1790 }
1791 nr_failed += retry;
1792 stats->nr_thp_failed += thp_retry;
1793 stats->nr_failed_pages += nr_retry_pages;
1794 move:
1795 /* Flush TLBs for all unmapped folios */
1796 try_to_unmap_flush();
1797
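	/*
	 * Second phase: move each unmapped folio into its destination,
	 * again retrying -EAGAIN failures up to nr_pass times. unmap_folios
	 * and dst_folios are walked in lockstep, so dst always pairs with
	 * the folio currently being moved.
	 */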
1798 retry = 1;
1799 for (pass = 0; pass < nr_pass && retry; pass++) {
1800 retry = 0;
1801 thp_retry = 0;
1802 nr_retry_pages = 0;
1803
1804 dst = list_first_entry(&dst_folios, struct folio, lru);
1805 dst2 = list_next_entry(dst, lru);
1806 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1807 is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1808 nr_pages = folio_nr_pages(folio);
1809
1810 cond_resched();
1811
1812 rc = migrate_folio_move(put_new_folio, private,
1813 folio, dst, mode,
1814 reason, ret_folios);
1815 /*
1816 * The rules are:
1817 * Success: folio will be freed
1818 * -EAGAIN: stay on the unmap_folios list
1819 * Other errno: put on ret_folios list
1820 */
1821 switch(rc) {
1822 case -EAGAIN:
1823 retry++;
1824 thp_retry += is_thp;
1825 nr_retry_pages += nr_pages;
1826 break;
1827 case MIGRATEPAGE_SUCCESS:
1828 stats->nr_succeeded += nr_pages;
1829 stats->nr_thp_succeeded += is_thp;
1830 break;
1831 default:
1832 nr_failed++;
1833 stats->nr_thp_failed += is_thp;
1834 stats->nr_failed_pages += nr_pages;
1835 break;
1836 }
1837 dst = dst2;
1838 dst2 = list_next_entry(dst, lru);
1839 }
1840 }
1841 nr_failed += retry;
1842 stats->nr_thp_failed += thp_retry;
1843 stats->nr_failed_pages += nr_retry_pages;
1844
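	/*
	 * Return the error saved from the unmap phase (e.g. -ENOMEM) if we
	 * bailed out early, otherwise the number of folios that could not
	 * be migrated.
	 */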
1845 rc = rc_saved ? : nr_failed;
1846 out:
1847 /* Cleanup remaining folios */
1848 dst = list_first_entry(&dst_folios, struct folio, lru);
1849 dst2 = list_next_entry(dst, lru);
1850 list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1851 int old_page_state = 0;
1852 struct anon_vma *anon_vma = NULL;
1853
1854 __migrate_folio_extract(dst, &old_page_state, &anon_vma);
1855 migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1856 anon_vma, true, ret_folios);
1857 list_del(&dst->lru);
1858 migrate_folio_undo_dst(dst, true, put_new_folio, private);
1859 dst = dst2;
1860 dst2 = list_next_entry(dst, lru);
1861 }
1862
1863 return rc;
1864 }
1865
1866 static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1867 free_folio_t put_new_folio, unsigned long private,
1868 enum migrate_mode mode, int reason,
1869 struct list_head *ret_folios, struct list_head *split_folios,
1870 struct migrate_pages_stats *stats)
1871 {
1872 int rc, nr_failed = 0;
1873 LIST_HEAD(folios);
1874 struct migrate_pages_stats astats;
1875
1876 memset(&astats, 0, sizeof(astats));
1877 /* Try to migrate the folios in batch with MIGRATE_ASYNC mode first */
1878 rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1879 reason, &folios, split_folios, &astats,
1880 NR_MAX_MIGRATE_ASYNC_RETRY);
1881 stats->nr_succeeded += astats.nr_succeeded;
1882 stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1883 stats->nr_thp_split += astats.nr_thp_split;
1884 if (rc < 0) {
1885 stats->nr_failed_pages += astats.nr_failed_pages;
1886 stats->nr_thp_failed += astats.nr_thp_failed;
1887 list_splice_tail(&folios, ret_folios);
1888 return rc;
1889 }
1890 stats->nr_thp_failed += astats.nr_thp_split;
1891 nr_failed += astats.nr_thp_split;
1892 /*
1893  * Fall back to migrating all failed folios one by one synchronously. All
1894  * failed folios except split THPs will be retried, so their failure
1895  * isn't counted.
1896 */
1897 list_splice_tail_init(&folios, from);
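	/*
	 * Move one folio at a time onto a private list before each batch
	 * call: in non-MIGRATE_ASYNC mode migrate_pages_batch() only
	 * accepts a single folio (see the comment above it), so the
	 * synchronous fallback is effectively one-by-one.
	 */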
1898 while (!list_empty(from)) {
1899 list_move(from->next, &folios);
1900 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1901 private, mode, reason, ret_folios,
1902 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1903 list_splice_tail_init(&folios, ret_folios);
1904 if (rc < 0)
1905 return rc;
1906 nr_failed += rc;
1907 }
1908
1909 return nr_failed;
1910 }
1911
1912 /*
1913  * migrate_pages - migrate the folios specified in a list to the free folios
1914  * supplied as the targets for the page migration
1915 *
1916 * @from: The list of folios to be migrated.
1917 * @get_new_folio: The function used to allocate free folios to be used
1918 * as the target of the folio migration.
1919 * @put_new_folio: The function used to free target folios if migration
1920 * fails, or NULL if no special handling is necessary.
1921 * @private: Private data to be passed on to get_new_folio()
1922 * @mode: The migration mode that specifies the constraints for
1923 * folio migration, if any.
1924 * @reason: The reason for folio migration.
1925 * @ret_succeeded: Set to the number of folios migrated successfully if
1926 * the caller passes a non-NULL pointer.
1927 *
1928 * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no folios
1929 * are movable any more because the list has become empty or no retryable folios
1930  * exist any more. It is the caller's responsibility to call putback_movable_pages()
1931 * only if ret != 0.
1932 *
1933  * Returns the number of {normal folios, large folios, hugetlb folios} that
1934  * were not migrated, or an error code. Each split large folio is counted as
1935  * one non-migrated large folio, no matter how many of its split folios are
1936  * migrated successfully.
1937 */
1938 int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
1939 free_folio_t put_new_folio, unsigned long private,
1940 enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1941 {
1942 int rc, rc_gather;
1943 int nr_pages;
1944 struct folio *folio, *folio2;
1945 LIST_HEAD(folios);
1946 LIST_HEAD(ret_folios);
1947 LIST_HEAD(split_folios);
1948 struct migrate_pages_stats stats;
1949
1950 trace_mm_migrate_pages_start(mode, reason);
1951
1952 memset(&stats, 0, sizeof(stats));
1953
1954 rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
1955 mode, reason, &stats, &ret_folios);
1956 if (rc_gather < 0)
1957 goto out;
1958
1959 again:
1960 nr_pages = 0;
1961 list_for_each_entry_safe(folio, folio2, from, lru) {
1962 /* Retried hugetlb folios will be kept on the list */
1963 if (folio_test_hugetlb(folio)) {
1964 list_move_tail(&folio->lru, &ret_folios);
1965 continue;
1966 }
1967
1968 nr_pages += folio_nr_pages(folio);
1969 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1970 break;
1971 }
1972 if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
1973 list_cut_before(&folios, from, &folio2->lru);
1974 else
1975 list_splice_init(from, &folios);
1976 if (mode == MIGRATE_ASYNC)
1977 rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1978 private, mode, reason, &ret_folios,
1979 &split_folios, &stats,
1980 NR_MAX_MIGRATE_PAGES_RETRY);
1981 else
1982 rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
1983 private, mode, reason, &ret_folios,
1984 &split_folios, &stats);
1985 list_splice_tail_init(&folios, &ret_folios);
1986 if (rc < 0) {
1987 rc_gather = rc;
1988 list_splice_tail(&split_folios, &ret_folios);
1989 goto out;
1990 }
1991 if (!list_empty(&split_folios)) {
1992 /*
1993  * Failure isn't counted since all split folios of a large folio
1994  * are counted as one failure already. And we only try to migrate
1995  * with minimal effort, forcing MIGRATE_ASYNC mode and retrying once.
1996 */
1997 migrate_pages_batch(&split_folios, get_new_folio,
1998 put_new_folio, private, MIGRATE_ASYNC, reason,
1999 &ret_folios, NULL, &stats, 1);
2000 list_splice_tail_init(&split_folios, &ret_folios);
2001 }
2002 rc_gather += rc;
2003 if (!list_empty(from))
2004 goto again;
2005 out:
2006 /*
2007  * Put the permanently failed folios back on the migration list;
2008  * they will be put back on the right list by the caller.
2009 */
2010 list_splice(&ret_folios, from);
2011
2012 /*
2013  * Return 0 if all split folios of the large folios that failed to
2014  * migrate were migrated successfully.
2015 */
2016 if (list_empty(from))
2017 rc_gather = 0;
2018
2019 count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
2020 count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
2021 count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
2022 count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
2023 count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
2024 trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
2025 stats.nr_thp_succeeded, stats.nr_thp_failed,
2026 stats.nr_thp_split, mode, reason);
2027
2028 if (ret_succeeded)
2029 *ret_succeeded = stats.nr_succeeded;
2030
2031 return rc_gather;
2032 }
2033 EXPORT_SYMBOL_GPL(migrate_pages);
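/*
 * A minimal sketch of a typical caller, modeled on do_move_pages_to_node()
 * below; target_nid and folio_list are placeholder names. The isolated
 * folios on folio_list are handed to migrate_pages() together with an
 * allocation callback and, on failure, put back by the caller:
 *
 *	struct migration_target_control mtc = {
 *		.nid = target_nid,
 *		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
 *	};
 *	int err;
 *
 *	err = migrate_pages(&folio_list, alloc_migration_target, NULL,
 *			    (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
 *	if (err)
 *		putback_movable_pages(&folio_list);
 */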
2034
2035 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2036 {
2037 struct migration_target_control *mtc;
2038 gfp_t gfp_mask;
2039 unsigned int order = 0;
2040 int nid;
2041 int zidx;
2042
2043 mtc = (struct migration_target_control *)private;
2044 gfp_mask = mtc->gfp_mask;
2045 nid = mtc->nid;
2046 if (nid == NUMA_NO_NODE)
2047 nid = folio_nid(src);
2048
2049 if (folio_test_hugetlb(src)) {
2050 struct hstate *h = folio_hstate(src);
2051
2052 gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2053 return alloc_hugetlb_folio_nodemask(h, nid,
2054 mtc->nmask, gfp_mask);
2055 }
2056
2057 if (folio_test_large(src)) {
2058 /*
2059 * clear __GFP_RECLAIM to make the migration callback
2060 * consistent with regular THP allocations.
2061 */
2062 gfp_mask &= ~__GFP_RECLAIM;
2063 gfp_mask |= GFP_TRANSHUGE;
2064 order = folio_order(src);
2065 }
2066 zidx = zone_idx(folio_zone(src));
2067 if (zidx > ZONE_NORMAL)
2068 gfp_mask |= __GFP_HIGHMEM;
2069
2070 return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2071 }
2072
2073 #ifdef CONFIG_NUMA
2074
2075 static int store_status(int __user *status, int start, int value, int nr)
2076 {
2077 while (nr-- > 0) {
2078 if (put_user(value, status + start))
2079 return -EFAULT;
2080 start++;
2081 }
2082
2083 return 0;
2084 }
2085
2086 static int do_move_pages_to_node(struct mm_struct *mm,
2087 struct list_head *pagelist, int node)
2088 {
2089 int err;
2090 struct migration_target_control mtc = {
2091 .nid = node,
2092 .gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2093 };
2094
2095 err = migrate_pages(pagelist, alloc_migration_target, NULL,
2096 (unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2097 if (err)
2098 putback_movable_pages(pagelist);
2099 return err;
2100 }
2101
2102 /*
2103 * Resolves the given address to a struct page, isolates it from the LRU and
2104  * puts it on the given pagelist.
2105 * Returns:
2106 * errno - if the page cannot be found/isolated
2107 * 0 - when it doesn't have to be migrated because it is already on the
2108 * target node
2109 * 1 - when it has been queued
2110 */
2111 static int add_page_for_migration(struct mm_struct *mm, const void __user *p,
2112 int node, struct list_head *pagelist, bool migrate_all)
2113 {
2114 struct vm_area_struct *vma;
2115 unsigned long addr;
2116 struct page *page;
2117 struct folio *folio;
2118 int err;
2119
2120 mmap_read_lock(mm);
2121 addr = (unsigned long)untagged_addr_remote(mm, p);
2122
2123 err = -EFAULT;
2124 vma = vma_lookup(mm, addr);
2125 if (!vma || !vma_migratable(vma))
2126 goto out;
2127
2128 /* FOLL_DUMP to ignore special (like zero) pages */
2129 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2130
2131 err = PTR_ERR(page);
2132 if (IS_ERR(page))
2133 goto out;
2134
2135 err = -ENOENT;
2136 if (!page)
2137 goto out;
2138
2139 folio = page_folio(page);
2140 if (folio_is_zone_device(folio))
2141 goto out_putfolio;
2142
2143 err = 0;
2144 if (folio_nid(folio) == node)
2145 goto out_putfolio;
2146
2147 err = -EACCES;
2148 if (page_mapcount(page) > 1 && !migrate_all)
2149 goto out_putfolio;
2150
2151 err = -EBUSY;
2152 if (folio_test_hugetlb(folio)) {
2153 if (isolate_hugetlb(folio, pagelist))
2154 err = 1;
2155 } else {
2156 if (!folio_isolate_lru(folio))
2157 goto out_putfolio;
2158
2159 err = 1;
2160 list_add_tail(&folio->lru, pagelist);
2161 node_stat_mod_folio(folio,
2162 NR_ISOLATED_ANON + folio_is_file_lru(folio),
2163 folio_nr_pages(folio));
2164 }
2165 out_putfolio:
2166 /*
2167 * Either remove the duplicate refcount from folio_isolate_lru()
2168 * or drop the folio ref if it was not isolated.
2169 */
2170 folio_put(folio);
2171 out:
2172 mmap_read_unlock(mm);
2173 return err;
2174 }
2175
2176 static int move_pages_and_store_status(struct mm_struct *mm, int node,
2177 struct list_head *pagelist, int __user *status,
2178 int start, int i, unsigned long nr_pages)
2179 {
2180 int err;
2181
2182 if (list_empty(pagelist))
2183 return 0;
2184
2185 err = do_move_pages_to_node(mm, pagelist, node);
2186 if (err) {
2187 /*
2188  * A positive err is the number of pages that
2189  * failed to migrate. Since we are going to
2190  * abort and return the number of non-migrated
2191  * pages, we need to include the rest of the
2192 * nr_pages that have not been attempted as
2193 * well.
2194 */
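		/*
		 * For example, if two of the pages already queued failed to
		 * migrate and nr_pages - i pages were never attempted, the
		 * call reports 2 + (nr_pages - i) non-migrated pages.
		 */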
2195 if (err > 0)
2196 err += nr_pages - i;
2197 return err;
2198 }
2199 return store_status(status, start, node, i - start);
2200 }
2201
2202 /*
2203  * Migrate an array of page addresses onto an array of nodes and fill
2204 * the corresponding array of status.
2205 */
2206 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2207 unsigned long nr_pages,
2208 const void __user * __user *pages,
2209 const int __user *nodes,
2210 int __user *status, int flags)
2211 {
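	/*
	 * In a compat syscall the user-supplied "pages" array holds 32-bit
	 * pointers, so it is reinterpreted as an array of compat_uptr_t and
	 * each entry is converted with compat_ptr() below.
	 */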
2212 compat_uptr_t __user *compat_pages = (void __user *)pages;
2213 int current_node = NUMA_NO_NODE;
2214 LIST_HEAD(pagelist);
2215 int start, i;
2216 int err = 0, err1;
2217
2218 lru_cache_disable();
2219
2220 for (i = start = 0; i < nr_pages; i++) {
2221 const void __user *p;
2222 int node;
2223
2224 err = -EFAULT;
2225 if (in_compat_syscall()) {
2226 compat_uptr_t cp;
2227
2228 if (get_user(cp, compat_pages + i))
2229 goto out_flush;
2230
2231 p = compat_ptr(cp);
2232 } else {
2233 if (get_user(p, pages + i))
2234 goto out_flush;
2235 }
2236 if (get_user(node, nodes + i))
2237 goto out_flush;
2238
2239 err = -ENODEV;
2240 if (node < 0 || node >= MAX_NUMNODES)
2241 goto out_flush;
2242 if (!node_state(node, N_MEMORY))
2243 goto out_flush;
2244
2245 err = -EACCES;
2246 if (!node_isset(node, task_nodes))
2247 goto out_flush;
2248
2249 if (current_node == NUMA_NO_NODE) {
2250 current_node = node;
2251 start = i;
2252 } else if (node != current_node) {
2253 err = move_pages_and_store_status(mm, current_node,
2254 &pagelist, status, start, i, nr_pages);
2255 if (err)
2256 goto out;
2257 start = i;
2258 current_node = node;
2259 }
2260
2261 /*
2262 * Errors in the page lookup or isolation are not fatal and we simply
2263  * report them via status.
2264 */
2265 err = add_page_for_migration(mm, p, current_node, &pagelist,
2266 flags & MPOL_MF_MOVE_ALL);
2267
2268 if (err > 0) {
2269 /* The page is successfully queued for migration */
2270 continue;
2271 }
2272
2273 /*
2274 * The move_pages() man page does not have an -EEXIST choice, so
2275 * use -EFAULT instead.
2276 */
2277 if (err == -EEXIST)
2278 err = -EFAULT;
2279
2280 /*
2281 * If the page is already on the target node (!err), store the
2282 * node, otherwise, store the err.
2283 */
2284 err = store_status(status, i, err ? : current_node, 1);
2285 if (err)
2286 goto out_flush;
2287
2288 err = move_pages_and_store_status(mm, current_node, &pagelist,
2289 status, start, i, nr_pages);
2290 if (err) {
2291 /* We have accounted for page i */
2292 if (err > 0)
2293 err--;
2294 goto out;
2295 }
2296 current_node = NUMA_NO_NODE;
2297 }
2298 out_flush:
2299 /* Make sure we do not overwrite the existing error */
2300 err1 = move_pages_and_store_status(mm, current_node, &pagelist,
2301 status, start, i, nr_pages);
2302 if (err >= 0)
2303 err = err1;
2304 out:
2305 lru_cache_enable();
2306 return err;
2307 }
2308
2309 /*
2310  * Determine the nodes of an array of pages and store them in an array of status.
2311 */
2312 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2313 const void __user **pages, int *status)
2314 {
2315 unsigned long i;
2316
2317 mmap_read_lock(mm);
2318
2319 for (i = 0; i < nr_pages; i++) {
2320 unsigned long addr = (unsigned long)(*pages);
2321 struct vm_area_struct *vma;
2322 struct page *page;
2323 int err = -EFAULT;
2324
2325 vma = vma_lookup(mm, addr);
2326 if (!vma)
2327 goto set_status;
2328
2329 /* FOLL_DUMP to ignore special (like zero) pages */
2330 page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
2331
2332 err = PTR_ERR(page);
2333 if (IS_ERR(page))
2334 goto set_status;
2335
2336 err = -ENOENT;
2337 if (!page)
2338 goto set_status;
2339
2340 if (!is_zone_device_page(page))
2341 err = page_to_nid(page);
2342
2343 put_page(page);
2344 set_status:
2345 *status = err;
2346
2347 pages++;
2348 status++;
2349 }
2350
2351 mmap_read_unlock(mm);
2352 }
2353
2354 static int get_compat_pages_array(const void __user *chunk_pages[],
2355 const void __user * __user *pages,
2356 unsigned long chunk_nr)
2357 {
2358 compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2359 compat_uptr_t p;
2360 int i;
2361
2362 for (i = 0; i < chunk_nr; i++) {
2363 if (get_user(p, pages32 + i))
2364 return -EFAULT;
2365 chunk_pages[i] = compat_ptr(p);
2366 }
2367
2368 return 0;
2369 }
2370
2371 /*
2372  * Determine the nodes of a user array of pages and store them in
2373 * a user array of status.
2374 */
2375 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2376 const void __user * __user *pages,
2377 int __user *status)
2378 {
2379 #define DO_PAGES_STAT_CHUNK_NR 16UL
2380 const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2381 int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2382
2383 while (nr_pages) {
2384 unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2385
2386 if (in_compat_syscall()) {
2387 if (get_compat_pages_array(chunk_pages, pages,
2388 chunk_nr))
2389 break;
2390 } else {
2391 if (copy_from_user(chunk_pages, pages,
2392 chunk_nr * sizeof(*chunk_pages)))
2393 break;
2394 }
2395
2396 do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2397
2398 if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2399 break;
2400
2401 pages += chunk_nr;
2402 status += chunk_nr;
2403 nr_pages -= chunk_nr;
2404 }
2405 return nr_pages ? -EFAULT : 0;
2406 }
2407
2408 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2409 {
2410 struct task_struct *task;
2411 struct mm_struct *mm;
2412
2413 /*
2414  * There is no need to check if the current process has the right to modify
2415  * the specified process when they are the same.
2416 */
2417 if (!pid) {
2418 mmget(current->mm);
2419 *mem_nodes = cpuset_mems_allowed(current);
2420 return current->mm;
2421 }
2422
2423 /* Find the mm_struct */
2424 rcu_read_lock();
2425 task = find_task_by_vpid(pid);
2426 if (!task) {
2427 rcu_read_unlock();
2428 return ERR_PTR(-ESRCH);
2429 }
2430 get_task_struct(task);
2431
2432 /*
2433 * Check if this process has the right to modify the specified
2434 * process. Use the regular "ptrace_may_access()" checks.
2435 */
2436 if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2437 rcu_read_unlock();
2438 mm = ERR_PTR(-EPERM);
2439 goto out;
2440 }
2441 rcu_read_unlock();
2442
2443 mm = ERR_PTR(security_task_movememory(task));
2444 if (IS_ERR(mm))
2445 goto out;
2446 *mem_nodes = cpuset_mems_allowed(task);
2447 mm = get_task_mm(task);
2448 out:
2449 put_task_struct(task);
2450 if (!mm)
2451 mm = ERR_PTR(-EINVAL);
2452 return mm;
2453 }
2454
2455 /*
2456 * Move a list of pages in the address space of the currently executing
2457 * process.
2458 */
2459 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2460 const void __user * __user *pages,
2461 const int __user *nodes,
2462 int __user *status, int flags)
2463 {
2464 struct mm_struct *mm;
2465 int err;
2466 nodemask_t task_nodes;
2467
2468 /* Check flags */
2469 if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2470 return -EINVAL;
2471
2472 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2473 return -EPERM;
2474
2475 mm = find_mm_struct(pid, &task_nodes);
2476 if (IS_ERR(mm))
2477 return PTR_ERR(mm);
2478
2479 if (nodes)
2480 err = do_pages_move(mm, task_nodes, nr_pages, pages,
2481 nodes, status, flags);
2482 else
2483 err = do_pages_stat(mm, nr_pages, pages, status);
2484
2485 mmput(mm);
2486 return err;
2487 }
2488
2489 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2490 const void __user * __user *, pages,
2491 const int __user *, nodes,
2492 int __user *, status, int, flags)
2493 {
2494 return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2495 }
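/*
 * A rough sketch of the userspace side, using the move_pages(2) wrapper
 * from libnuma's <numaif.h>; "buf" and the target node are placeholders.
 * Passing a NULL "nodes" array turns the call into a pure status query
 * (see do_pages_stat() above):
 *
 *	void *pages[1] = { buf };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
 *		printf("page now on node %d\n", status[0]);
 */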
2496
2497 #ifdef CONFIG_NUMA_BALANCING
2498 /*
2499 * Returns true if this is a safe migration target node for misplaced NUMA
2500  * pages. Currently it only checks the watermarks, which is crude.
2501 */
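/*
 * Here "balanced" means that at least one managed zone on the node can
 * still absorb nr_migrate_pages pages while staying above its high
 * watermark, as checked with zone_watermark_ok() below.
 */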
2502 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2503 unsigned long nr_migrate_pages)
2504 {
2505 int z;
2506
2507 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2508 struct zone *zone = pgdat->node_zones + z;
2509
2510 if (!managed_zone(zone))
2511 continue;
2512
2513 /* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2514 if (!zone_watermark_ok(zone, 0,
2515 high_wmark_pages(zone) +
2516 nr_migrate_pages,
2517 ZONE_MOVABLE, 0))
2518 continue;
2519 return true;
2520 }
2521 return false;
2522 }
2523
2524 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2525 unsigned long data)
2526 {
2527 int nid = (int) data;
2528 int order = folio_order(src);
2529 gfp_t gfp = __GFP_THISNODE;
2530
2531 if (order > 0)
2532 gfp |= GFP_TRANSHUGE_LIGHT;
2533 else {
2534 gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2535 __GFP_NOWARN;
2536 gfp &= ~__GFP_RECLAIM;
2537 }
2538 return __folio_alloc_node(gfp, order, nid);
2539 }
2540
2541 static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
2542 {
2543 int nr_pages = folio_nr_pages(folio);
2544
2545 /* Avoid migrating to a node that is nearly full */
2546 if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2547 int z;
2548
2549 if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2550 return 0;
2551 for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2552 if (managed_zone(pgdat->node_zones + z))
2553 break;
2554 }
2555
2556 /*
2557 * If there are no managed zones, it should not proceed
2558 * further.
2559 */
2560 if (z < 0)
2561 return 0;
2562
2563 wakeup_kswapd(pgdat->node_zones + z, 0,
2564 folio_order(folio), z);
2565 return 0;
2566 }
2567
2568 if (!folio_isolate_lru(folio))
2569 return 0;
2570
2571 node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2572 nr_pages);
2573
2574 /*
2575 * Isolating the folio has taken another reference, so the
2576 * caller's reference can be safely dropped without the folio
2577 * disappearing underneath us during migration.
2578 */
2579 folio_put(folio);
2580 return 1;
2581 }
2582
2583 /*
2584 * Attempt to migrate a misplaced folio to the specified destination
2585 * node. Caller is expected to have an elevated reference count on
2586 * the folio that will be dropped by this function before returning.
2587 */
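/*
 * Returns non-zero if the folio was isolated and migrated to the target
 * node, 0 if it could not be isolated or the migration failed.
 */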
2588 int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
2589 int node)
2590 {
2591 pg_data_t *pgdat = NODE_DATA(node);
2592 int isolated;
2593 int nr_remaining;
2594 unsigned int nr_succeeded;
2595 LIST_HEAD(migratepages);
2596 int nr_pages = folio_nr_pages(folio);
2597
2598 /*
2599 * Don't migrate file folios that are mapped in multiple processes
2600 * with execute permissions as they are probably shared libraries.
2601 *
2602 * See folio_likely_mapped_shared() on possible imprecision when we
2603 * cannot easily detect if a folio is shared.
2604 */
2605 if (folio_likely_mapped_shared(folio) && folio_is_file_lru(folio) &&
2606 (vma->vm_flags & VM_EXEC))
2607 goto out;
2608
2609 /*
2610  * Also do not migrate dirty folios, as not all filesystems can move
2611  * dirty folios in MIGRATE_ASYNC mode and trying would waste cycles.
2612 */
2613 if (folio_is_file_lru(folio) && folio_test_dirty(folio))
2614 goto out;
2615
2616 isolated = numamigrate_isolate_folio(pgdat, folio);
2617 if (!isolated)
2618 goto out;
2619
2620 list_add(&folio->lru, &migratepages);
2621 nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2622 NULL, node, MIGRATE_ASYNC,
2623 MR_NUMA_MISPLACED, &nr_succeeded);
2624 if (nr_remaining) {
2625 if (!list_empty(&migratepages)) {
2626 list_del(&folio->lru);
2627 node_stat_mod_folio(folio, NR_ISOLATED_ANON +
2628 folio_is_file_lru(folio), -nr_pages);
2629 folio_putback_lru(folio);
2630 }
2631 isolated = 0;
2632 }
2633 if (nr_succeeded) {
2634 count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2635 if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
2636 mod_node_page_state(pgdat, PGPROMOTE_SUCCESS,
2637 nr_succeeded);
2638 }
2639 BUG_ON(!list_empty(&migratepages));
2640 return isolated;
2641
2642 out:
2643 folio_put(folio);
2644 return 0;
2645 }
2646 #endif /* CONFIG_NUMA_BALANCING */
2647 #endif /* CONFIG_NUMA */
2648