1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Memory Migration functionality - linux/mm/migrate.c
4  *
5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6  *
7  * Page migration was first developed in the context of the memory hotplug
8  * project. The main authors of the migration code are:
9  *
10  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11  * Hirokazu Takahashi <taka@valinux.co.jp>
12  * Dave Hansen <haveblue@us.ibm.com>
13  * Christoph Lameter
14  */
15 
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/ksm.h>
24 #include <linux/rmap.h>
25 #include <linux/topology.h>
26 #include <linux/cpu.h>
27 #include <linux/cpuset.h>
28 #include <linux/writeback.h>
29 #include <linux/mempolicy.h>
30 #include <linux/vmalloc.h>
31 #include <linux/security.h>
32 #include <linux/backing-dev.h>
33 #include <linux/compaction.h>
34 #include <linux/syscalls.h>
35 #include <linux/compat.h>
36 #include <linux/hugetlb.h>
37 #include <linux/gfp.h>
38 #include <linux/pfn_t.h>
39 #include <linux/page_idle.h>
40 #include <linux/page_owner.h>
41 #include <linux/sched/mm.h>
42 #include <linux/ptrace.h>
43 #include <linux/memory.h>
44 #include <linux/sched/sysctl.h>
45 #include <linux/memory-tiers.h>
46 #include <linux/pagewalk.h>
47 
48 #include <asm/tlbflush.h>
49 
50 #include <trace/events/migrate.h>
51 
52 #undef CREATE_TRACE_POINTS
53 #include <trace/hooks/mm.h>
54 #include <trace/hooks/vmscan.h>
55 
56 #include "internal.h"
57 
58 bool isolate_movable_page(struct page *page, isolate_mode_t mode)
59 {
60 	struct folio *folio = folio_get_nontail_page(page);
61 	const struct movable_operations *mops;
62 
63 	/*
64 	 * Avoid burning cycles with pages that are yet under __free_pages(),
65 	 * or just got freed under us.
66 	 *
67 	 * In case we 'win' a race for a movable page being freed under us and
68 	 * raise its refcount preventing __free_pages() from doing its job,
69 	 * the put_page() at the end of this block will take care of
70 	 * releasing this page, thus avoiding a nasty leakage.
71 	 */
72 	if (!folio)
73 		goto out;
74 
75 	if (unlikely(folio_test_slab(folio)))
76 		goto out_putfolio;
77 	/* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */
78 	smp_rmb();
79 	/*
80 	 * Check movable flag before taking the page lock because
81 	 * we use non-atomic bitops on newly allocated page flags so
82 	 * unconditionally grabbing the lock ruins page's owner side.
83 	 */
84 	if (unlikely(!__folio_test_movable(folio)))
85 		goto out_putfolio;
86 	/* Pairs with smp_wmb() in slab allocation, e.g. SLUB's alloc_slab_page() */
87 	smp_rmb();
88 	if (unlikely(folio_test_slab(folio)))
89 		goto out_putfolio;
90 
91 	/*
92 	 * As movable pages are not isolated from LRU lists, concurrent
93 	 * compaction threads can race against page migration functions
94 	 * as well as race against the release of a page.
95 	 *
96 	 * In order to avoid having an already isolated movable page
97 	 * being (wrongly) re-isolated while it is under migration,
98 	 * or to avoid attempting to isolate pages being released,
99 	 * let's be sure we have the page lock
100 	 * before proceeding with the movable page isolation steps.
101 	 */
102 	if (unlikely(!folio_trylock(folio)))
103 		goto out_putfolio;
104 
105 	if (!folio_test_movable(folio) || folio_test_isolated(folio))
106 		goto out_no_isolated;
107 
108 	mops = folio_movable_ops(folio);
109 	VM_BUG_ON_FOLIO(!mops, folio);
110 
111 	if (!mops->isolate_page(&folio->page, mode))
112 		goto out_no_isolated;
113 
114 	/* Driver shouldn't use the isolated flag */
115 	WARN_ON_ONCE(folio_test_isolated(folio));
116 	folio_set_isolated(folio);
117 	folio_unlock(folio);
118 
119 	return true;
120 
121 out_no_isolated:
122 	folio_unlock(folio);
123 out_putfolio:
124 	folio_put(folio);
125 out:
126 	return false;
127 }
128 
129 static void putback_movable_folio(struct folio *folio)
130 {
131 	const struct movable_operations *mops = folio_movable_ops(folio);
132 
133 	mops->putback_page(&folio->page);
134 	folio_clear_isolated(folio);
135 }
136 
137 /*
138  * Put previously isolated pages back onto the appropriate lists
139  * from where they were once taken off for compaction/migration.
140  *
141  * This function shall be used whenever the isolated pageset has been
142  * built from LRU, balloon, or hugetlbfs pages. See isolate_migratepages_range()
143  * and isolate_hugetlb().
144  */
145 void putback_movable_pages(struct list_head *l)
146 {
147 	struct folio *folio;
148 	struct folio *folio2;
149 
150 	list_for_each_entry_safe(folio, folio2, l, lru) {
151 		if (unlikely(folio_test_hugetlb(folio))) {
152 			folio_putback_active_hugetlb(folio);
153 			continue;
154 		}
155 		list_del(&folio->lru);
156 		/*
157 		 * We isolated a non-lru movable folio, so here we can use
158 		 * __folio_test_movable because an LRU folio's mapping cannot
159 		 * have PAGE_MAPPING_MOVABLE.
160 		 */
161 		if (unlikely(__folio_test_movable(folio))) {
162 			VM_BUG_ON_FOLIO(!folio_test_isolated(folio), folio);
163 			folio_lock(folio);
164 			if (folio_test_movable(folio))
165 				putback_movable_folio(folio);
166 			else
167 				folio_clear_isolated(folio);
168 			folio_unlock(folio);
169 			folio_put(folio);
170 		} else {
171 			node_stat_mod_folio(folio, NR_ISOLATED_ANON +
172 					folio_is_file_lru(folio), -folio_nr_pages(folio));
173 			folio_putback_lru(folio);
174 		}
175 	}
176 }
177 EXPORT_SYMBOL_GPL(putback_movable_pages);
178 
179 /* Must be called with an elevated refcount on the non-hugetlb folio */
180 bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
181 {
182 	bool isolated, lru;
183 
184 	if (folio_test_hugetlb(folio))
185 		return isolate_hugetlb(folio, list);
186 
187 	lru = !__folio_test_movable(folio);
188 	if (lru)
189 		isolated = folio_isolate_lru(folio);
190 	else
191 		isolated = isolate_movable_page(&folio->page,
192 						ISOLATE_UNEVICTABLE);
193 
194 	if (!isolated)
195 		return false;
196 
197 	list_add(&folio->lru, list);
198 	if (lru)
199 		node_stat_add_folio(folio, NR_ISOLATED_ANON +
200 				    folio_is_file_lru(folio));
201 
202 	return true;
203 }
204 
205 static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
206 					  struct folio *folio,
207 					  unsigned long idx)
208 {
209 	struct page *page = folio_page(folio, idx);
210 	bool contains_data;
211 	pte_t newpte;
212 	void *addr;
213 
214 	if (PageCompound(page))
215 		return false;
216 	VM_BUG_ON_PAGE(!PageAnon(page), page);
217 	VM_BUG_ON_PAGE(!PageLocked(page), page);
218 	VM_BUG_ON_PAGE(pte_present(*pvmw->pte), page);
219 
220 	if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
221 	    mm_forbids_zeropage(pvmw->vma->vm_mm))
222 		return false;
223 
224 	/*
225 	 * The pmd entry mapping the old thp was flushed and the pte mapping
226 	 * this subpage is now non-present. If the subpage is only zero-filled
227 	 * then map it to the shared zeropage.
228 	 */
229 	addr = kmap_local_page(page);
230 	contains_data = memchr_inv(addr, 0, PAGE_SIZE);
231 	kunmap_local(addr);
232 
233 	if (contains_data)
234 		return false;
235 
236 	newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
237 					pvmw->vma->vm_page_prot));
238 	set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
239 
240 	dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
241 	return true;
242 }
243 
244 struct rmap_walk_arg {
245 	struct folio *folio;
246 	bool map_unused_to_zeropage;
247 };
248 
249 /*
250  * Restore a potential migration pte to a working pte entry
251  */
252 static bool remove_migration_pte(struct folio *folio,
253 		struct vm_area_struct *vma, unsigned long addr, void *arg)
254 {
255 	struct rmap_walk_arg *rmap_walk_arg = arg;
256 	DEFINE_FOLIO_VMA_WALK(pvmw, rmap_walk_arg->folio, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
257 	bool bypass = false;
258 
259 	trace_android_vh_mm_remove_migration_pte_bypass(folio, vma, addr,
260 							rmap_walk_arg->folio, &bypass);
261 	if (bypass)
262 		return true;
263 
264 	while (page_vma_mapped_walk(&pvmw)) {
265 		rmap_t rmap_flags = RMAP_NONE;
266 		pte_t old_pte;
267 		pte_t pte;
268 		swp_entry_t entry;
269 		struct page *new;
270 		unsigned long idx = 0;
271 
272 		/* pgoff is invalid for ksm pages, but they are never large */
273 		if (folio_test_large(folio) && !folio_test_hugetlb(folio))
274 			idx = linear_page_index(vma, pvmw.address) - pvmw.pgoff;
275 		new = folio_page(folio, idx);
276 
277 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
278 		/* PMD-mapped THP migration entry */
279 		if (!pvmw.pte) {
280 			VM_BUG_ON_FOLIO(folio_test_hugetlb(folio) ||
281 					!folio_test_pmd_mappable(folio), folio);
282 			remove_migration_pmd(&pvmw, new);
283 			continue;
284 		}
285 #endif
286 		if (rmap_walk_arg->map_unused_to_zeropage &&
287 		    try_to_map_unused_to_zeropage(&pvmw, folio, idx))
288 			continue;
289 
290 		folio_get(folio);
291 		pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
292 		old_pte = ptep_get(pvmw.pte);
293 
294 		entry = pte_to_swp_entry(old_pte);
295 		if (!is_migration_entry_young(entry))
296 			pte = pte_mkold(pte);
297 		if (folio_test_dirty(folio) && is_migration_entry_dirty(entry))
298 			pte = pte_mkdirty(pte);
299 		if (pte_swp_soft_dirty(old_pte))
300 			pte = pte_mksoft_dirty(pte);
301 		else
302 			pte = pte_clear_soft_dirty(pte);
303 
304 		if (is_writable_migration_entry(entry))
305 			pte = pte_mkwrite(pte, vma);
306 		else if (pte_swp_uffd_wp(old_pte))
307 			pte = pte_mkuffd_wp(pte);
308 
309 		if (folio_test_anon(folio) && !is_readable_migration_entry(entry))
310 			rmap_flags |= RMAP_EXCLUSIVE;
311 
312 		if (unlikely(is_device_private_page(new))) {
313 			if (pte_write(pte))
314 				entry = make_writable_device_private_entry(
315 							page_to_pfn(new));
316 			else
317 				entry = make_readable_device_private_entry(
318 							page_to_pfn(new));
319 			pte = swp_entry_to_pte(entry);
320 			if (pte_swp_soft_dirty(old_pte))
321 				pte = pte_swp_mksoft_dirty(pte);
322 			if (pte_swp_uffd_wp(old_pte))
323 				pte = pte_swp_mkuffd_wp(pte);
324 		}
325 
326 #ifdef CONFIG_HUGETLB_PAGE
327 		if (folio_test_hugetlb(folio)) {
328 			struct hstate *h = hstate_vma(vma);
329 			unsigned int shift = huge_page_shift(h);
330 			unsigned long psize = huge_page_size(h);
331 
332 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
333 			if (folio_test_anon(folio))
334 				hugetlb_add_anon_rmap(folio, vma, pvmw.address,
335 						      rmap_flags);
336 			else
337 				hugetlb_add_file_rmap(folio);
338 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte,
339 					psize);
340 		} else
341 #endif
342 		{
343 			if (folio_test_anon(folio))
344 				folio_add_anon_rmap_pte(folio, new, vma,
345 							pvmw.address, rmap_flags);
346 			else
347 				folio_add_file_rmap_pte(folio, new, vma);
348 			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
349 		}
350 		if (vma->vm_flags & VM_LOCKED)
351 			mlock_drain_local();
352 
353 		trace_remove_migration_pte(pvmw.address, pte_val(pte),
354 					   compound_order(new));
355 
356 		/* No need to invalidate - it was non-present before */
357 		update_mmu_cache(vma, pvmw.address, pvmw.pte);
358 	}
359 
360 	return true;
361 }
362 
363 /*
364  * Get rid of all migration entries and replace them by
365  * references to the indicated page.
366  */
367 void remove_migration_ptes(struct folio *src, struct folio *dst, int flags)
368 {
369 	struct rmap_walk_arg rmap_walk_arg = {
370 		.folio = src,
371 		.map_unused_to_zeropage = flags & RMP_USE_SHARED_ZEROPAGE,
372 	};
373 
374 	struct rmap_walk_control rwc = {
375 		.rmap_one = remove_migration_pte,
376 		.arg = &rmap_walk_arg,
377 	};
378 
379 	VM_BUG_ON_FOLIO((flags & RMP_USE_SHARED_ZEROPAGE) && (src != dst), src);
380 
381 	if (flags & RMP_LOCKED)
382 		rmap_walk_locked(dst, &rwc);
383 	else
384 		rmap_walk(dst, &rwc);
385 }
386 
387 /*
388  * Something used the pte of a page under migration. We need to
389  * get to the page and wait until migration is finished.
390  * When we return from this function the fault will be retried.
391  */
392 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
393 			  unsigned long address)
394 {
395 	spinlock_t *ptl;
396 	pte_t *ptep;
397 	pte_t pte;
398 	swp_entry_t entry;
399 
400 	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
401 	if (!ptep)
402 		return;
403 
404 	pte = ptep_get(ptep);
405 	pte_unmap(ptep);
406 
407 	if (!is_swap_pte(pte))
408 		goto out;
409 
410 	entry = pte_to_swp_entry(pte);
411 	if (!is_migration_entry(entry))
412 		goto out;
413 
414 	migration_entry_wait_on_locked(entry, ptl);
415 	return;
416 out:
417 	spin_unlock(ptl);
418 }
419 
420 #ifdef CONFIG_HUGETLB_PAGE
421 /*
422  * The vma read lock must be held upon entry. Holding that lock prevents either
423  * the pte or the ptl from being freed.
424  *
425  * This function will release the vma lock before returning.
426  */
427 void migration_entry_wait_huge(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
428 {
429 	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep);
430 	pte_t pte;
431 
432 	hugetlb_vma_assert_locked(vma);
433 	spin_lock(ptl);
434 	pte = huge_ptep_get(vma->vm_mm, addr, ptep);
435 
436 	if (unlikely(!is_hugetlb_entry_migration(pte))) {
437 		spin_unlock(ptl);
438 		hugetlb_vma_unlock_read(vma);
439 	} else {
440 		/*
441 		 * If migration entry existed, safe to release vma lock
442 		 * here because the pgtable page won't be freed without the
443 		 * pgtable lock released.  See comment right above pgtable
444 		 * lock release in migration_entry_wait_on_locked().
445 		 */
446 		hugetlb_vma_unlock_read(vma);
447 		migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl);
448 	}
449 }
450 #endif
451 
452 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
453 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
454 {
455 	spinlock_t *ptl;
456 
457 	ptl = pmd_lock(mm, pmd);
458 	if (!is_pmd_migration_entry(*pmd))
459 		goto unlock;
460 	migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl);
461 	return;
462 unlock:
463 	spin_unlock(ptl);
464 }
465 #endif
466 
467 /*
468  * Replace the folio in the mapping.
469  *
470  * The number of remaining references must be:
471  * 1 for anonymous folios without a mapping
472  * 2 for folios with a mapping
473  * 3 for folios with a mapping and PagePrivate/PagePrivate2 set.
474  */
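/*
 * For illustration (a sketch of how a caller arrives at the count above,
 * using only what this file already does): folio_migrate_mapping() below
 * computes
 *
 *	expected_count = folio_expected_ref_count(folio) + extra_count + 1;
 *
 * i.e. the references the VM expects the folio to hold (page cache,
 * private data, remaining mappings) plus the caller's own reference.
 */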
475 static int __folio_migrate_mapping(struct address_space *mapping,
476 		struct folio *newfolio, struct folio *folio, int expected_count)
477 {
478 	XA_STATE(xas, &mapping->i_pages, folio_index(folio));
479 	struct zone *oldzone, *newzone;
480 	int dirty;
481 	long nr = folio_nr_pages(folio);
482 	long entries, i;
483 
484 	if (!mapping) {
485 		/* Take off deferred split queue while frozen and memcg set */
486 		if (folio_test_large(folio) &&
487 		    folio_test_large_rmappable(folio)) {
488 			if (!folio_ref_freeze(folio, expected_count))
489 				return -EAGAIN;
490 			folio_unqueue_deferred_split(folio);
491 			folio_ref_unfreeze(folio, expected_count);
492 		}
493 
494 		/* No turning back from here */
495 		newfolio->index = folio->index;
496 		newfolio->mapping = folio->mapping;
497 		if (folio_test_anon(folio) && folio_test_large(folio))
498 			mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
499 		if (folio_test_swapbacked(folio))
500 			__folio_set_swapbacked(newfolio);
501 
502 		return MIGRATEPAGE_SUCCESS;
503 	}
504 
505 	oldzone = folio_zone(folio);
506 	newzone = folio_zone(newfolio);
507 
508 	xas_lock_irq(&xas);
509 	if (!folio_ref_freeze(folio, expected_count)) {
510 		xas_unlock_irq(&xas);
511 		return -EAGAIN;
512 	}
513 
514 	/* Take off deferred split queue while frozen and memcg set */
515 	folio_unqueue_deferred_split(folio);
516 
517 	/*
518 	 * Now we know that no one else is looking at the folio:
519 	 * no turning back from here.
520 	 */
521 	newfolio->index = folio->index;
522 	newfolio->mapping = folio->mapping;
523 	if (folio_test_anon(folio) && folio_test_large(folio))
524 		mod_mthp_stat(folio_order(folio), MTHP_STAT_NR_ANON, 1);
525 	folio_ref_add(newfolio, nr); /* add cache reference */
526 	if (folio_test_swapbacked(folio))
527 		__folio_set_swapbacked(newfolio);
528 	if (folio_test_swapcache(folio)) {
529 		folio_set_swapcache(newfolio);
530 		newfolio->private = folio_get_private(folio);
531 		entries = nr;
532 	} else {
533 		entries = 1;
534 	}
535 
536 	/* Move dirty while folio refs frozen and newfolio not yet exposed */
537 	dirty = folio_test_dirty(folio);
538 	if (dirty) {
539 		folio_clear_dirty(folio);
540 		folio_set_dirty(newfolio);
541 	}
542 
543 	/* Swap cache still stores N entries instead of a high-order entry */
544 	for (i = 0; i < entries; i++) {
545 		xas_store(&xas, newfolio);
546 		xas_next(&xas);
547 	}
548 
549 	/*
550 	 * Drop cache reference from old folio by unfreezing
551 	 * to one less reference.
552 	 * We know this isn't the last reference.
553 	 */
554 	folio_ref_unfreeze(folio, expected_count - nr);
555 
556 	xas_unlock(&xas);
557 	/* Leave irq disabled to prevent preemption while updating stats */
558 
559 	/*
560 	 * If moved to a different zone then also account
561 	 * the folio for that zone. Other VM counters will be
562 	 * taken care of when we establish references to the
563 	 * new folio and drop references to the old folio.
564 	 *
565 	 * Note that anonymous folios are accounted for
566 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
567 	 * are mapped to swap space.
568 	 */
569 	if (newzone != oldzone) {
570 		struct lruvec *old_lruvec, *new_lruvec;
571 		struct mem_cgroup *memcg;
572 
573 		memcg = folio_memcg(folio);
574 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
575 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
576 
577 		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
578 		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
579 		if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) {
580 			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
581 			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
582 
583 			if (folio_test_pmd_mappable(folio)) {
584 				__mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr);
585 				__mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr);
586 			}
587 		}
588 #ifdef CONFIG_SWAP
589 		if (folio_test_swapcache(folio)) {
590 			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
591 			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
592 		}
593 #endif
594 		if (dirty && mapping_can_writeback(mapping)) {
595 			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
596 			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
597 			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
598 			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
599 		}
600 	}
601 	local_irq_enable();
602 
603 	return MIGRATEPAGE_SUCCESS;
604 }
605 
606 int folio_migrate_mapping(struct address_space *mapping,
607 		struct folio *newfolio, struct folio *folio, int extra_count)
608 {
609 	int expected_count = folio_expected_ref_count(folio) + extra_count + 1;
610 
611 	if (folio_ref_count(folio) != expected_count)
612 		return -EAGAIN;
613 
614 	return __folio_migrate_mapping(mapping, newfolio, folio, expected_count);
615 }
616 EXPORT_SYMBOL(folio_migrate_mapping);
617 
618 /*
619  * The expected number of remaining references is the same as that
620  * of folio_migrate_mapping().
621  */
622 int migrate_huge_page_move_mapping(struct address_space *mapping,
623 				   struct folio *dst, struct folio *src)
624 {
625 	XA_STATE(xas, &mapping->i_pages, folio_index(src));
626 	int rc, expected_count = folio_expected_ref_count(src) + 1;
627 
628 	if (folio_ref_count(src) != expected_count)
629 		return -EAGAIN;
630 
631 	rc = folio_mc_copy(dst, src);
632 	if (unlikely(rc))
633 		return rc;
634 
635 	xas_lock_irq(&xas);
636 	if (!folio_ref_freeze(src, expected_count)) {
637 		xas_unlock_irq(&xas);
638 		return -EAGAIN;
639 	}
640 
641 	dst->index = src->index;
642 	dst->mapping = src->mapping;
643 
644 	folio_ref_add(dst, folio_nr_pages(dst));
645 
646 	xas_store(&xas, dst);
647 
648 	folio_ref_unfreeze(src, expected_count - folio_nr_pages(src));
649 
650 	xas_unlock_irq(&xas);
651 
652 	return MIGRATEPAGE_SUCCESS;
653 }
654 
655 /*
656  * Copy the flags and some other ancillary information
657  */
658 void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
659 {
660 	int cpupid;
661 
662 	if (folio_test_referenced(folio))
663 		folio_set_referenced(newfolio);
664 	if (folio_test_uptodate(folio))
665 		folio_mark_uptodate(newfolio);
666 	if (folio_test_clear_active(folio)) {
667 		VM_BUG_ON_FOLIO(folio_test_unevictable(folio), folio);
668 		folio_set_active(newfolio);
669 	} else if (folio_test_clear_unevictable(folio))
670 		folio_set_unevictable(newfolio);
671 	if (folio_test_workingset(folio))
672 		folio_set_workingset(newfolio);
673 	if (folio_test_checked(folio))
674 		folio_set_checked(newfolio);
675 	/*
676 	 * PG_anon_exclusive (-> PG_mappedtodisk) is always migrated via
677 	 * migration entries. We can still have PG_anon_exclusive set on an
678 	 * effectively unmapped and unreferenced first sub-page of an
679 	 * anonymous THP: we can simply copy it here via PG_mappedtodisk.
680 	 */
681 	if (folio_test_mappedtodisk(folio))
682 		folio_set_mappedtodisk(newfolio);
683 
684 	trace_android_vh_look_around_migrate_folio(folio, newfolio);
685 
686 	/* Move dirty on pages not done by folio_migrate_mapping() */
687 	if (folio_test_dirty(folio))
688 		folio_set_dirty(newfolio);
689 
690 	if (folio_test_young(folio))
691 		folio_set_young(newfolio);
692 	if (folio_test_idle(folio))
693 		folio_set_idle(newfolio);
694 
695 	folio_migrate_refs(newfolio, folio);
696 	/*
697 	 * Copy NUMA information to the new page, to prevent over-eager
698 	 * future migrations of this same page.
699 	 */
700 	cpupid = folio_xchg_last_cpupid(folio, -1);
701 	/*
702 	 * For memory tiering mode, when migrating between slow and fast
703 	 * memory nodes, reset cpupid, because it is used to record
704 	 * page access time in the slow memory node.
705 	 */
706 	if (sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING) {
707 		bool f_toptier = node_is_toptier(folio_nid(folio));
708 		bool t_toptier = node_is_toptier(folio_nid(newfolio));
709 
710 		if (f_toptier != t_toptier)
711 			cpupid = -1;
712 	}
713 	folio_xchg_last_cpupid(newfolio, cpupid);
714 
715 	folio_migrate_ksm(newfolio, folio);
716 	/*
717 	 * Please do not reorder this without considering how mm/ksm.c's
718 	 * ksm_get_folio() depends upon ksm_migrate_page() and the
719 	 * swapcache flag.
720 	 */
721 	if (folio_test_swapcache(folio))
722 		folio_clear_swapcache(folio);
723 	folio_clear_private(folio);
724 
725 	/* page->private contains hugetlb specific flags */
726 	if (!folio_test_hugetlb(folio))
727 		folio->private = NULL;
728 
729 	/*
730 	 * If any waiters have accumulated on the new page then
731 	 * wake them up.
732 	 */
733 	if (folio_test_writeback(newfolio))
734 		folio_end_writeback(newfolio);
735 
736 	/*
737 	 * PG_readahead shares the same bit with PG_reclaim.  The above
738 	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
739 	 * bit after that.
740 	 */
741 	if (folio_test_readahead(folio))
742 		folio_set_readahead(newfolio);
743 
744 	folio_copy_owner(newfolio, folio);
745 	pgalloc_tag_swap(newfolio, folio);
746 
747 	mem_cgroup_migrate(folio, newfolio);
748 }
749 EXPORT_SYMBOL(folio_migrate_flags);
750 
751 /************************************************************
752  *                    Migration functions
753  ***********************************************************/
754 
755 static int __migrate_folio(struct address_space *mapping, struct folio *dst,
756 			   struct folio *src, void *src_private,
757 			   enum migrate_mode mode)
758 {
759 	int rc, expected_count = folio_expected_ref_count(src) + 1;
760 
761 	/* Check whether src does not have extra refs before we do more work */
762 	if (folio_ref_count(src) != expected_count)
763 		return -EAGAIN;
764 
765 	rc = folio_mc_copy(dst, src);
766 	if (unlikely(rc))
767 		return rc;
768 
769 	rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
770 	if (rc != MIGRATEPAGE_SUCCESS)
771 		return rc;
772 
773 	if (src_private)
774 		folio_attach_private(dst, folio_detach_private(src));
775 
776 	folio_migrate_flags(dst, src);
777 	return MIGRATEPAGE_SUCCESS;
778 }
779 
780 /**
781  * migrate_folio() - Simple folio migration.
782  * @mapping: The address_space containing the folio.
783  * @dst: The folio to migrate the data to.
784  * @src: The folio containing the current data.
785  * @mode: How to migrate the page.
786  *
787  * Common logic to directly migrate a single LRU folio suitable for
788  * folios that do not use PagePrivate/PagePrivate2.
789  *
790  * Folios are locked upon entry and exit.
791  */
792 int migrate_folio(struct address_space *mapping, struct folio *dst,
793 		  struct folio *src, enum migrate_mode mode)
794 {
795 	BUG_ON(folio_test_writeback(src));	/* Writeback must be complete */
796 	return __migrate_folio(mapping, dst, src, NULL, mode);
797 }
798 EXPORT_SYMBOL(migrate_folio);
799 
800 #ifdef CONFIG_BUFFER_HEAD
801 /* Returns true if all buffers are successfully locked */
802 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
803 							enum migrate_mode mode)
804 {
805 	struct buffer_head *bh = head;
806 	struct buffer_head *failed_bh;
807 
808 	do {
809 		if (!trylock_buffer(bh)) {
810 			if (mode == MIGRATE_ASYNC)
811 				goto unlock;
812 			if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh))
813 				goto unlock;
814 			lock_buffer(bh);
815 		}
816 
817 		bh = bh->b_this_page;
818 	} while (bh != head);
819 
820 	return true;
821 
822 unlock:
823 	/* We failed to lock the buffer and cannot stall. */
824 	failed_bh = bh;
825 	bh = head;
826 	while (bh != failed_bh) {
827 		unlock_buffer(bh);
828 		bh = bh->b_this_page;
829 	}
830 
831 	return false;
832 }
833 
834 static int __buffer_migrate_folio(struct address_space *mapping,
835 		struct folio *dst, struct folio *src, enum migrate_mode mode,
836 		bool check_refs)
837 {
838 	struct buffer_head *bh, *head;
839 	int rc;
840 	int expected_count;
841 
842 	head = folio_buffers(src);
843 	if (!head)
844 		return migrate_folio(mapping, dst, src, mode);
845 
846 	/* Check whether page does not have extra refs before we do more work */
847 	expected_count = folio_expected_ref_count(src) + 1;
848 	if (folio_ref_count(src) != expected_count)
849 		return -EAGAIN;
850 
851 	if (!buffer_migrate_lock_buffers(head, mode))
852 		return -EAGAIN;
853 
854 	if (check_refs) {
855 		bool busy;
856 		bool invalidated = false;
857 
858 recheck_buffers:
859 		busy = false;
860 		spin_lock(&mapping->i_private_lock);
861 		bh = head;
862 		do {
863 			if (atomic_read(&bh->b_count)) {
864 				busy = true;
865 				break;
866 			}
867 			bh = bh->b_this_page;
868 		} while (bh != head);
869 		if (busy) {
870 			if (invalidated) {
871 				rc = -EAGAIN;
872 				goto unlock_buffers;
873 			}
874 			spin_unlock(&mapping->i_private_lock);
875 			invalidate_bh_lrus();
876 			invalidated = true;
877 			goto recheck_buffers;
878 		}
879 	}
880 
881 	rc = filemap_migrate_folio(mapping, dst, src, mode);
882 	if (rc != MIGRATEPAGE_SUCCESS)
883 		goto unlock_buffers;
884 
885 	bh = head;
886 	do {
887 		folio_set_bh(bh, dst, bh_offset(bh));
888 		bh = bh->b_this_page;
889 	} while (bh != head);
890 
891 unlock_buffers:
892 	if (check_refs)
893 		spin_unlock(&mapping->i_private_lock);
894 	bh = head;
895 	do {
896 		unlock_buffer(bh);
897 		bh = bh->b_this_page;
898 	} while (bh != head);
899 
900 	return rc;
901 }
902 
903 /**
904  * buffer_migrate_folio() - Migration function for folios with buffers.
905  * @mapping: The address space containing @src.
906  * @dst: The folio to migrate to.
907  * @src: The folio to migrate from.
908  * @mode: How to migrate the folio.
909  *
910  * This function can only be used if the underlying filesystem guarantees
911  * that no other references to @src exist. For example attached buffer
912  * heads are accessed only under the folio lock.  If your filesystem cannot
913  * provide this guarantee, buffer_migrate_folio_norefs() may be more
914  * appropriate.
915  *
916  * Return: 0 on success or a negative errno on failure.
917  */
918 int buffer_migrate_folio(struct address_space *mapping,
919 		struct folio *dst, struct folio *src, enum migrate_mode mode)
920 {
921 	return __buffer_migrate_folio(mapping, dst, src, mode, false);
922 }
923 EXPORT_SYMBOL(buffer_migrate_folio);
924 
925 /**
926  * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
927  * @mapping: The address space containing @src.
928  * @dst: The folio to migrate to.
929  * @src: The folio to migrate from.
930  * @mode: How to migrate the folio.
931  *
932  * Like buffer_migrate_folio() except that this variant is more careful
933  * and checks that there are also no buffer head references. This function
934  * is the right one for mappings where buffer heads are directly looked
935  * up and referenced (such as block device mappings).
936  *
937  * Return: 0 on success or a negative errno on failure.
938  */
939 int buffer_migrate_folio_norefs(struct address_space *mapping,
940 		struct folio *dst, struct folio *src, enum migrate_mode mode)
941 {
942 	return __buffer_migrate_folio(mapping, dst, src, mode, true);
943 }
944 EXPORT_SYMBOL_GPL(buffer_migrate_folio_norefs);
945 #endif /* CONFIG_BUFFER_HEAD */
946 
947 int filemap_migrate_folio(struct address_space *mapping,
948 		struct folio *dst, struct folio *src, enum migrate_mode mode)
949 {
950 	return __migrate_folio(mapping, dst, src, folio_get_private(src), mode);
951 }
952 EXPORT_SYMBOL_GPL(filemap_migrate_folio);
953 
954 /*
955  * Writeback a folio to clean the dirty state
956  */
957 static int writeout(struct address_space *mapping, struct folio *folio)
958 {
959 	struct writeback_control wbc = {
960 		.sync_mode = WB_SYNC_NONE,
961 		.nr_to_write = 1,
962 		.range_start = 0,
963 		.range_end = LLONG_MAX,
964 		.for_reclaim = 1
965 	};
966 	int rc;
967 
968 	if (!mapping->a_ops->writepage)
969 		/* No write method for the address space */
970 		return -EINVAL;
971 
972 	if (!folio_clear_dirty_for_io(folio))
973 		/* Someone else already triggered a write */
974 		return -EAGAIN;
975 
976 	/*
977 	 * A dirty folio may imply that the underlying filesystem has
978 	 * the folio on some queue. So the folio must be clean for
979 	 * migration. Writeout may mean we lose the lock and the
980 	 * folio state is no longer what we checked for earlier.
981 	 * At this point we know that the migration attempt cannot
982 	 * be successful.
983 	 */
984 	remove_migration_ptes(folio, folio, 0);
985 
986 	rc = mapping->a_ops->writepage(&folio->page, &wbc);
987 
988 	if (rc != AOP_WRITEPAGE_ACTIVATE)
989 		/* unlocked. Relock */
990 		folio_lock(folio);
991 
992 	return (rc < 0) ? -EIO : -EAGAIN;
993 }
994 
995 /*
996  * Default handling if a filesystem does not provide a migration function.
997  */
998 static int fallback_migrate_folio(struct address_space *mapping,
999 		struct folio *dst, struct folio *src, enum migrate_mode mode)
1000 {
1001 	if (folio_test_dirty(src)) {
1002 		/* Only writeback folios in full synchronous migration */
1003 		switch (mode) {
1004 		case MIGRATE_SYNC:
1005 			break;
1006 		default:
1007 			return -EBUSY;
1008 		}
1009 		return writeout(mapping, src);
1010 	}
1011 
1012 	/*
1013 	 * Buffers may be managed in a filesystem specific way.
1014 	 * We must have no buffers or drop them.
1015 	 */
1016 	if (!filemap_release_folio(src, GFP_KERNEL))
1017 		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
1018 
1019 	return migrate_folio(mapping, dst, src, mode);
1020 }
1021 
1022 /*
1023  * Move a page to a newly allocated page
1024  * The page is locked and all ptes have been successfully removed.
1025  *
1026  * The new page will have replaced the old page if this function
1027  * is successful.
1028  *
1029  * Return value:
1030  *   < 0 - error code
1031  *  MIGRATEPAGE_SUCCESS - success
1032  */
1033 static int move_to_new_folio(struct folio *dst, struct folio *src,
1034 				enum migrate_mode mode)
1035 {
1036 	int rc = -EAGAIN;
1037 	bool is_lru = !__folio_test_movable(src);
1038 
1039 	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
1040 	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
1041 
1042 	if (likely(is_lru)) {
1043 		struct address_space *mapping = folio_mapping(src);
1044 
1045 		if (!mapping)
1046 			rc = migrate_folio(mapping, dst, src, mode);
1047 		else if (mapping_inaccessible(mapping))
1048 			rc = -EOPNOTSUPP;
1049 		else if (mapping->a_ops->migrate_folio)
1050 			/*
1051 			 * Most folios have a mapping and most filesystems
1052 			 * provide a migrate_folio callback. Anonymous folios
1053 			 * are part of swap space which also has its own
1054 			 * migrate_folio callback. This is the most common path
1055 			 * for page migration.
1056 			 */
1057 			rc = mapping->a_ops->migrate_folio(mapping, dst, src,
1058 								mode);
1059 		else
1060 			rc = fallback_migrate_folio(mapping, dst, src, mode);
1061 	} else {
1062 		const struct movable_operations *mops;
1063 
1064 		/*
1065 		 * In case of non-lru page, it could be released after
1066 		 * isolation step. In that case, we shouldn't try migration.
1067 		 */
1068 		VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1069 		if (!folio_test_movable(src)) {
1070 			rc = MIGRATEPAGE_SUCCESS;
1071 			folio_clear_isolated(src);
1072 			goto out;
1073 		}
1074 
1075 		mops = folio_movable_ops(src);
1076 		rc = mops->migrate_page(&dst->page, &src->page, mode);
1077 		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
1078 				!folio_test_isolated(src));
1079 	}
1080 
1081 	/*
1082 	 * When successful, old pagecache src->mapping must be cleared before
1083 	 * src is freed; but stats require that PageAnon be left as PageAnon.
1084 	 */
1085 	if (rc == MIGRATEPAGE_SUCCESS) {
1086 		if (__folio_test_movable(src)) {
1087 			VM_BUG_ON_FOLIO(!folio_test_isolated(src), src);
1088 
1089 			/*
1090 			 * We clear PG_movable under page_lock so any compactor
1091 			 * cannot try to migrate this page.
1092 			 */
1093 			folio_clear_isolated(src);
1094 		}
1095 
1096 		/*
1097 		 * Anonymous and movable src->mapping will be cleared by
1098 		 * free_pages_prepare, so don't reset it here: that keeps type
1099 		 * checks such as PageAnon working.
1100 		 */
1101 		if (!folio_mapping_flags(src))
1102 			src->mapping = NULL;
1103 
1104 		if (likely(!folio_is_zone_device(dst)))
1105 			flush_dcache_folio(dst);
1106 	}
1107 out:
1108 	return rc;
1109 }
1110 
1111 /*
1112  * To record some information during migration, we use the otherwise
1113  * unused private field of the newly allocated destination folio.
1114  * This is safe because nobody is using it except us.
1115  */
1116 enum {
1117 	PAGE_WAS_MAPPED = BIT(0),
1118 	PAGE_WAS_MLOCKED = BIT(1),
1119 	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
1120 };
1121 
1122 static void __migrate_folio_record(struct folio *dst,
1123 				   int old_page_state,
1124 				   struct anon_vma *anon_vma)
1125 {
1126 	dst->private = (void *)anon_vma + old_page_state;
1127 }
1128 
1129 static void __migrate_folio_extract(struct folio *dst,
1130 				   int *old_page_state,
1131 				   struct anon_vma **anon_vmap)
1132 {
1133 	unsigned long private = (unsigned long)dst->private;
1134 
1135 	*anon_vmap = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
1136 	*old_page_state = private & PAGE_OLD_STATES;
1137 	dst->private = NULL;
1138 }
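/*
 * A brief sketch of the packing used by the two helpers above (this is a
 * restatement of the code, relying on the assumption that anon_vma objects
 * come from a slab cache and are at least word-aligned, so the low bits of
 * the pointer are free to carry the PAGE_OLD_STATES flags):
 *
 *	__migrate_folio_record(dst, PAGE_WAS_MAPPED, anon_vma);
 *	...
 *	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
 *	// old_page_state == PAGE_WAS_MAPPED, anon_vma is the original pointer
 */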
1139 
1140 /* Restore the source folio to the original state upon failure */
1141 static void migrate_folio_undo_src(struct folio *src,
1142 				   int page_was_mapped,
1143 				   struct anon_vma *anon_vma,
1144 				   bool locked,
1145 				   struct list_head *ret)
1146 {
1147 	if (page_was_mapped)
1148 		remove_migration_ptes(src, src, 0);
1149 	/* Drop an anon_vma reference if we took one */
1150 	if (anon_vma)
1151 		put_anon_vma(anon_vma);
1152 	if (locked)
1153 		folio_unlock(src);
1154 	if (ret)
1155 		list_move_tail(&src->lru, ret);
1156 }
1157 
1158 /* Restore the destination folio to the original state upon failure */
1159 static void migrate_folio_undo_dst(struct folio *dst, bool locked,
1160 		free_folio_t put_new_folio, unsigned long private)
1161 {
1162 	if (locked)
1163 		folio_unlock(dst);
1164 	if (put_new_folio)
1165 		put_new_folio(dst, private);
1166 	else
1167 		folio_put(dst);
1168 }
1169 
1170 /* Cleanup src folio upon migration success */
1171 static void migrate_folio_done(struct folio *src,
1172 			       enum migrate_reason reason)
1173 {
1174 	/*
1175 	 * Compaction can also migrate non-LRU pages, which are
1176 	 * not accounted to NR_ISOLATED_*. They can be recognized
1177 	 * via __folio_test_movable().
1178 	 */
1179 	if (likely(!__folio_test_movable(src)) && reason != MR_DEMOTION)
1180 		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
1181 				    folio_is_file_lru(src), -folio_nr_pages(src));
1182 
1183 	if (reason != MR_MEMORY_FAILURE)
1184 		/* We release the page in page_handle_poison. */
1185 		folio_put(src);
1186 }
1187 
1188 /* Obtain the lock on page, remove all ptes. */
1189 static int migrate_folio_unmap(new_folio_t get_new_folio,
1190 		free_folio_t put_new_folio, unsigned long private,
1191 		struct folio *src, struct folio **dstp, enum migrate_mode mode,
1192 		enum migrate_reason reason, struct list_head *ret)
1193 {
1194 	struct folio *dst;
1195 	int rc = -EAGAIN;
1196 	int old_page_state = 0;
1197 	struct anon_vma *anon_vma = NULL;
1198 	bool is_lru = data_race(!__folio_test_movable(src));
1199 	bool locked = false;
1200 	bool dst_locked = false;
1201 
1202 	if (folio_ref_count(src) == 1) {
1203 		/* Folio was freed from under us. So we are done. */
1204 		folio_clear_active(src);
1205 		folio_clear_unevictable(src);
1206 		/* free_pages_prepare() will clear PG_isolated. */
1207 		list_del(&src->lru);
1208 		migrate_folio_done(src, reason);
1209 		return MIGRATEPAGE_SUCCESS;
1210 	}
1211 
1212 	dst = get_new_folio(src, private);
1213 	if (!dst)
1214 		return -ENOMEM;
1215 	*dstp = dst;
1216 
1217 	dst->private = NULL;
1218 
1219 	if (!folio_trylock(src)) {
1220 		if (mode == MIGRATE_ASYNC)
1221 			goto out;
1222 
1223 		/*
1224 		 * It's not safe for direct compaction to call lock_page.
1225 		 * For example, during page readahead pages are added locked
1226 		 * to the LRU. Later, when the IO completes the pages are
1227 		 * marked uptodate and unlocked. However, the queueing
1228 		 * could be merging multiple pages for one bio (e.g.
1229 		 * mpage_readahead). If an allocation happens for the
1230 		 * second or third page, the process can end up locking
1231 		 * the same page twice and deadlocking. Rather than
1232 		 * trying to be clever about what pages can be locked,
1233 		 * avoid the use of lock_page for direct compaction
1234 		 * altogether.
1235 		 */
1236 		if (current->flags & PF_MEMALLOC)
1237 			goto out;
1238 
1239 		/*
1240 		 * In "light" mode, we can wait for transient locks (eg
1241 		 * inserting a page into the page table), but it's not
1242 		 * worth waiting for I/O.
1243 		 */
1244 		if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src))
1245 			goto out;
1246 
1247 		folio_lock(src);
1248 	}
1249 	locked = true;
1250 	if (folio_test_mlocked(src))
1251 		old_page_state |= PAGE_WAS_MLOCKED;
1252 
1253 	if (folio_test_writeback(src)) {
1254 		/*
1255 		 * Only in the case of a full synchronous migration is it
1256 		 * necessary to wait for PageWriteback. In the async case,
1257 		 * the retry loop is too short and in the sync-light case,
1258 		 * the overhead of stalling is too much
1259 		 */
1260 		switch (mode) {
1261 		case MIGRATE_SYNC:
1262 			break;
1263 		default:
1264 			rc = -EBUSY;
1265 			goto out;
1266 		}
1267 		folio_wait_writeback(src);
1268 	}
1269 
1270 	/*
1271 	 * By try_to_migrate(), src->mapcount goes down to 0 here. In this case,
1272 	 * we cannot notice that anon_vma is freed while we migrate a page.
1273 	 * This get_anon_vma() delays freeing anon_vma pointer until the end
1274 	 * of migration. File cache pages are no problem because of page_lock():
1275 	 * file caches may use write_page() or lock_page() in migration, so we
1276 	 * only need to care about anon pages here.
1277 	 *
1278 	 * Only folio_get_anon_vma() understands the subtleties of
1279 	 * getting a hold on an anon_vma from outside one of its mms.
1280 	 * But if we cannot get anon_vma, then we won't need it anyway,
1281 	 * because that implies that the anon page is no longer mapped
1282 	 * (and cannot be remapped so long as we hold the page lock).
1283 	 */
1284 	if (folio_test_anon(src) && !folio_test_ksm(src))
1285 		anon_vma = folio_get_anon_vma(src);
1286 
1287 	/*
1288 	 * Block others from accessing the new page when we get around to
1289 	 * establishing additional references. We are usually the only one
1290 	 * holding a reference to dst at this point. We used to have a BUG
1291 	 * here if folio_trylock(dst) fails, but would like to allow for
1292 	 * cases where there might be a race with the previous use of dst.
1293 	 * This is much like races on refcount of oldpage: just don't BUG().
1294 	 */
1295 	if (unlikely(!folio_trylock(dst)))
1296 		goto out;
1297 	dst_locked = true;
1298 
1299 	if (unlikely(!is_lru)) {
1300 		__migrate_folio_record(dst, old_page_state, anon_vma);
1301 		return MIGRATEPAGE_UNMAP;
1302 	}
1303 
1304 	/*
1305 	 * Corner case handling:
1306 	 * 1. When a new swap-cache page is read into, it is added to the LRU
1307 	 * and treated as swapcache but it has no rmap yet.
1308 	 * Calling try_to_unmap() against a src->mapping==NULL page will
1309 	 * trigger a BUG.  So handle it here.
1310 	 * 2. An orphaned page (see truncate_cleanup_page) might have
1311 	 * fs-private metadata. The page can be picked up due to memory
1312 	 * offlining.  Everywhere else except page reclaim, the page is
1313 	 * invisible to the vm, so the page can not be migrated.  So try to
1314 	 * free the metadata, so the page can be freed.
1315 	 */
1316 	if (!src->mapping) {
1317 		if (folio_test_private(src)) {
1318 			try_to_free_buffers(src);
1319 			goto out;
1320 		}
1321 	} else if (folio_mapped(src)) {
1322 		/* Establish migration ptes */
1323 		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
1324 			       !folio_test_ksm(src) && !anon_vma, src);
1325 		try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0);
1326 		old_page_state |= PAGE_WAS_MAPPED;
1327 	}
1328 
1329 	if (!folio_mapped(src)) {
1330 		__migrate_folio_record(dst, old_page_state, anon_vma);
1331 		return MIGRATEPAGE_UNMAP;
1332 	}
1333 
1334 out:
1335 	/*
1336 	 * A folio that has not been unmapped will be restored to
1337 	 * the right list unless we want to retry.
1338 	 */
1339 	if (rc == -EAGAIN)
1340 		ret = NULL;
1341 
1342 	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1343 			       anon_vma, locked, ret);
1344 	migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private);
1345 
1346 	return rc;
1347 }
1348 
1349 /* Migrate the folio to the newly allocated folio in dst. */
1350 static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
1351 			      struct folio *src, struct folio *dst,
1352 			      enum migrate_mode mode, enum migrate_reason reason,
1353 			      struct list_head *ret)
1354 {
1355 	int rc;
1356 	int old_page_state = 0;
1357 	struct anon_vma *anon_vma = NULL;
1358 	bool is_lru = !__folio_test_movable(src);
1359 	struct list_head *prev;
1360 
1361 	__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1362 	prev = dst->lru.prev;
1363 	list_del(&dst->lru);
1364 
1365 	rc = move_to_new_folio(dst, src, mode);
1366 	if (rc)
1367 		goto out;
1368 
1369 	if (unlikely(!is_lru))
1370 		goto out_unlock_both;
1371 
1372 	/*
1373 	 * When successful, push dst to LRU immediately: so that if it
1374 	 * turns out to be an mlocked page, remove_migration_ptes() will
1375 	 * automatically build up the correct dst->mlock_count for it.
1376 	 *
1377 	 * We would like to do something similar for the old page, when
1378 	 * unsuccessful, and other cases when a page has been temporarily
1379 	 * isolated from the unevictable LRU: but this case is the easiest.
1380 	 */
1381 	folio_add_lru(dst);
1382 	if (old_page_state & PAGE_WAS_MLOCKED)
1383 		lru_add_drain();
1384 
1385 	if (old_page_state & PAGE_WAS_MAPPED)
1386 		remove_migration_ptes(src, dst, 0);
1387 
1388 out_unlock_both:
1389 	folio_unlock(dst);
1390 	set_page_owner_migrate_reason(&dst->page, reason);
1391 	/*
1392 	 * If migration is successful, decrease the refcount of dst,
1393 	 * which will not free the page because the new page owner
1394 	 * increased the refcount.
1395 	 */
1396 	folio_put(dst);
1397 
1398 	/*
1399 	 * A folio that has been migrated has all references removed
1400 	 * and will be freed.
1401 	 */
1402 	list_del(&src->lru);
1403 	/* Drop an anon_vma reference if we took one */
1404 	if (anon_vma)
1405 		put_anon_vma(anon_vma);
1406 	folio_unlock(src);
1407 	migrate_folio_done(src, reason);
1408 
1409 	return rc;
1410 out:
1411 	/*
1412 	 * A folio that has not been migrated will be restored to
1413 	 * the right list unless we want to retry.
1414 	 */
1415 	if (rc == -EAGAIN) {
1416 		list_add(&dst->lru, prev);
1417 		__migrate_folio_record(dst, old_page_state, anon_vma);
1418 		return rc;
1419 	}
1420 
1421 	migrate_folio_undo_src(src, old_page_state & PAGE_WAS_MAPPED,
1422 			       anon_vma, true, ret);
1423 	migrate_folio_undo_dst(dst, true, put_new_folio, private);
1424 
1425 	return rc;
1426 }
1427 
1428 /*
1429  * Counterpart of unmap_and_move_page() for hugepage migration.
1430  *
1431  * This function doesn't wait for the completion of hugepage I/O
1432  * because there is no race between I/O and migration for hugepage.
1433  * Note that currently hugepage I/O occurs only in direct I/O
1434  * where no lock is held and PG_writeback is irrelevant,
1435  * and writeback status of all subpages are counted in the reference
1436  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1437  * under direct I/O, the reference of the head page is 512 and a bit more.)
1438  * This means that when we try to migrate a hugepage whose subpages are
1439  * doing direct I/O, some references remain after try_to_unmap() and
1440  * hugepage migration fails without data corruption.
1441  *
1442  * There is also no race when direct I/O is issued on the page under migration,
1443  * because then pte is replaced with migration swap entry and direct I/O code
1444  * will wait in the page fault for migration to complete.
1445  */
1446 static int unmap_and_move_huge_page(new_folio_t get_new_folio,
1447 		free_folio_t put_new_folio, unsigned long private,
1448 		struct folio *src, int force, enum migrate_mode mode,
1449 		int reason, struct list_head *ret)
1450 {
1451 	struct folio *dst;
1452 	int rc = -EAGAIN;
1453 	int page_was_mapped = 0;
1454 	struct anon_vma *anon_vma = NULL;
1455 	struct address_space *mapping = NULL;
1456 
1457 	if (folio_ref_count(src) == 1) {
1458 		/* page was freed from under us. So we are done. */
1459 		folio_putback_active_hugetlb(src);
1460 		return MIGRATEPAGE_SUCCESS;
1461 	}
1462 
1463 	dst = get_new_folio(src, private);
1464 	if (!dst)
1465 		return -ENOMEM;
1466 
1467 	if (!folio_trylock(src)) {
1468 		if (!force)
1469 			goto out;
1470 		switch (mode) {
1471 		case MIGRATE_SYNC:
1472 			break;
1473 		default:
1474 			goto out;
1475 		}
1476 		folio_lock(src);
1477 	}
1478 
1479 	/*
1480 	 * Check for pages which are in the process of being freed.  Without
1481 	 * folio_mapping() set, hugetlbfs specific move page routine will not
1482 	 * be called and we could leak usage counts for subpools.
1483 	 */
1484 	if (hugetlb_folio_subpool(src) && !folio_mapping(src)) {
1485 		rc = -EBUSY;
1486 		goto out_unlock;
1487 	}
1488 
1489 	if (folio_test_anon(src))
1490 		anon_vma = folio_get_anon_vma(src);
1491 
1492 	if (unlikely(!folio_trylock(dst)))
1493 		goto put_anon;
1494 
1495 	if (folio_mapped(src)) {
1496 		enum ttu_flags ttu = 0;
1497 
1498 		if (!folio_test_anon(src)) {
1499 			/*
1500 			 * In shared mappings, try_to_unmap could potentially
1501 			 * call huge_pmd_unshare.  Because of this, take
1502 			 * semaphore in write mode here and set TTU_RMAP_LOCKED
1503 			 * to let lower levels know we have taken the lock.
1504 			 */
1505 			mapping = hugetlb_folio_mapping_lock_write(src);
1506 			if (unlikely(!mapping))
1507 				goto unlock_put_anon;
1508 
1509 			ttu = TTU_RMAP_LOCKED;
1510 		}
1511 
1512 		try_to_migrate(src, ttu);
1513 		page_was_mapped = 1;
1514 
1515 		if (ttu & TTU_RMAP_LOCKED)
1516 			i_mmap_unlock_write(mapping);
1517 	}
1518 
1519 	if (!folio_mapped(src))
1520 		rc = move_to_new_folio(dst, src, mode);
1521 
1522 	if (page_was_mapped)
1523 		remove_migration_ptes(src,
1524 			rc == MIGRATEPAGE_SUCCESS ? dst : src, 0);
1525 
1526 unlock_put_anon:
1527 	folio_unlock(dst);
1528 
1529 put_anon:
1530 	if (anon_vma)
1531 		put_anon_vma(anon_vma);
1532 
1533 	if (rc == MIGRATEPAGE_SUCCESS) {
1534 		move_hugetlb_state(src, dst, reason);
1535 		put_new_folio = NULL;
1536 	}
1537 
1538 out_unlock:
1539 	folio_unlock(src);
1540 out:
1541 	if (rc == MIGRATEPAGE_SUCCESS)
1542 		folio_putback_active_hugetlb(src);
1543 	else if (rc != -EAGAIN)
1544 		list_move_tail(&src->lru, ret);
1545 
1546 	/*
1547 	 * If migration was not successful and there's a freeing callback, use
1548 	 * it.  Otherwise, put_page() will drop the reference grabbed during
1549 	 * isolation.
1550 	 */
1551 	if (put_new_folio)
1552 		put_new_folio(dst, private);
1553 	else
1554 		folio_putback_active_hugetlb(dst);
1555 
1556 	return rc;
1557 }
1558 
1559 static inline int try_split_folio(struct folio *folio, struct list_head *split_folios,
1560 				  enum migrate_mode mode)
1561 {
1562 	int rc;
1563 	bool bypass = false;
1564 
1565 	trace_android_vh_mm_try_split_folio_bypass(folio, &bypass);
1566 	if (bypass)
1567 		return -EBUSY;
1568 
1569 	if (mode == MIGRATE_ASYNC) {
1570 		if (!folio_trylock(folio))
1571 			return -EAGAIN;
1572 	} else {
1573 		folio_lock(folio);
1574 	}
1575 	rc = split_folio_to_list(folio, split_folios);
1576 	folio_unlock(folio);
1577 	if (!rc)
1578 		list_move_tail(&folio->lru, split_folios);
1579 
1580 	return rc;
1581 }
1582 
1583 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1584 #define NR_MAX_BATCHED_MIGRATION	HPAGE_PMD_NR
1585 #else
1586 #define NR_MAX_BATCHED_MIGRATION	512
1587 #endif
1588 #define NR_MAX_MIGRATE_PAGES_RETRY	10
1589 #define NR_MAX_MIGRATE_ASYNC_RETRY	3
1590 #define NR_MAX_MIGRATE_SYNC_RETRY					\
1591 	(NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY)
1592 
1593 struct migrate_pages_stats {
1594 	int nr_succeeded;	/* Normal and large folios migrated successfully, in
1595 				   units of base pages */
1596 	int nr_failed_pages;	/* Normal and large folios failed to be migrated, in
1597 				   units of base pages.  Untried folios aren't counted */
1598 	int nr_thp_succeeded;	/* THP migrated successfully */
1599 	int nr_thp_failed;	/* THP failed to be migrated */
1600 	int nr_thp_split;	/* THP split before migrating */
1601 	int nr_split;	/* Large folio (including THP) split before migrating */
1602 };
1603 
1604 /*
1605  * Returns the number of hugetlb folios that were not migrated, or an error code
1606  * after NR_MAX_MIGRATE_PAGES_RETRY attempts or if no hugetlb folios are movable
1607  * any more, either because the list has become empty or because no retryable
1608  * hugetlb folios remain. It is the caller's responsibility to call putback_movable_pages()
1609  * only if ret != 0.
1610  */
1611 static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
1612 			    free_folio_t put_new_folio, unsigned long private,
1613 			    enum migrate_mode mode, int reason,
1614 			    struct migrate_pages_stats *stats,
1615 			    struct list_head *ret_folios)
1616 {
1617 	int retry = 1;
1618 	int nr_failed = 0;
1619 	int nr_retry_pages = 0;
1620 	int pass = 0;
1621 	struct folio *folio, *folio2;
1622 	int rc, nr_pages;
1623 
1624 	for (pass = 0; pass < NR_MAX_MIGRATE_PAGES_RETRY && retry; pass++) {
1625 		retry = 0;
1626 		nr_retry_pages = 0;
1627 
1628 		list_for_each_entry_safe(folio, folio2, from, lru) {
1629 			if (!folio_test_hugetlb(folio))
1630 				continue;
1631 
1632 			nr_pages = folio_nr_pages(folio);
1633 
1634 			cond_resched();
1635 
1636 			/*
1637 			 * Migratability of hugepages depends on architectures and
1638 			 * their size.  This check is necessary because some callers
1639 			 * of hugepage migration like soft offline and memory
1640 			 * hotremove don't walk through page tables or check whether
1641 			 * the hugepage is pmd-based or not before kicking migration.
1642 			 */
1643 			if (!hugepage_migration_supported(folio_hstate(folio))) {
1644 				nr_failed++;
1645 				stats->nr_failed_pages += nr_pages;
1646 				list_move_tail(&folio->lru, ret_folios);
1647 				continue;
1648 			}
1649 
1650 			rc = unmap_and_move_huge_page(get_new_folio,
1651 						      put_new_folio, private,
1652 						      folio, pass > 2, mode,
1653 						      reason, ret_folios);
1654 			/*
1655 			 * The rules are:
1656 			 *	Success: hugetlb folio will be put back
1657 			 *	-EAGAIN: stay on the from list
1658 			 *	-ENOMEM: stay on the from list
1659 			 *	Other errno: put on ret_folios list
1660 			 */
1661 			switch(rc) {
1662 			case -ENOMEM:
1663 				/*
1664 				 * When memory is low, don't bother to try to migrate
1665 				 * other folios, just exit.
1666 				 */
1667 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1668 				return -ENOMEM;
1669 			case -EAGAIN:
1670 				retry++;
1671 				nr_retry_pages += nr_pages;
1672 				break;
1673 			case MIGRATEPAGE_SUCCESS:
1674 				stats->nr_succeeded += nr_pages;
1675 				break;
1676 			default:
1677 				/*
1678 				 * Permanent failure (-EBUSY, etc.):
1679 				 * unlike -EAGAIN case, the failed folio is
1680 				 * removed from migration folio list and not
1681 				 * retried in the next outer loop.
1682 				 */
1683 				nr_failed++;
1684 				stats->nr_failed_pages += nr_pages;
1685 				break;
1686 			}
1687 		}
1688 	}
1689 	/*
1690 	 * nr_failed is number of hugetlb folios failed to be migrated.  After
1691 	 * NR_MAX_MIGRATE_PAGES_RETRY attempts, give up and count retried hugetlb
1692 	 * folios as failed.
1693 	 */
1694 	nr_failed += retry;
1695 	stats->nr_failed_pages += nr_retry_pages;
1696 
1697 	return nr_failed;
1698 }
1699 
1700 /*
1701  * migrate_pages_batch() first unmaps as many folios in the from list as
1702  * possible, then moves the unmapped folios.
1703  *
1704  * We only batch migration if mode == MIGRATE_ASYNC, to avoid waiting for a
1705  * lock or bit while we have more than one folio locked, which may cause a
1706  * deadlock (e.g., for the loop device).  So, if mode != MIGRATE_ASYNC, the
1707  * length of the from list must be <= 1.
1708  */
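/*
 * Overview of the batching flow implemented below:
 *   1) Unmap pass: try to lock and unmap each source folio, collecting the
 *      successfully unmapped ones on unmap_folios with their freshly
 *      allocated destinations on dst_folios.
 *   2) Flush the deferred TLB batch once for all unmapped folios.
 *   3) Move pass: copy each unmapped folio to its destination and restore
 *      the page table entries to point at the new folio.
 *   4) Cleanup: undo the unmap for anything that could not be moved and
 *      release the unused destination folios.
 */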
1709 static int migrate_pages_batch(struct list_head *from,
1710 		new_folio_t get_new_folio, free_folio_t put_new_folio,
1711 		unsigned long private, enum migrate_mode mode, int reason,
1712 		struct list_head *ret_folios, struct list_head *split_folios,
1713 		struct migrate_pages_stats *stats, int nr_pass)
1714 {
1715 	int retry = 1;
1716 	int thp_retry = 1;
1717 	int nr_failed = 0;
1718 	int nr_retry_pages = 0;
1719 	int pass = 0;
1720 	bool is_thp = false;
1721 	bool is_large = false;
1722 	struct folio *folio, *folio2, *dst = NULL, *dst2;
1723 	int rc, rc_saved = 0, nr_pages;
1724 	LIST_HEAD(unmap_folios);
1725 	LIST_HEAD(dst_folios);
1726 	bool nosplit = (reason == MR_NUMA_MISPLACED);
1727 
1728 	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
1729 			!list_empty(from) && !list_is_singular(from));
1730 
1731 	for (pass = 0; pass < nr_pass && retry; pass++) {
1732 		retry = 0;
1733 		thp_retry = 0;
1734 		nr_retry_pages = 0;
1735 
1736 		list_for_each_entry_safe(folio, folio2, from, lru) {
1737 			is_large = folio_test_large(folio);
1738 			is_thp = is_large && folio_test_pmd_mappable(folio);
1739 			nr_pages = folio_nr_pages(folio);
1740 
1741 			cond_resched();
1742 
1743 			/*
1744 			 * The rare folio on the deferred split list should
1745 			 * be split now. It should not count as a failure,
1746 			 * but do increment nr_failed because, without doing so,
1747 			 * migrate_pages() may report success with (split but
1748 			 * unmigrated) pages still on its fromlist, whereas it
1749 			 * always reports success when its fromlist is empty.
1750 			 * stats->nr_thp_failed should be increased too,
1751 			 * otherwise the stats become inconsistent when
1752 			 * migrate_pages_batch() is called via migrate_pages()
1753 			 * with MIGRATE_SYNC and MIGRATE_ASYNC.
1754 			 *
1755 			 * Only check the deferred list here, without removing
1756 			 * the folio from it: the folio can be on a local list
1757 			 * of deferred_split_scan(), and removing it could
1758 			 * corrupt that local list. The folio split code below
1759 			 * can handle this with the help of folio_ref_freeze().
1760 			 *
1761 			 * nr_pages > 2 is needed to avoid checking order-1
1762 			 * page cache folios. They exist, in contrast to
1763 			 * non-existent order-1 anonymous folios, and do not
1764 			 * use _deferred_list.
1765 			 */
1766 			if (nr_pages > 2 &&
1767 			   !list_empty(&folio->_deferred_list) &&
1768 			   folio_test_partially_mapped(folio)) {
1769 				if (!try_split_folio(folio, split_folios, mode)) {
1770 					nr_failed++;
1771 					stats->nr_thp_failed += is_thp;
1772 					stats->nr_thp_split += is_thp;
1773 					stats->nr_split++;
1774 					continue;
1775 				}
1776 			}
1777 
1778 			/*
1779 			 * Large folio migration might be unsupported or the
1780 			 * allocation might fail, so we should retry on the
1781 			 * same folio with the large folio split into normal
1782 			 * folios.
1783 			 *
1784 			 * Split folios are put in split_folios, and
1785 			 * we will migrate them after the rest of the
1786 			 * list is processed.
1787 			 */
1788 			if (!thp_migration_supported() && is_thp) {
1789 				nr_failed++;
1790 				stats->nr_thp_failed++;
1791 				if (!try_split_folio(folio, split_folios, mode)) {
1792 					stats->nr_thp_split++;
1793 					stats->nr_split++;
1794 					continue;
1795 				}
1796 				stats->nr_failed_pages += nr_pages;
1797 				list_move_tail(&folio->lru, ret_folios);
1798 				continue;
1799 			}
1800 
1801 			rc = migrate_folio_unmap(get_new_folio, put_new_folio,
1802 					private, folio, &dst, mode, reason,
1803 					ret_folios);
1804 			/*
1805 			 * The rules are:
1806 			 *	Success: folio will be freed
1807 			 *	Unmap: folio will be put on unmap_folios list,
1808 			 *	       dst folio put on dst_folios list
1809 			 *	-EAGAIN: stay on the from list
1810 			 *	-ENOMEM: stay on the from list
1811 			 *	Other errno: put on ret_folios list
1812 			 */
1813 			switch(rc) {
1814 			case -ENOMEM:
1815 				/*
1816 				 * When memory is low, don't bother to try to migrate
1817 				 * other folios, move unmapped folios, then exit.
1818 				 */
1819 				nr_failed++;
1820 				stats->nr_thp_failed += is_thp;
1821 				/* Large folio NUMA faulting doesn't split to retry. */
1822 				if (is_large && !nosplit) {
1823 					int ret = try_split_folio(folio, split_folios, mode);
1824 
1825 					if (!ret) {
1826 						stats->nr_thp_split += is_thp;
1827 						stats->nr_split++;
1828 						break;
1829 					} else if (reason == MR_LONGTERM_PIN &&
1830 						   ret == -EAGAIN) {
1831 						/*
1832 						 * Try again to split large folio to
1833 						 * mitigate the failure of longterm pinning.
1834 						 */
1835 						retry++;
1836 						thp_retry += is_thp;
1837 						nr_retry_pages += nr_pages;
1838 						/* Undo duplicated failure counting. */
1839 						nr_failed--;
1840 						stats->nr_thp_failed -= is_thp;
1841 						break;
1842 					}
1843 				}
1844 
1845 				stats->nr_failed_pages += nr_pages + nr_retry_pages;
1846 				/* nr_failed isn't updated here because it is not used afterwards */
1847 				stats->nr_thp_failed += thp_retry;
1848 				rc_saved = rc;
1849 				if (list_empty(&unmap_folios))
1850 					goto out;
1851 				else
1852 					goto move;
1853 			case -EAGAIN:
1854 				retry++;
1855 				thp_retry += is_thp;
1856 				nr_retry_pages += nr_pages;
1857 				break;
1858 			case MIGRATEPAGE_SUCCESS:
1859 				stats->nr_succeeded += nr_pages;
1860 				stats->nr_thp_succeeded += is_thp;
1861 				break;
1862 			case MIGRATEPAGE_UNMAP:
1863 				list_move_tail(&folio->lru, &unmap_folios);
1864 				list_add_tail(&dst->lru, &dst_folios);
1865 				break;
1866 			default:
1867 				/*
1868 				 * Permanent failure (-EBUSY, etc.):
1869 				 * unlike -EAGAIN case, the failed folio is
1870 				 * removed from migration folio list and not
1871 				 * retried in the next outer loop.
1872 				 */
1873 				nr_failed++;
1874 				stats->nr_thp_failed += is_thp;
1875 				stats->nr_failed_pages += nr_pages;
1876 				break;
1877 			}
1878 		}
1879 	}
1880 	nr_failed += retry;
1881 	stats->nr_thp_failed += thp_retry;
1882 	stats->nr_failed_pages += nr_retry_pages;
1883 move:
1884 	/* Flush TLBs for all unmapped folios */
1885 	try_to_unmap_flush();
1886 
1887 	retry = 1;
1888 	for (pass = 0; pass < nr_pass && retry; pass++) {
1889 		retry = 0;
1890 		thp_retry = 0;
1891 		nr_retry_pages = 0;
1892 
1893 		dst = list_first_entry(&dst_folios, struct folio, lru);
1894 		dst2 = list_next_entry(dst, lru);
1895 		list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1896 			is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
1897 			nr_pages = folio_nr_pages(folio);
1898 
1899 			cond_resched();
1900 
1901 			rc = migrate_folio_move(put_new_folio, private,
1902 						folio, dst, mode,
1903 						reason, ret_folios);
1904 			/*
1905 			 * The rules are:
1906 			 *	Success: folio will be freed
1907 			 *	-EAGAIN: stay on the unmap_folios list
1908 			 *	Other errno: put on ret_folios list
1909 			 */
1910 			switch(rc) {
1911 			case -EAGAIN:
1912 				retry++;
1913 				thp_retry += is_thp;
1914 				nr_retry_pages += nr_pages;
1915 				break;
1916 			case MIGRATEPAGE_SUCCESS:
1917 				stats->nr_succeeded += nr_pages;
1918 				stats->nr_thp_succeeded += is_thp;
1919 				break;
1920 			default:
1921 				nr_failed++;
1922 				stats->nr_thp_failed += is_thp;
1923 				stats->nr_failed_pages += nr_pages;
1924 				break;
1925 			}
1926 			dst = dst2;
1927 			dst2 = list_next_entry(dst, lru);
1928 		}
1929 	}
1930 	nr_failed += retry;
1931 	stats->nr_thp_failed += thp_retry;
1932 	stats->nr_failed_pages += nr_retry_pages;
1933 
1934 	rc = rc_saved ? : nr_failed;
1935 out:
1936 	/* Cleanup remaining folios */
1937 	dst = list_first_entry(&dst_folios, struct folio, lru);
1938 	dst2 = list_next_entry(dst, lru);
1939 	list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) {
1940 		int old_page_state = 0;
1941 		struct anon_vma *anon_vma = NULL;
1942 
1943 		__migrate_folio_extract(dst, &old_page_state, &anon_vma);
1944 		migrate_folio_undo_src(folio, old_page_state & PAGE_WAS_MAPPED,
1945 				       anon_vma, true, ret_folios);
1946 		list_del(&dst->lru);
1947 		migrate_folio_undo_dst(dst, true, put_new_folio, private);
1948 		dst = dst2;
1949 		dst2 = list_next_entry(dst, lru);
1950 	}
1951 
1952 	return rc;
1953 }
1954 
1955 static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio,
1956 		free_folio_t put_new_folio, unsigned long private,
1957 		enum migrate_mode mode, int reason,
1958 		struct list_head *ret_folios, struct list_head *split_folios,
1959 		struct migrate_pages_stats *stats)
1960 {
1961 	int rc, nr_failed = 0;
1962 	LIST_HEAD(folios);
1963 	struct migrate_pages_stats astats;
1964 
1965 	memset(&astats, 0, sizeof(astats));
1966 	/* Try to migrate in batch with MIGRATE_ASYNC mode first */
1967 	rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC,
1968 				 reason, &folios, split_folios, &astats,
1969 				 NR_MAX_MIGRATE_ASYNC_RETRY);
1970 	stats->nr_succeeded += astats.nr_succeeded;
1971 	stats->nr_thp_succeeded += astats.nr_thp_succeeded;
1972 	stats->nr_thp_split += astats.nr_thp_split;
1973 	stats->nr_split += astats.nr_split;
1974 	if (rc < 0) {
1975 		stats->nr_failed_pages += astats.nr_failed_pages;
1976 		stats->nr_thp_failed += astats.nr_thp_failed;
1977 		list_splice_tail(&folios, ret_folios);
1978 		return rc;
1979 	}
1980 	stats->nr_thp_failed += astats.nr_thp_split;
1981 	/*
1982 	 * Do not count rc, as pages will be retried below.
1983 	 * Count nr_split only, since it includes nr_thp_split.
1984 	 */
1985 	nr_failed += astats.nr_split;
1986 	/*
1987 	 * Fall back to migrating all failed folios one by one synchronously. All
1988 	 * failed folios except split THPs will be retried, so their failure
1989 	 * isn't counted.
1990 	 */
1991 	list_splice_tail_init(&folios, from);
1992 	while (!list_empty(from)) {
1993 		list_move(from->next, &folios);
1994 		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
1995 					 private, mode, reason, ret_folios,
1996 					 split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY);
1997 		list_splice_tail_init(&folios, ret_folios);
1998 		if (rc < 0)
1999 			return rc;
2000 		nr_failed += rc;
2001 	}
2002 
2003 	return nr_failed;
2004 }
2005 
2006 /*
2007  * migrate_pages - migrate the folios specified in a list, to the free folios
2008  *		   supplied as the target for the page migration
2009  *
2010  * @from:		The list of folios to be migrated.
2011  * @get_new_folio:	The function used to allocate free folios to be used
2012  *			as the target of the folio migration.
2013  * @put_new_folio:	The function used to free target folios if migration
2014  *			fails, or NULL if no special handling is necessary.
2015  * @private:		Private data to be passed on to get_new_folio()
2016  * @mode:		The migration mode that specifies the constraints for
2017  *			folio migration, if any.
2018  * @reason:		The reason for folio migration.
2019  * @ret_succeeded:	Set to the number of folios migrated successfully if
2020  *			the caller passes a non-NULL pointer.
2021  *
2022  * The function returns after NR_MAX_MIGRATE_PAGES_RETRY attempts or when no
2023  * folios are movable any more because the list has become empty or no
2024  * retryable folios remain. It is the caller's responsibility to call
2025  * putback_movable_pages() only if ret != 0.
2026  *
2027  * Returns the number of {normal, large, hugetlb} folios that were not
2028  * migrated, or an error code. A large folio that had to be split counts as
2029  * one non-migrated large folio, no matter how many of its split folios are
2030  * migrated successfully.
2031  */
2032 int migrate_pages(struct list_head *from, new_folio_t get_new_folio,
2033 		free_folio_t put_new_folio, unsigned long private,
2034 		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
2035 {
2036 	int rc, rc_gather;
2037 	int nr_pages;
2038 	struct folio *folio, *folio2;
2039 	LIST_HEAD(folios);
2040 	LIST_HEAD(ret_folios);
2041 	LIST_HEAD(split_folios);
2042 	struct migrate_pages_stats stats;
2043 
2044 	trace_mm_migrate_pages_start(mode, reason);
2045 
2046 	memset(&stats, 0, sizeof(stats));
2047 
2048 	rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private,
2049 				     mode, reason, &stats, &ret_folios);
2050 	if (rc_gather < 0)
2051 		goto out;
2052 
2053 again:
2054 	nr_pages = 0;
2055 	list_for_each_entry_safe(folio, folio2, from, lru) {
2056 		/* Retried hugetlb folios will be kept on the list */
2057 		if (folio_test_hugetlb(folio)) {
2058 			list_move_tail(&folio->lru, &ret_folios);
2059 			continue;
2060 		}
2061 
2062 		nr_pages += folio_nr_pages(folio);
2063 		if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2064 			break;
2065 	}
2066 	if (nr_pages >= NR_MAX_BATCHED_MIGRATION)
2067 		list_cut_before(&folios, from, &folio2->lru);
2068 	else
2069 		list_splice_init(from, &folios);
2070 	if (mode == MIGRATE_ASYNC)
2071 		rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio,
2072 				private, mode, reason, &ret_folios,
2073 				&split_folios, &stats,
2074 				NR_MAX_MIGRATE_PAGES_RETRY);
2075 	else
2076 		rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio,
2077 				private, mode, reason, &ret_folios,
2078 				&split_folios, &stats);
2079 	list_splice_tail_init(&folios, &ret_folios);
2080 	if (rc < 0) {
2081 		rc_gather = rc;
2082 		list_splice_tail(&split_folios, &ret_folios);
2083 		goto out;
2084 	}
2085 	if (!list_empty(&split_folios)) {
2086 		/*
2087 		 * Failure isn't counted since all split folios of a large folio
2088 		 * are counted as 1 failure already.  And, we only try to migrate
2089 		 * with minimal effort, forcing MIGRATE_ASYNC mode and retrying once.
2090 		 */
2091 		migrate_pages_batch(&split_folios, get_new_folio,
2092 				put_new_folio, private, MIGRATE_ASYNC, reason,
2093 				&ret_folios, NULL, &stats, 1);
2094 		list_splice_tail_init(&split_folios, &ret_folios);
2095 	}
2096 	rc_gather += rc;
2097 	if (!list_empty(from))
2098 		goto again;
2099 out:
2100 	/*
2101 	 * Put the permanently failed folios back on the migration list; they
2102 	 * will be put back on the right list by the caller.
2103 	 */
2104 	list_splice(&ret_folios, from);
2105 
2106 	/*
2107 	 * Return 0 in case all split folios of the large folios that failed to
2108 	 * migrate were themselves migrated successfully.
2109 	 */
2110 	if (list_empty(from))
2111 		rc_gather = 0;
2112 
2113 	count_vm_events(PGMIGRATE_SUCCESS, stats.nr_succeeded);
2114 	count_vm_events(PGMIGRATE_FAIL, stats.nr_failed_pages);
2115 	count_vm_events(THP_MIGRATION_SUCCESS, stats.nr_thp_succeeded);
2116 	count_vm_events(THP_MIGRATION_FAIL, stats.nr_thp_failed);
2117 	count_vm_events(THP_MIGRATION_SPLIT, stats.nr_thp_split);
2118 	trace_mm_migrate_pages(stats.nr_succeeded, stats.nr_failed_pages,
2119 			       stats.nr_thp_succeeded, stats.nr_thp_failed,
2120 			       stats.nr_thp_split, stats.nr_split, mode,
2121 			       reason);
2122 
2123 	if (ret_succeeded)
2124 		*ret_succeeded = stats.nr_succeeded;
2125 
2126 	return rc_gather;
2127 }
2128 EXPORT_SYMBOL_GPL(migrate_pages);
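
/*
 * Illustrative sketch only (not part of migrate.c): a minimal caller of
 * migrate_pages() that uses alloc_migration_target() with a
 * migration_target_control, mirroring do_move_pages_to_node() below. The
 * function name and the assumption that the caller has already isolated
 * the folios onto @folios are hypothetical.
 */
#if 0
static int example_migrate_list_to_node(struct list_head *folios, int nid)
{
	struct migration_target_control mtc = {
		.nid = nid,
		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
		.reason = MR_SYSCALL,
	};
	unsigned int nr_succeeded = 0;
	int nr_failed;

	nr_failed = migrate_pages(folios, alloc_migration_target, NULL,
				  (unsigned long)&mtc, MIGRATE_SYNC,
				  MR_SYSCALL, &nr_succeeded);
	if (nr_failed)		/* put any stragglers back on their LRU lists */
		putback_movable_pages(folios);
	return nr_failed;
}
#endif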
2129 
2130 struct folio *alloc_migration_target(struct folio *src, unsigned long private)
2131 {
2132 	struct migration_target_control *mtc;
2133 	gfp_t gfp_mask;
2134 	unsigned int order = 0;
2135 	int nid;
2136 	int zidx;
2137 
2138 	mtc = (struct migration_target_control *)private;
2139 	gfp_mask = mtc->gfp_mask;
2140 	nid = mtc->nid;
2141 	if (nid == NUMA_NO_NODE)
2142 		nid = folio_nid(src);
2143 
2144 	if (folio_test_hugetlb(src)) {
2145 		struct hstate *h = folio_hstate(src);
2146 
2147 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
2148 		return alloc_hugetlb_folio_nodemask(h, nid,
2149 						mtc->nmask, gfp_mask,
2150 						htlb_allow_alloc_fallback(mtc->reason));
2151 	}
2152 
2153 	if (folio_test_large(src)) {
2154 		/*
2155 		 * Clear __GFP_RECLAIM to make the migration callback
2156 		 * consistent with regular THP allocations.
2157 		 */
2158 		gfp_mask &= ~__GFP_RECLAIM;
2159 		gfp_mask |= GFP_TRANSHUGE;
2160 		order = folio_order(src);
2161 	}
2162 	zidx = zone_idx(folio_zone(src));
2163 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
2164 		gfp_mask |= __GFP_HIGHMEM;
2165 
2166 	return __folio_alloc(gfp_mask, order, nid, mtc->nmask);
2167 }
2168 
2169 #ifdef CONFIG_NUMA
2170 
2171 static int store_status(int __user *status, int start, int value, int nr)
2172 {
2173 	while (nr-- > 0) {
2174 		if (put_user(value, status + start))
2175 			return -EFAULT;
2176 		start++;
2177 	}
2178 
2179 	return 0;
2180 }
2181 
2182 static int do_move_pages_to_node(struct list_head *pagelist, int node)
2183 {
2184 	int err;
2185 	struct migration_target_control mtc = {
2186 		.nid = node,
2187 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
2188 		.reason = MR_SYSCALL,
2189 	};
2190 
2191 	err = migrate_pages(pagelist, alloc_migration_target, NULL,
2192 		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
2193 	if (err)
2194 		putback_movable_pages(pagelist);
2195 	return err;
2196 }
2197 
2198 static int __add_folio_for_migration(struct folio *folio, int node,
2199 		struct list_head *pagelist, bool migrate_all)
2200 {
2201 	if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2202 		return -EFAULT;
2203 
2204 	if (folio_is_zone_device(folio))
2205 		return -ENOENT;
2206 
2207 	if (folio_nid(folio) == node)
2208 		return 0;
2209 
2210 	if (folio_likely_mapped_shared(folio) && !migrate_all)
2211 		return -EACCES;
2212 
2213 	if (folio_test_hugetlb(folio)) {
2214 		if (isolate_hugetlb(folio, pagelist))
2215 			return 1;
2216 	} else if (folio_isolate_lru(folio)) {
2217 		list_add_tail(&folio->lru, pagelist);
2218 		node_stat_mod_folio(folio,
2219 			NR_ISOLATED_ANON + folio_is_file_lru(folio),
2220 			folio_nr_pages(folio));
2221 		return 1;
2222 	}
2223 	return -EBUSY;
2224 }
2225 
2226 /*
2227  * Resolves the given address to a struct folio, isolates it from the LRU and
2228  * puts it on the given pagelist.
2229  * Returns:
2230  *     errno - if the folio cannot be found/isolated
2231  *     0 - when it doesn't have to be migrated because it is already on the
2232  *         target node
2233  *     1 - when it has been queued
2234  */
2235 static int add_folio_for_migration(struct mm_struct *mm, const void __user *p,
2236 		int node, struct list_head *pagelist, bool migrate_all)
2237 {
2238 	struct vm_area_struct *vma;
2239 	struct folio_walk fw;
2240 	struct folio *folio;
2241 	unsigned long addr;
2242 	int err = -EFAULT;
2243 
2244 	mmap_read_lock(mm);
2245 	addr = (unsigned long)untagged_addr_remote(mm, p);
2246 
2247 	vma = vma_lookup(mm, addr);
2248 	if (vma && vma_migratable(vma)) {
2249 		folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2250 		if (folio) {
2251 			err = __add_folio_for_migration(folio, node, pagelist,
2252 							migrate_all);
2253 			folio_walk_end(&fw, vma);
2254 		} else {
2255 			err = -ENOENT;
2256 		}
2257 	}
2258 	mmap_read_unlock(mm);
2259 	return err;
2260 }
2261 
2262 static int move_pages_and_store_status(int node,
2263 		struct list_head *pagelist, int __user *status,
2264 		int start, int i, unsigned long nr_pages)
2265 {
2266 	int err;
2267 
2268 	if (list_empty(pagelist))
2269 		return 0;
2270 
2271 	err = do_move_pages_to_node(pagelist, node);
2272 	if (err) {
2273 		/*
2274 		 * A positive err means the number of pages that
2275 		 * failed to migrate.  Since we are going to
2276 		 * abort and return the number of non-migrated
2277 		 * pages, we need to include the rest of the
2278 		 * nr_pages that have not been attempted as
2279 		 * well.
2280 		 */
2281 		if (err > 0)
2282 			err += nr_pages - i;
2283 		return err;
2284 	}
2285 	return store_status(status, start, node, i - start);
2286 }
2287 
2288 /*
2289  * Migrate an array of page addresses onto an array of nodes and fill
2290  * the corresponding array of status values.
2291  */
2292 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
2293 			 unsigned long nr_pages,
2294 			 const void __user * __user *pages,
2295 			 const int __user *nodes,
2296 			 int __user *status, int flags)
2297 {
2298 	compat_uptr_t __user *compat_pages = (void __user *)pages;
2299 	int current_node = NUMA_NO_NODE;
2300 	LIST_HEAD(pagelist);
2301 	int start, i;
2302 	int err = 0, err1;
2303 
2304 	lru_cache_disable();
2305 
2306 	for (i = start = 0; i < nr_pages; i++) {
2307 		const void __user *p;
2308 		int node;
2309 
2310 		err = -EFAULT;
2311 		if (in_compat_syscall()) {
2312 			compat_uptr_t cp;
2313 
2314 			if (get_user(cp, compat_pages + i))
2315 				goto out_flush;
2316 
2317 			p = compat_ptr(cp);
2318 		} else {
2319 			if (get_user(p, pages + i))
2320 				goto out_flush;
2321 		}
2322 		if (get_user(node, nodes + i))
2323 			goto out_flush;
2324 
2325 		err = -ENODEV;
2326 		if (node < 0 || node >= MAX_NUMNODES)
2327 			goto out_flush;
2328 		if (!node_state(node, N_MEMORY))
2329 			goto out_flush;
2330 
2331 		err = -EACCES;
2332 		if (!node_isset(node, task_nodes))
2333 			goto out_flush;
2334 
2335 		if (current_node == NUMA_NO_NODE) {
2336 			current_node = node;
2337 			start = i;
2338 		} else if (node != current_node) {
2339 			err = move_pages_and_store_status(current_node,
2340 					&pagelist, status, start, i, nr_pages);
2341 			if (err)
2342 				goto out;
2343 			start = i;
2344 			current_node = node;
2345 		}
2346 
2347 		/*
2348 		 * Errors in the page lookup or isolation are not fatal and we simply
2349 		 * report them via status.
2350 		 */
2351 		err = add_folio_for_migration(mm, p, current_node, &pagelist,
2352 					      flags & MPOL_MF_MOVE_ALL);
2353 
2354 		if (err > 0) {
2355 			/* The page is successfully queued for migration */
2356 			continue;
2357 		}
2358 
2359 		/*
2360 		 * The move_pages() man page does not have an -EEXIST choice, so
2361 		 * use -EFAULT instead.
2362 		 */
2363 		if (err == -EEXIST)
2364 			err = -EFAULT;
2365 
2366 		/*
2367 		 * If the page is already on the target node (!err), store the
2368 		 * node, otherwise, store the err.
2369 		 */
2370 		err = store_status(status, i, err ? : current_node, 1);
2371 		if (err)
2372 			goto out_flush;
2373 
2374 		err = move_pages_and_store_status(current_node, &pagelist,
2375 				status, start, i, nr_pages);
2376 		if (err) {
2377 			/* We have accounted for page i */
2378 			if (err > 0)
2379 				err--;
2380 			goto out;
2381 		}
2382 		current_node = NUMA_NO_NODE;
2383 	}
2384 out_flush:
2385 	/* Make sure we do not overwrite the existing error */
2386 	err1 = move_pages_and_store_status(current_node, &pagelist,
2387 				status, start, i, nr_pages);
2388 	if (err >= 0)
2389 		err = err1;
2390 out:
2391 	lru_cache_enable();
2392 	return err;
2393 }
2394 
2395 /*
2396  * Determine the nodes of an array of pages and store them in an array of status values.
2397  */
2398 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
2399 				const void __user **pages, int *status)
2400 {
2401 	unsigned long i;
2402 
2403 	mmap_read_lock(mm);
2404 
2405 	for (i = 0; i < nr_pages; i++) {
2406 		unsigned long addr = (unsigned long)(*pages);
2407 		struct vm_area_struct *vma;
2408 		struct folio_walk fw;
2409 		struct folio *folio;
2410 		int err = -EFAULT;
2411 
2412 		vma = vma_lookup(mm, addr);
2413 		if (!vma)
2414 			goto set_status;
2415 
2416 		folio = folio_walk_start(&fw, vma, addr, FW_ZEROPAGE);
2417 		if (folio) {
2418 			if (is_zero_folio(folio) || is_huge_zero_folio(folio))
2419 				err = -EFAULT;
2420 			else if (folio_is_zone_device(folio))
2421 				err = -ENOENT;
2422 			else
2423 				err = folio_nid(folio);
2424 			folio_walk_end(&fw, vma);
2425 		} else {
2426 			err = -ENOENT;
2427 		}
2428 set_status:
2429 		*status = err;
2430 
2431 		pages++;
2432 		status++;
2433 	}
2434 
2435 	mmap_read_unlock(mm);
2436 }
2437 
2438 static int get_compat_pages_array(const void __user *chunk_pages[],
2439 				  const void __user * __user *pages,
2440 				  unsigned long chunk_nr)
2441 {
2442 	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
2443 	compat_uptr_t p;
2444 	int i;
2445 
2446 	for (i = 0; i < chunk_nr; i++) {
2447 		if (get_user(p, pages32 + i))
2448 			return -EFAULT;
2449 		chunk_pages[i] = compat_ptr(p);
2450 	}
2451 
2452 	return 0;
2453 }
2454 
2455 /*
2456  * Determine the nodes of a user array of pages and store them in
2457  * a user array of status values.
2458  */
2459 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
2460 			 const void __user * __user *pages,
2461 			 int __user *status)
2462 {
2463 #define DO_PAGES_STAT_CHUNK_NR 16UL
2464 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
2465 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
2466 
2467 	while (nr_pages) {
2468 		unsigned long chunk_nr = min(nr_pages, DO_PAGES_STAT_CHUNK_NR);
2469 
2470 		if (in_compat_syscall()) {
2471 			if (get_compat_pages_array(chunk_pages, pages,
2472 						   chunk_nr))
2473 				break;
2474 		} else {
2475 			if (copy_from_user(chunk_pages, pages,
2476 				      chunk_nr * sizeof(*chunk_pages)))
2477 				break;
2478 		}
2479 
2480 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
2481 
2482 		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
2483 			break;
2484 
2485 		pages += chunk_nr;
2486 		status += chunk_nr;
2487 		nr_pages -= chunk_nr;
2488 	}
2489 	return nr_pages ? -EFAULT : 0;
2490 }
2491 
2492 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
2493 {
2494 	struct task_struct *task;
2495 	struct mm_struct *mm;
2496 
2497 	/*
2498 	 * There is no need to check if the current process has the right to
2499 	 * modify the specified process when they are the same.
2500 	 */
2501 	if (!pid) {
2502 		mmget(current->mm);
2503 		*mem_nodes = cpuset_mems_allowed(current);
2504 		return current->mm;
2505 	}
2506 
2507 	task = find_get_task_by_vpid(pid);
2508 	if (!task) {
2509 		return ERR_PTR(-ESRCH);
2510 	}
2511 
2512 	/*
2513 	 * Check if this process has the right to modify the specified
2514 	 * process. Use the regular "ptrace_may_access()" checks.
2515 	 */
2516 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2517 		mm = ERR_PTR(-EPERM);
2518 		goto out;
2519 	}
2520 
2521 	mm = ERR_PTR(security_task_movememory(task));
2522 	if (IS_ERR(mm))
2523 		goto out;
2524 	*mem_nodes = cpuset_mems_allowed(task);
2525 	mm = get_task_mm(task);
2526 out:
2527 	put_task_struct(task);
2528 	if (!mm)
2529 		mm = ERR_PTR(-EINVAL);
2530 	return mm;
2531 }
2532 
2533 /*
2534  * Move a list of pages in the address space of the target process (the
2535  * currently executing process when pid is 0).
2536  */
2537 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2538 			     const void __user * __user *pages,
2539 			     const int __user *nodes,
2540 			     int __user *status, int flags)
2541 {
2542 	struct mm_struct *mm;
2543 	int err;
2544 	nodemask_t task_nodes;
2545 
2546 	/* Check flags */
2547 	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2548 		return -EINVAL;
2549 
2550 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2551 		return -EPERM;
2552 
2553 	mm = find_mm_struct(pid, &task_nodes);
2554 	if (IS_ERR(mm))
2555 		return PTR_ERR(mm);
2556 
2557 	if (nodes)
2558 		err = do_pages_move(mm, task_nodes, nr_pages, pages,
2559 				    nodes, status, flags);
2560 	else
2561 		err = do_pages_stat(mm, nr_pages, pages, status);
2562 
2563 	mmput(mm);
2564 	return err;
2565 }
2566 
2567 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2568 		const void __user * __user *, pages,
2569 		const int __user *, nodes,
2570 		int __user *, status, int, flags)
2571 {
2572 	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2573 }
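
/*
 * Illustrative userspace sketch (not kernel code): exercising the
 * move_pages(2) syscall implemented above to migrate one freshly touched
 * page of the calling process to NUMA node 1. Assumes libnuma's <numaif.h>
 * wrapper and that node 1 exists; error handling is minimal.
 */
#if 0
#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *buf = aligned_alloc(psz, psz);
	void *pages[1] = { buf };
	int nodes[1] = { 1 };
	int status[1];
	long rc;

	*(volatile char *)buf = 0;	/* fault the page in before moving it */
	rc = move_pages(0 /* self */, 1, pages, nodes, status, MPOL_MF_MOVE);
	if (rc < 0)
		perror("move_pages");
	else
		printf("page is now on node %d\n", status[0]);
	return 0;
}
#endif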
2574 
2575 #ifdef CONFIG_NUMA_BALANCING
2576 /*
2577  * Returns true if this is a safe migration target node for misplaced NUMA
2578  * pages. Currently it only checks the watermarks, which is crude.
2579  */
2580 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2581 				   unsigned long nr_migrate_pages)
2582 {
2583 	int z;
2584 
2585 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2586 		struct zone *zone = pgdat->node_zones + z;
2587 
2588 		if (!managed_zone(zone))
2589 			continue;
2590 
2591 		/* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2592 		if (!zone_watermark_ok(zone, 0,
2593 				       high_wmark_pages(zone) +
2594 				       nr_migrate_pages,
2595 				       ZONE_MOVABLE, ALLOC_CMA))
2596 			continue;
2597 		return true;
2598 	}
2599 	return false;
2600 }
2601 
2602 static struct folio *alloc_misplaced_dst_folio(struct folio *src,
2603 					   unsigned long data)
2604 {
2605 	int nid = (int) data;
2606 	int order = folio_order(src);
2607 	gfp_t gfp = __GFP_THISNODE;
2608 
2609 	if (order > 0)
2610 		gfp |= GFP_TRANSHUGE_LIGHT;
2611 	else {
2612 		gfp |= GFP_HIGHUSER_MOVABLE | __GFP_NOMEMALLOC | __GFP_NORETRY |
2613 			__GFP_NOWARN;
2614 		gfp &= ~__GFP_RECLAIM;
2615 	}
2616 	return __folio_alloc_node(gfp, order, nid);
2617 }
2618 
2619 /*
2620  * Prepare for calling migrate_misplaced_folio() by isolating the folio if
2621  * permitted. Must be called with the PTL still held.
2622  */
2623 int migrate_misplaced_folio_prepare(struct folio *folio,
2624 		struct vm_area_struct *vma, int node)
2625 {
2626 	int nr_pages = folio_nr_pages(folio);
2627 	pg_data_t *pgdat = NODE_DATA(node);
2628 
2629 	if (folio_is_file_lru(folio)) {
2630 		/*
2631 		 * Do not migrate file folios that are mapped in multiple
2632 		 * processes with execute permissions as they are probably
2633 		 * shared libraries.
2634 		 *
2635 		 * See folio_likely_mapped_shared() on possible imprecision
2636 		 * when we cannot easily detect if a folio is shared.
2637 		 */
2638 		if ((vma->vm_flags & VM_EXEC) &&
2639 		    folio_likely_mapped_shared(folio))
2640 			return -EACCES;
2641 
2642 		/*
2643 		 * Do not migrate dirty folios, as not all filesystems can move
2644 		 * dirty folios in MIGRATE_ASYNC mode, which makes the attempt a
2645 		 * waste of cycles.
2646 		 */
2647 		if (folio_test_dirty(folio))
2648 			return -EAGAIN;
2649 	}
2650 
2651 	/* Avoid migrating to a node that is nearly full */
2652 	if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
2653 		int z;
2654 
2655 		if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
2656 			return -EAGAIN;
2657 		for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2658 			if (managed_zone(pgdat->node_zones + z))
2659 				break;
2660 		}
2661 
2662 		/*
2663 		 * If there are no managed zones, do not proceed
2664 		 * further.
2665 		 */
2666 		if (z < 0)
2667 			return -EAGAIN;
2668 
2669 		wakeup_kswapd(pgdat->node_zones + z, 0,
2670 			      folio_order(folio), ZONE_MOVABLE);
2671 		return -EAGAIN;
2672 	}
2673 
2674 	if (!folio_isolate_lru(folio))
2675 		return -EAGAIN;
2676 
2677 	node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
2678 			    nr_pages);
2679 	return 0;
2680 }
2681 
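/*
 * Illustrative sketch only: the expected call pattern from a NUMA hinting
 * fault handler, with the preparation done under the page table lock and
 * the actual migration after it has been dropped. Names such as vmf,
 * target_nid and flags stand in for whatever the real caller uses.
 */
#if 0
	if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
		/* Isolation failed: leave the folio where it is. */
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		return 0;
	}
	pte_unmap_unlock(vmf->pte, vmf->ptl);

	if (!migrate_misplaced_folio(folio, vma, target_nid))
		/* The folio now lives on target_nid. */
		flags |= TNF_MIGRATED;
#endif
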
2682 /*
2683  * Attempt to migrate a misplaced folio to the specified destination
2684  * node. Caller is expected to have isolated the folio by calling
2685  * migrate_misplaced_folio_prepare(), which will result in an
2686  * elevated reference count on the folio. This function will un-isolate the
2687  * folio and drop that reference before returning.
2688  */
2689 int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
2690 			    int node)
2691 {
2692 	pg_data_t *pgdat = NODE_DATA(node);
2693 	int nr_remaining;
2694 	unsigned int nr_succeeded;
2695 	LIST_HEAD(migratepages);
2696 	struct mem_cgroup *memcg = get_mem_cgroup_from_folio(folio);
2697 	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat);
2698 
2699 	list_add(&folio->lru, &migratepages);
2700 	nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
2701 				     NULL, node, MIGRATE_ASYNC,
2702 				     MR_NUMA_MISPLACED, &nr_succeeded);
2703 	if (nr_remaining && !list_empty(&migratepages))
2704 		putback_movable_pages(&migratepages);
2705 	if (nr_succeeded) {
2706 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
2707 		count_memcg_events(memcg, NUMA_PAGE_MIGRATE, nr_succeeded);
2708 		if ((sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING)
2709 		    && !node_is_toptier(folio_nid(folio))
2710 		    && node_is_toptier(node))
2711 			mod_lruvec_state(lruvec, PGPROMOTE_SUCCESS, nr_succeeded);
2712 	}
2713 	mem_cgroup_put(memcg);
2714 	BUG_ON(!list_empty(&migratepages));
2715 	return nr_remaining ? -EAGAIN : 0;
2716 }
2717 #endif /* CONFIG_NUMA_BALANCING */
2718 #endif /* CONFIG_NUMA */
2719