1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Memory Migration functionality - linux/mm/migrate.c
4  *
5  * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
6  *
7  * Page migration was first developed in the context of the memory hotplug
8  * project. The main authors of the migration code are:
9  *
10  * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
11  * Hirokazu Takahashi <taka@valinux.co.jp>
12  * Dave Hansen <haveblue@us.ibm.com>
13  * Christoph Lameter
14  */
15 
16 #include <linux/migrate.h>
17 #include <linux/export.h>
18 #include <linux/swap.h>
19 #include <linux/swapops.h>
20 #include <linux/pagemap.h>
21 #include <linux/buffer_head.h>
22 #include <linux/mm_inline.h>
23 #include <linux/nsproxy.h>
24 #include <linux/pagevec.h>
25 #include <linux/ksm.h>
26 #include <linux/rmap.h>
27 #include <linux/topology.h>
28 #include <linux/cpu.h>
29 #include <linux/cpuset.h>
30 #include <linux/writeback.h>
31 #include <linux/mempolicy.h>
32 #include <linux/vmalloc.h>
33 #include <linux/security.h>
34 #include <linux/backing-dev.h>
35 #include <linux/compaction.h>
36 #include <linux/syscalls.h>
37 #include <linux/compat.h>
38 #include <linux/hugetlb.h>
39 #include <linux/hugetlb_cgroup.h>
40 #include <linux/gfp.h>
41 #include <linux/pagewalk.h>
42 #include <linux/pfn_t.h>
43 #include <linux/memremap.h>
44 #include <linux/userfaultfd_k.h>
45 #include <linux/balloon_compaction.h>
46 #include <linux/mmu_notifier.h>
47 #include <linux/page_idle.h>
48 #include <linux/page_owner.h>
49 #include <linux/sched/mm.h>
50 #include <linux/ptrace.h>
51 #include <linux/oom.h>
52 #include <linux/memory.h>
53 
54 #include <asm/tlbflush.h>
55 
56 #define CREATE_TRACE_POINTS
57 #include <trace/events/migrate.h>
58 
59 #undef CREATE_TRACE_POINTS
60 #ifndef __GENKSYMS__
61 #include <trace/hooks/mm.h>
62 #endif
63 
64 #include "internal.h"
65 
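/*
 * Isolate a non-LRU movable page so it can be migrated: take a reference,
 * confirm __PageMovable under the page lock, and let the driver's
 * a_ops->isolate_page() do the driver-specific isolation work.
 * Returns 0 on success and -EBUSY if the page cannot be isolated.
 */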
66 int isolate_movable_page(struct page *page, isolate_mode_t mode)
67 {
68 	struct address_space *mapping;
69 
70 	/*
71 	 * Avoid burning cycles with pages that are yet under __free_pages(),
72 	 * or just got freed under us.
73 	 *
74 	 * In case we 'win' a race for a movable page being freed under us and
75 	 * raise its refcount preventing __free_pages() from doing its job
76 	 * the put_page() at the end of this block will take care of
77 	 * releasing this page, thus avoiding a nasty leakage.
78 	 */
79 	if (unlikely(!get_page_unless_zero(page)))
80 		goto out;
81 
82 	/*
83 	 * Check PageMovable before taking the PG_lock, because the page's
84 	 * owner assumes nobody touches the PG_lock of a newly allocated page,
85 	 * so unconditionally grabbing the lock would break that assumption.
86 	 */
87 	if (unlikely(!__PageMovable(page)))
88 		goto out_putpage;
89 	/*
90 	 * As movable pages are not isolated from LRU lists, concurrent
91 	 * compaction threads can race against page migration functions
92 	 * as well as race against the release of a page.
93 	 *
94 	 * In order to avoid having an already isolated movable page
95 	 * being (wrongly) re-isolated while it is under migration,
96 	 * or to avoid attempting to isolate pages being released,
97 	 * let's be sure we have the page lock
98 	 * before proceeding with the movable page isolation steps.
99 	 */
100 	if (unlikely(!trylock_page(page)))
101 		goto out_putpage;
102 
103 	if (!PageMovable(page) || PageIsolated(page))
104 		goto out_no_isolated;
105 
106 	mapping = page_mapping(page);
107 	VM_BUG_ON_PAGE(!mapping, page);
108 
109 	if (!mapping->a_ops->isolate_page(page, mode))
110 		goto out_no_isolated;
111 
112 	/* Driver shouldn't use PG_isolated bit of page->flags */
113 	WARN_ON_ONCE(PageIsolated(page));
114 	SetPageIsolated(page);
115 	unlock_page(page);
116 
117 	return 0;
118 
119 out_no_isolated:
120 	unlock_page(page);
121 out_putpage:
122 	put_page(page);
123 out:
124 	return -EBUSY;
125 }
126 
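/*
 * Hand an isolated non-LRU movable page back to its driver via
 * a_ops->putback_page() and clear PG_isolated. Called with the page
 * locked by putback_movable_pages().
 */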
127 static void putback_movable_page(struct page *page)
128 {
129 	struct address_space *mapping;
130 
131 	mapping = page_mapping(page);
132 	mapping->a_ops->putback_page(page);
133 	ClearPageIsolated(page);
134 }
135 
136 /*
137  * Put previously isolated pages back onto the appropriate lists
138  * from where they were once taken off for compaction/migration.
139  *
140  * This function shall be used whenever the isolated pageset has been
141  * built from LRU, balloon, or hugetlbfs pages. See isolate_migratepages_range()
142  * and isolate_hugetlb().
143  */
144 void putback_movable_pages(struct list_head *l)
145 {
146 	struct page *page;
147 	struct page *page2;
148 
149 	list_for_each_entry_safe(page, page2, l, lru) {
150 		if (unlikely(PageHuge(page))) {
151 			putback_active_hugepage(page);
152 			continue;
153 		}
154 		list_del(&page->lru);
155 		/*
156 		 * We isolated non-lru movable page so here we can use
157 		 * __PageMovable because LRU page's mapping cannot have
158 		 * PAGE_MAPPING_MOVABLE.
159 		 */
160 		if (unlikely(__PageMovable(page))) {
161 			VM_BUG_ON_PAGE(!PageIsolated(page), page);
162 			lock_page(page);
163 			if (PageMovable(page))
164 				putback_movable_page(page);
165 			else
166 				ClearPageIsolated(page);
167 			unlock_page(page);
168 			put_page(page);
169 		} else {
170 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
171 					page_is_file_lru(page), -thp_nr_pages(page));
172 			putback_lru_page(page);
173 		}
174 	}
175 }
176 EXPORT_SYMBOL_GPL(putback_movable_pages);
177 
178 /*
179  * Restore a potential migration pte to a working pte entry
180  */
181 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
182 				 unsigned long addr, void *old)
183 {
184 	struct page_vma_mapped_walk pvmw = {
185 		.page = old,
186 		.vma = vma,
187 		.address = addr,
188 		.flags = PVMW_SYNC | PVMW_MIGRATION,
189 	};
190 	struct page *new;
191 	pte_t pte;
192 	swp_entry_t entry;
193 
194 	VM_BUG_ON_PAGE(PageTail(page), page);
195 	while (page_vma_mapped_walk(&pvmw)) {
196 		if (PageKsm(page))
197 			new = page;
198 		else
199 			new = page - pvmw.page->index +
200 				linear_page_index(vma, pvmw.address);
201 
202 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
203 		/* PMD-mapped THP migration entry */
204 		if (!pvmw.pte) {
205 			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
206 			remove_migration_pmd(&pvmw, new);
207 			continue;
208 		}
209 #endif
210 
211 		get_page(new);
212 		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
213 		if (pte_swp_soft_dirty(*pvmw.pte))
214 			pte = pte_mksoft_dirty(pte);
215 
216 		/*
217 		 * Recheck VMA as permissions can change since migration started
218 		 */
219 		entry = pte_to_swp_entry(*pvmw.pte);
220 		if (is_writable_migration_entry(entry))
221 			pte = maybe_mkwrite(pte, vma);
222 		else if (pte_swp_uffd_wp(*pvmw.pte))
223 			pte = pte_mkuffd_wp(pte);
224 
225 		if (unlikely(is_device_private_page(new))) {
226 			if (pte_write(pte))
227 				entry = make_writable_device_private_entry(
228 							page_to_pfn(new));
229 			else
230 				entry = make_readable_device_private_entry(
231 							page_to_pfn(new));
232 			pte = swp_entry_to_pte(entry);
233 			if (pte_swp_soft_dirty(*pvmw.pte))
234 				pte = pte_swp_mksoft_dirty(pte);
235 			if (pte_swp_uffd_wp(*pvmw.pte))
236 				pte = pte_swp_mkuffd_wp(pte);
237 		}
238 
239 #ifdef CONFIG_HUGETLB_PAGE
240 		if (PageHuge(new)) {
241 			unsigned int shift = huge_page_shift(hstate_vma(vma));
242 
243 			pte = pte_mkhuge(pte);
244 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
245 			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
246 			if (PageAnon(new))
247 				hugepage_add_anon_rmap(new, vma, pvmw.address);
248 			else
249 				page_dup_rmap(new, true);
250 		} else
251 #endif
252 		{
253 			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
254 
255 			if (PageAnon(new))
256 				page_add_anon_rmap(new, vma, pvmw.address, false);
257 			else
258 				page_add_file_rmap(new, false);
259 		}
260 		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
261 			mlock_vma_page(new);
262 
263 		if (PageTransHuge(page) && PageMlocked(page))
264 			clear_page_mlock(page);
265 
266 		/* No need to invalidate - it was non-present before */
267 		update_mmu_cache(vma, pvmw.address, pvmw.pte);
268 	}
269 
270 	return true;
271 }
272 
273 /*
274  * Get rid of all migration entries and replace them by
275  * references to the indicated page.
276  */
277 void remove_migration_ptes(struct page *old, struct page *new, bool locked)
278 {
279 	struct rmap_walk_control rwc = {
280 		.rmap_one = remove_migration_pte,
281 		.arg = old,
282 	};
283 
284 	trace_android_vh_set_page_migrating(new);
285 
286 	if (locked)
287 		rmap_walk_locked(new, &rwc);
288 	else
289 		rmap_walk(new, &rwc);
290 
291 	trace_android_vh_clear_page_migrating(new);
292 }
293 
294 /*
295  * Something used the pte of a page under migration. We need to
296  * get to the page and wait until migration is finished.
297  * When we return from this function the fault will be retried.
298  */
299 void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
300 				spinlock_t *ptl)
301 {
302 	pte_t pte;
303 	swp_entry_t entry;
304 	struct page *page;
305 
306 	spin_lock(ptl);
307 	pte = *ptep;
308 	if (!is_swap_pte(pte))
309 		goto out;
310 
311 	entry = pte_to_swp_entry(pte);
312 	if (!is_migration_entry(entry))
313 		goto out;
314 
315 	page = pfn_swap_entry_to_page(entry);
316 	page = compound_head(page);
317 
318 	/*
319 	 * Once page cache replacement of page migration started, page_count
320 	 * is zero; but we must not call put_and_wait_on_page_locked() without
321 	 * a ref. Use get_page_unless_zero(), and just fault again if it fails.
322 	 */
323 	if (!get_page_unless_zero(page))
324 		goto out;
325 	pte_unmap_unlock(ptep, ptl);
326 	put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
327 	return;
328 out:
329 	pte_unmap_unlock(ptep, ptl);
330 }
331 
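/*
 * Wrappers that look up the page table entry and its lock for the faulting
 * address (regular or hugetlb mapping) and then sleep in
 * __migration_entry_wait() until the migration entry has been resolved.
 */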
332 void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
333 				unsigned long address)
334 {
335 	spinlock_t *ptl = pte_lockptr(mm, pmd);
336 	pte_t *ptep = pte_offset_map(pmd, address);
337 	__migration_entry_wait(mm, ptep, ptl);
338 }
339 
340 void migration_entry_wait_huge(struct vm_area_struct *vma,
341 		struct mm_struct *mm, pte_t *pte)
342 {
343 	spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
344 	__migration_entry_wait(mm, pte, ptl);
345 }
346 
347 #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
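/*
 * PMD-level counterpart of migration_entry_wait(): wait for a PMD-mapped
 * THP migration entry to be resolved before the fault is retried.
 */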
348 void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
349 {
350 	spinlock_t *ptl;
351 	struct page *page;
352 
353 	ptl = pmd_lock(mm, pmd);
354 	if (!is_pmd_migration_entry(*pmd))
355 		goto unlock;
356 	page = pfn_swap_entry_to_page(pmd_to_swp_entry(*pmd));
357 	if (!get_page_unless_zero(page))
358 		goto unlock;
359 	spin_unlock(ptl);
360 	put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
361 	return;
362 unlock:
363 	spin_unlock(ptl);
364 }
365 #endif
366 
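/*
 * Compute how many references the migration code expects the page to have:
 * the base reference held for the migration, one extra for ZONE_DEVICE
 * private pages, plus the page cache and buffer-head references when the
 * page belongs to a mapping.
 */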
367 static int expected_page_refs(struct address_space *mapping, struct page *page)
368 {
369 	int expected_count = 1;
370 
371 	/*
372 	 * Device private pages have an extra refcount as they are
373 	 * ZONE_DEVICE pages.
374 	 */
375 	expected_count += is_device_private_page(page);
376 	if (mapping)
377 		expected_count += thp_nr_pages(page) + page_has_private(page);
378 
379 	return expected_count;
380 }
381 
382 /*
383  * Replace the page in the mapping.
384  *
385  * The number of remaining references must be:
386  * 1 for anonymous pages without a mapping
387  * 2 for pages with a mapping
388  * 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
389  */
390 int migrate_page_move_mapping(struct address_space *mapping,
391 		struct page *newpage, struct page *page, int extra_count)
392 {
393 	XA_STATE(xas, &mapping->i_pages, page_index(page));
394 	struct zone *oldzone, *newzone;
395 	int dirty;
396 	int expected_count = expected_page_refs(mapping, page) + extra_count;
397 	int nr = thp_nr_pages(page);
398 
399 	if (!mapping) {
400 		/* Anonymous page without mapping */
401 		if (page_count(page) != expected_count)
402 			return -EAGAIN;
403 
404 		/* No turning back from here */
405 		newpage->index = page->index;
406 		newpage->mapping = page->mapping;
407 		if (PageSwapBacked(page))
408 			__SetPageSwapBacked(newpage);
409 
410 		return MIGRATEPAGE_SUCCESS;
411 	}
412 
413 	oldzone = page_zone(page);
414 	newzone = page_zone(newpage);
415 
416 	xas_lock_irq(&xas);
417 	if (page_count(page) != expected_count || xas_load(&xas) != page) {
418 		xas_unlock_irq(&xas);
419 		return -EAGAIN;
420 	}
421 
422 	if (!page_ref_freeze(page, expected_count)) {
423 		xas_unlock_irq(&xas);
424 		return -EAGAIN;
425 	}
426 
427 	/*
428 	 * Now we know that no one else is looking at the page:
429 	 * no turning back from here.
430 	 */
431 	newpage->index = page->index;
432 	newpage->mapping = page->mapping;
433 	page_ref_add(newpage, nr); /* add cache reference */
434 	if (PageSwapBacked(page)) {
435 		__SetPageSwapBacked(newpage);
436 		if (PageSwapCache(page)) {
437 			SetPageSwapCache(newpage);
438 			set_page_private(newpage, page_private(page));
439 		}
440 	} else {
441 		VM_BUG_ON_PAGE(PageSwapCache(page), page);
442 	}
443 
444 	/* Move dirty while page refs frozen and newpage not yet exposed */
445 	dirty = PageDirty(page);
446 	if (dirty) {
447 		ClearPageDirty(page);
448 		SetPageDirty(newpage);
449 	}
450 
451 	xas_store(&xas, newpage);
452 	if (PageTransHuge(page)) {
453 		int i;
454 
455 		for (i = 1; i < nr; i++) {
456 			xas_next(&xas);
457 			xas_store(&xas, newpage);
458 		}
459 	}
460 
461 	/*
462 	 * Drop cache reference from old page by unfreezing
463 	 * to one less reference.
464 	 * We know this isn't the last reference.
465 	 */
466 	page_ref_unfreeze(page, expected_count - nr);
467 
468 	xas_unlock(&xas);
469 	/* Leave irq disabled to prevent preemption while updating stats */
470 
471 	/*
472 	 * If moved to a different zone then also account
473 	 * the page for that zone. Other VM counters will be
474 	 * taken care of when we establish references to the
475 	 * new page and drop references to the old page.
476 	 *
477 	 * Note that anonymous pages are accounted for
478 	 * via NR_FILE_PAGES and NR_ANON_MAPPED if they
479 	 * are mapped to swap space.
480 	 */
481 	if (newzone != oldzone) {
482 		struct lruvec *old_lruvec, *new_lruvec;
483 		struct mem_cgroup *memcg;
484 
485 		memcg = page_memcg(page);
486 		old_lruvec = mem_cgroup_lruvec(memcg, oldzone->zone_pgdat);
487 		new_lruvec = mem_cgroup_lruvec(memcg, newzone->zone_pgdat);
488 
489 		__mod_lruvec_state(old_lruvec, NR_FILE_PAGES, -nr);
490 		__mod_lruvec_state(new_lruvec, NR_FILE_PAGES, nr);
491 		if (PageSwapBacked(page) && !PageSwapCache(page)) {
492 			__mod_lruvec_state(old_lruvec, NR_SHMEM, -nr);
493 			__mod_lruvec_state(new_lruvec, NR_SHMEM, nr);
494 		}
495 #ifdef CONFIG_SWAP
496 		if (PageSwapCache(page)) {
497 			__mod_lruvec_state(old_lruvec, NR_SWAPCACHE, -nr);
498 			__mod_lruvec_state(new_lruvec, NR_SWAPCACHE, nr);
499 		}
500 #endif
501 		if (dirty && mapping_can_writeback(mapping)) {
502 			__mod_lruvec_state(old_lruvec, NR_FILE_DIRTY, -nr);
503 			__mod_zone_page_state(oldzone, NR_ZONE_WRITE_PENDING, -nr);
504 			__mod_lruvec_state(new_lruvec, NR_FILE_DIRTY, nr);
505 			__mod_zone_page_state(newzone, NR_ZONE_WRITE_PENDING, nr);
506 		}
507 	}
508 	local_irq_enable();
509 
510 	return MIGRATEPAGE_SUCCESS;
511 }
512 EXPORT_SYMBOL(migrate_page_move_mapping);
513 
514 /*
515  * The expected number of remaining references is the same as that
516  * of migrate_page_move_mapping().
517  */
518 int migrate_huge_page_move_mapping(struct address_space *mapping,
519 				   struct page *newpage, struct page *page)
520 {
521 	XA_STATE(xas, &mapping->i_pages, page_index(page));
522 	int expected_count;
523 
524 	xas_lock_irq(&xas);
525 	expected_count = 2 + page_has_private(page);
526 	if (page_count(page) != expected_count || xas_load(&xas) != page) {
527 		xas_unlock_irq(&xas);
528 		return -EAGAIN;
529 	}
530 
531 	if (!page_ref_freeze(page, expected_count)) {
532 		xas_unlock_irq(&xas);
533 		return -EAGAIN;
534 	}
535 
536 	newpage->index = page->index;
537 	newpage->mapping = page->mapping;
538 
539 	get_page(newpage);
540 
541 	xas_store(&xas, newpage);
542 
543 	page_ref_unfreeze(page, expected_count - 1);
544 
545 	xas_unlock_irq(&xas);
546 
547 	return MIGRATEPAGE_SUCCESS;
548 }
549 
550 /*
551  * Copy the page to its new location
552  */
553 void migrate_page_states(struct page *newpage, struct page *page)
554 {
555 	int cpupid;
556 
557 	if (PageError(page))
558 		SetPageError(newpage);
559 	if (PageReferenced(page))
560 		SetPageReferenced(newpage);
561 	if (PageUptodate(page))
562 		SetPageUptodate(newpage);
563 	if (TestClearPageActive(page)) {
564 		VM_BUG_ON_PAGE(PageUnevictable(page), page);
565 		SetPageActive(newpage);
566 	} else if (TestClearPageUnevictable(page))
567 		SetPageUnevictable(newpage);
568 	if (PageWorkingset(page))
569 		SetPageWorkingset(newpage);
570 	if (PageChecked(page))
571 		SetPageChecked(newpage);
572 	if (PageMappedToDisk(page))
573 		SetPageMappedToDisk(newpage);
574 	trace_android_vh_look_around_migrate_page(page, newpage);
575 
576 	/* Move dirty on pages not done by migrate_page_move_mapping() */
577 	if (PageDirty(page))
578 		SetPageDirty(newpage);
579 
580 	if (page_is_young(page))
581 		set_page_young(newpage);
582 	if (page_is_idle(page))
583 		set_page_idle(newpage);
584 
585 	/*
586 	 * Copy NUMA information to the new page, to prevent over-eager
587 	 * future migrations of this same page.
588 	 */
589 	cpupid = page_cpupid_xchg_last(page, -1);
590 	page_cpupid_xchg_last(newpage, cpupid);
591 
592 	ksm_migrate_page(newpage, page);
593 	/*
594 	 * Please do not reorder this without considering how mm/ksm.c's
595 	 * get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
596 	 */
597 	if (PageSwapCache(page))
598 		ClearPageSwapCache(page);
599 	ClearPagePrivate(page);
600 
601 	/* page->private contains hugetlb specific flags */
602 	if (!PageHuge(page))
603 		set_page_private(page, 0);
604 
605 	/*
606 	 * If any waiters have accumulated on the new page then
607 	 * wake them up.
608 	 */
609 	if (PageWriteback(newpage))
610 		end_page_writeback(newpage);
611 
612 	/*
613 	 * PG_readahead shares the same bit with PG_reclaim.  The above
614 	 * end_page_writeback() may clear PG_readahead mistakenly, so set the
615 	 * bit after that.
616 	 */
617 	if (PageReadahead(page))
618 		SetPageReadahead(newpage);
619 
620 	copy_page_owner(page, newpage);
621 
622 	if (!PageHuge(page))
623 		mem_cgroup_migrate(page, newpage);
624 }
625 EXPORT_SYMBOL(migrate_page_states);
626 
627 void migrate_page_copy(struct page *newpage, struct page *page)
628 {
629 	if (PageHuge(page) || PageTransHuge(page))
630 		copy_huge_page(newpage, page);
631 	else
632 		copy_highpage(newpage, page);
633 
634 	migrate_page_states(newpage, page);
635 }
636 EXPORT_SYMBOL(migrate_page_copy);
637 
638 /************************************************************
639  *                    Migration functions
640  ***********************************************************/
641 
642 /*
643  * Common logic to directly migrate a single LRU page suitable for
644  * pages that do not use PagePrivate/PagePrivate2.
645  *
646  * Pages are locked upon entry and exit.
647  */
648 int migrate_page(struct address_space *mapping,
649 		struct page *newpage, struct page *page,
650 		enum migrate_mode mode)
651 {
652 	int rc;
653 
654 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
655 
656 	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
657 
658 	if (rc != MIGRATEPAGE_SUCCESS)
659 		return rc;
660 
661 	if (mode != MIGRATE_SYNC_NO_COPY)
662 		migrate_page_copy(newpage, page);
663 	else
664 		migrate_page_states(newpage, page);
665 	return MIGRATEPAGE_SUCCESS;
666 }
667 EXPORT_SYMBOL(migrate_page);
668 
669 #ifdef CONFIG_BLOCK
670 /* Returns true if all buffers are successfully locked */
671 static bool buffer_migrate_lock_buffers(struct buffer_head *head,
672 							enum migrate_mode mode)
673 {
674 	struct buffer_head *bh = head;
675 
676 	/* Simple case, sync compaction */
677 	if (mode != MIGRATE_ASYNC) {
678 		do {
679 			lock_buffer(bh);
680 			bh = bh->b_this_page;
681 
682 		} while (bh != head);
683 
684 		return true;
685 	}
686 
687 	/* async case, we cannot block on lock_buffer so use trylock_buffer */
688 	do {
689 		if (!trylock_buffer(bh)) {
690 			/*
691 			 * We failed to lock the buffer and cannot stall in
692 			 * async migration. Release the taken locks
693 			 */
694 			struct buffer_head *failed_bh = bh;
695 			bh = head;
696 			while (bh != failed_bh) {
697 				unlock_buffer(bh);
698 				bh = bh->b_this_page;
699 			}
700 			return false;
701 		}
702 
703 		bh = bh->b_this_page;
704 	} while (bh != head);
705 	return true;
706 }
707 
708 static int __buffer_migrate_page(struct address_space *mapping,
709 		struct page *newpage, struct page *page, enum migrate_mode mode,
710 		bool check_refs)
711 {
712 	struct buffer_head *bh, *head;
713 	int rc;
714 	int expected_count;
715 
716 	if (!page_has_buffers(page))
717 		return migrate_page(mapping, newpage, page, mode);
718 
719 	/* Check whether page does not have extra refs before we do more work */
720 	expected_count = expected_page_refs(mapping, page);
721 	if (page_count(page) != expected_count)
722 		return -EAGAIN;
723 
724 	head = page_buffers(page);
725 	if (!buffer_migrate_lock_buffers(head, mode))
726 		return -EAGAIN;
727 
728 	if (check_refs) {
729 		bool busy;
730 		bool invalidated = false;
731 
732 recheck_buffers:
733 		busy = false;
734 		spin_lock(&mapping->private_lock);
735 		bh = head;
736 		do {
737 			if (atomic_read(&bh->b_count)) {
738 				busy = true;
739 				break;
740 			}
741 			bh = bh->b_this_page;
742 		} while (bh != head);
743 		if (busy) {
744 			if (invalidated) {
745 				rc = -EAGAIN;
746 				goto unlock_buffers;
747 			}
748 			spin_unlock(&mapping->private_lock);
749 			invalidate_bh_lrus();
750 			invalidated = true;
751 			goto recheck_buffers;
752 		}
753 	}
754 
755 	rc = migrate_page_move_mapping(mapping, newpage, page, 0);
756 	if (rc != MIGRATEPAGE_SUCCESS)
757 		goto unlock_buffers;
758 
759 	attach_page_private(newpage, detach_page_private(page));
760 
761 	bh = head;
762 	do {
763 		set_bh_page(bh, newpage, bh_offset(bh));
764 		bh = bh->b_this_page;
765 
766 	} while (bh != head);
767 
768 	if (mode != MIGRATE_SYNC_NO_COPY)
769 		migrate_page_copy(newpage, page);
770 	else
771 		migrate_page_states(newpage, page);
772 
773 	rc = MIGRATEPAGE_SUCCESS;
774 unlock_buffers:
775 	if (check_refs)
776 		spin_unlock(&mapping->private_lock);
777 	bh = head;
778 	do {
779 		unlock_buffer(bh);
780 		bh = bh->b_this_page;
781 
782 	} while (bh != head);
783 
784 	return rc;
785 }
786 
787 /*
788  * Migration function for pages with buffers. This function can only be used
789  * if the underlying filesystem guarantees that no other references to "page"
790  * exist. For example attached buffer heads are accessed only under page lock.
791  */
792 int buffer_migrate_page(struct address_space *mapping,
793 		struct page *newpage, struct page *page, enum migrate_mode mode)
794 {
795 	return __buffer_migrate_page(mapping, newpage, page, mode, false);
796 }
797 EXPORT_SYMBOL(buffer_migrate_page);
798 
799 /*
800  * Same as above except that this variant is more careful and checks that there
801  * are also no buffer head references. This function is the right one for
802  * mappings where buffer heads are directly looked up and referenced (such as
803  * block device mappings).
804  */
805 int buffer_migrate_page_norefs(struct address_space *mapping,
806 		struct page *newpage, struct page *page, enum migrate_mode mode)
807 {
808 	return __buffer_migrate_page(mapping, newpage, page, mode, true);
809 }
810 #endif
811 
812 /*
813  * Writeback a page to clean the dirty state
814  */
815 static int writeout(struct address_space *mapping, struct page *page)
816 {
817 	struct writeback_control wbc = {
818 		.sync_mode = WB_SYNC_NONE,
819 		.nr_to_write = 1,
820 		.range_start = 0,
821 		.range_end = LLONG_MAX,
822 		.for_reclaim = 1
823 	};
824 	int rc;
825 
826 	if (!mapping->a_ops->writepage)
827 		/* No write method for the address space */
828 		return -EINVAL;
829 
830 	if (!clear_page_dirty_for_io(page))
831 		/* Someone else already triggered a write */
832 		return -EAGAIN;
833 
834 	/*
835 	 * A dirty page may imply that the underlying filesystem has
836 	 * the page on some queue. So the page must be clean for
837 	 * migration. Writeout may mean we lose the lock and the
838 	 * page state is no longer what we checked for earlier.
839 	 * At this point we know that the migration attempt cannot
840 	 * be successful.
841 	 */
842 	remove_migration_ptes(page, page, false);
843 
844 	rc = mapping->a_ops->writepage(page, &wbc);
845 
846 	if (rc != AOP_WRITEPAGE_ACTIVATE)
847 		/* unlocked. Relock */
848 		lock_page(page);
849 
850 	return (rc < 0) ? -EIO : -EAGAIN;
851 }
852 
853 /*
854  * Default handling if a filesystem does not provide a migration function.
855  */
856 static int fallback_migrate_page(struct address_space *mapping,
857 	struct page *newpage, struct page *page, enum migrate_mode mode)
858 {
859 	if (PageDirty(page)) {
860 		/* Only writeback pages in full synchronous migration */
861 		switch (mode) {
862 		case MIGRATE_SYNC:
863 		case MIGRATE_SYNC_NO_COPY:
864 			break;
865 		default:
866 			return -EBUSY;
867 		}
868 		return writeout(mapping, page);
869 	}
870 
871 	/*
872 	 * Buffers may be managed in a filesystem specific way.
873 	 * We must have no buffers or drop them.
874 	 */
875 	if (page_has_private(page) &&
876 	    !try_to_release_page(page, GFP_KERNEL))
877 		return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
878 
879 	return migrate_page(mapping, newpage, page, mode);
880 }
881 
882 /*
883  * Move a page to a newly allocated page
884  * The page is locked and all ptes have been successfully removed.
885  *
886  * The new page will have replaced the old page if this function
887  * is successful.
888  *
889  * Return value:
890  *   < 0 - error code
891  *  MIGRATEPAGE_SUCCESS - success
892  */
893 static int move_to_new_page(struct page *newpage, struct page *page,
894 				enum migrate_mode mode)
895 {
896 	struct address_space *mapping;
897 	int rc = -EAGAIN;
898 	bool is_lru = !__PageMovable(page);
899 
900 	VM_BUG_ON_PAGE(!PageLocked(page), page);
901 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
902 
903 	mapping = page_mapping(page);
904 
905 	if (likely(is_lru)) {
906 		if (!mapping)
907 			rc = migrate_page(mapping, newpage, page, mode);
908 		else if (mapping->a_ops->migratepage)
909 			/*
910 			 * Most pages have a mapping and most filesystems
911 			 * provide a migratepage callback. Anonymous pages
912 			 * are part of swap space which also has its own
913 			 * migratepage callback. This is the most common path
914 			 * for page migration.
915 			 */
916 			rc = mapping->a_ops->migratepage(mapping, newpage,
917 							page, mode);
918 		else
919 			rc = fallback_migrate_page(mapping, newpage,
920 							page, mode);
921 	} else {
922 		/*
923 		 * In case of non-lru page, it could be released after
924 		 * isolation step. In that case, we shouldn't try migration.
925 		 */
926 		VM_BUG_ON_PAGE(!PageIsolated(page), page);
927 		if (!PageMovable(page)) {
928 			rc = MIGRATEPAGE_SUCCESS;
929 			ClearPageIsolated(page);
930 			goto out;
931 		}
932 
933 		rc = mapping->a_ops->migratepage(mapping, newpage,
934 						page, mode);
935 		WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
936 			!PageIsolated(page));
937 	}
938 
939 	/*
940 	 * When successful, old pagecache page->mapping must be cleared before
941 	 * page is freed; but stats require that PageAnon be left as PageAnon.
942 	 */
943 	if (rc == MIGRATEPAGE_SUCCESS) {
944 		if (__PageMovable(page)) {
945 			VM_BUG_ON_PAGE(!PageIsolated(page), page);
946 
947 			/*
948 			 * We clear PG_movable under page_lock so any compactor
949 			 * cannot try to migrate this page.
950 			 */
951 			ClearPageIsolated(page);
952 		}
953 
954 		/*
955 		 * Anonymous and movable page->mapping will be cleared by
956 		 * free_pages_prepare, so don't reset it here; leaving it set
957 		 * keeps checks such as PageAnon working.
958 		 */
959 		if (!PageMappingFlags(page))
960 			page->mapping = NULL;
961 
962 		if (likely(!is_zone_device_page(newpage))) {
963 			int i, nr = compound_nr(newpage);
964 
965 			for (i = 0; i < nr; i++)
966 				flush_dcache_page(newpage + i);
967 		}
968 	}
969 out:
970 	return rc;
971 }
972 
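/*
 * Core of single-page migration: lock both pages, replace the mappings of
 * the old page with migration entries via try_to_migrate(), move the data
 * and page state to newpage with move_to_new_page(), then restore the ptes
 * to point at whichever page ended up holding the contents.
 */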
973 static int __unmap_and_move(struct page *page, struct page *newpage,
974 				int force, enum migrate_mode mode)
975 {
976 	int rc = -EAGAIN;
977 	bool page_was_mapped = false;
978 	struct anon_vma *anon_vma = NULL;
979 	bool is_lru = !__PageMovable(page);
980 
981 	if (!trylock_page(page)) {
982 		if (!force || mode == MIGRATE_ASYNC)
983 			goto out;
984 
985 		/*
986 		 * It's not safe for direct compaction to call lock_page.
987 		 * For example, during page readahead pages are added locked
988 		 * to the LRU. Later, when the IO completes the pages are
989 		 * marked uptodate and unlocked. However, the queueing
990 		 * could be merging multiple pages for one bio (e.g.
991 		 * mpage_readahead). If an allocation happens for the
992 		 * second or third page, the process can end up locking
993 		 * the same page twice and deadlocking. Rather than
994 		 * trying to be clever about what pages can be locked,
995 		 * avoid the use of lock_page for direct compaction
996 		 * altogether.
997 		 */
998 		if (current->flags & PF_MEMALLOC)
999 			goto out;
1000 
1001 		lock_page(page);
1002 	}
1003 
1004 	if (PageWriteback(page)) {
1005 		/*
1006 		 * Only in the case of a full synchronous migration is it
1007 		 * necessary to wait for PageWriteback. In the async case,
1008 		 * the retry loop is too short and in the sync-light case,
1009 		 * the overhead of stalling is too much
1010 		 */
1011 		switch (mode) {
1012 		case MIGRATE_SYNC:
1013 		case MIGRATE_SYNC_NO_COPY:
1014 			break;
1015 		default:
1016 			rc = -EBUSY;
1017 			goto out_unlock;
1018 		}
1019 		if (!force)
1020 			goto out_unlock;
1021 		wait_on_page_writeback(page);
1022 	}
1023 
1024 	/*
1025 	 * By try_to_migrate(), page->mapcount goes down to 0 here. In this case,
1026 	 * we cannot notice that anon_vma is freed while we migrate a page.
1027 	 * This get_anon_vma() delays freeing the anon_vma pointer until the end
1028 	 * of migration. File cache pages are no problem because of page_lock():
1029 	 * file caches may use write_page() or lock_page() during migration, so
1030 	 * only anon pages need this care here.
1031 	 *
1032 	 * Only page_get_anon_vma() understands the subtleties of
1033 	 * getting a hold on an anon_vma from outside one of its mms.
1034 	 * But if we cannot get anon_vma, then we won't need it anyway,
1035 	 * because that implies that the anon page is no longer mapped
1036 	 * (and cannot be remapped so long as we hold the page lock).
1037 	 */
1038 	if (PageAnon(page) && !PageKsm(page))
1039 		anon_vma = page_get_anon_vma(page);
1040 
1041 	/*
1042 	 * Block others from accessing the new page when we get around to
1043 	 * establishing additional references. We are usually the only one
1044 	 * holding a reference to newpage at this point. We used to have a BUG
1045 	 * here if trylock_page(newpage) fails, but would like to allow for
1046 	 * cases where there might be a race with the previous use of newpage.
1047 	 * This is much like races on refcount of oldpage: just don't BUG().
1048 	 */
1049 	if (unlikely(!trylock_page(newpage)))
1050 		goto out_unlock;
1051 
1052 	if (unlikely(!is_lru)) {
1053 		rc = move_to_new_page(newpage, page, mode);
1054 		goto out_unlock_both;
1055 	}
1056 
1057 	/*
1058 	 * Corner case handling:
1059 	 * 1. When a new swap-cache page is read into, it is added to the LRU
1060 	 * and treated as swapcache but it has no rmap yet.
1061 	 * Calling try_to_unmap() against a page->mapping==NULL page will
1062 	 * trigger a BUG.  So handle it here.
1063 	 * 2. An orphaned page (see truncate_cleanup_page) might have
1064 	 * fs-private metadata. The page can be picked up due to memory
1065 	 * offlining.  Everywhere else except page reclaim, the page is
1066 	 * invisible to the vm, so the page can not be migrated.  So try to
1067 	 * free the metadata, so the page can be freed.
1068 	 */
1069 	if (!page->mapping) {
1070 		VM_BUG_ON_PAGE(PageAnon(page), page);
1071 		if (page_has_private(page)) {
1072 			try_to_free_buffers(page);
1073 			goto out_unlock_both;
1074 		}
1075 	} else if (page_mapped(page)) {
1076 		/* Establish migration ptes */
1077 		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
1078 				page);
1079 		try_to_migrate(page, 0);
1080 		page_was_mapped = true;
1081 	}
1082 
1083 	if (!page_mapped(page))
1084 		rc = move_to_new_page(newpage, page, mode);
1085 
1086 	if (page_was_mapped)
1087 		remove_migration_ptes(page,
1088 			rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
1089 
1090 out_unlock_both:
1091 	unlock_page(newpage);
1092 out_unlock:
1093 	/* Drop an anon_vma reference if we took one */
1094 	if (anon_vma)
1095 		put_anon_vma(anon_vma);
1096 	unlock_page(page);
1097 out:
1098 	/*
1099 	 * If migration is successful, drop our refcount on the newpage;
1100 	 * this will not free the page because the new page owner took
1101 	 * its own reference. If it is an LRU page, also add it to the LRU
1102 	 * list here. Use the old state of the isolated source page to
1103 	 * determine if we migrated a LRU page. newpage was already unlocked
1104 	 * and possibly modified by its owner - don't rely on the page
1105 	 * state.
1106 	 */
1107 	if (rc == MIGRATEPAGE_SUCCESS) {
1108 		if (unlikely(!is_lru))
1109 			put_page(newpage);
1110 		else
1111 			putback_lru_page(newpage);
1112 	}
1113 
1114 	return rc;
1115 }
1116 
1117 
1118 /*
1119  * node_demotion[] example:
1120  *
1121  * Consider a system with two sockets.  Each socket has
1122  * three classes of memory attached: fast, medium and slow.
1123  * Each memory class is placed in its own NUMA node.  The
1124  * CPUs are placed in the node with the "fast" memory.  The
1125  * 6 NUMA nodes (0-5) might be split among the sockets like
1126  * this:
1127  *
1128  *	Socket A: 0, 1, 2
1129  *	Socket B: 3, 4, 5
1130  *
1131  * When Node 0 fills up, its memory should be migrated to
1132  * Node 1.  When Node 1 fills up, it should be migrated to
1133  * Node 2.  The migration path start on the nodes with the
1134  * processors (since allocations default to this node) and
1135  * fast memory, progress through medium and end with the
1136  * slow memory:
1137  *
1138  *	0 -> 1 -> 2 -> stop
1139  *	3 -> 4 -> 5 -> stop
1140  *
1141  * This is represented in the node_demotion[] like this:
1142  *
1143  *	{  1, // Node 0 migrates to 1
1144  *	   2, // Node 1 migrates to 2
1145  *	  -1, // Node 2 does not migrate
1146  *	   4, // Node 3 migrates to 4
1147  *	   5, // Node 4 migrates to 5
1148  *	  -1} // Node 5 does not migrate
1149  */
1150 
1151 /*
1152  * Writes to this array occur without locking.  Cycles are
1153  * not allowed: Node X demotes to Y which demotes to X...
1154  *
1155  * If multiple reads are performed, a single rcu_read_lock()
1156  * must be held over all reads to ensure that no cycles are
1157  * observed.
1158  */
1159 static int node_demotion[MAX_NUMNODES] __read_mostly =
1160 	{[0 ...  MAX_NUMNODES - 1] = NUMA_NO_NODE};
1161 
1162 /**
1163  * next_demotion_node() - Get the next node in the demotion path
1164  * @node: The starting node to lookup the next node
1165  *
1166  * Return: node id for next memory node in the demotion path hierarchy
1167  * from @node; NUMA_NO_NODE if @node is terminal.  This does not keep
1168  * @node online or guarantee that it *continues* to be the next demotion
1169  * target.
1170  */
1171 int next_demotion_node(int node)
1172 {
1173 	int target;
1174 
1175 	/*
1176 	 * node_demotion[] is updated without excluding this
1177 	 * function from running.  RCU doesn't provide any
1178 	 * compiler barriers, so the READ_ONCE() is required
1179 	 * to avoid compiler reordering or read merging.
1180 	 *
1181 	 * Make sure to use RCU over entire code blocks if
1182 	 * node_demotion[] reads need to be consistent.
1183 	 */
1184 	rcu_read_lock();
1185 	target = READ_ONCE(node_demotion[node]);
1186 	rcu_read_unlock();
1187 
1188 	return target;
1189 }
1190 
1191 /*
1192  * Obtain the lock on page, remove all ptes and migrate the page
1193  * to the newly allocated page in newpage.
1194  */
1195 static int unmap_and_move(new_page_t get_new_page,
1196 				   free_page_t put_new_page,
1197 				   unsigned long private, struct page *page,
1198 				   int force, enum migrate_mode mode,
1199 				   enum migrate_reason reason,
1200 				   struct list_head *ret)
1201 {
1202 	int rc = MIGRATEPAGE_SUCCESS;
1203 	struct page *newpage = NULL;
1204 
1205 	if (!thp_migration_supported() && PageTransHuge(page))
1206 		return -ENOSYS;
1207 
1208 	if (page_count(page) == 1) {
1209 		/* page was freed from under us. So we are done. */
1210 		ClearPageActive(page);
1211 		ClearPageUnevictable(page);
1212 		if (unlikely(__PageMovable(page))) {
1213 			lock_page(page);
1214 			if (!PageMovable(page))
1215 				ClearPageIsolated(page);
1216 			unlock_page(page);
1217 		}
1218 		goto out;
1219 	}
1220 
1221 	newpage = get_new_page(page, private);
1222 	if (!newpage)
1223 		return -ENOMEM;
1224 
1225 	rc = __unmap_and_move(page, newpage, force, mode);
1226 	if (rc == MIGRATEPAGE_SUCCESS)
1227 		set_page_owner_migrate_reason(newpage, reason);
1228 
1229 out:
1230 	if (rc != -EAGAIN) {
1231 		/*
1232 		 * A page that has been migrated has all references
1233 		 * removed and will be freed. A page that has not been
1234 		 * migrated will have kept its references and be restored.
1235 		 */
1236 		list_del(&page->lru);
1237 	}
1238 
1239 	/*
1240 	 * If migration is successful, release the reference grabbed during
1241 	 * isolation. Otherwise, restore the page to the right list unless
1242 	 * we want to retry.
1243 	 */
1244 	if (rc == MIGRATEPAGE_SUCCESS) {
1245 		/*
1246 		 * Compaction can migrate also non-LRU pages which are
1247 		 * not accounted to NR_ISOLATED_*. They can be recognized
1248 		 * as __PageMovable
1249 		 */
1250 		if (likely(!__PageMovable(page)))
1251 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
1252 					page_is_file_lru(page), -thp_nr_pages(page));
1253 
1254 		if (reason != MR_MEMORY_FAILURE)
1255 			/*
1256 			 * We release the page in page_handle_poison.
1257 			 */
1258 			put_page(page);
1259 	} else {
1260 		if (rc != -EAGAIN)
1261 			list_add_tail(&page->lru, ret);
1262 
1263 		if (put_new_page)
1264 			put_new_page(newpage, private);
1265 		else
1266 			put_page(newpage);
1267 	}
1268 
1269 	return rc;
1270 }
1271 
1272 /*
1273  * Counterpart of unmap_and_move_page() for hugepage migration.
1274  *
1275  * This function doesn't wait for the completion of hugepage I/O
1276  * because there is no race between I/O and migration for hugepage.
1277  * Note that currently hugepage I/O occurs only in direct I/O
1278  * where no lock is held and PG_writeback is irrelevant,
1279  * and writeback status of all subpages are counted in the reference
1280  * count of the head page (i.e. if all subpages of a 2MB hugepage are
1281  * under direct I/O, the reference of the head page is 512 and a bit more.)
1282  * This means that when we try to migrate a hugepage whose subpages are
1283  * doing direct I/O, some references remain after try_to_unmap() and
1284  * hugepage migration fails without data corruption.
1285  *
1286  * There is also no race when direct I/O is issued on the page under migration,
1287  * because then pte is replaced with migration swap entry and direct I/O code
1288  * will wait in the page fault for migration to complete.
1289  */
1290 static int unmap_and_move_huge_page(new_page_t get_new_page,
1291 				free_page_t put_new_page, unsigned long private,
1292 				struct page *hpage, int force,
1293 				enum migrate_mode mode, int reason,
1294 				struct list_head *ret)
1295 {
1296 	int rc = -EAGAIN;
1297 	int page_was_mapped = 0;
1298 	struct page *new_hpage;
1299 	struct anon_vma *anon_vma = NULL;
1300 	struct address_space *mapping = NULL;
1301 
1302 	/*
1303 	 * Migratability of hugepages depends on architectures and their size.
1304 	 * This check is necessary because some callers of hugepage migration
1305 	 * like soft offline and memory hotremove don't walk through page
1306 	 * tables or check whether the hugepage is pmd-based or not before
1307 	 * kicking migration.
1308 	 */
1309 	if (!hugepage_migration_supported(page_hstate(hpage))) {
1310 		list_move_tail(&hpage->lru, ret);
1311 		return -ENOSYS;
1312 	}
1313 
1314 	if (page_count(hpage) == 1) {
1315 		/* page was freed from under us. So we are done. */
1316 		putback_active_hugepage(hpage);
1317 		return MIGRATEPAGE_SUCCESS;
1318 	}
1319 
1320 	new_hpage = get_new_page(hpage, private);
1321 	if (!new_hpage)
1322 		return -ENOMEM;
1323 
1324 	if (!trylock_page(hpage)) {
1325 		if (!force)
1326 			goto out;
1327 		switch (mode) {
1328 		case MIGRATE_SYNC:
1329 		case MIGRATE_SYNC_NO_COPY:
1330 			break;
1331 		default:
1332 			goto out;
1333 		}
1334 		lock_page(hpage);
1335 	}
1336 
1337 	/*
1338 	 * Check for pages which are in the process of being freed.  Without
1339 	 * page_mapping() set, hugetlbfs specific move page routine will not
1340 	 * be called and we could leak usage counts for subpools.
1341 	 */
1342 	if (hugetlb_page_subpool(hpage) && !page_mapping(hpage)) {
1343 		rc = -EBUSY;
1344 		goto out_unlock;
1345 	}
1346 
1347 	if (PageAnon(hpage))
1348 		anon_vma = page_get_anon_vma(hpage);
1349 
1350 	if (unlikely(!trylock_page(new_hpage)))
1351 		goto put_anon;
1352 
1353 	if (page_mapped(hpage)) {
1354 		bool mapping_locked = false;
1355 		enum ttu_flags ttu = 0;
1356 
1357 		if (!PageAnon(hpage)) {
1358 			/*
1359 			 * In shared mappings, try_to_unmap could potentially
1360 			 * call huge_pmd_unshare.  Because of this, take
1361 			 * semaphore in write mode here and set TTU_RMAP_LOCKED
1362 			 * to let lower levels know we have taken the lock.
1363 			 */
1364 			mapping = hugetlb_page_mapping_lock_write(hpage);
1365 			if (unlikely(!mapping))
1366 				goto unlock_put_anon;
1367 
1368 			mapping_locked = true;
1369 			ttu |= TTU_RMAP_LOCKED;
1370 		}
1371 
1372 		try_to_migrate(hpage, ttu);
1373 		page_was_mapped = 1;
1374 
1375 		if (mapping_locked)
1376 			i_mmap_unlock_write(mapping);
1377 	}
1378 
1379 	if (!page_mapped(hpage))
1380 		rc = move_to_new_page(new_hpage, hpage, mode);
1381 
1382 	if (page_was_mapped)
1383 		remove_migration_ptes(hpage,
1384 			rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
1385 
1386 unlock_put_anon:
1387 	unlock_page(new_hpage);
1388 
1389 put_anon:
1390 	if (anon_vma)
1391 		put_anon_vma(anon_vma);
1392 
1393 	if (rc == MIGRATEPAGE_SUCCESS) {
1394 		move_hugetlb_state(hpage, new_hpage, reason);
1395 		put_new_page = NULL;
1396 	}
1397 
1398 out_unlock:
1399 	unlock_page(hpage);
1400 out:
1401 	if (rc == MIGRATEPAGE_SUCCESS)
1402 		putback_active_hugepage(hpage);
1403 	else if (rc != -EAGAIN)
1404 		list_move_tail(&hpage->lru, ret);
1405 
1406 	/*
1407 	 * If migration was not successful and there's a freeing callback, use
1408 	 * it.  Otherwise, put_page() will drop the reference grabbed during
1409 	 * isolation.
1410 	 */
1411 	if (put_new_page)
1412 		put_new_page(new_hpage, private);
1413 	else
1414 		putback_active_hugepage(new_hpage);
1415 
1416 	return rc;
1417 }
1418 
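/*
 * Split a THP that could not be migrated as a whole and add its tail pages
 * to the migration list so they can be retried as base pages.
 * Returns 0 on success.
 */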
1419 static inline int try_split_thp(struct page *page, struct page **page2,
1420 				struct list_head *from)
1421 {
1422 	int rc = 0;
1423 
1424 	lock_page(page);
1425 	rc = split_huge_page_to_list(page, from);
1426 	unlock_page(page);
1427 	if (!rc)
1428 		list_safe_reset_next(page, *page2, lru);
1429 
1430 	return rc;
1431 }
1432 
1433 /*
1434  * migrate_pages - migrate the pages specified in a list, to the free pages
1435  *		   supplied as the target for the page migration
1436  *
1437  * @from:		The list of pages to be migrated.
1438  * @get_new_page:	The function used to allocate free pages to be used
1439  *			as the target of the page migration.
1440  * @put_new_page:	The function used to free target pages if migration
1441  *			fails, or NULL if no special handling is necessary.
1442  * @private:		Private data to be passed on to get_new_page()
1443  * @mode:		The migration mode that specifies the constraints for
1444  *			page migration, if any.
1445  * @reason:		The reason for page migration.
1446  * @ret_succeeded:	Set to the number of pages migrated successfully if
1447  *			the caller passes a non-NULL pointer.
1448  *
1449  * The function returns after 10 attempts, or earlier if no pages are movable
1450  * any more because the list has become empty or contains no retryable pages.
1451  * It is the caller's responsibility to call putback_movable_pages() to return pages
1452  * to the LRU or free list only if ret != 0.
1453  *
1454  * Returns the number of pages that were not migrated, or an error code.
1455  */
1456 int migrate_pages(struct list_head *from, new_page_t get_new_page,
1457 		free_page_t put_new_page, unsigned long private,
1458 		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
1459 {
1460 	int retry = 1;
1461 	int thp_retry = 1;
1462 	int nr_failed = 0;
1463 	int nr_succeeded = 0;
1464 	int nr_thp_succeeded = 0;
1465 	int nr_thp_failed = 0;
1466 	int nr_thp_split = 0;
1467 	int pass = 0;
1468 	bool is_thp = false;
1469 	struct page *page;
1470 	struct page *page2;
1471 	int swapwrite = current->flags & PF_SWAPWRITE;
1472 	int rc, nr_subpages;
1473 	LIST_HEAD(ret_pages);
1474 	bool nosplit = (reason == MR_NUMA_MISPLACED);
1475 
1476 	trace_mm_migrate_pages_start(mode, reason);
1477 
1478 	if (!swapwrite)
1479 		current->flags |= PF_SWAPWRITE;
1480 
1481 	for (pass = 0; pass < 10 && (retry || thp_retry); pass++) {
1482 		retry = 0;
1483 		thp_retry = 0;
1484 
1485 		list_for_each_entry_safe(page, page2, from, lru) {
1486 retry:
1487 			/*
1488 			 * THP statistics is based on the source huge page.
1489 			 * Capture required information that might get lost
1490 			 * during migration.
1491 			 */
1492 			is_thp = PageTransHuge(page) && !PageHuge(page);
1493 			nr_subpages = thp_nr_pages(page);
1494 			cond_resched();
1495 
1496 			if (PageHuge(page))
1497 				rc = unmap_and_move_huge_page(get_new_page,
1498 						put_new_page, private, page,
1499 						pass > 2, mode, reason,
1500 						&ret_pages);
1501 			else
1502 				rc = unmap_and_move(get_new_page, put_new_page,
1503 						private, page, pass > 2, mode,
1504 						reason, &ret_pages);
1505 			/*
1506 			 * The rules are:
1507 			 *	Success: non hugetlb page will be freed, hugetlb
1508 			 *		 page will be put back
1509 			 *	-EAGAIN: stay on the from list
1510 			 *	-ENOMEM: stay on the from list
1511 			 *	Other errno: put on ret_pages list then splice to
1512 			 *		     from list
1513 			 */
1514 			switch(rc) {
1515 			/*
1516 			 * THP migration might be unsupported or the
1517 			 * allocation could've failed so we should
1518 			 * retry on the same page with the THP split
1519 			 * to base pages.
1520 			 *
1521 			 * Head page is retried immediately and tail
1522 			 * pages are added to the tail of the list so
1523 			 * we encounter them after the rest of the list
1524 			 * is processed.
1525 			 */
1526 			case -ENOSYS:
1527 				/* THP migration is unsupported */
1528 				if (is_thp) {
1529 					if (!try_split_thp(page, &page2, from)) {
1530 						nr_thp_split++;
1531 						goto retry;
1532 					}
1533 
1534 					nr_thp_failed++;
1535 					nr_failed += nr_subpages;
1536 					break;
1537 				}
1538 
1539 				/* Hugetlb migration is unsupported */
1540 				nr_failed++;
1541 				break;
1542 			case -ENOMEM:
1543 				/*
1544 				 * When memory is low, don't bother to try to migrate
1545 				 * other pages, just exit.
1546 				 * THP NUMA faulting doesn't split THP to retry.
1547 				 */
1548 				if (is_thp && !nosplit) {
1549 					if (!try_split_thp(page, &page2, from)) {
1550 						nr_thp_split++;
1551 						goto retry;
1552 					}
1553 
1554 					nr_thp_failed++;
1555 					nr_failed += nr_subpages;
1556 					goto out;
1557 				}
1558 				nr_failed++;
1559 				goto out;
1560 			case -EAGAIN:
1561 				if (is_thp) {
1562 					thp_retry++;
1563 					break;
1564 				}
1565 				retry++;
1566 				break;
1567 			case MIGRATEPAGE_SUCCESS:
1568 				if (is_thp) {
1569 					nr_thp_succeeded++;
1570 					nr_succeeded += nr_subpages;
1571 					break;
1572 				}
1573 				nr_succeeded++;
1574 				break;
1575 			default:
1576 				/*
1577 				 * Permanent failure (-EBUSY, etc.):
1578 				 * unlike -EAGAIN case, the failed page is
1579 				 * removed from migration page list and not
1580 				 * retried in the next outer loop.
1581 				 */
1582 				if (is_thp) {
1583 					nr_thp_failed++;
1584 					nr_failed += nr_subpages;
1585 					break;
1586 				}
1587 				nr_failed++;
1588 				break;
1589 			}
1590 		}
1591 	}
1592 	nr_failed += retry + thp_retry;
1593 	nr_thp_failed += thp_retry;
1594 	rc = nr_failed;
1595 out:
1596 	/*
1597 	 * Put the pages that failed permanently back on the migration list;
1598 	 * they will be moved to the right list by the caller.
1599 	 */
1600 	list_splice(&ret_pages, from);
1601 
1602 	count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
1603 	count_vm_events(PGMIGRATE_FAIL, nr_failed);
1604 	count_vm_events(THP_MIGRATION_SUCCESS, nr_thp_succeeded);
1605 	count_vm_events(THP_MIGRATION_FAIL, nr_thp_failed);
1606 	count_vm_events(THP_MIGRATION_SPLIT, nr_thp_split);
1607 	trace_mm_migrate_pages(nr_succeeded, nr_failed, nr_thp_succeeded,
1608 			       nr_thp_failed, nr_thp_split, mode, reason);
1609 
1610 	if (!swapwrite)
1611 		current->flags &= ~PF_SWAPWRITE;
1612 
1613 	if (ret_succeeded)
1614 		*ret_succeeded = nr_succeeded;
1615 
1616 	return rc;
1617 }
1618 EXPORT_SYMBOL_GPL(migrate_pages);
1619 
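/*
 * Allocation callback for migrate_pages(): allocate a destination page that
 * matches the source page (hugetlb, THP or base page) on the node selected
 * by the migration_target_control passed via @private.
 */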
1620 struct page *alloc_migration_target(struct page *page, unsigned long private)
1621 {
1622 	struct migration_target_control *mtc;
1623 	gfp_t gfp_mask;
1624 	unsigned int order = 0;
1625 	struct page *new_page = NULL;
1626 	int nid;
1627 	int zidx;
1628 
1629 	mtc = (struct migration_target_control *)private;
1630 	gfp_mask = mtc->gfp_mask;
1631 	nid = mtc->nid;
1632 	if (nid == NUMA_NO_NODE)
1633 		nid = page_to_nid(page);
1634 
1635 	if (PageHuge(page)) {
1636 		struct hstate *h = page_hstate(compound_head(page));
1637 
1638 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
1639 		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
1640 	}
1641 
1642 	if (PageTransHuge(page)) {
1643 		/*
1644 		 * clear __GFP_RECLAIM to make the migration callback
1645 		 * consistent with regular THP allocations.
1646 		 */
1647 		gfp_mask &= ~__GFP_RECLAIM;
1648 		gfp_mask |= GFP_TRANSHUGE;
1649 		order = HPAGE_PMD_ORDER;
1650 	}
1651 	zidx = zone_idx(page_zone(page));
1652 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
1653 		gfp_mask |= __GFP_HIGHMEM;
1654 
1655 	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
1656 
1657 	if (new_page && PageTransHuge(new_page))
1658 		prep_transhuge_page(new_page);
1659 
1660 	return new_page;
1661 }
1662 
1663 #ifdef CONFIG_NUMA
1664 
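/* Copy @value into @nr consecutive entries of the user status array, starting at @start. */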
1665 static int store_status(int __user *status, int start, int value, int nr)
1666 {
1667 	while (nr-- > 0) {
1668 		if (put_user(value, status + start))
1669 			return -EFAULT;
1670 		start++;
1671 	}
1672 
1673 	return 0;
1674 }
1675 
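/*
 * Migrate the already-isolated pages on @pagelist to @node; if any fail,
 * the remaining pages are put back on the lists they came from.
 */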
1676 static int do_move_pages_to_node(struct mm_struct *mm,
1677 		struct list_head *pagelist, int node)
1678 {
1679 	int err;
1680 	struct migration_target_control mtc = {
1681 		.nid = node,
1682 		.gfp_mask = GFP_HIGHUSER_MOVABLE | __GFP_THISNODE,
1683 	};
1684 
1685 	err = migrate_pages(pagelist, alloc_migration_target, NULL,
1686 		(unsigned long)&mtc, MIGRATE_SYNC, MR_SYSCALL, NULL);
1687 	if (err)
1688 		putback_movable_pages(pagelist);
1689 	return err;
1690 }
1691 
1692 /*
1693  * Resolves the given address to a struct page, isolates it from the LRU and
1694  * puts it to the given pagelist.
1695  * Returns:
1696  *     errno - if the page cannot be found/isolated
1697  *     0 - when it doesn't have to be migrated because it is already on the
1698  *         target node
1699  *     1 - when it has been queued
1700  */
1701 static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
1702 		int node, struct list_head *pagelist, bool migrate_all)
1703 {
1704 	struct vm_area_struct *vma;
1705 	struct page *page;
1706 	unsigned int follflags;
1707 	int err;
1708 
1709 	mmap_read_lock(mm);
1710 	err = -EFAULT;
1711 	vma = find_vma(mm, addr);
1712 	if (!vma || addr < vma->vm_start || !vma_migratable(vma))
1713 		goto out;
1714 
1715 	/* FOLL_DUMP to ignore special (like zero) pages */
1716 	follflags = FOLL_GET | FOLL_DUMP;
1717 	page = follow_page(vma, addr, follflags);
1718 
1719 	err = PTR_ERR(page);
1720 	if (IS_ERR(page))
1721 		goto out;
1722 
1723 	err = -ENOENT;
1724 	if (!page)
1725 		goto out;
1726 
1727 	err = 0;
1728 	if (page_to_nid(page) == node)
1729 		goto out_putpage;
1730 
1731 	err = -EACCES;
1732 	if (page_mapcount(page) > 1 && !migrate_all)
1733 		goto out_putpage;
1734 
1735 	if (PageHuge(page)) {
1736 		if (PageHead(page)) {
1737 			err = isolate_hugetlb(page, pagelist);
1738 			if (!err)
1739 				err = 1;
1740 		}
1741 	} else {
1742 		struct page *head;
1743 
1744 		head = compound_head(page);
1745 		err = isolate_lru_page(head);
1746 		if (err)
1747 			goto out_putpage;
1748 
1749 		err = 1;
1750 		list_add_tail(&head->lru, pagelist);
1751 		mod_node_page_state(page_pgdat(head),
1752 			NR_ISOLATED_ANON + page_is_file_lru(head),
1753 			thp_nr_pages(head));
1754 	}
1755 out_putpage:
1756 	/*
1757 	 * Either remove the duplicate refcount from
1758 	 * isolate_lru_page() or drop the page ref if it was
1759 	 * not isolated.
1760 	 */
1761 	put_page(page);
1762 out:
1763 	mmap_read_unlock(mm);
1764 	return err;
1765 }
1766 
1767 static int move_pages_and_store_status(struct mm_struct *mm, int node,
1768 		struct list_head *pagelist, int __user *status,
1769 		int start, int i, unsigned long nr_pages)
1770 {
1771 	int err;
1772 
1773 	if (list_empty(pagelist))
1774 		return 0;
1775 
1776 	err = do_move_pages_to_node(mm, pagelist, node);
1777 	if (err) {
1778 		/*
1779 		 * A positive err means the number of pages that
1780 		 * failed to migrate.  Since we are going to
1781 		 * abort and return the number of non-migrated
1782 		 * pages, we need to include the rest of the
1783 		 * nr_pages that have not been attempted as
1784 		 * well.
1785 		 */
1786 		if (err > 0)
1787 			err += nr_pages - i - 1;
1788 		return err;
1789 	}
1790 	return store_status(status, start, node, i - start);
1791 }
1792 
1793 /*
1794  * Migrate an array of page addresses onto an array of nodes and fill
1795  * the corresponding array of status values.
1796  */
1797 static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
1798 			 unsigned long nr_pages,
1799 			 const void __user * __user *pages,
1800 			 const int __user *nodes,
1801 			 int __user *status, int flags)
1802 {
1803 	compat_uptr_t __user *compat_pages = (void __user *)pages;
1804 	int current_node = NUMA_NO_NODE;
1805 	LIST_HEAD(pagelist);
1806 	int start, i;
1807 	int err = 0, err1;
1808 
1809 	lru_cache_disable();
1810 
1811 	for (i = start = 0; i < nr_pages; i++) {
1812 		const void __user *p;
1813 		unsigned long addr;
1814 		int node;
1815 
1816 		err = -EFAULT;
1817 		if (in_compat_syscall()) {
1818 			compat_uptr_t cp;
1819 
1820 			if (get_user(cp, compat_pages + i))
1821 				goto out_flush;
1822 
1823 			p = compat_ptr(cp);
1824 		} else {
1825 			if (get_user(p, pages + i))
1826 				goto out_flush;
1827 		}
1828 		if (get_user(node, nodes + i))
1829 			goto out_flush;
1830 		addr = (unsigned long)untagged_addr(p);
1831 
1832 		err = -ENODEV;
1833 		if (node < 0 || node >= MAX_NUMNODES)
1834 			goto out_flush;
1835 		if (!node_state(node, N_MEMORY))
1836 			goto out_flush;
1837 
1838 		err = -EACCES;
1839 		if (!node_isset(node, task_nodes))
1840 			goto out_flush;
1841 
1842 		if (current_node == NUMA_NO_NODE) {
1843 			current_node = node;
1844 			start = i;
1845 		} else if (node != current_node) {
1846 			err = move_pages_and_store_status(mm, current_node,
1847 					&pagelist, status, start, i, nr_pages);
1848 			if (err)
1849 				goto out;
1850 			start = i;
1851 			current_node = node;
1852 		}
1853 
1854 		/*
1855 		 * Errors in the page lookup or isolation are not fatal and we simply
1856 		 * report them via status
1857 		 */
1858 		err = add_page_for_migration(mm, addr, current_node,
1859 				&pagelist, flags & MPOL_MF_MOVE_ALL);
1860 
1861 		if (err > 0) {
1862 			/* The page is successfully queued for migration */
1863 			continue;
1864 		}
1865 
1866 		/*
1867 		 * If the page is already on the target node (!err), store the
1868 		 * node, otherwise, store the err.
1869 		 */
1870 		err = store_status(status, i, err ? : current_node, 1);
1871 		if (err)
1872 			goto out_flush;
1873 
1874 		err = move_pages_and_store_status(mm, current_node, &pagelist,
1875 				status, start, i, nr_pages);
1876 		if (err)
1877 			goto out;
1878 		current_node = NUMA_NO_NODE;
1879 	}
1880 out_flush:
1881 	/* Make sure we do not overwrite the existing error */
1882 	err1 = move_pages_and_store_status(mm, current_node, &pagelist,
1883 				status, start, i, nr_pages);
1884 	if (err >= 0)
1885 		err = err1;
1886 out:
1887 	lru_cache_enable();
1888 	return err;
1889 }
1890 
1891 /*
1892  * Determine the nodes of an array of pages and store it in an array of status.
1893  */
1894 static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
1895 				const void __user **pages, int *status)
1896 {
1897 	unsigned long i;
1898 
1899 	mmap_read_lock(mm);
1900 
1901 	for (i = 0; i < nr_pages; i++) {
1902 		unsigned long addr = (unsigned long)(*pages);
1903 		struct vm_area_struct *vma;
1904 		struct page *page;
1905 		int err = -EFAULT;
1906 
1907 		vma = vma_lookup(mm, addr);
1908 		if (!vma)
1909 			goto set_status;
1910 
1911 		/* FOLL_DUMP to ignore special (like zero) pages */
1912 		page = follow_page(vma, addr, FOLL_DUMP);
1913 
1914 		err = PTR_ERR(page);
1915 		if (IS_ERR(page))
1916 			goto set_status;
1917 
1918 		err = page ? page_to_nid(page) : -ENOENT;
1919 set_status:
1920 		*status = err;
1921 
1922 		pages++;
1923 		status++;
1924 	}
1925 
1926 	mmap_read_unlock(mm);
1927 }
1928 
1929 static int get_compat_pages_array(const void __user *chunk_pages[],
1930 				  const void __user * __user *pages,
1931 				  unsigned long chunk_nr)
1932 {
1933 	compat_uptr_t __user *pages32 = (compat_uptr_t __user *)pages;
1934 	compat_uptr_t p;
1935 	int i;
1936 
1937 	for (i = 0; i < chunk_nr; i++) {
1938 		if (get_user(p, pages32 + i))
1939 			return -EFAULT;
1940 		chunk_pages[i] = compat_ptr(p);
1941 	}
1942 
1943 	return 0;
1944 }
1945 
1946 /*
1947  * Determine the nodes of a user array of pages and store them in
1948  * a user array of status values.
1949  */
1950 static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
1951 			 const void __user * __user *pages,
1952 			 int __user *status)
1953 {
1954 #define DO_PAGES_STAT_CHUNK_NR 16
1955 	const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
1956 	int chunk_status[DO_PAGES_STAT_CHUNK_NR];
1957 
1958 	while (nr_pages) {
1959 		unsigned long chunk_nr;
1960 
1961 		chunk_nr = nr_pages;
1962 		if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
1963 			chunk_nr = DO_PAGES_STAT_CHUNK_NR;
1964 
1965 		if (in_compat_syscall()) {
1966 			if (get_compat_pages_array(chunk_pages, pages,
1967 						   chunk_nr))
1968 				break;
1969 		} else {
1970 			if (copy_from_user(chunk_pages, pages,
1971 				      chunk_nr * sizeof(*chunk_pages)))
1972 				break;
1973 		}
1974 
1975 		do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
1976 
1977 		if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
1978 			break;
1979 
1980 		pages += chunk_nr;
1981 		status += chunk_nr;
1982 		nr_pages -= chunk_nr;
1983 	}
1984 	return nr_pages ? -EFAULT : 0;
1985 }
1986 
1987 static struct mm_struct *find_mm_struct(pid_t pid, nodemask_t *mem_nodes)
1988 {
1989 	struct task_struct *task;
1990 	struct mm_struct *mm;
1991 
1992 	/*
1993 	 * There is no need to check if the current process has the right to
1994 	 * modify the specified process when they are the same.
1995 	 */
1996 	if (!pid) {
1997 		mmget(current->mm);
1998 		*mem_nodes = cpuset_mems_allowed(current);
1999 		return current->mm;
2000 	}
2001 
2002 	/* Find the mm_struct */
2003 	rcu_read_lock();
2004 	task = find_task_by_vpid(pid);
2005 	if (!task) {
2006 		rcu_read_unlock();
2007 		return ERR_PTR(-ESRCH);
2008 	}
2009 	get_task_struct(task);
2010 
2011 	/*
2012 	 * Check if this process has the right to modify the specified
2013 	 * process. Use the regular "ptrace_may_access()" checks.
2014 	 */
2015 	if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
2016 		rcu_read_unlock();
2017 		mm = ERR_PTR(-EPERM);
2018 		goto out;
2019 	}
2020 	rcu_read_unlock();
2021 
2022 	mm = ERR_PTR(security_task_movememory(task));
2023 	if (IS_ERR(mm))
2024 		goto out;
2025 	*mem_nodes = cpuset_mems_allowed(task);
2026 	mm = get_task_mm(task);
2027 out:
2028 	put_task_struct(task);
2029 	if (!mm)
2030 		mm = ERR_PTR(-EINVAL);
2031 	return mm;
2032 }
2033 
2034 /*
2035  * Move a list of pages in the address space of the currently executing
2036  * process.
2037  */
2038 static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
2039 			     const void __user * __user *pages,
2040 			     const int __user *nodes,
2041 			     int __user *status, int flags)
2042 {
2043 	struct mm_struct *mm;
2044 	int err;
2045 	nodemask_t task_nodes;
2046 
2047 	/* Check flags */
2048 	if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
2049 		return -EINVAL;
2050 
2051 	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
2052 		return -EPERM;
2053 
2054 	mm = find_mm_struct(pid, &task_nodes);
2055 	if (IS_ERR(mm))
2056 		return PTR_ERR(mm);
2057 
2058 	if (nodes)
2059 		err = do_pages_move(mm, task_nodes, nr_pages, pages,
2060 				    nodes, status, flags);
2061 	else
2062 		err = do_pages_stat(mm, nr_pages, pages, status);
2063 
2064 	mmput(mm);
2065 	return err;
2066 }
2067 
2068 SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
2069 		const void __user * __user *, pages,
2070 		const int __user *, nodes,
2071 		int __user *, status, int, flags)
2072 {
2073 	return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
2074 }
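
/*
 * Editorial example (not part of this file): a minimal userspace sketch of
 * how the move_pages(2) system call defined above is typically driven via
 * the libnuma wrapper declared in <numaif.h>.  Passing a nodes array goes
 * through do_pages_move(); passing nodes == NULL goes through
 * do_pages_stat() and only reports which node each page currently sits on.
 * Error handling is minimal; build with -lnuma.
 */
#if 0	/* illustration only, userspace code */
#include <numaif.h>		/* move_pages(), MPOL_MF_MOVE */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	char *buf = aligned_alloc(page_size, page_size);
	void *pages[1] = { buf };
	int nodes[1] = { 0 };		/* request a move to node 0 */
	int status[1] = { -1 };

	buf[0] = 1;			/* fault the page in first */

	/* pid 0 means "the calling process" (see find_mm_struct() above). */
	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0)
		perror("move_pages");
	printf("after move: status[0] = %d\n", status[0]);

	/* nodes == NULL only queries the page's current node. */
	if (move_pages(0, 1, pages, NULL, status, 0) < 0)
		perror("move_pages (stat)");
	printf("page is on node %d\n", status[0]);

	free(buf);
	return 0;
}
#endif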
2075 
2076 #ifdef CONFIG_NUMA_BALANCING
2077 /*
2078  * Returns true if this is a safe migration target node for misplaced NUMA
2079  * pages. Currently it only checks the watermarks, which is a crude check.
2080  */
2081 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
2082 				   unsigned long nr_migrate_pages)
2083 {
2084 	int z;
2085 
2086 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
2087 		struct zone *zone = pgdat->node_zones + z;
2088 
2089 		if (!populated_zone(zone))
2090 			continue;
2091 
2092 		/* Avoid waking kswapd by allocating nr_migrate_pages pages. */
2093 		if (!zone_watermark_ok(zone, 0,
2094 				       high_wmark_pages(zone) +
2095 				       nr_migrate_pages,
2096 				       ZONE_MOVABLE, 0))
2097 			continue;
2098 		return true;
2099 	}
2100 	return false;
2101 }
2102 
2103 static struct page *alloc_misplaced_dst_page(struct page *page,
2104 					   unsigned long data)
2105 {
2106 	int nid = (int) data;
2107 	struct page *newpage;
2108 
2109 	newpage = __alloc_pages_node(nid,
2110 					 (GFP_HIGHUSER_MOVABLE |
2111 					  __GFP_THISNODE | __GFP_NOMEMALLOC |
2112 					  __GFP_NORETRY | __GFP_NOWARN) &
2113 					 ~__GFP_RECLAIM, 0);
2114 
2115 	return newpage;
2116 }
2117 
2118 static struct page *alloc_misplaced_dst_page_thp(struct page *page,
2119 						 unsigned long data)
2120 {
2121 	int nid = (int) data;
2122 	struct page *newpage;
2123 
2124 	newpage = alloc_pages_node(nid, (GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
2125 				   HPAGE_PMD_ORDER);
2126 	if (!newpage)
2127 		goto out;
2128 
2129 	prep_transhuge_page(newpage);
2130 
2131 out:
2132 	return newpage;
2133 }
2134 
2135 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
2136 {
2137 	int page_lru;
2138 	int nr_pages = thp_nr_pages(page);
2139 
2140 	VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
2141 
2142 	/* Do not migrate THP mapped by multiple processes */
2143 	if (PageTransHuge(page) && total_mapcount(page) > 1)
2144 		return 0;
2145 
2146 	/* Avoid migrating to a node that is nearly full */
2147 	if (!migrate_balanced_pgdat(pgdat, nr_pages))
2148 		return 0;
2149 
2150 	if (isolate_lru_page(page))
2151 		return 0;
2152 
2153 	page_lru = page_is_file_lru(page);
2154 	mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
2155 			    nr_pages);
2156 
2157 	/*
2158 	 * Isolating the page has taken another reference, so the
2159 	 * caller's reference can be safely dropped without the page
2160 	 * disappearing underneath us during migration.
2161 	 */
2162 	put_page(page);
2163 	return 1;
2164 }
2165 
2166 /*
2167  * Attempt to migrate a misplaced page to the specified destination
2168  * node. Caller is expected to have an elevated reference count on
2169  * the page that will be dropped by this function before returning.
2170  */
2171 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
2172 			   int node)
2173 {
2174 	pg_data_t *pgdat = NODE_DATA(node);
2175 	int isolated;
2176 	int nr_remaining;
2177 	LIST_HEAD(migratepages);
2178 	new_page_t *new;
2179 	bool compound;
2180 	int nr_pages = thp_nr_pages(page);
2181 
2182 	/*
2183 	 * A PTE-mapped THP or HugeTLB page can't reach here, so the page is
2184 	 * either a base page or a THP.  If it is a THP, it must be the
2185 	 * head page.
2186 	 */
2187 	compound = PageTransHuge(page);
2188 
2189 	if (compound)
2190 		new = alloc_misplaced_dst_page_thp;
2191 	else
2192 		new = alloc_misplaced_dst_page;
2193 
2194 	/*
2195 	 * Don't migrate file pages that are mapped in multiple processes
2196 	 * with execute permissions as they are probably shared libraries.
2197 	 */
2198 	if (page_mapcount(page) != 1 && page_is_file_lru(page) &&
2199 	    (vma->vm_flags & VM_EXEC))
2200 		goto out;
2201 
2202 	/*
2203 	 * Also do not migrate dirty pages as not all filesystems can move
2204 	 * dirty pages in MIGRATE_ASYNC mode, so trying would be a waste of cycles.
2205 	 */
2206 	if (page_is_file_lru(page) && PageDirty(page))
2207 		goto out;
2208 
2209 	isolated = numamigrate_isolate_page(pgdat, page);
2210 	if (!isolated)
2211 		goto out;
2212 
2213 	list_add(&page->lru, &migratepages);
2214 	nr_remaining = migrate_pages(&migratepages, *new, NULL, node,
2215 				     MIGRATE_ASYNC, MR_NUMA_MISPLACED, NULL);
2216 	if (nr_remaining) {
2217 		if (!list_empty(&migratepages)) {
2218 			list_del(&page->lru);
2219 			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
2220 					page_is_file_lru(page), -nr_pages);
2221 			putback_lru_page(page);
2222 		}
2223 		isolated = 0;
2224 	} else
2225 		count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_pages);
2226 	BUG_ON(!list_empty(&migratepages));
2227 	return isolated;
2228 
2229 out:
2230 	put_page(page);
2231 	return 0;
2232 }
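
/*
 * Editorial example (not part of this file): a simplified sketch of how a
 * NUMA hinting fault handler is expected to use migrate_misplaced_page().
 * The caller takes its own reference on the page and this function consumes
 * it whether or not the migration succeeds.  The helper name and surrounding
 * logic are illustrative only; the real caller lives in mm/memory.c.
 */
#if 0	/* illustration only */
static int example_numa_hint_fault(struct vm_area_struct *vma,
				   struct page *page, int target_nid)
{
	int page_nid = page_to_nid(page);

	if (target_nid == NUMA_NO_NODE || target_nid == page_nid)
		return page_nid;

	get_page(page);	/* reference handed to migrate_misplaced_page() */
	if (migrate_misplaced_page(page, vma, target_nid))
		return target_nid;	/* page now lives on target_nid */

	return page_nid;		/* migration not attempted or failed */
}
#endif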
2233 #endif /* CONFIG_NUMA_BALANCING */
2234 #endif /* CONFIG_NUMA */
2235 
2236 #ifdef CONFIG_DEVICE_PRIVATE
2237 static int migrate_vma_collect_skip(unsigned long start,
2238 				    unsigned long end,
2239 				    struct mm_walk *walk)
2240 {
2241 	struct migrate_vma *migrate = walk->private;
2242 	unsigned long addr;
2243 
2244 	for (addr = start; addr < end; addr += PAGE_SIZE) {
2245 		migrate->dst[migrate->npages] = 0;
2246 		migrate->src[migrate->npages++] = 0;
2247 	}
2248 
2249 	return 0;
2250 }
2251 
2252 static int migrate_vma_collect_hole(unsigned long start,
2253 				    unsigned long end,
2254 				    __always_unused int depth,
2255 				    struct mm_walk *walk)
2256 {
2257 	struct migrate_vma *migrate = walk->private;
2258 	unsigned long addr;
2259 
2260 	/* Only allow populating anonymous memory. */
2261 	if (!vma_is_anonymous(walk->vma))
2262 		return migrate_vma_collect_skip(start, end, walk);
2263 
2264 	for (addr = start; addr < end; addr += PAGE_SIZE) {
2265 		migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
2266 		migrate->dst[migrate->npages] = 0;
2267 		migrate->npages++;
2268 		migrate->cpages++;
2269 	}
2270 
2271 	return 0;
2272 }
2273 
2274 static int migrate_vma_collect_pmd(pmd_t *pmdp,
2275 				   unsigned long start,
2276 				   unsigned long end,
2277 				   struct mm_walk *walk)
2278 {
2279 	struct migrate_vma *migrate = walk->private;
2280 	struct vm_area_struct *vma = walk->vma;
2281 	struct mm_struct *mm = vma->vm_mm;
2282 	unsigned long addr = start, unmapped = 0;
2283 	spinlock_t *ptl;
2284 	pte_t *ptep;
2285 
2286 again:
2287 	if (pmd_none(*pmdp))
2288 		return migrate_vma_collect_hole(start, end, -1, walk);
2289 
2290 	if (pmd_trans_huge(*pmdp)) {
2291 		struct page *page;
2292 
2293 		ptl = pmd_lock(mm, pmdp);
2294 		if (unlikely(!pmd_trans_huge(*pmdp))) {
2295 			spin_unlock(ptl);
2296 			goto again;
2297 		}
2298 
2299 		page = pmd_page(*pmdp);
2300 		if (is_huge_zero_page(page)) {
2301 			spin_unlock(ptl);
2302 			split_huge_pmd(vma, pmdp, addr);
2303 			if (pmd_trans_unstable(pmdp))
2304 				return migrate_vma_collect_skip(start, end,
2305 								walk);
2306 		} else {
2307 			int ret;
2308 
2309 			get_page(page);
2310 			spin_unlock(ptl);
2311 			if (unlikely(!trylock_page(page)))
2312 				return migrate_vma_collect_skip(start, end,
2313 								walk);
2314 			ret = split_huge_page(page);
2315 			unlock_page(page);
2316 			put_page(page);
2317 			if (ret)
2318 				return migrate_vma_collect_skip(start, end,
2319 								walk);
2320 			if (pmd_none(*pmdp))
2321 				return migrate_vma_collect_hole(start, end, -1,
2322 								walk);
2323 		}
2324 	}
2325 
2326 	if (unlikely(pmd_bad(*pmdp)))
2327 		return migrate_vma_collect_skip(start, end, walk);
2328 
2329 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2330 	arch_enter_lazy_mmu_mode();
2331 
2332 	for (; addr < end; addr += PAGE_SIZE, ptep++) {
2333 		unsigned long mpfn = 0, pfn;
2334 		struct page *page;
2335 		swp_entry_t entry;
2336 		pte_t pte;
2337 
2338 		pte = *ptep;
2339 
2340 		if (pte_none(pte)) {
2341 			if (vma_is_anonymous(vma)) {
2342 				mpfn = MIGRATE_PFN_MIGRATE;
2343 				migrate->cpages++;
2344 			}
2345 			goto next;
2346 		}
2347 
2348 		if (!pte_present(pte)) {
2349 			/*
2350 			 * Only care about unaddressable device page special
2351 			 * page table entries. Other special swap entries are not
2352 			 * migratable, and we ignore regular swapped pages.
2353 			 */
2354 			entry = pte_to_swp_entry(pte);
2355 			if (!is_device_private_entry(entry))
2356 				goto next;
2357 
2358 			page = pfn_swap_entry_to_page(entry);
2359 			if (!(migrate->flags &
2360 				MIGRATE_VMA_SELECT_DEVICE_PRIVATE) ||
2361 			    page->pgmap->owner != migrate->pgmap_owner)
2362 				goto next;
2363 
2364 			mpfn = migrate_pfn(page_to_pfn(page)) |
2365 					MIGRATE_PFN_MIGRATE;
2366 			if (is_writable_device_private_entry(entry))
2367 				mpfn |= MIGRATE_PFN_WRITE;
2368 		} else {
2369 			if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM))
2370 				goto next;
2371 			pfn = pte_pfn(pte);
2372 			if (is_zero_pfn(pfn)) {
2373 				mpfn = MIGRATE_PFN_MIGRATE;
2374 				migrate->cpages++;
2375 				goto next;
2376 			}
2377 			page = vm_normal_page(migrate->vma, addr, pte);
2378 			mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
2379 			mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
2380 		}
2381 
2382 		/* FIXME support THP */
2383 		if (!page || !page->mapping || PageTransCompound(page)) {
2384 			mpfn = 0;
2385 			goto next;
2386 		}
2387 
2388 		/*
2389 		 * By getting a reference on the page we pin it and that blocks
2390 		 * any kind of migration. A side effect is that it "freezes" the
2391 		 * pte.
2392 		 *
2393 		 * We drop this reference after isolating the page from the lru
2394 		 * for non-device pages (device pages are not on the lru and thus
2395 		 * can't be dropped from it).
2396 		 */
2397 		get_page(page);
2398 		migrate->cpages++;
2399 
2400 		/*
2401 		 * Optimize for the common case where page is only mapped once
2402 		 * in one process. If we can lock the page, then we can safely
2403 		 * set up a special migration page table entry now.
2404 		 */
2405 		if (trylock_page(page)) {
2406 			pte_t swp_pte;
2407 
2408 			mpfn |= MIGRATE_PFN_LOCKED;
2409 			ptep_get_and_clear(mm, addr, ptep);
2410 
2411 			/* Setup special migration page table entry */
2412 			if (mpfn & MIGRATE_PFN_WRITE)
2413 				entry = make_writable_migration_entry(
2414 							page_to_pfn(page));
2415 			else
2416 				entry = make_readable_migration_entry(
2417 							page_to_pfn(page));
2418 			swp_pte = swp_entry_to_pte(entry);
2419 			if (pte_present(pte)) {
2420 				if (pte_soft_dirty(pte))
2421 					swp_pte = pte_swp_mksoft_dirty(swp_pte);
2422 				if (pte_uffd_wp(pte))
2423 					swp_pte = pte_swp_mkuffd_wp(swp_pte);
2424 			} else {
2425 				if (pte_swp_soft_dirty(pte))
2426 					swp_pte = pte_swp_mksoft_dirty(swp_pte);
2427 				if (pte_swp_uffd_wp(pte))
2428 					swp_pte = pte_swp_mkuffd_wp(swp_pte);
2429 			}
2430 			set_pte_at(mm, addr, ptep, swp_pte);
2431 
2432 			/*
2433 			 * This is like regular unmap: we remove the rmap and
2434 			 * drop page refcount. Page won't be freed, as we took
2435 			 * a reference just above.
2436 			 */
2437 			page_remove_rmap(page, false);
2438 			put_page(page);
2439 
2440 			if (pte_present(pte))
2441 				unmapped++;
2442 		}
2443 
2444 next:
2445 		migrate->dst[migrate->npages] = 0;
2446 		migrate->src[migrate->npages++] = mpfn;
2447 	}
2448 
2449 	/* Only flush the TLB if we actually modified any entries */
2450 	if (unmapped)
2451 		flush_tlb_range(walk->vma, start, end);
2452 
2453 	arch_leave_lazy_mmu_mode();
2454 	pte_unmap_unlock(ptep - 1, ptl);
2455 
2456 	return 0;
2457 }
2458 
2459 static const struct mm_walk_ops migrate_vma_walk_ops = {
2460 	.pmd_entry		= migrate_vma_collect_pmd,
2461 	.pte_hole		= migrate_vma_collect_hole,
2462 };
2463 
2464 /*
2465  * migrate_vma_collect() - collect pages over a range of virtual addresses
2466  * @migrate: migrate struct containing all migration information
2467  *
2468  * This will walk the CPU page table. For each virtual address backed by a
2469  * valid page, it updates the src array and takes a reference on the page, in
2470  * order to pin the page until we lock it and unmap it.
2471  */
2472 static void migrate_vma_collect(struct migrate_vma *migrate)
2473 {
2474 	struct mmu_notifier_range range;
2475 
2476 	/*
2477 	 * Note that the pgmap_owner is passed to the mmu notifier callback so
2478 	 * that the registered device driver can skip invalidating device
2479 	 * private page mappings that won't be migrated.
2480 	 */
2481 	mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
2482 		migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end,
2483 		migrate->pgmap_owner);
2484 	mmu_notifier_invalidate_range_start(&range);
2485 
2486 	walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
2487 			&migrate_vma_walk_ops, migrate);
2488 
2489 	mmu_notifier_invalidate_range_end(&range);
2490 	migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
2491 }
2492 
2493 /*
2494  * migrate_vma_check_page() - check if page is pinned or not
2495  * @page: struct page to check
2496  *
2497  * Pinned pages cannot be migrated. This is the same test as in
2498  * migrate_page_move_mapping(), except that here we allow migration of a
2499  * ZONE_DEVICE page.
2500  */
2501 static bool migrate_vma_check_page(struct page *page)
2502 {
2503 	/*
2504 	 * One extra ref because caller holds an extra reference, either from
2505 	 * isolate_lru_page() for a regular page, or migrate_vma_collect() for
2506 	 * a device page.
2507 	 */
2508 	int extra = 1;
2509 
2510 	/*
2511 	 * FIXME support THP (transparent huge page), it is a bit more complex to
2512 	 * check them than regular pages, because they can be mapped with a pmd
2513 	 * or with a pte (split pte mapping).
2514 	 */
2515 	if (PageCompound(page))
2516 		return false;
2517 
2518 	/* Pages from ZONE_DEVICE have one extra reference */
2519 	if (is_zone_device_page(page)) {
2520 		/*
2521 		 * Private pages can never be pinned as they have no valid pte and
2522 		 * GUP will fail for those. Yet if there is a pending migration,
2523 		 * a thread might try to wait on the pte migration entry and
2524 		 * will bump the page reference count. Sadly there is no way to
2525 		 * differentiate a regular pin from a migration wait. Hence, to
2526 		 * avoid two racing threads migrating back to the CPU entering an
2527 		 * infinite loop (one stopping migration because the other is
2528 		 * waiting on the pte migration entry), we always return true here.
2529 		 *
2530 		 * FIXME: the proper solution is to rework migration_entry_wait() so
2531 		 * it does not need to take a reference on the page.
2532 		 */
2533 		return is_device_private_page(page);
2534 	}
2535 
2536 	/* For file-backed pages */
2537 	if (page_mapping(page))
2538 		extra += 1 + page_has_private(page);
2539 
2540 	if ((page_count(page) - extra) > page_mapcount(page))
2541 		return false;
2542 
2543 	return true;
2544 }
2545 
2546 /*
2547  * migrate_vma_prepare() - lock pages and isolate them from the lru
2548  * @migrate: migrate struct containing all migration information
2549  *
2550  * This locks pages that have been collected by migrate_vma_collect(). Once each
2551  * page is locked it is isolated from the lru (for non-device pages). Finally,
2552  * the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
2553  * migrated by concurrent kernel threads.
2554  */
2555 static void migrate_vma_prepare(struct migrate_vma *migrate)
2556 {
2557 	const unsigned long npages = migrate->npages;
2558 	const unsigned long start = migrate->start;
2559 	unsigned long addr, i, restore = 0;
2560 	bool allow_drain = true;
2561 
2562 	lru_add_drain();
2563 
2564 	for (i = 0; (i < npages) && migrate->cpages; i++) {
2565 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2566 		bool remap = true;
2567 
2568 		if (!page)
2569 			continue;
2570 
2571 		if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
2572 			/*
2573 			 * Because we are migrating several pages there can be
2574 			 * a deadlock between two concurrent migrations where
2575 			 * each is waiting on the other's page lock.
2576 			 *
2577 			 * Make migrate_vma() a best-effort thing and back off
2578 			 * for any page we cannot lock right away.
2579 			 */
2580 			if (!trylock_page(page)) {
2581 				migrate->src[i] = 0;
2582 				migrate->cpages--;
2583 				put_page(page);
2584 				continue;
2585 			}
2586 			remap = false;
2587 			migrate->src[i] |= MIGRATE_PFN_LOCKED;
2588 		}
2589 
2590 		/* ZONE_DEVICE pages are not on LRU */
2591 		if (!is_zone_device_page(page)) {
2592 			if (!PageLRU(page) && allow_drain) {
2593 				/* Drain CPU's pagevec */
2594 				lru_add_drain_all();
2595 				allow_drain = false;
2596 			}
2597 
2598 			if (isolate_lru_page(page)) {
2599 				if (remap) {
2600 					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2601 					migrate->cpages--;
2602 					restore++;
2603 				} else {
2604 					migrate->src[i] = 0;
2605 					unlock_page(page);
2606 					migrate->cpages--;
2607 					put_page(page);
2608 				}
2609 				continue;
2610 			}
2611 
2612 			/* Drop the reference we took in collect */
2613 			put_page(page);
2614 		}
2615 
2616 		if (!migrate_vma_check_page(page)) {
2617 			if (remap) {
2618 				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2619 				migrate->cpages--;
2620 				restore++;
2621 
2622 				if (!is_zone_device_page(page)) {
2623 					get_page(page);
2624 					putback_lru_page(page);
2625 				}
2626 			} else {
2627 				migrate->src[i] = 0;
2628 				unlock_page(page);
2629 				migrate->cpages--;
2630 
2631 				if (!is_zone_device_page(page))
2632 					putback_lru_page(page);
2633 				else
2634 					put_page(page);
2635 			}
2636 		}
2637 	}
2638 
2639 	for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
2640 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2641 
2642 		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2643 			continue;
2644 
2645 		remove_migration_pte(page, migrate->vma, addr, page);
2646 
2647 		migrate->src[i] = 0;
2648 		unlock_page(page);
2649 		put_page(page);
2650 		restore--;
2651 	}
2652 }
2653 
2654 /*
2655  * migrate_vma_unmap() - replace page mapping with special migration pte entry
2656  * @migrate: migrate struct containing all migration information
2657  *
2658  * Replace page mapping (CPU page table pte) with a special migration pte entry
2659  * and check again if it has been pinned. Pinned pages are restored because we
2660  * cannot migrate them.
2661  *
2662  * This is the last step before we call the device driver callback to allocate
2663  * destination memory and copy contents of original page over to new page.
2664  */
2665 static void migrate_vma_unmap(struct migrate_vma *migrate)
2666 {
2667 	const unsigned long npages = migrate->npages;
2668 	const unsigned long start = migrate->start;
2669 	unsigned long addr, i, restore = 0;
2670 
2671 	for (i = 0; i < npages; i++) {
2672 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2673 
2674 		if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2675 			continue;
2676 
2677 		if (page_mapped(page)) {
2678 			try_to_migrate(page, 0);
2679 			if (page_mapped(page))
2680 				goto restore;
2681 		}
2682 
2683 		if (migrate_vma_check_page(page))
2684 			continue;
2685 
2686 restore:
2687 		migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2688 		migrate->cpages--;
2689 		restore++;
2690 	}
2691 
2692 	for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
2693 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2694 
2695 		if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
2696 			continue;
2697 
2698 		remove_migration_ptes(page, page, false);
2699 
2700 		migrate->src[i] = 0;
2701 		unlock_page(page);
2702 		restore--;
2703 
2704 		if (is_zone_device_page(page))
2705 			put_page(page);
2706 		else
2707 			putback_lru_page(page);
2708 	}
2709 }
2710 
2711 /**
2712  * migrate_vma_setup() - prepare to migrate a range of memory
2713  * @args: contains the vma, start, and pfns arrays for the migration
2714  *
2715  * Returns: negative errno on failures, 0 when 0 or more pages were migrated
2716  * without an error.
2717  *
2718  * Prepare to migrate a range of memory virtual address range by collecting all
2719  * the pages backing each virtual address in the range, saving them inside the
2720  * src array.  Then lock those pages and unmap them. Once the pages are locked
2721  * and unmapped, check whether each page is pinned or not.  Pages that aren't
2722  * pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
2723  * corresponding src array entry.  Then restores any pages that are pinned, by
2724  * remapping and unlocking those pages.
2725  *
2726  * The caller should then allocate destination memory and copy source memory to
2727  * it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
2728  * flag set).  Once these are allocated and copied, the caller must update each
2729  * corresponding entry in the dst array with the pfn value of the destination
2730  * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
2731  * (destination pages must have their struct pages locked, via lock_page()).
2732  *
2733  * Note that the caller does not have to migrate all the pages that are marked
2734  * with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
2735  * device memory to system memory.  If the caller cannot migrate a device page
2736  * back to system memory, then it must return VM_FAULT_SIGBUS, which has severe
2737  * consequences for the userspace process, so it must be avoided if at all
2738  * possible.
2739  *
2740  * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we
2741  * do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus
2742  * allowing the caller to allocate device memory for those unbacked virtual
2743  * addresses.  For this the caller simply has to allocate device memory and
2744  * properly set the destination entry like for regular migration.  Note that
2745  * this can still fail, and thus inside the device driver you must check if the
2746  * migration was successful for those entries after calling migrate_vma_pages(),
2747  * just like for regular migration.
2748  *
2749  * After that, the callers must call migrate_vma_pages() to go over each entry
2750  * in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
2751  * set. If the corresponding entry in the dst array has MIGRATE_PFN_VALID set,
2752  * then migrate_vma_pages() migrates struct page information from the source
2753  * struct page to the destination struct page.  If it fails to migrate the
2754  * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
2755  * src array.
2756  *
2757  * At this point all successfully migrated pages have an entry in the src
2758  * array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
2759  * array entry with MIGRATE_PFN_VALID flag set.
2760  *
2761  * Once migrate_vma_pages() returns the caller may inspect which pages were
2762  * successfully migrated, and which were not.  Successfully migrated pages will
2763  * have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
2764  *
2765  * It is safe to update device page table after migrate_vma_pages() because
2766  * both destination and source page are still locked, and the mmap_lock is held
2767  * in read mode (hence no one can unmap the range being migrated).
2768  *
2769  * Once the caller is done cleaning up things and updating its page table (if it
2770  * chose to do so, this is not an obligation) it finally calls
2771  * migrate_vma_finalize() to update the CPU page table to point to new pages
2772  * for successfully migrated pages or otherwise restore the CPU page table to
2773  * point to the original source pages.
2774  */
2775 int migrate_vma_setup(struct migrate_vma *args)
2776 {
2777 	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;
2778 
2779 	args->start &= PAGE_MASK;
2780 	args->end &= PAGE_MASK;
2781 	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
2782 	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
2783 		return -EINVAL;
2784 	if (nr_pages <= 0)
2785 		return -EINVAL;
2786 	if (args->start < args->vma->vm_start ||
2787 	    args->start >= args->vma->vm_end)
2788 		return -EINVAL;
2789 	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
2790 		return -EINVAL;
2791 	if (!args->src || !args->dst)
2792 		return -EINVAL;
2793 
2794 	memset(args->src, 0, sizeof(*args->src) * nr_pages);
2795 	args->cpages = 0;
2796 	args->npages = 0;
2797 
2798 	migrate_vma_collect(args);
2799 
2800 	if (args->cpages)
2801 		migrate_vma_prepare(args);
2802 	if (args->cpages)
2803 		migrate_vma_unmap(args);
2804 
2805 	/*
2806 	 * At this point pages are locked and unmapped, and thus they have
2807 	 * stable content and can safely be copied to destination memory that
2808 	 * is allocated by the drivers.
2809 	 */
2810 	return 0;
2811 
2812 }
2813 EXPORT_SYMBOL(migrate_vma_setup);
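
/*
 * Editorial example (not part of this file): a condensed sketch of the
 * driver-side sequence documented above, migrating one small anonymous
 * range from system memory into freshly allocated system pages.  A real
 * device driver would allocate device private pages and copy with its DMA
 * engine instead of alloc_page_vma()/copy_highpage(); the function name
 * example_migrate_range() is hypothetical.  The caller is assumed to hold
 * mmap_read_lock() on vma->vm_mm, as required by migrate_vma_setup().
 */
#if 0	/* illustration only */
static int example_migrate_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long end,
				 void *pgmap_owner)
{
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	struct migrate_vma args = {};
	unsigned long *src, *dst;
	unsigned long i, addr;
	int ret = -ENOMEM;

	src = kcalloc(npages, sizeof(*src), GFP_KERNEL);
	dst = kcalloc(npages, sizeof(*dst), GFP_KERNEL);
	if (!src || !dst)
		goto out_free;

	args.vma = vma;
	args.start = start;
	args.end = end;
	args.src = src;
	args.dst = dst;
	args.pgmap_owner = pgmap_owner;
	args.flags = MIGRATE_VMA_SELECT_SYSTEM;

	/* Collect, lock and unmap the source pages. */
	ret = migrate_vma_setup(&args);
	if (ret)
		goto out_free;

	/* Allocate, lock and fill a destination page for each migrating entry. */
	for (i = 0, addr = start; i < args.npages; i++, addr += PAGE_SIZE) {
		struct page *spage = migrate_pfn_to_page(src[i]);
		struct page *dpage;

		if (!(src[i] & MIGRATE_PFN_MIGRATE))
			continue;

		dpage = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr);
		if (!dpage)
			continue;	/* entry is simply left unmigrated */

		lock_page(dpage);
		if (spage)
			copy_highpage(dpage, spage);
		else
			clear_highpage(dpage);	/* unbacked (hole) entry */
		dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	}

	/* Move struct page metadata, then fix up the CPU page table. */
	migrate_vma_pages(&args);
	migrate_vma_finalize(&args);
	ret = 0;

out_free:
	kfree(dst);
	kfree(src);
	return ret;
}
#endif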
2814 
2815 /*
2816  * This code closely matches the code in:
2817  *   __handle_mm_fault()
2818  *     handle_pte_fault()
2819  *       do_anonymous_page()
2820  * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
2821  * private page.
2822  */
2823 static void migrate_vma_insert_page(struct migrate_vma *migrate,
2824 				    unsigned long addr,
2825 				    struct page *page,
2826 				    unsigned long *src)
2827 {
2828 	struct vm_area_struct *vma = migrate->vma;
2829 	struct mm_struct *mm = vma->vm_mm;
2830 	bool flush = false;
2831 	spinlock_t *ptl;
2832 	pte_t entry;
2833 	pgd_t *pgdp;
2834 	p4d_t *p4dp;
2835 	pud_t *pudp;
2836 	pmd_t *pmdp;
2837 	pte_t *ptep;
2838 
2839 	/* Only allow populating anonymous memory */
2840 	if (!vma_is_anonymous(vma))
2841 		goto abort;
2842 
2843 	pgdp = pgd_offset(mm, addr);
2844 	p4dp = p4d_alloc(mm, pgdp, addr);
2845 	if (!p4dp)
2846 		goto abort;
2847 	pudp = pud_alloc(mm, p4dp, addr);
2848 	if (!pudp)
2849 		goto abort;
2850 	pmdp = pmd_alloc(mm, pudp, addr);
2851 	if (!pmdp)
2852 		goto abort;
2853 
2854 	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
2855 		goto abort;
2856 
2857 	/*
2858 	 * Use pte_alloc() instead of pte_alloc_map().  We can't run
2859 	 * pte_offset_map() on pmds where a huge pmd might be created
2860 	 * from a different thread.
2861 	 *
2862 	 * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when
2863 	 * parallel threads are excluded by other means.
2864 	 *
2865 	 * Here we only have mmap_read_lock(mm).
2866 	 */
2867 	if (pte_alloc(mm, pmdp))
2868 		goto abort;
2869 
2870 	/* See the comment in pte_alloc_one_map() */
2871 	if (unlikely(pmd_trans_unstable(pmdp)))
2872 		goto abort;
2873 
2874 	if (unlikely(anon_vma_prepare(vma)))
2875 		goto abort;
2876 	if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
2877 		goto abort;
2878 
2879 	/*
2880 	 * The memory barrier inside __SetPageUptodate makes sure that
2881 	 * preceding stores to the page contents become visible before
2882 	 * the set_pte_at() write.
2883 	 */
2884 	__SetPageUptodate(page);
2885 
2886 	if (is_zone_device_page(page)) {
2887 		if (is_device_private_page(page)) {
2888 			swp_entry_t swp_entry;
2889 
2890 			if (vma->vm_flags & VM_WRITE)
2891 				swp_entry = make_writable_device_private_entry(
2892 							page_to_pfn(page));
2893 			else
2894 				swp_entry = make_readable_device_private_entry(
2895 							page_to_pfn(page));
2896 			entry = swp_entry_to_pte(swp_entry);
2897 		} else {
2898 			/*
2899 			 * For now we only support migrating to un-addressable
2900 			 * device memory.
2901 			 */
2902 			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
2903 			goto abort;
2904 		}
2905 	} else {
2906 		entry = mk_pte(page, vma->vm_page_prot);
2907 		if (vma->vm_flags & VM_WRITE)
2908 			entry = pte_mkwrite(pte_mkdirty(entry));
2909 	}
2910 
2911 	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
2912 
2913 	if (check_stable_address_space(mm))
2914 		goto unlock_abort;
2915 
2916 	if (pte_present(*ptep)) {
2917 		unsigned long pfn = pte_pfn(*ptep);
2918 
2919 		if (!is_zero_pfn(pfn))
2920 			goto unlock_abort;
2921 		flush = true;
2922 	} else if (!pte_none(*ptep))
2923 		goto unlock_abort;
2924 
2925 	/*
2926 	 * Check for userfaultfd but do not deliver the fault. Instead,
2927 	 * just back off.
2928 	 */
2929 	if (userfaultfd_missing(vma))
2930 		goto unlock_abort;
2931 
2932 	inc_mm_counter(mm, MM_ANONPAGES);
2933 	page_add_new_anon_rmap(page, vma, addr, false);
2934 	if (!is_zone_device_page(page))
2935 		lru_cache_add_inactive_or_unevictable(page, vma);
2936 	get_page(page);
2937 
2938 	if (flush) {
2939 		flush_cache_page(vma, addr, pte_pfn(*ptep));
2940 		ptep_clear_flush_notify(vma, addr, ptep);
2941 		set_pte_at_notify(mm, addr, ptep, entry);
2942 		update_mmu_cache(vma, addr, ptep);
2943 	} else {
2944 		/* No need to invalidate - it was non-present before */
2945 		set_pte_at(mm, addr, ptep, entry);
2946 		update_mmu_cache(vma, addr, ptep);
2947 	}
2948 
2949 	pte_unmap_unlock(ptep, ptl);
2950 	*src = MIGRATE_PFN_MIGRATE;
2951 	return;
2952 
2953 unlock_abort:
2954 	pte_unmap_unlock(ptep, ptl);
2955 abort:
2956 	*src &= ~MIGRATE_PFN_MIGRATE;
2957 }
2958 
2959 /**
2960  * migrate_vma_pages() - migrate meta-data from src page to dst page
2961  * @migrate: migrate struct containing all migration information
2962  *
2963  * This migrates struct page meta-data from source struct page to destination
2964  * struct page. This effectively finishes the migration from source page to the
2965  * destination page.
2966  */
2967 void migrate_vma_pages(struct migrate_vma *migrate)
2968 {
2969 	const unsigned long npages = migrate->npages;
2970 	const unsigned long start = migrate->start;
2971 	struct mmu_notifier_range range;
2972 	unsigned long addr, i;
2973 	bool notified = false;
2974 
2975 	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
2976 		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
2977 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
2978 		struct address_space *mapping;
2979 		int r;
2980 
2981 		if (!newpage) {
2982 			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
2983 			continue;
2984 		}
2985 
2986 		if (!page) {
2987 			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
2988 				continue;
2989 			if (!notified) {
2990 				notified = true;
2991 
2992 				mmu_notifier_range_init_owner(&range,
2993 					MMU_NOTIFY_MIGRATE, 0, migrate->vma,
2994 					migrate->vma->vm_mm, addr, migrate->end,
2995 					migrate->pgmap_owner);
2996 				mmu_notifier_invalidate_range_start(&range);
2997 			}
2998 			migrate_vma_insert_page(migrate, addr, newpage,
2999 						&migrate->src[i]);
3000 			continue;
3001 		}
3002 
3003 		mapping = page_mapping(page);
3004 
3005 		if (is_zone_device_page(newpage)) {
3006 			if (is_device_private_page(newpage)) {
3007 				/*
3008 				 * For now we only support private anonymous memory
3009 				 * when migrating to un-addressable device memory.
3010 				 */
3011 				if (mapping) {
3012 					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
3013 					continue;
3014 				}
3015 			} else {
3016 				/*
3017 				 * Other types of ZONE_DEVICE page are not
3018 				 * supported.
3019 				 */
3020 				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
3021 				continue;
3022 			}
3023 		}
3024 
3025 		r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
3026 		if (r != MIGRATEPAGE_SUCCESS)
3027 			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
3028 	}
3029 
3030 	/*
3031 	 * No need to call the mmu_notifier->invalidate_range() callback again,
3032 	 * as the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
3033 	 * already called it.
3034 	 */
3035 	if (notified)
3036 		mmu_notifier_invalidate_range_only_end(&range);
3037 }
3038 EXPORT_SYMBOL(migrate_vma_pages);
3039 
3040 /**
3041  * migrate_vma_finalize() - restore CPU page table entry
3042  * @migrate: migrate struct containing all migration information
3043  *
3044  * This replaces the special migration pte entry with either a mapping to the
3045  * new page if migration was successful for that page, or to the original page
3046  * otherwise.
3047  *
3048  * This also unlocks the pages and puts them back on the lru, or drops the
3049  * extra refcount for device pages.
3050  */
3051 void migrate_vma_finalize(struct migrate_vma *migrate)
3052 {
3053 	const unsigned long npages = migrate->npages;
3054 	unsigned long i;
3055 
3056 	for (i = 0; i < npages; i++) {
3057 		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
3058 		struct page *page = migrate_pfn_to_page(migrate->src[i]);
3059 
3060 		if (!page) {
3061 			if (newpage) {
3062 				unlock_page(newpage);
3063 				put_page(newpage);
3064 			}
3065 			continue;
3066 		}
3067 
3068 		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
3069 			if (newpage) {
3070 				unlock_page(newpage);
3071 				put_page(newpage);
3072 			}
3073 			newpage = page;
3074 		}
3075 
3076 		remove_migration_ptes(page, newpage, false);
3077 		unlock_page(page);
3078 
3079 		if (is_zone_device_page(page))
3080 			put_page(page);
3081 		else
3082 			putback_lru_page(page);
3083 
3084 		if (newpage != page) {
3085 			unlock_page(newpage);
3086 			if (is_zone_device_page(newpage))
3087 				put_page(newpage);
3088 			else
3089 				putback_lru_page(newpage);
3090 		}
3091 	}
3092 }
3093 EXPORT_SYMBOL(migrate_vma_finalize);
3094 #endif /* CONFIG_DEVICE_PRIVATE */
3095 
3096 #if defined(CONFIG_HOTPLUG_CPU)
3097 /* Disable reclaim-based migration. */
3098 static void __disable_all_migrate_targets(void)
3099 {
3100 	int node;
3101 
3102 	for_each_online_node(node)
3103 		node_demotion[node] = NUMA_NO_NODE;
3104 }
3105 
3106 static void disable_all_migrate_targets(void)
3107 {
3108 	__disable_all_migrate_targets();
3109 
3110 	/*
3111 	 * Ensure that the "disable" is visible across the system.
3112 	 * Readers will see either a combination of before+disable
3113 	 * state or disable+after.  They will never see before and
3114 	 * after state together.
3115 	 *
3116 	 * The before+after state together might have cycles and
3117 	 * could cause readers to do things like loop until this
3118 	 * function finishes.  This ensures they can only see a
3119 	 * single "bad" read and would, for instance, only loop
3120 	 * once.
3121 	 */
3122 	synchronize_rcu();
3123 }
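
/*
 * Editorial example (not part of this file): a sketch of the reader-side
 * discipline that the synchronize_rcu() above pairs with.  Readers of
 * node_demotion[] sample the array under rcu_read_lock() with READ_ONCE(),
 * so a concurrent rewrite of the demotion targets is observed either as the
 * old state or as the disabled/new state, never a mix that could form a
 * cycle.  The helper name is hypothetical; the in-tree reader follows the
 * same pattern.
 */
#if 0	/* illustration only */
static int example_next_demotion_node(int node)
{
	int target;

	rcu_read_lock();
	target = READ_ONCE(node_demotion[node]);
	rcu_read_unlock();

	return target;		/* NUMA_NO_NODE when demotion is disabled */
}
#endif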
3124 
3125 /*
3126  * Find an automatic demotion target for 'node'.
3127  * Failing here is OK.  It might just indicate
3128  * being at the end of a chain.
3129  */
3130 static int establish_migrate_target(int node, nodemask_t *used)
3131 {
3132 	int migration_target;
3133 
3134 	/*
3135 	 * Can not set a migration target on a
3136 	 * node with it already set.
3137 	 *
3138 	 * No need for READ_ONCE() here since this
3139 	 * is in the write path for node_demotion[].
3140 	 * This should be the only thread writing.
3141 	 */
3142 	if (node_demotion[node] != NUMA_NO_NODE)
3143 		return NUMA_NO_NODE;
3144 
3145 	migration_target = find_next_best_node(node, used);
3146 	if (migration_target == NUMA_NO_NODE)
3147 		return NUMA_NO_NODE;
3148 
3149 	node_demotion[node] = migration_target;
3150 
3151 	return migration_target;
3152 }
3153 
3154 /*
3155  * When memory fills up on a node, memory contents can be
3156  * automatically migrated to another node instead of
3157  * discarded at reclaim.
3158  *
3159  * Establish a "migration path" which will start at nodes
3160  * with CPUs and will follow the priorities used to build the
3161  * page allocator zonelists.
3162  *
3163  * The difference here is that cycles must be avoided.  If
3164  * node0 migrates to node1, then neither node1, nor anything
3165  * node1 migrates to can migrate to node0.
3166  *
3167  * This function can run simultaneously with readers of
3168  * node_demotion[].  However, it can not run simultaneously
3169  * with itself.  Exclusion is provided by memory hotplug events
3170  * being single-threaded.
3171  */
3172 static void __set_migration_target_nodes(void)
3173 {
3174 	nodemask_t next_pass	= NODE_MASK_NONE;
3175 	nodemask_t this_pass	= NODE_MASK_NONE;
3176 	nodemask_t used_targets = NODE_MASK_NONE;
3177 	int node;
3178 
3179 	/*
3180 	 * Avoid any oddities like cycles that could occur
3181 	 * from changes in the topology.  This will leave
3182 	 * a momentary gap when migration is disabled.
3183 	 */
3184 	disable_all_migrate_targets();
3185 
3186 	/*
3187 	 * Allocations go close to CPUs, first.  Assume that
3188 	 * the migration path starts at the nodes with CPUs.
3189 	 */
3190 	next_pass = node_states[N_CPU];
3191 again:
3192 	this_pass = next_pass;
3193 	next_pass = NODE_MASK_NONE;
3194 	/*
3195 	 * To avoid cycles in the migration "graph", ensure
3196 	 * that migration sources are not future targets by
3197 	 * setting them in 'used_targets'.  Do this only
3198 	 * once per pass so that multiple source nodes can
3199 	 * share a target node.
3200 	 *
3201 	 * 'used_targets' will become unavailable in future
3202 	 * passes.  This limits some opportunities for
3203 	 * multiple source nodes to share a destination.
3204 	 */
3205 	nodes_or(used_targets, used_targets, this_pass);
3206 	for_each_node_mask(node, this_pass) {
3207 		int target_node = establish_migrate_target(node, &used_targets);
3208 
3209 		if (target_node == NUMA_NO_NODE)
3210 			continue;
3211 
3212 		/*
3213 		 * Visit targets from this pass in the next pass.
3214 		 * Eventually, every node will have been part of
3215 		 * a pass, and will become set in 'used_targets'.
3216 		 */
3217 		node_set(target_node, next_pass);
3218 	}
3219 	/*
3220 	 * 'next_pass' contains nodes which became migration
3221 	 * targets in this pass.  Make additional passes until
3222 	 * no more migration targets are available.
3223 	 */
3224 	if (!nodes_empty(next_pass))
3225 		goto again;
3226 }
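
/*
 * Editorial worked example (hypothetical topology, for illustration only):
 * on a machine with CPU nodes {0, 1} and memory-only nodes {2, 3}, the first
 * pass uses {0, 1} as sources, marks them in 'used_targets', and might pick
 * node_demotion[0] = 2 and node_demotion[1] = 3.  The second pass then uses
 * {2, 3} as sources, but with every node already in 'used_targets' no further
 * target exists, so node_demotion[2] and node_demotion[3] remain NUMA_NO_NODE
 * and the loop terminates without ever forming a cycle.
 */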
3227 
3228 /*
3229  * For callers that do not hold get_online_mems() already.
3230  */
3231 static void set_migration_target_nodes(void)
3232 {
3233 	get_online_mems();
3234 	__set_migration_target_nodes();
3235 	put_online_mems();
3236 }
3237 
3238 /*
3239  * This leaves migrate-on-reclaim transiently disabled between
3240  * the MEM_GOING_OFFLINE and MEM_OFFLINE events.  This runs
3241  * whether reclaim-based migration is enabled or not, which
3242  * ensures that the user can turn reclaim-based migration on or off at
3243  * any time without needing to recalculate migration targets.
3244  *
3245  * These callbacks already hold get_online_mems().  That is why
3246  * __set_migration_target_nodes() can be used as opposed to
3247  * set_migration_target_nodes().
3248  */
3249 static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
3250 						 unsigned long action, void *_arg)
3251 {
3252 	struct memory_notify *arg = _arg;
3253 
3254 	/*
3255 	 * Only update the node migration order when a node is
3256 	 * changing status, like online->offline.  This avoids
3257 	 * the overhead of synchronize_rcu() in most cases.
3258 	 */
3259 	if (arg->status_change_nid < 0)
3260 		return notifier_from_errno(0);
3261 
3262 	switch (action) {
3263 	case MEM_GOING_OFFLINE:
3264 		/*
3265 		 * Make sure there are not transient states where
3266 		 * an offline node is a migration target.  This
3267 		 * will leave migration disabled until the offline
3268 		 * completes and the MEM_OFFLINE case below runs.
3269 		 */
3270 		disable_all_migrate_targets();
3271 		break;
3272 	case MEM_OFFLINE:
3273 	case MEM_ONLINE:
3274 		/*
3275 		 * Recalculate the target nodes once the node
3276 		 * reaches its final state (online or offline).
3277 		 */
3278 		__set_migration_target_nodes();
3279 		break;
3280 	case MEM_CANCEL_OFFLINE:
3281 		/*
3282 		 * MEM_GOING_OFFLINE disabled all the migration
3283 		 * targets.  Reenable them.
3284 		 */
3285 		__set_migration_target_nodes();
3286 		break;
3287 	case MEM_GOING_ONLINE:
3288 	case MEM_CANCEL_ONLINE:
3289 		break;
3290 	}
3291 
3292 	return notifier_from_errno(0);
3293 }
3294 
3295 /*
3296  * React to hotplug events that might affect the migration targets,
3297  * such as events that online or offline NUMA nodes.
3298  *
3299  * The ordering is also currently dependent on which nodes have
3300  * CPUs.  That means we need CPU on/offline notification too.
3301  */
3302 static int migration_online_cpu(unsigned int cpu)
3303 {
3304 	set_migration_target_nodes();
3305 	return 0;
3306 }
3307 
3308 static int migration_offline_cpu(unsigned int cpu)
3309 {
3310 	set_migration_target_nodes();
3311 	return 0;
3312 }
3313 
3314 static int __init migrate_on_reclaim_init(void)
3315 {
3316 	int ret;
3317 
3318 	ret = cpuhp_setup_state_nocalls(CPUHP_MM_DEMOTION_DEAD, "mm/demotion:offline",
3319 					NULL, migration_offline_cpu);
3320 	/*
3321 	 * In the unlikely case that this fails, the automatic
3322 	 * migration targets may become suboptimal for nodes
3323 	 * where N_CPU changes.  With such a small impact in a
3324 	 * rare case, do not bother trying to do anything special.
3325 	 */
3326 	WARN_ON(ret < 0);
3327 	ret = cpuhp_setup_state(CPUHP_AP_MM_DEMOTION_ONLINE, "mm/demotion:online",
3328 				migration_online_cpu, NULL);
3329 	WARN_ON(ret < 0);
3330 
3331 	hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
3332 	return 0;
3333 }
3334 late_initcall(migrate_on_reclaim_init);
3335 #endif /* CONFIG_HOTPLUG_CPU */
3336