Lines matching "page" in mm/migrate.c
7 * Page migration was first developed in the context of the memory hotplug
84 int isolate_movable_page(struct page *page, isolate_mode_t mode) in isolate_movable_page() argument
92 * In case we 'win' a race for a movable page being freed under us and in isolate_movable_page()
95 * release this page, thus avoiding a nasty leakage. in isolate_movable_page()
97 if (unlikely(!get_page_unless_zero(page))) in isolate_movable_page()
101 * Check PageMovable before holding a PG_lock because page's owner in isolate_movable_page()
102 * assumes that nobody touches the PG_lock of a newly allocated page in isolate_movable_page()
103 * so unconditionally grabbing the lock ruins the page owner's side. in isolate_movable_page()
105 if (unlikely(!__PageMovable(page))) in isolate_movable_page()
109 * compaction threads can race against page migration functions in isolate_movable_page()
110 * as well as race against the page being released. in isolate_movable_page()
112 * In order to avoid having an already isolated movable page in isolate_movable_page()
115 * let's be sure we have the page lock in isolate_movable_page()
116 * before proceeding with the movable page isolation steps. in isolate_movable_page()
118 if (unlikely(!trylock_page(page))) in isolate_movable_page()
121 if (!PageMovable(page) || PageIsolated(page)) in isolate_movable_page()
124 mapping = page_mapping(page); in isolate_movable_page()
125 VM_BUG_ON_PAGE(!mapping, page); in isolate_movable_page()
127 if (!mapping->a_ops->isolate_page(page, mode)) in isolate_movable_page()
130 /* Driver shouldn't use PG_isolated bit of page->flags */ in isolate_movable_page()
131 WARN_ON_ONCE(PageIsolated(page)); in isolate_movable_page()
132 __SetPageIsolated(page); in isolate_movable_page()
133 unlock_page(page); in isolate_movable_page()
138 unlock_page(page); in isolate_movable_page()
140 put_page(page); in isolate_movable_page()
145 /* It should be called on a page which is PG_movable */
146 void putback_movable_page(struct page *page) in putback_movable_page() argument
150 VM_BUG_ON_PAGE(!PageLocked(page), page); in putback_movable_page()
151 VM_BUG_ON_PAGE(!PageMovable(page), page); in putback_movable_page()
152 VM_BUG_ON_PAGE(!PageIsolated(page), page); in putback_movable_page()
154 mapping = page_mapping(page); in putback_movable_page()
155 mapping->a_ops->putback_page(page); in putback_movable_page()
156 __ClearPageIsolated(page); in putback_movable_page()
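
isolate_movable_page() and putback_movable_page() above dispatch through mapping->a_ops->isolate_page()/putback_page(), so the other half of this protocol lives in the driver that owns the page. A minimal sketch of that driver side, assuming the non-LRU movable page hooks of this kernel generation; the example_* names are illustrative and the callbacks are stubs that only show the expected contract:

#include <linux/fs.h>
#include <linux/migrate.h>

/* Called from isolate_movable_page() with the page locked: pin or park
 * the object backing @page and return true if it may be migrated. */
static bool example_isolate_page(struct page *page, isolate_mode_t mode)
{
	return true;
}

/* Called from move_to_new_page(): move contents/metadata from @page to
 * @newpage and report success. */
static int example_migratepage(struct address_space *mapping,
			       struct page *newpage, struct page *page,
			       enum migrate_mode mode)
{
	migrate_page_copy(newpage, page);	/* generic helper, see below */
	return MIGRATEPAGE_SUCCESS;
}

/* Called from putback_movable_page() when migration is aborted. */
static void example_putback_page(struct page *page)
{
}

static const struct address_space_operations example_movable_aops = {
	.isolate_page	= example_isolate_page,
	.migratepage	= example_migratepage,
	.putback_page	= example_putback_page,
};

A page enters this path once its owner marks it with __SetPageMovable(page, mapping) while holding the page lock, which is what the __PageMovable()/PageMovable() tests above are checking for.
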
164 * built from lru, balloon, hugetlbfs pages. See isolate_migratepages_range()
169 struct page *page; in putback_movable_pages() local
170 struct page *page2; in putback_movable_pages()
172 list_for_each_entry_safe(page, page2, l, lru) { in putback_movable_pages()
173 if (unlikely(PageHuge(page))) { in putback_movable_pages()
174 putback_active_hugepage(page); in putback_movable_pages()
177 list_del(&page->lru); in putback_movable_pages()
179 * We isolated non-lru movable page so here we can use in putback_movable_pages()
180 * __PageMovable because LRU page's mapping cannot have in putback_movable_pages()
183 if (unlikely(__PageMovable(page))) { in putback_movable_pages()
184 VM_BUG_ON_PAGE(!PageIsolated(page), page); in putback_movable_pages()
185 lock_page(page); in putback_movable_pages()
186 if (PageMovable(page)) in putback_movable_pages()
187 putback_movable_page(page); in putback_movable_pages()
189 __ClearPageIsolated(page); in putback_movable_pages()
190 unlock_page(page); in putback_movable_pages()
191 put_page(page); in putback_movable_pages()
193 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + in putback_movable_pages()
194 page_is_file_cache(page), -hpage_nr_pages(page)); in putback_movable_pages()
195 putback_lru_page(page); in putback_movable_pages()
203 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, in remove_migration_pte() argument
207 .page = old, in remove_migration_pte()
212 struct page *new; in remove_migration_pte()
216 VM_BUG_ON_PAGE(PageTail(page), page); in remove_migration_pte()
218 if (PageKsm(page)) in remove_migration_pte()
219 new = page; in remove_migration_pte()
221 new = page - pvmw.page->index + in remove_migration_pte()
227 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); in remove_migration_pte()
276 if (PageTransHuge(page) && PageMlocked(page)) in remove_migration_pte()
277 clear_page_mlock(page); in remove_migration_pte()
288 * references to the indicated page.
290 void remove_migration_ptes(struct page *old, struct page *new, bool locked) in remove_migration_ptes()
304 * Something used the pte of a page under migration. We need to
305 * get to the page and wait until migration is finished.
313 struct page *page; in __migration_entry_wait() local
324 page = migration_entry_to_page(entry); in __migration_entry_wait()
327 * Once radix-tree replacement of page migration started, page_count in __migration_entry_wait()
329 * against a page without get_page(). in __migration_entry_wait()
330 * So, we use get_page_unless_zero() here. Even if it failed, page fault in __migration_entry_wait()
333 if (!get_page_unless_zero(page)) in __migration_entry_wait()
336 wait_on_page_locked(page); in __migration_entry_wait()
337 put_page(page); in __migration_entry_wait()
362 struct page *page; in pmd_migration_entry_wait() local
367 page = migration_entry_to_page(pmd_to_swp_entry(*pmd)); in pmd_migration_entry_wait()
368 if (!get_page_unless_zero(page)) in pmd_migration_entry_wait()
371 wait_on_page_locked(page); in pmd_migration_entry_wait()
372 put_page(page); in pmd_migration_entry_wait()
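
Both wait helpers above are reached from the fault path: when a fault trips over a migration entry it simply sleeps on the page lock until migration completes. A hedged sketch of that consumer side, mirroring what do_swap_page() does; the example_* helper and its fallback return value are illustrative:

#include <linux/mm.h>
#include <linux/swapops.h>

static vm_fault_t example_wait_for_migration(struct vm_fault *vmf, pte_t pte)
{
	swp_entry_t entry = pte_to_swp_entry(pte);

	if (is_migration_entry(entry)) {
		/* Sleeps in __migration_entry_wait() until the migrating
		 * page is unlocked, then lets the fault be retried. */
		migration_entry_wait(vmf->vma->vm_mm, vmf->pmd, vmf->address);
		return 0;
	}
	return VM_FAULT_SIGBUS;		/* not a migration entry */
}
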
430 * Replace the page in the mapping.
438 struct page *newpage, struct page *page, in migrate_page_move_mapping() argument
451 expected_count += is_device_private_page(page); in migrate_page_move_mapping()
452 expected_count += is_device_public_page(page); in migrate_page_move_mapping()
455 /* Anonymous page without mapping */ in migrate_page_move_mapping()
456 if (page_count(page) != expected_count) in migrate_page_move_mapping()
460 newpage->index = page->index; in migrate_page_move_mapping()
461 newpage->mapping = page->mapping; in migrate_page_move_mapping()
462 if (PageSwapBacked(page)) in migrate_page_move_mapping()
468 oldzone = page_zone(page); in migrate_page_move_mapping()
474 page_index(page)); in migrate_page_move_mapping()
476 expected_count += hpage_nr_pages(page) + page_has_private(page); in migrate_page_move_mapping()
477 if (page_count(page) != expected_count || in migrate_page_move_mapping()
479 &mapping->i_pages.xa_lock) != page) { in migrate_page_move_mapping()
484 if (!page_ref_freeze(page, expected_count)) { in migrate_page_move_mapping()
490 * In the async migration case of moving a page with buffers, lock the in migrate_page_move_mapping()
493 * the mapping back due to an elevated page count, we would have to in migrate_page_move_mapping()
498 page_ref_unfreeze(page, expected_count); in migrate_page_move_mapping()
504 * Now we know that no one else is looking at the page: in migrate_page_move_mapping()
507 newpage->index = page->index; in migrate_page_move_mapping()
508 newpage->mapping = page->mapping; in migrate_page_move_mapping()
509 page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */ in migrate_page_move_mapping()
510 if (PageSwapBacked(page)) { in migrate_page_move_mapping()
512 if (PageSwapCache(page)) { in migrate_page_move_mapping()
514 set_page_private(newpage, page_private(page)); in migrate_page_move_mapping()
517 VM_BUG_ON_PAGE(PageSwapCache(page), page); in migrate_page_move_mapping()
520 /* Move dirty while page refs frozen and newpage not yet exposed */ in migrate_page_move_mapping()
521 dirty = PageDirty(page); in migrate_page_move_mapping()
523 ClearPageDirty(page); in migrate_page_move_mapping()
528 if (PageTransHuge(page)) { in migrate_page_move_mapping()
530 int index = page_index(page); in migrate_page_move_mapping()
541 * Drop cache reference from old page by unfreezing in migrate_page_move_mapping()
545 page_ref_unfreeze(page, expected_count - hpage_nr_pages(page)); in migrate_page_move_mapping()
552 * the page for that zone. Other VM counters will be in migrate_page_move_mapping()
554 * new page and drop references to the old page. in migrate_page_move_mapping()
563 if (PageSwapBacked(page) && !PageSwapCache(page)) { in migrate_page_move_mapping()
585 struct page *newpage, struct page *page) in migrate_huge_page_move_mapping() argument
592 pslot = radix_tree_lookup_slot(&mapping->i_pages, page_index(page)); in migrate_huge_page_move_mapping()
594 expected_count = 2 + page_has_private(page); in migrate_huge_page_move_mapping()
595 if (page_count(page) != expected_count || in migrate_huge_page_move_mapping()
596 radix_tree_deref_slot_protected(pslot, &mapping->i_pages.xa_lock) != page) { in migrate_huge_page_move_mapping()
601 if (!page_ref_freeze(page, expected_count)) { in migrate_huge_page_move_mapping()
606 newpage->index = page->index; in migrate_huge_page_move_mapping()
607 newpage->mapping = page->mapping; in migrate_huge_page_move_mapping()
613 page_ref_unfreeze(page, expected_count - 1); in migrate_huge_page_move_mapping()
621 * Gigantic pages are so large that we do not guarantee that page++ pointer
622 * arithmetic will work across the entire page. We need something more
625 static void __copy_gigantic_page(struct page *dst, struct page *src, in __copy_gigantic_page()
629 struct page *dst_base = dst; in __copy_gigantic_page()
630 struct page *src_base = src; in __copy_gigantic_page()
642 static void copy_huge_page(struct page *dst, struct page *src) in copy_huge_page()
648 /* hugetlbfs page */ in copy_huge_page()
657 /* thp page */ in copy_huge_page()
669 * Copy the page to its new location
671 void migrate_page_states(struct page *newpage, struct page *page) in migrate_page_states() argument
675 if (PageError(page)) in migrate_page_states()
677 if (PageReferenced(page)) in migrate_page_states()
679 if (PageUptodate(page)) in migrate_page_states()
681 if (TestClearPageActive(page)) { in migrate_page_states()
682 VM_BUG_ON_PAGE(PageUnevictable(page), page); in migrate_page_states()
684 } else if (TestClearPageUnevictable(page)) in migrate_page_states()
686 if (PageChecked(page)) in migrate_page_states()
688 if (PageMappedToDisk(page)) in migrate_page_states()
692 if (PageDirty(page)) in migrate_page_states()
695 if (page_is_young(page)) in migrate_page_states()
697 if (page_is_idle(page)) in migrate_page_states()
701 * Copy NUMA information to the new page, to prevent over-eager in migrate_page_states()
702 * future migrations of this same page. in migrate_page_states()
704 cpupid = page_cpupid_xchg_last(page, -1); in migrate_page_states()
707 ksm_migrate_page(newpage, page); in migrate_page_states()
712 if (PageSwapCache(page)) in migrate_page_states()
713 ClearPageSwapCache(page); in migrate_page_states()
714 ClearPagePrivate(page); in migrate_page_states()
715 set_page_private(page, 0); in migrate_page_states()
718 * If any waiters have accumulated on the new page then in migrate_page_states()
724 copy_page_owner(page, newpage); in migrate_page_states()
726 mem_cgroup_migrate(page, newpage); in migrate_page_states()
730 void migrate_page_copy(struct page *newpage, struct page *page) in migrate_page_copy() argument
732 if (PageHuge(page) || PageTransHuge(page)) in migrate_page_copy()
733 copy_huge_page(newpage, page); in migrate_page_copy()
735 copy_highpage(newpage, page); in migrate_page_copy()
737 migrate_page_states(newpage, page); in migrate_page_copy()
746 * Common logic to directly migrate a single LRU page suitable for
752 struct page *newpage, struct page *page, in migrate_page() argument
757 BUG_ON(PageWriteback(page)); /* Writeback must be complete */ in migrate_page()
759 rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0); in migrate_page()
765 migrate_page_copy(newpage, page); in migrate_page()
767 migrate_page_states(newpage, page); in migrate_page()
775 * if the underlying filesystem guarantees that no other references to "page"
779 struct page *newpage, struct page *page, enum migrate_mode mode) in buffer_migrate_page() argument
784 if (!page_has_buffers(page)) in buffer_migrate_page()
785 return migrate_page(mapping, newpage, page, mode); in buffer_migrate_page()
787 head = page_buffers(page); in buffer_migrate_page()
789 rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0); in buffer_migrate_page()
802 ClearPagePrivate(page); in buffer_migrate_page()
803 set_page_private(newpage, page_private(page)); in buffer_migrate_page()
804 set_page_private(page, 0); in buffer_migrate_page()
805 put_page(page); in buffer_migrate_page()
818 migrate_page_copy(newpage, page); in buffer_migrate_page()
820 migrate_page_states(newpage, page); in buffer_migrate_page()
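
buffer_migrate_page() above (and the plain migrate_page() before it) are meant to be plugged into a filesystem's address_space_operations; a sketch of how a block-based filesystem wires that up, with the struct name illustrative and the other a_ops members omitted only to keep it short:

#include <linux/fs.h>
#include <linux/migrate.h>

static const struct address_space_operations examplefs_aops = {
	/* Pages with buffer_heads: migrate the buffers along with the page.
	 * Filesystems without buffer_heads point this at migrate_page()
	 * instead; leaving it NULL means move_to_new_page() falls back to
	 * fallback_migrate_page() below. */
	.migratepage	= buffer_migrate_page,
};
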
836 * Writeback a page to clean the dirty state
838 static int writeout(struct address_space *mapping, struct page *page) in writeout() argument
853 if (!clear_page_dirty_for_io(page)) in writeout()
858 * A dirty page may imply that the underlying filesystem has in writeout()
859 * the page on some queue. So the page must be clean for in writeout()
861 * page state is no longer what we checked for earlier. in writeout()
865 remove_migration_ptes(page, page, false); in writeout()
867 rc = mapping->a_ops->writepage(page, &wbc); in writeout()
871 lock_page(page); in writeout()
880 struct page *newpage, struct page *page, enum migrate_mode mode) in fallback_migrate_page() argument
882 if (PageDirty(page)) { in fallback_migrate_page()
891 return writeout(mapping, page); in fallback_migrate_page()
898 if (page_has_private(page) && in fallback_migrate_page()
899 !try_to_release_page(page, GFP_KERNEL)) in fallback_migrate_page()
902 return migrate_page(mapping, newpage, page, mode); in fallback_migrate_page()
906 * Move a page to a newly allocated page
907 * The page is locked and all ptes have been successfully removed.
909 * The new page will have replaced the old page if this function
916 static int move_to_new_page(struct page *newpage, struct page *page, in move_to_new_page() argument
921 bool is_lru = !__PageMovable(page); in move_to_new_page()
923 VM_BUG_ON_PAGE(!PageLocked(page), page); in move_to_new_page()
926 mapping = page_mapping(page); in move_to_new_page()
930 rc = migrate_page(mapping, newpage, page, mode); in move_to_new_page()
937 * for page migration. in move_to_new_page()
940 page, mode); in move_to_new_page()
943 page, mode); in move_to_new_page()
946 * In case of non-lru page, it could be released after in move_to_new_page()
949 VM_BUG_ON_PAGE(!PageIsolated(page), page); in move_to_new_page()
950 if (!PageMovable(page)) { in move_to_new_page()
952 __ClearPageIsolated(page); in move_to_new_page()
957 page, mode); in move_to_new_page()
959 !PageIsolated(page)); in move_to_new_page()
963 * When successful, old pagecache page->mapping must be cleared before in move_to_new_page()
964 * page is freed; but stats require that PageAnon be left as PageAnon. in move_to_new_page()
967 if (__PageMovable(page)) { in move_to_new_page()
968 VM_BUG_ON_PAGE(!PageIsolated(page), page); in move_to_new_page()
972 * cannot try to migrate this page. in move_to_new_page()
974 __ClearPageIsolated(page); in move_to_new_page()
978 * Anonymous and movable page->mapping will be cleared by in move_to_new_page()
982 if (!PageMappingFlags(page)) in move_to_new_page()
983 page->mapping = NULL; in move_to_new_page()
996 static int __unmap_and_move(struct page *page, struct page *newpage, in __unmap_and_move() argument
1002 bool is_lru = !__PageMovable(page); in __unmap_and_move()
1004 if (!trylock_page(page)) { in __unmap_and_move()
1010 * For example, during page readahead pages are added locked in __unmap_and_move()
1015 * second or third page, the process can end up locking in __unmap_and_move()
1016 * the same page twice and deadlocking. Rather than in __unmap_and_move()
1024 lock_page(page); in __unmap_and_move()
1027 if (PageWriteback(page)) { in __unmap_and_move()
1044 wait_on_page_writeback(page); in __unmap_and_move()
1048 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case, in __unmap_and_move()
1049 * we cannot notice that anon_vma is freed while we migrate a page. in __unmap_and_move()
1053 * just care about Anon pages here. in __unmap_and_move()
1058 * because that implies that the anon page is no longer mapped in __unmap_and_move()
1059 * (and cannot be remapped so long as we hold the page lock). in __unmap_and_move()
1061 if (PageAnon(page) && !PageKsm(page)) in __unmap_and_move()
1062 anon_vma = page_get_anon_vma(page); in __unmap_and_move()
1065 * Block others from accessing the new page when we get around to in __unmap_and_move()
1076 rc = move_to_new_page(newpage, page, mode); in __unmap_and_move()
1082 * 1. When a new swap-cache page is read into, it is added to the LRU in __unmap_and_move()
1084 * Calling try_to_unmap() against a page->mapping==NULL page will in __unmap_and_move()
1086 * 2. An orphaned page (see truncate_complete_page) might have in __unmap_and_move()
1087 * fs-private metadata. The page can be picked up due to memory in __unmap_and_move()
1088 * offlining. Everywhere else except page reclaim, the page is in __unmap_and_move()
1089 * invisible to the vm, so the page can not be migrated. So try to in __unmap_and_move()
1090 * free the metadata, so the page can be freed. in __unmap_and_move()
1092 if (!page->mapping) { in __unmap_and_move()
1093 VM_BUG_ON_PAGE(PageAnon(page), page); in __unmap_and_move()
1094 if (page_has_private(page)) { in __unmap_and_move()
1095 try_to_free_buffers(page); in __unmap_and_move()
1098 } else if (page_mapped(page)) { in __unmap_and_move()
1100 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma, in __unmap_and_move()
1101 page); in __unmap_and_move()
1102 try_to_unmap(page, in __unmap_and_move()
1107 if (!page_mapped(page)) in __unmap_and_move()
1108 rc = move_to_new_page(newpage, page, mode); in __unmap_and_move()
1111 remove_migration_ptes(page, in __unmap_and_move()
1112 rc == MIGRATEPAGE_SUCCESS ? newpage : page, false); in __unmap_and_move()
1120 unlock_page(page); in __unmap_and_move()
1124 * which will not free the page because the new page owner increased the in __unmap_and_move()
1125 * refcount. As well, if it is an LRU page, add the page to the LRU in __unmap_and_move()
1126 * list here. Use the old state of the isolated source page to in __unmap_and_move()
1127 * determine if we migrated a LRU page. newpage was already unlocked in __unmap_and_move()
1128 * and possibly modified by its owner - don't rely on the page in __unmap_and_move()
1153 * Obtain the lock on page, remove all ptes and migrate the page
1154 * to the newly allocated page in newpage.
1158 unsigned long private, struct page *page, in unmap_and_move() argument
1163 struct page *newpage; in unmap_and_move()
1165 if (!thp_migration_supported() && PageTransHuge(page)) in unmap_and_move()
1168 newpage = get_new_page(page, private); in unmap_and_move()
1172 if (page_count(page) == 1) { in unmap_and_move()
1173 /* page was freed from under us. So we are done. */ in unmap_and_move()
1174 ClearPageActive(page); in unmap_and_move()
1175 ClearPageUnevictable(page); in unmap_and_move()
1176 if (unlikely(__PageMovable(page))) { in unmap_and_move()
1177 lock_page(page); in unmap_and_move()
1178 if (!PageMovable(page)) in unmap_and_move()
1179 __ClearPageIsolated(page); in unmap_and_move()
1180 unlock_page(page); in unmap_and_move()
1189 rc = __unmap_and_move(page, newpage, force, mode); in unmap_and_move()
1196 * A page that has been migrated has all references in unmap_and_move()
1197 * removed and will be freed. A page that has not been in unmap_and_move()
1201 list_del(&page->lru); in unmap_and_move()
1208 if (likely(!__PageMovable(page))) in unmap_and_move()
1209 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + in unmap_and_move()
1210 page_is_file_cache(page), -hpage_nr_pages(page)); in unmap_and_move()
1215 * isolation. Otherwise, restore the page to the right list unless in unmap_and_move()
1219 put_page(page); in unmap_and_move()
1222 * Set PG_HWPoison on just freed page in unmap_and_move()
1226 if (set_hwpoison_free_buddy_page(page)) in unmap_and_move()
1231 if (likely(!__PageMovable(page))) { in unmap_and_move()
1232 putback_lru_page(page); in unmap_and_move()
1236 lock_page(page); in unmap_and_move()
1237 if (PageMovable(page)) in unmap_and_move()
1238 putback_movable_page(page); in unmap_and_move()
1240 __ClearPageIsolated(page); in unmap_and_move()
1241 unlock_page(page); in unmap_and_move()
1242 put_page(page); in unmap_and_move()
1262 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1263 * under direct I/O, the reference of the head page is 512 and a bit more.)
1268 * There is also no race when direct I/O is issued on the page under migration,
1270 * will wait in the page fault for migration to complete.
1274 struct page *hpage, int force, in unmap_and_move_huge_page()
1279 struct page *new_hpage; in unmap_and_move_huge_page()
1285 * like soft offline and memory hotremove don't walk through page in unmap_and_move_huge_page()
1313 * page_mapping() set, hugetlbfs specific move page routine will not in unmap_and_move_huge_page()
1372 * supplied as the target for the page migration
1376 * as the target of the page migration.
1381 * page migration, if any.
1382 * @reason: The reason for page migration.
1399 struct page *page; in migrate_pages() local
1400 struct page *page2; in migrate_pages()
1410 list_for_each_entry_safe(page, page2, from, lru) { in migrate_pages()
1414 if (PageHuge(page)) in migrate_pages()
1416 put_new_page, private, page, in migrate_pages()
1420 private, page, pass > 2, mode, in migrate_pages()
1428 * retry on the same page with the THP split in migrate_pages()
1431 * Head page is retried immediately and tail in migrate_pages()
1436 if (PageTransHuge(page) && !PageHuge(page)) { in migrate_pages()
1437 lock_page(page); in migrate_pages()
1438 rc = split_huge_page_to_list(page, from); in migrate_pages()
1439 unlock_page(page); in migrate_pages()
1441 list_safe_reset_next(page, page2, lru); in migrate_pages()
1456 * unlike -EAGAIN case, the failed page is in migrate_pages()
1457 * removed from migration page list and not in migrate_pages()
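
Everything from __unmap_and_move() through the retry loop above is driven by migrate_pages(), whose page-allocation and page-free callbacks come from the caller. A hedged caller sketch; the example_* names, the GFP flags and the use of the private argument for the target node are illustrative (compare alloc_misplaced_dst_page() further down):

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/migrate.h>

/* new_page_t callback: allocate the destination page on the requested node. */
static struct page *example_new_page(struct page *page, unsigned long private)
{
	int nid = (int)private;

	return alloc_pages_node(nid, GFP_HIGHUSER_MOVABLE, 0);
}

/* @pagelist must already contain isolated pages (e.g. via isolate_lru_page());
 * on failure the survivors are put back, as the syscall path does. */
static int example_migrate_list(struct list_head *pagelist, int target_nid)
{
	int err;

	if (list_empty(pagelist))
		return 0;

	err = migrate_pages(pagelist, example_new_page, NULL,
			    (unsigned long)target_nid, MIGRATE_SYNC, MR_SYSCALL);
	if (err)
		putback_movable_pages(pagelist);
	return err;
}
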
1509 * Resolves the given address to a struct page, isolates it from the LRU and
1512 * errno - if the page cannot be found/isolated
1521 struct page *page; in add_page_for_migration() local
1533 page = follow_page(vma, addr, follflags); in add_page_for_migration()
1535 err = PTR_ERR(page); in add_page_for_migration()
1536 if (IS_ERR(page)) in add_page_for_migration()
1540 if (!page) in add_page_for_migration()
1544 if (page_to_nid(page) == node) in add_page_for_migration()
1548 if (page_mapcount(page) > 1 && !migrate_all) in add_page_for_migration()
1551 if (PageHuge(page)) { in add_page_for_migration()
1552 if (PageHead(page)) { in add_page_for_migration()
1553 isolate_huge_page(page, pagelist); in add_page_for_migration()
1557 struct page *head; in add_page_for_migration()
1559 head = compound_head(page); in add_page_for_migration()
1573 * isolate_lru_page() or drop the page ref if it was in add_page_for_migration()
1576 put_page(page); in add_page_for_migration()
1583 * Migrate an array of page addresses onto an array of nodes and fill
1647 * Errors in the page lookup or isolation are not fatal and we simply in do_pages_move()
1654 /* The page is already on the target node */ in do_pages_move()
1660 /* The page is successfully queued for migration */ in do_pages_move()
1715 struct page *page; in do_pages_stat_array() local
1723 page = follow_page(vma, addr, FOLL_DUMP); in do_pages_stat_array()
1725 err = PTR_ERR(page); in do_pages_stat_array()
1726 if (IS_ERR(page)) in do_pages_stat_array()
1729 err = page ? page_to_nid(page) : -ENOENT; in do_pages_stat_array()
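
do_pages_move() and do_pages_stat_array() above implement the move_pages(2) system call; a userspace sketch driving them through the libnuma wrapper (build with -lnuma; the single-page buffer and the target node 0 are illustrative):

#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	void *buf = NULL;
	void *pages[1];
	int nodes[1] = { 0 };		/* target node; adjust for your topology */
	int status[1] = { -1 };

	if (posix_memalign(&buf, psz, psz))
		return 1;
	((char *)buf)[0] = 1;		/* fault the page in so it exists */
	pages[0] = buf;

	/* nodes != NULL migrates (do_pages_move); passing NULL instead only
	 * queries placement (do_pages_stat_array). */
	if (move_pages(0 /* self */, 1, pages, nodes, status, MPOL_MF_MOVE))
		perror("move_pages");
	printf("page is on node %d (negative value = -errno)\n", status[0]);

	free(buf);
	return 0;
}
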
1897 static struct page *alloc_misplaced_dst_page(struct page *page, in alloc_misplaced_dst_page() argument
1901 struct page *newpage; in alloc_misplaced_dst_page()
1912 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) in numamigrate_isolate_page() argument
1916 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); in numamigrate_isolate_page()
1919 if (!migrate_balanced_pgdat(pgdat, 1UL << compound_order(page))) in numamigrate_isolate_page()
1922 if (isolate_lru_page(page)) in numamigrate_isolate_page()
1926 * migrate_misplaced_transhuge_page() skips page migration's usual in numamigrate_isolate_page()
1927 * check on page_count(), so we must do it here, now that the page in numamigrate_isolate_page()
1929 * The expected page count is 3: 1 for page's mapcount and 1 for the in numamigrate_isolate_page()
1932 if (PageTransHuge(page) && page_count(page) != 3) { in numamigrate_isolate_page()
1933 putback_lru_page(page); in numamigrate_isolate_page()
1937 page_lru = page_is_file_cache(page); in numamigrate_isolate_page()
1938 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru, in numamigrate_isolate_page()
1939 hpage_nr_pages(page)); in numamigrate_isolate_page()
1942 * Isolating the page has taken another reference, so the in numamigrate_isolate_page()
1943 * caller's reference can be safely dropped without the page in numamigrate_isolate_page()
1946 put_page(page); in numamigrate_isolate_page()
1952 struct page *page = pmd_page(pmd); in pmd_trans_migrating() local
1953 return PageLocked(page); in pmd_trans_migrating()
1957 * Attempt to migrate a misplaced page to the specified destination
1959 * the page that will be dropped by this function before returning.
1961 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, in migrate_misplaced_page() argument
1973 if (page_mapcount(page) != 1 && page_is_file_cache(page) && in migrate_misplaced_page()
1981 if (page_is_file_cache(page) && PageDirty(page)) in migrate_misplaced_page()
1984 isolated = numamigrate_isolate_page(pgdat, page); in migrate_misplaced_page()
1988 list_add(&page->lru, &migratepages); in migrate_misplaced_page()
1994 list_del(&page->lru); in migrate_misplaced_page()
1995 dec_node_page_state(page, NR_ISOLATED_ANON + in migrate_misplaced_page()
1996 page_is_file_cache(page)); in migrate_misplaced_page()
1997 putback_lru_page(page); in migrate_misplaced_page()
2006 put_page(page); in migrate_misplaced_page()
2013 * Migrates a THP to a given target node. page must be locked and is unlocked
2020 struct page *page, int node) in migrate_misplaced_transhuge_page() argument
2025 struct page *new_page = NULL; in migrate_misplaced_transhuge_page()
2026 int page_lru = page_is_file_cache(page); in migrate_misplaced_transhuge_page()
2037 isolated = numamigrate_isolate_page(pgdat, page); in migrate_misplaced_transhuge_page()
2043 /* Prepare a page as a migration target */ in migrate_misplaced_transhuge_page()
2045 if (PageSwapBacked(page)) in migrate_misplaced_transhuge_page()
2048 /* anon mapping, we can simply copy page->mapping to the new page: */ in migrate_misplaced_transhuge_page()
2049 new_page->mapping = page->mapping; in migrate_misplaced_transhuge_page()
2050 new_page->index = page->index; in migrate_misplaced_transhuge_page()
2051 migrate_page_copy(new_page, page); in migrate_misplaced_transhuge_page()
2057 if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) { in migrate_misplaced_transhuge_page()
2063 SetPageActive(page); in migrate_misplaced_transhuge_page()
2065 SetPageUnevictable(page); in migrate_misplaced_transhuge_page()
2071 get_page(page); in migrate_misplaced_transhuge_page()
2072 putback_lru_page(page); in migrate_misplaced_transhuge_page()
2073 mod_node_page_state(page_pgdat(page), in migrate_misplaced_transhuge_page()
2085 * page blocking on the page lock, block on the page table in migrate_misplaced_transhuge_page()
2086 * lock or observe the new page. The SetPageUptodate on the in migrate_misplaced_transhuge_page()
2087 * new page and page_add_new_anon_rmap guarantee the copy is in migrate_misplaced_transhuge_page()
2106 page_ref_unfreeze(page, 2); in migrate_misplaced_transhuge_page()
2107 mlock_migrate_page(new_page, page); in migrate_misplaced_transhuge_page()
2108 page_remove_rmap(page, true); in migrate_misplaced_transhuge_page()
2118 /* Take an "isolate" reference and put new page on the LRU. */ in migrate_misplaced_transhuge_page()
2123 unlock_page(page); in migrate_misplaced_transhuge_page()
2124 put_page(page); /* Drop the rmap reference */ in migrate_misplaced_transhuge_page()
2125 put_page(page); /* Drop the LRU isolation reference */ in migrate_misplaced_transhuge_page()
2130 mod_node_page_state(page_pgdat(page), in migrate_misplaced_transhuge_page()
2146 unlock_page(page); in migrate_misplaced_transhuge_page()
2147 put_page(page); in migrate_misplaced_transhuge_page()
2214 struct page *page; in migrate_vma_collect_pmd() local
2222 page = pmd_page(*pmdp); in migrate_vma_collect_pmd()
2223 if (is_huge_zero_page(page)) { in migrate_vma_collect_pmd()
2232 get_page(page); in migrate_vma_collect_pmd()
2234 if (unlikely(!trylock_page(page))) in migrate_vma_collect_pmd()
2237 ret = split_huge_page(page); in migrate_vma_collect_pmd()
2238 unlock_page(page); in migrate_vma_collect_pmd()
2239 put_page(page); in migrate_vma_collect_pmd()
2257 struct page *page; in migrate_vma_collect_pmd() local
2275 * Only care about unaddressable device page special in migrate_vma_collect_pmd()
2276 * page table entry. Other special swap entries are not in migrate_vma_collect_pmd()
2277 * migratable, and we ignore regular swapped page. in migrate_vma_collect_pmd()
2283 page = device_private_entry_to_page(entry); in migrate_vma_collect_pmd()
2284 mpfn = migrate_pfn(page_to_pfn(page))| in migrate_vma_collect_pmd()
2295 page = _vm_normal_page(migrate->vma, addr, pte, true); in migrate_vma_collect_pmd()
2301 if (!page || !page->mapping || PageTransCompound(page)) { in migrate_vma_collect_pmd()
2305 pfn = page_to_pfn(page); in migrate_vma_collect_pmd()
2308 * By getting a reference on the page we pin it and that blocks in migrate_vma_collect_pmd()
2312 * We drop this reference after isolating the page from the lru in migrate_vma_collect_pmd()
2313 * for non device page (device page are not on the lru and thus in migrate_vma_collect_pmd()
2316 get_page(page); in migrate_vma_collect_pmd()
2320 * Optimize for the common case where page is only mapped once in migrate_vma_collect_pmd()
2321 * in one process. If we can lock the page, then we can safely in migrate_vma_collect_pmd()
2322 * set up a special migration page table entry now. in migrate_vma_collect_pmd()
2324 if (trylock_page(page)) { in migrate_vma_collect_pmd()
2330 /* Setup special migration page table entry */ in migrate_vma_collect_pmd()
2331 entry = make_migration_entry(page, mpfn & in migrate_vma_collect_pmd()
2340 * drop page refcount. Page won't be freed, as we took in migrate_vma_collect_pmd()
2343 page_remove_rmap(page, false); in migrate_vma_collect_pmd()
2344 put_page(page); in migrate_vma_collect_pmd()
2368 * This will walk the CPU page table. For each virtual address backed by a
2369 * valid page, it updates the src array and takes a reference on the page, in
2370 * order to pin the page until we lock it and unmap it.
2394 * migrate_vma_check_page() - check if page is pinned or not
2395 * @page: struct page to check
2399 * ZONE_DEVICE page.
2401 static bool migrate_vma_check_page(struct page *page) in migrate_vma_check_page() argument
2405 * isolate_lru_page() for a regular page, or migrate_vma_collect() for in migrate_vma_check_page()
2406 * a device page. in migrate_vma_check_page()
2411 * FIXME support THP (transparent huge page), it is a bit more complex to in migrate_vma_check_page()
2415 if (PageCompound(page)) in migrate_vma_check_page()
2418 /* Pages from ZONE_DEVICE have one extra reference */ in migrate_vma_check_page()
2419 if (is_zone_device_page(page)) { in migrate_vma_check_page()
2421 * Private pages can never be pinned as they have no valid pte and in migrate_vma_check_page()
2424 * will bump the page reference count. Sadly there is no way to in migrate_vma_check_page()
2431 * it does not need to take a reference on page. in migrate_vma_check_page()
2433 if (is_device_private_page(page)) in migrate_vma_check_page()
2437 * Only allow device public pages to be migrated and account for in migrate_vma_check_page()
2440 if (!is_device_public_page(page)) in migrate_vma_check_page()
2445 /* For file-backed pages */ in migrate_vma_check_page()
2446 if (page_mapping(page)) in migrate_vma_check_page()
2447 extra += 1 + page_has_private(page); in migrate_vma_check_page()
2449 if ((page_count(page) - extra) > page_mapcount(page)) in migrate_vma_check_page()
2460 * page is locked it is isolated from the lru (for non-device pages). Finally,
2474 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_prepare() local
2477 if (!page) in migrate_vma_prepare()
2484 * are waiting on each other's page lock. in migrate_vma_prepare()
2487 * for any page we can not lock right away. in migrate_vma_prepare()
2489 if (!trylock_page(page)) { in migrate_vma_prepare()
2492 put_page(page); in migrate_vma_prepare()
2500 if (!is_zone_device_page(page)) { in migrate_vma_prepare()
2501 if (!PageLRU(page) && allow_drain) { in migrate_vma_prepare()
2507 if (isolate_lru_page(page)) { in migrate_vma_prepare()
2514 unlock_page(page); in migrate_vma_prepare()
2516 put_page(page); in migrate_vma_prepare()
2522 put_page(page); in migrate_vma_prepare()
2525 if (!migrate_vma_check_page(page)) { in migrate_vma_prepare()
2531 if (!is_zone_device_page(page)) { in migrate_vma_prepare()
2532 get_page(page); in migrate_vma_prepare()
2533 putback_lru_page(page); in migrate_vma_prepare()
2537 unlock_page(page); in migrate_vma_prepare()
2540 if (!is_zone_device_page(page)) in migrate_vma_prepare()
2541 putback_lru_page(page); in migrate_vma_prepare()
2543 put_page(page); in migrate_vma_prepare()
2549 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_prepare() local
2551 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_prepare()
2554 remove_migration_pte(page, migrate->vma, addr, page); in migrate_vma_prepare()
2557 unlock_page(page); in migrate_vma_prepare()
2558 put_page(page); in migrate_vma_prepare()
2564 * migrate_vma_unmap() - replace page mapping with special migration pte entry
2567 * Replace page mapping (CPU page table pte) with a special migration pte entry
2572 * destination memory and copy contents of original page over to new page.
2582 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_unmap() local
2584 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_unmap()
2587 if (page_mapped(page)) { in migrate_vma_unmap()
2588 try_to_unmap(page, flags); in migrate_vma_unmap()
2589 if (page_mapped(page)) in migrate_vma_unmap()
2593 if (migrate_vma_check_page(page)) in migrate_vma_unmap()
2603 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_unmap() local
2605 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_unmap()
2608 remove_migration_ptes(page, page, false); in migrate_vma_unmap()
2611 unlock_page(page); in migrate_vma_unmap()
2614 if (is_zone_device_page(page)) in migrate_vma_unmap()
2615 put_page(page); in migrate_vma_unmap()
2617 putback_lru_page(page); in migrate_vma_unmap()
2623 struct page *page, in migrate_vma_insert_page() argument
2676 if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false)) in migrate_vma_insert_page()
2681 * preceding stores to the page contents become visible before in migrate_vma_insert_page()
2684 __SetPageUptodate(page); in migrate_vma_insert_page()
2686 if (is_zone_device_page(page)) { in migrate_vma_insert_page()
2687 if (is_device_private_page(page)) { in migrate_vma_insert_page()
2690 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE); in migrate_vma_insert_page()
2692 } else if (is_device_public_page(page)) { in migrate_vma_insert_page()
2693 entry = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); in migrate_vma_insert_page()
2699 entry = mk_pte(page, vma->vm_page_prot); in migrate_vma_insert_page()
2711 mem_cgroup_cancel_charge(page, memcg, false); in migrate_vma_insert_page()
2717 mem_cgroup_cancel_charge(page, memcg, false); in migrate_vma_insert_page()
2727 mem_cgroup_cancel_charge(page, memcg, false); in migrate_vma_insert_page()
2732 page_add_new_anon_rmap(page, vma, addr, false); in migrate_vma_insert_page()
2733 mem_cgroup_commit_charge(page, memcg, false, false); in migrate_vma_insert_page()
2734 if (!is_zone_device_page(page)) in migrate_vma_insert_page()
2735 lru_cache_add_active_or_unevictable(page, vma); in migrate_vma_insert_page()
2736 get_page(page); in migrate_vma_insert_page()
2758 * migrate_vma_pages() - migrate meta-data from src page to dst page
2761 * This migrates struct page meta-data from source struct page to destination
2762 * struct page. This effectively finishes the migration from source page to the
2763 * destination page.
2775 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); in migrate_vma_pages()
2776 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_pages() local
2785 if (!page) { in migrate_vma_pages()
2802 mapping = page_mapping(page); in migrate_vma_pages()
2816 * Other types of ZONE_DEVICE page are not in migrate_vma_pages()
2824 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY); in migrate_vma_pages()
2840 * migrate_vma_finalize() - restore CPU page table entry
2844 * new page if migration was successful for that page, or to the original page
2856 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); in migrate_vma_finalize()
2857 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_finalize() local
2859 if (!page) { in migrate_vma_finalize()
2872 newpage = page; in migrate_vma_finalize()
2875 remove_migration_ptes(page, newpage, false); in migrate_vma_finalize()
2876 unlock_page(page); in migrate_vma_finalize()
2879 if (is_zone_device_page(page)) in migrate_vma_finalize()
2880 put_page(page); in migrate_vma_finalize()
2882 putback_lru_page(page); in migrate_vma_finalize()
2884 if (newpage != page) { in migrate_vma_finalize()
2910 * are locked and unmapped, it checks whether each page is pinned or not. Pages
2922 * then the function tries to migrate struct page information from the source
2923 * struct page to the destination struct page. If it fails to migrate the struct
2924 * page information, then it clears the MIGRATE_PFN_MIGRATE flag in the src
2936 * function updates the CPU page table to point to new pages, otherwise it
2937 * restores the CPU page table to point to the original source pages.
2982 /* Lock and isolate page */ in migrate_vma()
2998 * individual page. in migrate_vma()
3002 /* This does the real migration of struct page */ in migrate_vma()
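
migrate_vma() runs the collect/prepare/unmap/pages/finalize steps listed above on behalf of a device driver, which plugs in the two callbacks that allocate destination memory and update device mappings. A hedged sketch of that driver side, assuming the migrate_vma_ops interface of this kernel generation; the example_* names and the stubbed device-page allocator are placeholders:

#include <linux/migrate.h>
#include <linux/mm.h>

/* Placeholder for the driver's device-memory allocator; a real driver
 * returns a ZONE_DEVICE page here. */
static struct page *example_alloc_device_page(void)
{
	return NULL;
}

/* Fill dst[] for every page the core marked migratable in src[]; the data
 * copy from migrate_pfn_to_page(src[i]) into the device page is device
 * specific and only noted in a comment. */
static void example_alloc_and_copy(struct vm_area_struct *vma,
				   const unsigned long *src, unsigned long *dst,
				   unsigned long start, unsigned long end,
				   void *private)
{
	unsigned long addr, i;

	for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
		struct page *dpage;

		if (!(src[i] & MIGRATE_PFN_MIGRATE))
			continue;	/* collect/prepare refused this page */

		dpage = example_alloc_device_page();
		if (!dpage)
			continue;	/* leaving dst[i] empty skips the page */

		/* copy from migrate_pfn_to_page(src[i]) to dpage here */
		lock_page(dpage);
		dst[i] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
		/* a device-private destination would also set MIGRATE_PFN_DEVICE */
	}
}

/* Update device page tables for entries that still carry
 * MIGRATE_PFN_MIGRATE; migrate_vma_finalize() runs right after this. */
static void example_finalize_and_map(struct vm_area_struct *vma,
				     const unsigned long *src,
				     const unsigned long *dst,
				     unsigned long start, unsigned long end,
				     void *private)
{
}

static const struct migrate_vma_ops example_migrate_ops = {
	.alloc_and_copy		= example_alloc_and_copy,
	.finalize_and_map	= example_finalize_and_map,
};

The driver then calls migrate_vma(&example_migrate_ops, vma, start, end, src, dst, private) with src/dst arrays holding one entry per page in [start, end).
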