Lines matching full:page in mm/migrate.c (Linux kernel)

7  * Page migration was first developed in the context of the memory hotplug
86 int isolate_movable_page(struct page *page, isolate_mode_t mode) in isolate_movable_page() argument
94 * In case we 'win' a race for a movable page being freed under us and in isolate_movable_page()
97 * release this page, thus avoiding a nasty leakage. in isolate_movable_page()
99 if (unlikely(!get_page_unless_zero(page))) in isolate_movable_page()
103 * Check PageMovable before holding a PG_lock because page's owner in isolate_movable_page()
104 * assumes anybody doesn't touch PG_lock of newly allocated page in isolate_movable_page()
105 * so unconditionally grabbing the lock ruins page's owner side. in isolate_movable_page()
107 if (unlikely(!__PageMovable(page))) in isolate_movable_page()
111 * compaction threads can race against page migration functions in isolate_movable_page()
112 * as well as race against the releasing a page. in isolate_movable_page()
114 * In order to avoid having an already isolated movable page in isolate_movable_page()
117 * lets be sure we have the page lock in isolate_movable_page()
118 * before proceeding with the movable page isolation steps. in isolate_movable_page()
120 if (unlikely(!trylock_page(page))) in isolate_movable_page()
123 if (!PageMovable(page) || PageIsolated(page)) in isolate_movable_page()
126 mapping = page_mapping(page); in isolate_movable_page()
127 VM_BUG_ON_PAGE(!mapping, page); in isolate_movable_page()
129 if (!mapping->a_ops->isolate_page(page, mode)) in isolate_movable_page()
132 /* Driver shouldn't use PG_isolated bit of page->flags */ in isolate_movable_page()
133 WARN_ON_ONCE(PageIsolated(page)); in isolate_movable_page()
134 __SetPageIsolated(page); in isolate_movable_page()
135 unlock_page(page); in isolate_movable_page()
140 unlock_page(page); in isolate_movable_page()
142 put_page(page); in isolate_movable_page()
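
The isolate_movable_page() matches above trace one sequence: take a speculative reference with get_page_unless_zero(), check __PageMovable() before touching the page lock (the owner of a newly allocated page assumes nobody else grabs PG_lock), take the lock with trylock_page(), and only then call the owning driver's a_ops->isolate_page() hook and set PG_isolated. As an illustration only, here is a condensed, non-verbatim sketch of that sequence using the ~v5.10-era helpers visible in the listing; error handling is abbreviated and the _sketch name is mine.

#include <linux/mm.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>

static int isolate_movable_page_sketch(struct page *page, isolate_mode_t mode)
{
        struct address_space *mapping;

        /* Pin the page so it cannot be freed under us. */
        if (unlikely(!get_page_unless_zero(page)))
                return -EBUSY;

        /* Only driver pages marked movable take this path. */
        if (unlikely(!__PageMovable(page)))
                goto out_put;

        /* The page lock serializes against migration and release. */
        if (unlikely(!trylock_page(page)))
                goto out_put;

        if (!PageMovable(page) || PageIsolated(page))
                goto out_unlock;

        mapping = page_mapping(page);
        /* Ask the owning driver to isolate the page. */
        if (!mapping->a_ops->isolate_page(page, mode))
                goto out_unlock;

        /* PG_isolated belongs to the migration core, not the driver. */
        __SetPageIsolated(page);
        unlock_page(page);
        return 0;

out_unlock:
        unlock_page(page);
out_put:
        put_page(page);
        return -EBUSY;
}
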
147 /* It should be called on page which is PG_movable */
148 void putback_movable_page(struct page *page) in putback_movable_page() argument
152 VM_BUG_ON_PAGE(!PageLocked(page), page); in putback_movable_page()
153 VM_BUG_ON_PAGE(!PageMovable(page), page); in putback_movable_page()
154 VM_BUG_ON_PAGE(!PageIsolated(page), page); in putback_movable_page()
156 mapping = page_mapping(page); in putback_movable_page()
157 mapping->a_ops->putback_page(page); in putback_movable_page()
158 __ClearPageIsolated(page); in putback_movable_page()
166 * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range()
171 struct page *page; in putback_movable_pages() local
172 struct page *page2; in putback_movable_pages()
174 list_for_each_entry_safe(page, page2, l, lru) { in putback_movable_pages()
175 if (unlikely(PageHuge(page))) { in putback_movable_pages()
176 putback_active_hugepage(page); in putback_movable_pages()
179 list_del(&page->lru); in putback_movable_pages()
181 * We isolated non-lru movable page so here we can use in putback_movable_pages()
182 * __PageMovable because LRU page's mapping cannot have in putback_movable_pages()
185 if (unlikely(__PageMovable(page))) { in putback_movable_pages()
186 VM_BUG_ON_PAGE(!PageIsolated(page), page); in putback_movable_pages()
187 lock_page(page); in putback_movable_pages()
188 if (PageMovable(page)) in putback_movable_pages()
189 putback_movable_page(page); in putback_movable_pages()
191 __ClearPageIsolated(page); in putback_movable_pages()
192 unlock_page(page); in putback_movable_pages()
193 put_page(page); in putback_movable_pages()
195 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + in putback_movable_pages()
196 page_is_file_lru(page), -thp_nr_pages(page)); in putback_movable_pages()
197 putback_lru_page(page); in putback_movable_pages()
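
For the reverse direction, the putback_movable_page() and putback_movable_pages() matches show how an isolated non-LRU movable page is handed back: under the page lock, a page the driver still considers movable goes through the a_ops->putback_page() hook, PG_isolated is cleared, and the isolation reference is dropped. A minimal companion sketch (non-verbatim, the _sketch name is mine), assuming the same kernel context as above:

static void putback_movable_sketch(struct page *page)
{
        VM_BUG_ON_PAGE(!PageIsolated(page), page);

        lock_page(page);
        if (PageMovable(page))
                /* Driver callback undoes its isolate_page() work. */
                page_mapping(page)->a_ops->putback_page(page);
        /* Either way, the page is no longer isolated. */
        __ClearPageIsolated(page);
        unlock_page(page);

        /* Drop the reference taken at isolation time. */
        put_page(page);
}
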
205 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, in remove_migration_pte() argument
209 .page = old, in remove_migration_pte()
214 struct page *new; in remove_migration_pte()
218 VM_BUG_ON_PAGE(PageTail(page), page); in remove_migration_pte()
220 if (PageKsm(page)) in remove_migration_pte()
221 new = page; in remove_migration_pte()
223 new = page - pvmw.page->index + in remove_migration_pte()
229 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); in remove_migration_pte()
280 if (PageTransHuge(page) && PageMlocked(page)) in remove_migration_pte()
281 clear_page_mlock(page); in remove_migration_pte()
292 * references to the indicated page.
294 void remove_migration_ptes(struct page *old, struct page *new, bool locked) in remove_migration_ptes()
308 * Something used the pte of a page under migration. We need to
309 * get to the page and wait until migration is finished.
317 struct page *page; in __migration_entry_wait() local
328 page = migration_entry_to_page(entry); in __migration_entry_wait()
329 page = compound_head(page); in __migration_entry_wait()
332 * Once page cache replacement of page migration started, page_count in __migration_entry_wait()
336 if (!get_page_unless_zero(page)) in __migration_entry_wait()
339 put_and_wait_on_page_locked(page); in __migration_entry_wait()
364 struct page *page; in pmd_migration_entry_wait() local
369 page = migration_entry_to_page(pmd_to_swp_entry(*pmd)); in pmd_migration_entry_wait()
370 if (!get_page_unless_zero(page)) in pmd_migration_entry_wait()
373 put_and_wait_on_page_locked(page); in pmd_migration_entry_wait()
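
The __migration_entry_wait() and pmd_migration_entry_wait() matches show how a faulting thread waits for a page under migration: the migration swap entry is resolved back to the (head) page, a reference is taken unless the page is already being freed, and put_and_wait_on_page_locked() drops that reference and sleeps until the migrating side unlocks the page. A hedged sketch of just that wait step, with the page-table lookup and locking elided (the _sketch name is mine):

static void wait_on_migration_entry_sketch(swp_entry_t entry)
{
        struct page *page;

        if (!is_migration_entry(entry))
                return;

        page = compound_head(migration_entry_to_page(entry));

        /*
         * Once page cache replacement has started, the refcount may
         * already be zero; then the migration is effectively done.
         */
        if (!get_page_unless_zero(page))
                return;

        /* Drops our reference and sleeps until the page is unlocked. */
        put_and_wait_on_page_locked(page);
}
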
380 static int expected_page_refs(struct address_space *mapping, struct page *page) in expected_page_refs() argument
388 expected_count += is_device_private_page(page); in expected_page_refs()
390 expected_count += thp_nr_pages(page) + page_has_private(page); in expected_page_refs()
396 * Replace the page in the mapping.
404 struct page *newpage, struct page *page, int extra_count) in migrate_page_move_mapping() argument
406 XA_STATE(xas, &mapping->i_pages, page_index(page)); in migrate_page_move_mapping()
409 int expected_count = expected_page_refs(mapping, page) + extra_count; in migrate_page_move_mapping()
410 int nr = thp_nr_pages(page); in migrate_page_move_mapping()
413 /* Anonymous page without mapping */ in migrate_page_move_mapping()
414 if (page_count(page) != expected_count) in migrate_page_move_mapping()
418 newpage->index = page->index; in migrate_page_move_mapping()
419 newpage->mapping = page->mapping; in migrate_page_move_mapping()
420 if (PageSwapBacked(page)) in migrate_page_move_mapping()
426 oldzone = page_zone(page); in migrate_page_move_mapping()
430 if (page_count(page) != expected_count || xas_load(&xas) != page) { in migrate_page_move_mapping()
435 if (!page_ref_freeze(page, expected_count)) { in migrate_page_move_mapping()
441 * Now we know that no one else is looking at the page: in migrate_page_move_mapping()
444 newpage->index = page->index; in migrate_page_move_mapping()
445 newpage->mapping = page->mapping; in migrate_page_move_mapping()
447 if (PageSwapBacked(page)) { in migrate_page_move_mapping()
449 if (PageSwapCache(page)) { in migrate_page_move_mapping()
451 set_page_private(newpage, page_private(page)); in migrate_page_move_mapping()
454 VM_BUG_ON_PAGE(PageSwapCache(page), page); in migrate_page_move_mapping()
457 /* Move dirty while page refs frozen and newpage not yet exposed */ in migrate_page_move_mapping()
458 dirty = PageDirty(page); in migrate_page_move_mapping()
460 ClearPageDirty(page); in migrate_page_move_mapping()
465 if (PageTransHuge(page)) { in migrate_page_move_mapping()
475 * Drop cache reference from old page by unfreezing in migrate_page_move_mapping()
479 page_ref_unfreeze(page, expected_count - nr); in migrate_page_move_mapping()
486 * the page for that zone. Other VM counters will be in migrate_page_move_mapping()
488 * new page and drop references to the old page. in migrate_page_move_mapping()
498 memcg = page_memcg(page); in migrate_page_move_mapping()
504 if (PageSwapBacked(page) && !PageSwapCache(page)) { in migrate_page_move_mapping()
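
The migrate_page_move_mapping() matches show the central replacement step: compute the expected reference count, freeze the old page's refcount so no new references can appear, store the new page in the mapping's i_pages XArray, then unfreeze with the cache references transferred. A simplified, non-verbatim sketch of that pattern follows; the anonymous-page fast path, swap-cache private data, the THP subpage loop and the zone/node statistics updates visible in the listing are all elided, and the _sketch name is mine.

static int move_mapping_sketch(struct address_space *mapping,
                               struct page *newpage, struct page *page)
{
        XA_STATE(xas, &mapping->i_pages, page_index(page));
        int expected_count = expected_page_refs(mapping, page);
        int nr = thp_nr_pages(page);

        xas_lock_irq(&xas);
        if (page_count(page) != expected_count || xas_load(&xas) != page) {
                xas_unlock_irq(&xas);
                return -EAGAIN;
        }

        /* While frozen, no one else can gain a reference to the page. */
        if (!page_ref_freeze(page, expected_count)) {
                xas_unlock_irq(&xas);
                return -EAGAIN;
        }

        newpage->index = page->index;
        newpage->mapping = page->mapping;
        page_ref_add(newpage, nr);              /* add cache reference(s) */

        xas_store(&xas, newpage);               /* replace the cache entry */

        /* Drop the cache reference(s) from the old page by unfreezing. */
        page_ref_unfreeze(page, expected_count - nr);
        xas_unlock_irq(&xas);

        return MIGRATEPAGE_SUCCESS;
}
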
526 struct page *newpage, struct page *page) in migrate_huge_page_move_mapping() argument
528 XA_STATE(xas, &mapping->i_pages, page_index(page)); in migrate_huge_page_move_mapping()
532 expected_count = 2 + page_has_private(page); in migrate_huge_page_move_mapping()
533 if (page_count(page) != expected_count || xas_load(&xas) != page) { in migrate_huge_page_move_mapping()
538 if (!page_ref_freeze(page, expected_count)) { in migrate_huge_page_move_mapping()
543 newpage->index = page->index; in migrate_huge_page_move_mapping()
544 newpage->mapping = page->mapping; in migrate_huge_page_move_mapping()
550 page_ref_unfreeze(page, expected_count - 1); in migrate_huge_page_move_mapping()
558 * Gigantic pages are so large that we do not guarantee that page++ pointer
559 * arithmetic will work across the entire page. We need something more
562 static void __copy_gigantic_page(struct page *dst, struct page *src, in __copy_gigantic_page()
566 struct page *dst_base = dst; in __copy_gigantic_page()
567 struct page *src_base = src; in __copy_gigantic_page()
579 static void copy_huge_page(struct page *dst, struct page *src) in copy_huge_page()
585 /* hugetlbfs page */ in copy_huge_page()
594 /* thp page */ in copy_huge_page()
606 * Copy the page to its new location
608 void migrate_page_states(struct page *newpage, struct page *page) in migrate_page_states() argument
612 if (PageError(page)) in migrate_page_states()
614 if (PageReferenced(page)) in migrate_page_states()
616 if (PageUptodate(page)) in migrate_page_states()
618 if (TestClearPageActive(page)) { in migrate_page_states()
619 VM_BUG_ON_PAGE(PageUnevictable(page), page); in migrate_page_states()
621 } else if (TestClearPageUnevictable(page)) in migrate_page_states()
623 if (PageWorkingset(page)) in migrate_page_states()
625 if (PageChecked(page)) in migrate_page_states()
627 if (PageMappedToDisk(page)) in migrate_page_states()
631 if (PageDirty(page)) in migrate_page_states()
634 if (page_is_young(page)) in migrate_page_states()
636 if (page_is_idle(page)) in migrate_page_states()
640 * Copy NUMA information to the new page, to prevent over-eager in migrate_page_states()
641 * future migrations of this same page. in migrate_page_states()
643 cpupid = page_cpupid_xchg_last(page, -1); in migrate_page_states()
646 ksm_migrate_page(newpage, page); in migrate_page_states()
651 if (PageSwapCache(page)) in migrate_page_states()
652 ClearPageSwapCache(page); in migrate_page_states()
653 ClearPagePrivate(page); in migrate_page_states()
654 set_page_private(page, 0); in migrate_page_states()
657 * If any waiters have accumulated on the new page then in migrate_page_states()
668 if (PageReadahead(page)) in migrate_page_states()
671 copy_page_owner(page, newpage); in migrate_page_states()
673 if (!PageHuge(page)) in migrate_page_states()
674 mem_cgroup_migrate(page, newpage); in migrate_page_states()
678 void migrate_page_copy(struct page *newpage, struct page *page) in migrate_page_copy() argument
680 if (PageHuge(page) || PageTransHuge(page)) in migrate_page_copy()
681 copy_huge_page(newpage, page); in migrate_page_copy()
683 copy_highpage(newpage, page); in migrate_page_copy()
685 migrate_page_states(newpage, page); in migrate_page_copy()
694 struct page *newpage, struct page *page, in migrate_page_extra() argument
699 BUG_ON(PageWriteback(page)); /* Writeback must be complete */ in migrate_page_extra()
701 rc = migrate_page_move_mapping(mapping, newpage, page, extra_count); in migrate_page_extra()
707 migrate_page_copy(newpage, page); in migrate_page_extra()
709 migrate_page_states(newpage, page); in migrate_page_extra()
714 * Common logic to directly migrate a single LRU page suitable for
720 struct page *newpage, struct page *page, in migrate_page() argument
723 return migrate_page_extra(mapping, newpage, page, mode, 0); in migrate_page()
767 struct page *newpage, struct page *page, enum migrate_mode mode, in __buffer_migrate_page() argument
774 if (!page_has_buffers(page)) in __buffer_migrate_page()
775 return migrate_page(mapping, newpage, page, mode); in __buffer_migrate_page()
777 /* Check whether page does not have extra refs before we do more work */ in __buffer_migrate_page()
778 expected_count = expected_page_refs(mapping, page); in __buffer_migrate_page()
779 if (page_count(page) != expected_count) in __buffer_migrate_page()
782 head = page_buffers(page); in __buffer_migrate_page()
813 rc = migrate_page_move_mapping(mapping, newpage, page, 0); in __buffer_migrate_page()
817 attach_page_private(newpage, detach_page_private(page)); in __buffer_migrate_page()
827 migrate_page_copy(newpage, page); in __buffer_migrate_page()
829 migrate_page_states(newpage, page); in __buffer_migrate_page()
847 * if the underlying filesystem guarantees that no other references to "page"
848 * exist. For example attached buffer heads are accessed only under page lock.
851 struct page *newpage, struct page *page, enum migrate_mode mode) in buffer_migrate_page() argument
853 return __buffer_migrate_page(mapping, newpage, page, mode, false); in buffer_migrate_page()
864 struct page *newpage, struct page *page, enum migrate_mode mode) in buffer_migrate_page_norefs() argument
866 return __buffer_migrate_page(mapping, newpage, page, mode, true); in buffer_migrate_page_norefs()
871 * Writeback a page to clean the dirty state
873 static int writeout(struct address_space *mapping, struct page *page) in writeout() argument
888 if (!clear_page_dirty_for_io(page)) in writeout()
893 * A dirty page may imply that the underlying filesystem has in writeout()
894 * the page on some queue. So the page must be clean for in writeout()
896 * page state is no longer what we checked for earlier. in writeout()
900 remove_migration_ptes(page, page, false); in writeout()
902 rc = mapping->a_ops->writepage(page, &wbc); in writeout()
906 lock_page(page); in writeout()
915 struct page *newpage, struct page *page, enum migrate_mode mode) in fallback_migrate_page() argument
917 if (PageDirty(page)) { in fallback_migrate_page()
926 return writeout(mapping, page); in fallback_migrate_page()
933 if (page_has_private(page) && in fallback_migrate_page()
934 !try_to_release_page(page, GFP_KERNEL)) in fallback_migrate_page()
937 return migrate_page(mapping, newpage, page, mode); in fallback_migrate_page()
941 * Move a page to a newly allocated page
942 * The page is locked and all ptes have been successfully removed.
944 * The new page will have replaced the old page if this function
951 static int move_to_new_page(struct page *newpage, struct page *page, in move_to_new_page() argument
956 bool is_lru = !__PageMovable(page); in move_to_new_page()
958 VM_BUG_ON_PAGE(!PageLocked(page), page); in move_to_new_page()
961 mapping = page_mapping(page); in move_to_new_page()
965 rc = migrate_page(mapping, newpage, page, mode); in move_to_new_page()
972 * for page migration. in move_to_new_page()
975 page, mode); in move_to_new_page()
978 page, mode); in move_to_new_page()
981 * In case of non-lru page, it could be released after in move_to_new_page()
984 VM_BUG_ON_PAGE(!PageIsolated(page), page); in move_to_new_page()
985 if (!PageMovable(page)) { in move_to_new_page()
987 __ClearPageIsolated(page); in move_to_new_page()
992 page, mode); in move_to_new_page()
994 !PageIsolated(page)); in move_to_new_page()
998 * When successful, old pagecache page->mapping must be cleared before in move_to_new_page()
999 * page is freed; but stats require that PageAnon be left as PageAnon. in move_to_new_page()
1002 if (__PageMovable(page)) { in move_to_new_page()
1003 VM_BUG_ON_PAGE(!PageIsolated(page), page); in move_to_new_page()
1007 * cannot try to migrate this page. in move_to_new_page()
1009 __ClearPageIsolated(page); in move_to_new_page()
1013 * Anonymous and movable page->mapping will be cleared by in move_to_new_page()
1017 if (!PageMappingFlags(page)) in move_to_new_page()
1018 page->mapping = NULL; in move_to_new_page()
1028 static int __unmap_and_move(struct page *page, struct page *newpage, in __unmap_and_move() argument
1034 bool is_lru = !__PageMovable(page); in __unmap_and_move()
1036 if (!trylock_page(page)) { in __unmap_and_move()
1042 * For example, during page readahead pages are added locked in __unmap_and_move()
1047 * second or third page, the process can end up locking in __unmap_and_move()
1048 * the same page twice and deadlocking. Rather than in __unmap_and_move()
1056 lock_page(page); in __unmap_and_move()
1059 if (PageWriteback(page)) { in __unmap_and_move()
1076 wait_on_page_writeback(page); in __unmap_and_move()
1080 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case, in __unmap_and_move()
1081 * we cannot notice that anon_vma is freed while we migrates a page. in __unmap_and_move()
1085 * just care Anon page here. in __unmap_and_move()
1090 * because that implies that the anon page is no longer mapped in __unmap_and_move()
1091 * (and cannot be remapped so long as we hold the page lock). in __unmap_and_move()
1093 if (PageAnon(page) && !PageKsm(page)) in __unmap_and_move()
1094 anon_vma = page_get_anon_vma(page); in __unmap_and_move()
1097 * Block others from accessing the new page when we get around to in __unmap_and_move()
1108 rc = move_to_new_page(newpage, page, mode); in __unmap_and_move()
1114 * 1. When a new swap-cache page is read into, it is added to the LRU in __unmap_and_move()
1116 * Calling try_to_unmap() against a page->mapping==NULL page will in __unmap_and_move()
1118 * 2. An orphaned page (see truncate_complete_page) might have in __unmap_and_move()
1119 * fs-private metadata. The page can be picked up due to memory in __unmap_and_move()
1120 * offlining. Everywhere else except page reclaim, the page is in __unmap_and_move()
1121 * invisible to the vm, so the page can not be migrated. So try to in __unmap_and_move()
1122 * free the metadata, so the page can be freed. in __unmap_and_move()
1124 if (!page->mapping) { in __unmap_and_move()
1125 VM_BUG_ON_PAGE(PageAnon(page), page); in __unmap_and_move()
1126 if (page_has_private(page)) { in __unmap_and_move()
1127 try_to_free_buffers(page); in __unmap_and_move()
1130 } else if (page_mapped(page)) { in __unmap_and_move()
1132 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma, in __unmap_and_move()
1133 page); in __unmap_and_move()
1134 try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK); in __unmap_and_move()
1138 if (!page_mapped(page)) in __unmap_and_move()
1139 rc = move_to_new_page(newpage, page, mode); in __unmap_and_move()
1142 remove_migration_ptes(page, in __unmap_and_move()
1143 rc == MIGRATEPAGE_SUCCESS ? newpage : page, false); in __unmap_and_move()
1151 unlock_page(page); in __unmap_and_move()
1155 * which will not free the page because new page owner increased in __unmap_and_move()
1156 * refcounter. As well, if it is LRU page, add the page to LRU in __unmap_and_move()
1157 * list in here. Use the old state of the isolated source page to in __unmap_and_move()
1158 * determine if we migrated a LRU page. newpage was already unlocked in __unmap_and_move()
1159 * and possibly modified by its owner - don't rely on the page in __unmap_and_move()
1173 * Obtain the lock on page, remove all ptes and migrate the page
1174 * to the newly allocated page in newpage.
1178 unsigned long private, struct page *page, in unmap_and_move() argument
1183 struct page *newpage = NULL; in unmap_and_move()
1185 if (!thp_migration_supported() && PageTransHuge(page)) in unmap_and_move()
1188 if (page_count(page) == 1) { in unmap_and_move()
1189 /* page was freed from under us. So we are done. */ in unmap_and_move()
1190 ClearPageActive(page); in unmap_and_move()
1191 ClearPageUnevictable(page); in unmap_and_move()
1192 if (unlikely(__PageMovable(page))) { in unmap_and_move()
1193 lock_page(page); in unmap_and_move()
1194 if (!PageMovable(page)) in unmap_and_move()
1195 __ClearPageIsolated(page); in unmap_and_move()
1196 unlock_page(page); in unmap_and_move()
1201 newpage = get_new_page(page, private); in unmap_and_move()
1205 rc = __unmap_and_move(page, newpage, force, mode); in unmap_and_move()
1212 * A page that has been migrated has all references in unmap_and_move()
1213 * removed and will be freed. A page that has not been in unmap_and_move()
1216 list_del(&page->lru); in unmap_and_move()
1223 if (likely(!__PageMovable(page))) in unmap_and_move()
1224 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + in unmap_and_move()
1225 page_is_file_lru(page), -thp_nr_pages(page)); in unmap_and_move()
1230 * isolation. Otherwise, restore the page to right list unless in unmap_and_move()
1236 * We release the page in page_handle_poison. in unmap_and_move()
1238 put_page(page); in unmap_and_move()
1241 if (likely(!__PageMovable(page))) { in unmap_and_move()
1242 putback_lru_page(page); in unmap_and_move()
1246 lock_page(page); in unmap_and_move()
1247 if (PageMovable(page)) in unmap_and_move()
1248 putback_movable_page(page); in unmap_and_move()
1250 __ClearPageIsolated(page); in unmap_and_move()
1251 unlock_page(page); in unmap_and_move()
1252 put_page(page); in unmap_and_move()
1272 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1273 * under direct I/O, the reference of the head page is 512 and a bit more.)
1278 * There is also no race when direct I/O is issued on the page under migration,
1280 * will wait in the page fault for migration to complete.
1284 struct page *hpage, int force, in unmap_and_move_huge_page()
1289 struct page *new_hpage; in unmap_and_move_huge_page()
1296 * like soft offline and memory hotremove don't walk through page in unmap_and_move_huge_page()
1324 * page_mapping() set, hugetlbfs specific move page routine will not in unmap_and_move_huge_page()
1404 * supplied as the target for the page migration
1408 * as the target of the page migration.
1413 * page migration, if any.
1414 * @reason: The reason for page migration.
1436 struct page *page; in migrate_pages() local
1437 struct page *page2; in migrate_pages()
1448 list_for_each_entry_safe(page, page2, from, lru) { in migrate_pages()
1451 * THP statistics is based on the source huge page. in migrate_pages()
1455 is_thp = PageTransHuge(page) && !PageHuge(page); in migrate_pages()
1456 nr_subpages = thp_nr_pages(page); in migrate_pages()
1459 if (PageHuge(page)) in migrate_pages()
1461 put_new_page, private, page, in migrate_pages()
1465 private, page, pass > 2, mode, in migrate_pages()
1473 * retry on the same page with the THP split in migrate_pages()
1476 * Head page is retried immediately and tail in migrate_pages()
1482 lock_page(page); in migrate_pages()
1483 rc = split_huge_page_to_list(page, from); in migrate_pages()
1484 unlock_page(page); in migrate_pages()
1486 list_safe_reset_next(page, page2, lru); in migrate_pages()
1515 * unlike -EAGAIN case, the failed page is in migrate_pages()
1516 * removed from migration page list and not in migrate_pages()
1547 struct page *alloc_migration_target(struct page *page, unsigned long private) in alloc_migration_target() argument
1552 struct page *new_page = NULL; in alloc_migration_target()
1560 nid = page_to_nid(page); in alloc_migration_target()
1562 if (PageHuge(page)) { in alloc_migration_target()
1563 struct hstate *h = page_hstate(compound_head(page)); in alloc_migration_target()
1569 if (PageTransHuge(page)) { in alloc_migration_target()
1578 zidx = zone_idx(page_zone(page)); in alloc_migration_target()
1620 * Resolves the given address to a struct page, isolates it from the LRU and
1623 * errno - if the page cannot be found/isolated
1632 struct page *page; in add_page_for_migration() local
1644 page = follow_page(vma, addr, follflags); in add_page_for_migration()
1646 err = PTR_ERR(page); in add_page_for_migration()
1647 if (IS_ERR(page)) in add_page_for_migration()
1651 if (!page) in add_page_for_migration()
1655 if (page_to_nid(page) == node) in add_page_for_migration()
1659 if (page_mapcount(page) > 1 && !migrate_all) in add_page_for_migration()
1662 if (PageHuge(page)) { in add_page_for_migration()
1663 if (PageHead(page)) { in add_page_for_migration()
1664 isolate_huge_page(page, pagelist); in add_page_for_migration()
1668 struct page *head; in add_page_for_migration()
1670 head = compound_head(page); in add_page_for_migration()
1684 * isolate_lru_page() or drop the page ref if it was in add_page_for_migration()
1687 put_page(page); in add_page_for_migration()
1720 * Migrate an array of page address onto an array of nodes and fill
1771 * Errors in the page lookup or isolation are not fatal and we simply in do_pages_move()
1778 /* The page is successfully queued for migration */ in do_pages_move()
1783 * If the page is already on the target node (!err), store the in do_pages_move()
1819 struct page *page; in do_pages_stat_array() local
1827 page = follow_page(vma, addr, FOLL_DUMP); in do_pages_stat_array()
1829 err = PTR_ERR(page); in do_pages_stat_array()
1830 if (IS_ERR(page)) in do_pages_stat_array()
1833 err = page ? page_to_nid(page) : -ENOENT; in do_pages_stat_array()
2016 static struct page *alloc_misplaced_dst_page(struct page *page, in alloc_misplaced_dst_page() argument
2020 struct page *newpage; in alloc_misplaced_dst_page()
2031 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) in numamigrate_isolate_page() argument
2035 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); in numamigrate_isolate_page()
2038 if (!migrate_balanced_pgdat(pgdat, compound_nr(page))) in numamigrate_isolate_page()
2041 if (isolate_lru_page(page)) in numamigrate_isolate_page()
2045 * migrate_misplaced_transhuge_page() skips page migration's usual in numamigrate_isolate_page()
2046 * check on page_count(), so we must do it here, now that the page in numamigrate_isolate_page()
2048 * The expected page count is 3: 1 for page's mapcount and 1 for the in numamigrate_isolate_page()
2051 if (PageTransHuge(page) && page_count(page) != 3) { in numamigrate_isolate_page()
2052 putback_lru_page(page); in numamigrate_isolate_page()
2056 page_lru = page_is_file_lru(page); in numamigrate_isolate_page()
2057 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru, in numamigrate_isolate_page()
2058 thp_nr_pages(page)); in numamigrate_isolate_page()
2061 * Isolating the page has taken another reference, so the in numamigrate_isolate_page()
2062 * caller's reference can be safely dropped without the page in numamigrate_isolate_page()
2065 put_page(page); in numamigrate_isolate_page()
2071 struct page *page = pmd_page(pmd); in pmd_trans_migrating() local
2072 return PageLocked(page); in pmd_trans_migrating()
2076 * Attempt to migrate a misplaced page to the specified destination
2078 * the page that will be dropped by this function before returning.
2080 int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, in migrate_misplaced_page() argument
2092 if (page_mapcount(page) != 1 && page_is_file_lru(page) && in migrate_misplaced_page()
2100 if (page_is_file_lru(page) && PageDirty(page)) in migrate_misplaced_page()
2103 isolated = numamigrate_isolate_page(pgdat, page); in migrate_misplaced_page()
2107 list_add(&page->lru, &migratepages); in migrate_misplaced_page()
2113 list_del(&page->lru); in migrate_misplaced_page()
2114 dec_node_page_state(page, NR_ISOLATED_ANON + in migrate_misplaced_page()
2115 page_is_file_lru(page)); in migrate_misplaced_page()
2116 putback_lru_page(page); in migrate_misplaced_page()
2125 put_page(page); in migrate_misplaced_page()
2132 * Migrates a THP to a given target node. page must be locked and is unlocked
2139 struct page *page, int node) in migrate_misplaced_transhuge_page() argument
2144 struct page *new_page = NULL; in migrate_misplaced_transhuge_page()
2145 int page_lru = page_is_file_lru(page); in migrate_misplaced_transhuge_page()
2155 isolated = numamigrate_isolate_page(pgdat, page); in migrate_misplaced_transhuge_page()
2161 /* Prepare a page as a migration target */ in migrate_misplaced_transhuge_page()
2163 if (PageSwapBacked(page)) in migrate_misplaced_transhuge_page()
2166 /* anon mapping, we can simply copy page->mapping to the new page: */ in migrate_misplaced_transhuge_page()
2167 new_page->mapping = page->mapping; in migrate_misplaced_transhuge_page()
2168 new_page->index = page->index; in migrate_misplaced_transhuge_page()
2171 migrate_page_copy(new_page, page); in migrate_misplaced_transhuge_page()
2176 if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) { in migrate_misplaced_transhuge_page()
2181 SetPageActive(page); in migrate_misplaced_transhuge_page()
2183 SetPageUnevictable(page); in migrate_misplaced_transhuge_page()
2189 get_page(page); in migrate_misplaced_transhuge_page()
2190 putback_lru_page(page); in migrate_misplaced_transhuge_page()
2191 mod_node_page_state(page_pgdat(page), in migrate_misplaced_transhuge_page()
2203 * page blocking on the page lock, block on the page table in migrate_misplaced_transhuge_page()
2204 * lock or observe the new page. The SetPageUptodate on the in migrate_misplaced_transhuge_page()
2205 * new page and page_add_new_anon_rmap guarantee the copy is in migrate_misplaced_transhuge_page()
2223 page_ref_unfreeze(page, 2); in migrate_misplaced_transhuge_page()
2224 mlock_migrate_page(new_page, page); in migrate_misplaced_transhuge_page()
2225 page_remove_rmap(page, true); in migrate_misplaced_transhuge_page()
2230 /* Take an "isolate" reference and put new page on the LRU. */ in migrate_misplaced_transhuge_page()
2235 unlock_page(page); in migrate_misplaced_transhuge_page()
2236 put_page(page); /* Drop the rmap reference */ in migrate_misplaced_transhuge_page()
2237 put_page(page); /* Drop the LRU isolation reference */ in migrate_misplaced_transhuge_page()
2242 mod_node_page_state(page_pgdat(page), in migrate_misplaced_transhuge_page()
2258 unlock_page(page); in migrate_misplaced_transhuge_page()
2259 put_page(page); in migrate_misplaced_transhuge_page()
2327 struct page *page; in migrate_vma_collect_pmd() local
2335 page = pmd_page(*pmdp); in migrate_vma_collect_pmd()
2336 if (is_huge_zero_page(page)) { in migrate_vma_collect_pmd()
2345 get_page(page); in migrate_vma_collect_pmd()
2347 if (unlikely(!trylock_page(page))) in migrate_vma_collect_pmd()
2350 ret = split_huge_page(page); in migrate_vma_collect_pmd()
2351 unlock_page(page); in migrate_vma_collect_pmd()
2352 put_page(page); in migrate_vma_collect_pmd()
2370 struct page *page; in migrate_vma_collect_pmd() local
2386 * Only care about unaddressable device page special in migrate_vma_collect_pmd()
2387 * page table entry. Other special swap entries are not in migrate_vma_collect_pmd()
2388 * migratable, and we ignore regular swapped page. in migrate_vma_collect_pmd()
2394 page = device_private_entry_to_page(entry); in migrate_vma_collect_pmd()
2397 page->pgmap->owner != migrate->pgmap_owner) in migrate_vma_collect_pmd()
2400 mpfn = migrate_pfn(page_to_pfn(page)) | in migrate_vma_collect_pmd()
2413 page = vm_normal_page(migrate->vma, addr, pte); in migrate_vma_collect_pmd()
2419 if (!page || !page->mapping || PageTransCompound(page)) { in migrate_vma_collect_pmd()
2425 * By getting a reference on the page we pin it and that blocks in migrate_vma_collect_pmd()
2429 * We drop this reference after isolating the page from the lru in migrate_vma_collect_pmd()
2430 * for non device page (device page are not on the lru and thus in migrate_vma_collect_pmd()
2433 get_page(page); in migrate_vma_collect_pmd()
2437 * Optimize for the common case where page is only mapped once in migrate_vma_collect_pmd()
2438 * in one process. If we can lock the page, then we can safely in migrate_vma_collect_pmd()
2439 * set up a special migration page table entry now. in migrate_vma_collect_pmd()
2441 if (trylock_page(page)) { in migrate_vma_collect_pmd()
2447 /* Setup special migration page table entry */ in migrate_vma_collect_pmd()
2448 entry = make_migration_entry(page, mpfn & in migrate_vma_collect_pmd()
2466 * drop page refcount. Page won't be freed, as we took in migrate_vma_collect_pmd()
2469 page_remove_rmap(page, false); in migrate_vma_collect_pmd()
2470 put_page(page); in migrate_vma_collect_pmd()
2499 * This will walk the CPU page table. For each virtual address backed by a
2500 * valid page, it updates the src array and takes a reference on the page, in
2501 * order to pin the page until we lock it and unmap it.
2510 * private page mappings that won't be migrated. in migrate_vma_collect()
2525 * migrate_vma_check_page() - check if page is pinned or not
2526 * @page: struct page to check
2530 * ZONE_DEVICE page.
2532 static bool migrate_vma_check_page(struct page *page, struct page *fault_page) in migrate_vma_check_page() argument
2536 * isolate_lru_page() for a regular page, or migrate_vma_collect() for in migrate_vma_check_page()
2537 * a device page. in migrate_vma_check_page()
2539 int extra = 1 + (page == fault_page); in migrate_vma_check_page()
2542 * FIXME support THP (transparent huge page), it is bit more complex to in migrate_vma_check_page()
2546 if (PageCompound(page)) in migrate_vma_check_page()
2549 /* Page from ZONE_DEVICE have one extra reference */ in migrate_vma_check_page()
2550 if (is_zone_device_page(page)) { in migrate_vma_check_page()
2552 * Private page can never be pin as they have no valid pte and in migrate_vma_check_page()
2555 * will bump the page reference count. Sadly there is no way to in migrate_vma_check_page()
2562 * it does not need to take a reference on page. in migrate_vma_check_page()
2564 return is_device_private_page(page); in migrate_vma_check_page()
2567 /* For file back page */ in migrate_vma_check_page()
2568 if (page_mapping(page)) in migrate_vma_check_page()
2569 extra += 1 + page_has_private(page); in migrate_vma_check_page()
2571 if ((page_count(page) - extra) > page_mapcount(page)) in migrate_vma_check_page()
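
migrate_vma_check_page(), matched above, treats a page as migratable only if every reference is accounted for: the isolation reference (plus one more when it is the page that triggered the fault), the page cache and buffer-head references for file-backed pages, and one reference per CPU mapping; device-private ZONE_DEVICE pages are always considered unpinned because they cannot be pinned through GUP. A condensed restatement of that arithmetic (non-verbatim, the _sketch name is mine):

static bool vma_page_unpinned_sketch(struct page *page, struct page *fault_page)
{
        /* One ref from isolation, one more if this is the faulting page. */
        int extra = 1 + (page == fault_page);

        /* THP is not handled on this path. */
        if (PageCompound(page))
                return false;

        /* Device-private pages have no valid pte, so GUP cannot pin them. */
        if (is_zone_device_page(page))
                return is_device_private_page(page);

        /* File-backed pages: page cache reference plus private data. */
        if (page_mapping(page))
                extra += 1 + page_has_private(page);

        return (page_count(page) - extra) <= page_mapcount(page);
}
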
2582 * page is locked it is isolated from the lru (for non-device pages). Finally,
2596 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_prepare() local
2599 if (!page) in migrate_vma_prepare()
2606 * are waiting on each other page lock. in migrate_vma_prepare()
2609 * for any page we can not lock right away. in migrate_vma_prepare()
2611 if (!trylock_page(page)) { in migrate_vma_prepare()
2614 put_page(page); in migrate_vma_prepare()
2622 if (!is_zone_device_page(page)) { in migrate_vma_prepare()
2623 if (!PageLRU(page) && allow_drain) { in migrate_vma_prepare()
2629 if (isolate_lru_page(page)) { in migrate_vma_prepare()
2636 unlock_page(page); in migrate_vma_prepare()
2638 put_page(page); in migrate_vma_prepare()
2644 put_page(page); in migrate_vma_prepare()
2647 if (!migrate_vma_check_page(page, migrate->fault_page)) { in migrate_vma_prepare()
2653 if (!is_zone_device_page(page)) { in migrate_vma_prepare()
2654 get_page(page); in migrate_vma_prepare()
2655 putback_lru_page(page); in migrate_vma_prepare()
2659 unlock_page(page); in migrate_vma_prepare()
2662 if (!is_zone_device_page(page)) in migrate_vma_prepare()
2663 putback_lru_page(page); in migrate_vma_prepare()
2665 put_page(page); in migrate_vma_prepare()
2671 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_prepare() local
2673 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_prepare()
2676 remove_migration_pte(page, migrate->vma, addr, page); in migrate_vma_prepare()
2679 unlock_page(page); in migrate_vma_prepare()
2680 put_page(page); in migrate_vma_prepare()
2686 * migrate_vma_unmap() - replace page mapping with special migration pte entry
2689 * Replace page mapping (CPU page table pte) with a special migration pte entry
2694 * destination memory and copy contents of original page over to new page.
2704 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_unmap() local
2706 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_unmap()
2709 if (page_mapped(page)) { in migrate_vma_unmap()
2710 try_to_unmap(page, flags); in migrate_vma_unmap()
2711 if (page_mapped(page)) in migrate_vma_unmap()
2715 if (migrate_vma_check_page(page, migrate->fault_page)) in migrate_vma_unmap()
2725 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_unmap() local
2727 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_unmap()
2730 remove_migration_ptes(page, page, false); in migrate_vma_unmap()
2733 unlock_page(page); in migrate_vma_unmap()
2736 if (is_zone_device_page(page)) in migrate_vma_unmap()
2737 put_page(page); in migrate_vma_unmap()
2739 putback_lru_page(page); in migrate_vma_unmap()
2753 * and unmapped, check whether each page is pinned or not. Pages that aren't
2762 * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
2767 * device memory to system memory. If the caller cannot migrate a device page
2772 * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we
2784 * then migrate_vma_pages() to migrate struct page information from the source
2785 * struct page to the destination struct page. If it fails to migrate the
2786 * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
2797 * It is safe to update device page table after migrate_vma_pages() because
2798 * both destination and source page are still locked, and the mmap_lock is held
2801 * Once the caller is done cleaning up things and updating its page table (if it
2803 * migrate_vma_finalize() to update the CPU page table to point to new pages
2804 * for successfully migrated pages or otherwise restore the CPU page table to
2854 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
2855 * private page.
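
The documentation block above lays out the driver-facing flow: migrate_vma_setup() collects, locks and unmaps the candidate pages, the caller allocates destination pages and copies data while both sides are locked, migrate_vma_pages() moves the struct page metadata, and migrate_vma_finalize() repairs the CPU page tables. As a hedged illustration of how a caller might drive that flow for a single page: my_alloc_device_page() and my_migrate_one_page_sketch() are hypothetical names, and the flags value assumes the post-v5.9 MIGRATE_VMA_SELECT_SYSTEM interface rather than anything shown directly in the listing.

#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static struct page *my_alloc_device_page(void);  /* hypothetical driver helper */

static int my_migrate_one_page_sketch(struct vm_area_struct *vma,
                                      unsigned long addr, void *pgmap_owner)
{
        unsigned long src_pfn = 0, dst_pfn = 0;
        struct migrate_vma args = {
                .vma            = vma,
                .start          = addr,
                .end            = addr + PAGE_SIZE,
                .src            = &src_pfn,
                .dst            = &dst_pfn,
                .pgmap_owner    = pgmap_owner,
                .flags          = MIGRATE_VMA_SELECT_SYSTEM,
        };
        struct page *dpage;
        int ret;

        /* Collect, lock and unmap the source page. */
        ret = migrate_vma_setup(&args);
        if (ret)
                return ret;

        /* Pinned or otherwise non-migratable pages are skipped. */
        if (!(src_pfn & MIGRATE_PFN_MIGRATE))
                goto out;

        dpage = my_alloc_device_page();
        if (dpage) {
                lock_page(dpage);
                /* Copy the page contents here, while both sides are locked. */
                dst_pfn = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
        }
        /* A zero dst entry makes the core clear MIGRATE_PFN_MIGRATE for us. */

        migrate_vma_pages(&args);       /* move struct page metadata */
out:
        migrate_vma_finalize(&args);    /* restore or replace the CPU page table */
        return 0;
}
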
2859 struct page *page, in migrate_vma_insert_page() argument
2911 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) in migrate_vma_insert_page()
2916 * preceding stores to the page contents become visible before in migrate_vma_insert_page()
2919 __SetPageUptodate(page); in migrate_vma_insert_page()
2921 if (is_zone_device_page(page)) { in migrate_vma_insert_page()
2922 if (is_device_private_page(page)) { in migrate_vma_insert_page()
2925 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE); in migrate_vma_insert_page()
2932 pr_warn_once("Unsupported ZONE_DEVICE page type.\n"); in migrate_vma_insert_page()
2936 entry = mk_pte(page, vma->vm_page_prot); in migrate_vma_insert_page()
2963 page_add_new_anon_rmap(page, vma, addr, false); in migrate_vma_insert_page()
2964 if (!is_zone_device_page(page)) in migrate_vma_insert_page()
2965 lru_cache_add_inactive_or_unevictable(page, vma); in migrate_vma_insert_page()
2966 get_page(page); in migrate_vma_insert_page()
2990 * migrate_vma_pages() - migrate meta-data from src page to dst page
2993 * This migrates struct page meta-data from source struct page to destination
2994 * struct page. This effectively finishes the migration from source page to the
2995 * destination page.
3006 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); in migrate_vma_pages()
3007 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_pages() local
3016 if (!page) { in migrate_vma_pages()
3035 mapping = page_mapping(page); in migrate_vma_pages()
3049 * Other types of ZONE_DEVICE page are not in migrate_vma_pages()
3057 if (migrate->fault_page == page) in migrate_vma_pages()
3058 r = migrate_page_extra(mapping, newpage, page, in migrate_vma_pages()
3061 r = migrate_page(mapping, newpage, page, in migrate_vma_pages()
3078 * migrate_vma_finalize() - restore CPU page table entry
3082 * new page if migration was successful for that page, or to the original page
3094 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); in migrate_vma_finalize()
3095 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_finalize() local
3097 if (!page) { in migrate_vma_finalize()
3110 newpage = page; in migrate_vma_finalize()
3113 remove_migration_ptes(page, newpage, false); in migrate_vma_finalize()
3114 unlock_page(page); in migrate_vma_finalize()
3116 if (is_zone_device_page(page)) in migrate_vma_finalize()
3117 put_page(page); in migrate_vma_finalize()
3119 putback_lru_page(page); in migrate_vma_finalize()
3121 if (newpage != page) { in migrate_vma_finalize()