
Lines Matching full:page
(matched lines from the Linux kernel's reverse-mapping code, mm/rmap.c; each entry shows the original source line number, the matched text, and the enclosing function)

10  * Provides methods for unmapping each kind of mapped page:
25 * page->flags PG_locked (lock_page)
259 * page is mapped.
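
The "page->flags PG_locked (lock_page)" line above is part of rmap.c's lock-ordering comment: the page lock is taken before mapping->i_mmap_rwsem or anon_vma->rwsem, never after. A minimal sketch of a caller that respects that order, assuming kernel context; my_locked_rmap_op is a hypothetical name:

/* Hedged sketch: take PG_locked first, then let the rmap walker take the
 * i_mmap / anon_vma locks internally, per the ordering documented at the
 * top of mm/rmap.c. */
static void my_locked_rmap_op(struct page *page, struct rmap_walk_control *rwc)
{
        lock_page(page);        /* PG_locked before any rmap locks */
        rmap_walk(page, rwc);   /* walker takes i_mmap_rwsem / anon_vma->rwsem */
        unlock_page(page);
}
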
442 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
446 * have been relevant to this page.
448 * The page might have been remapped to a different anon_vma or the anon_vma
453 * ensure that any anon_vma obtained from the page will still be valid for as
457 * chain and verify that the page in question is indeed mapped in it
461 * that the anon_vma pointer from page->mapping is valid if there is a
464 struct anon_vma *page_get_anon_vma(struct page *page) in page_get_anon_vma() argument
470 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma()
473 if (!page_mapped(page)) in page_get_anon_vma()
483 * If this page is still mapped, then its anon_vma cannot have been in page_get_anon_vma()
489 if (!page_mapped(page)) { in page_get_anon_vma()
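
A hedged sketch of how a caller typically uses page_get_anon_vma(): pin the anon_vma, then take its lock for as long as the rmap needs to stay stable. This is the pattern used by THP splitting; my_operate_on_anon_page is a hypothetical name.

static bool my_operate_on_anon_page(struct page *page)
{
        struct anon_vma *anon_vma;

        anon_vma = page_get_anon_vma(page);     /* pins the anon_vma, or NULL */
        if (!anon_vma)
                return false;                   /* not anon, or no longer mapped */

        anon_vma_lock_write(anon_vma);
        /* ... the anon_vma (and its root) cannot be freed or reused here ... */
        anon_vma_unlock_write(anon_vma);

        put_anon_vma(anon_vma);                 /* drop the reference taken above */
        return true;
}
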
507 struct anon_vma *page_lock_anon_vma_read(struct page *page) in page_lock_anon_vma_read() argument
514 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read()
517 if (!page_mapped(page)) in page_lock_anon_vma_read()
524 * If the page is still mapped, then this anon_vma is still in page_lock_anon_vma_read()
528 if (!page_mapped(page)) { in page_lock_anon_vma_read()
541 if (!page_mapped(page)) { in page_lock_anon_vma_read()
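
The read-lock variant hands the anon_vma back already locked for reading; a hedged sketch of the matching unlock, assuming kernel context (the helper name is hypothetical):

static void my_walk_anon_mappings(struct page *page)
{
        struct anon_vma *anon_vma;

        anon_vma = page_lock_anon_vma_read(page);  /* RCU lookup + read lock */
        if (!anon_vma)
                return;                            /* page no longer anon-mapped */

        /* ... iterate the anon_vma interval tree under the read lock ... */

        page_unlock_anon_vma_read(anon_vma);       /* releases the read lock */
}
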
578 * before any IO is initiated on the page to prevent lost writes. Similarly,
619 * before the page is queued for IO. in set_tlb_ubc_flush_pending()
648 * the page and flushing the page. If this race occurs, it potentially allows
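
The comments above describe batched TLB flushing (the "tlb_ubc" machinery named in set_tlb_ubc_flush_pending()): PTEs are cleared without flushing every CPU immediately, so the deferred flush must be forced out before IO starts on a dirty page, or a CPU with a stale TLB entry could keep writing after the IO has sampled the data. A hedged sketch of that rule, loosely following the reclaim path; my_pageout is hypothetical and error handling is omitted.

static void my_pageout(struct page *page)
{
        /* caller holds the page lock, as try_to_unmap() requires */
        if (!try_to_unmap(page, TTU_BATCH_FLUSH))
                return;                         /* still mapped somewhere */

        if (PageDirty(page))
                try_to_unmap_flush_dirty();     /* complete the deferred TLB
                                                 * flush before starting IO */
        /* ... now it is safe to start writeback on the page ... */
}
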
684 * At what user virtual address is page expected in vma?
685 * Caller should check the page is actually part of the vma.
687 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) in page_address_in_vma() argument
690 if (PageAnon(page)) { in page_address_in_vma()
691 struct anon_vma *page__anon_vma = page_anon_vma(page); in page_address_in_vma()
699 } else if (page->mapping) { in page_address_in_vma()
700 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) in page_address_in_vma()
704 address = __vma_address(page, vma); in page_address_in_vma()
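
page_address_in_vma() answers the question in the comment above, returning -EFAULT when the page is not mapped by that vma. The underlying arithmetic, mirroring __vma_address() as a hedged sketch (the helper name is hypothetical):

static unsigned long my_vma_address(struct page *page, struct vm_area_struct *vma)
{
        pgoff_t pgoff = page_to_pgoff(page);    /* file offset, or anon index */

        /* offset of the page within the vma, scaled to bytes, plus vm_start */
        return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}
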
753 static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, in page_referenced_one() argument
758 .page = page, in page_referenced_one()
779 * If the page has been used in another mapping, in page_referenced_one()
782 * PG_referenced or activated the page. in page_referenced_one()
792 /* unexpected pmd-mapped page? */ in page_referenced_one()
800 clear_page_idle(page); in page_referenced_one()
801 if (test_and_clear_page_young(page)) in page_referenced_one()
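
page_referenced_one() above is a typical rmap_one callback: it fills in a page_vma_mapped_walk and lets page_vma_mapped_walk() find every PTE (or PMD) mapping the page inside this vma, with the page-table lock held at each hit. A minimal hedged sketch of that pattern, assuming kernel context; the callback name is hypothetical.

static bool my_referenced_one(struct page *page, struct vm_area_struct *vma,
                              unsigned long address, void *arg)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .address = address,
        };
        int *referenced = arg;

        while (page_vma_mapped_walk(&pvmw)) {
                /* pvmw.pte (or pvmw.pmd for a THP) is valid here, pte lock held */
                if (pvmw.pte &&
                    ptep_clear_flush_young_notify(vma, pvmw.address, pvmw.pte))
                        (*referenced)++;        /* accessed bit was set */
        }
        return true;                            /* keep walking other vmas */
}
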
827 * page_referenced - test if the page was referenced
828 * @page: the page to test
829 * @is_locked: caller holds lock on the page
831 * @vm_flags: collect encountered vma->vm_flags which actually referenced the page
833 * Quick test_and_clear_referenced for all mappings to a page,
834 * returns the number of ptes which referenced the page.
836 int page_referenced(struct page *page, in page_referenced() argument
843 .mapcount = total_mapcount(page), in page_referenced()
853 if (!page_mapped(page)) in page_referenced()
856 if (!page_rmapping(page)) in page_referenced()
859 if (!is_locked && (!PageAnon(page) || PageKsm(page))) { in page_referenced()
860 we_locked = trylock_page(page); in page_referenced()
874 rmap_walk(page, &rwc); in page_referenced()
878 unlock_page(page); in page_referenced()
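
A hedged sketch of how a reclaim-style caller consults page_referenced(), assuming the four-argument signature (page, is_locked, memcg, vm_flags) of this kernel series; the helper name is hypothetical.

static bool my_page_is_reclaim_candidate(struct page *page,
                                         struct mem_cgroup *memcg)
{
        unsigned long vm_flags = 0;
        int referenced;

        referenced = page_referenced(page, 0, memcg, &vm_flags);
        if (vm_flags & VM_LOCKED)
                return false;           /* an mlocked vma maps it: keep it */
        return referenced == 0;         /* no pte referenced it recently */
}
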
883 static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, in page_mkclean_one() argument
887 .page = page, in page_mkclean_one()
897 * the page cannot be freed from this function. in page_mkclean_one()
899 end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page))); in page_mkclean_one()
928 flush_cache_page(vma, address, page_to_pfn(page)); in page_mkclean_one()
936 /* unexpected pmd-mapped page? */ in page_mkclean_one()
943 * downgrading page table protection not changing it to point in page_mkclean_one()
944 * to a new page. in page_mkclean_one()
965 int page_mkclean(struct page *page) in page_mkclean() argument
975 BUG_ON(!PageLocked(page)); in page_mkclean()
977 if (!page_mapped(page)) in page_mkclean()
980 mapping = page_mapping(page); in page_mkclean()
984 rmap_walk(page, &rwc); in page_mkclean()
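
page_mkclean() write-protects and cleans every PTE mapping a file page, so a later store faults and re-dirties the page rather than writing through silently. A hedged, simplified sketch of the clear_page_dirty_for_io()-style usage (the helper name is hypothetical):

static bool my_clear_dirty_for_io(struct page *page)
{
        VM_BUG_ON_PAGE(!PageLocked(page), page);

        if (page_mkclean(page))
                set_page_dirty(page);   /* a pte was dirty: keep PageDirty set */

        return TestClearPageDirty(page); /* true if the page should be written */
}
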
991 * page_move_anon_rmap - move a page to our anon_vma
992 * @page: the page to move to our anon_vma
993 * @vma: the vma the page belongs to
995 * When a page belongs exclusively to one process after a COW event,
996 * that page can be moved into the anon_vma that belongs to just that
1000 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) in page_move_anon_rmap() argument
1004 page = compound_head(page); in page_move_anon_rmap()
1006 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_move_anon_rmap()
1015 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); in page_move_anon_rmap()
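
A hedged sketch of the write-fault "reuse" decision the kernel-doc above refers to: once the fault handler proves the anon page has a single mapper, it can keep the page and simply re-point its rmap at this process's anon_vma. Simplified (the real check also accounts for swap-cache references); the helper name is hypothetical.

static bool my_try_reuse_cow_page(struct page *page, struct vm_area_struct *vma)
{
        bool reused = false;

        if (!trylock_page(page))        /* page_move_anon_rmap() needs the lock */
                return false;

        if (PageAnon(page) && !PageKsm(page) && page_mapcount(page) == 1) {
                page_move_anon_rmap(page, vma);
                reused = true;
        }
        unlock_page(page);
        return reused;
}
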
1020 * @page: Page to add to rmap
1021 * @vma: VM area to add page to.
1023 * @exclusive: the page is exclusively owned by the current process
1025 static void __page_set_anon_rmap(struct page *page, in __page_set_anon_rmap() argument
1032 if (PageAnon(page)) in __page_set_anon_rmap()
1036 * If the page isn't exclusively mapped into this vma, in __page_set_anon_rmap()
1038 * page mapping! in __page_set_anon_rmap()
1044 page->mapping = (struct address_space *) anon_vma; in __page_set_anon_rmap()
1045 page->index = linear_page_index(vma, address); in __page_set_anon_rmap()
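
__page_set_anon_rmap() stores the anon_vma pointer straight into page->mapping (the real code also sets the PAGE_MAPPING_ANON low bit before the cast). A hedged sketch of how that encoding is read back, mirroring page_anon_vma():

static struct anon_vma *my_decode_anon_mapping(struct page *page)
{
        unsigned long mapping;

        page = compound_head(page);
        mapping = (unsigned long)READ_ONCE(page->mapping);

        /* anonymous pages have exactly the PAGE_MAPPING_ANON bit set; KSM and
         * movable pages carry extra flag bits and are rejected here */
        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;

        return (struct anon_vma *)(mapping & ~PAGE_MAPPING_FLAGS);
}
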
1050 * @page: the page to add the mapping to
1054 static void __page_check_anon_rmap(struct page *page, in __page_check_anon_rmap() argument
1059 * The page's anon-rmap details (mapping and index) are guaranteed to in __page_check_anon_rmap()
1063 * always holds the page locked, except if called from page_dup_rmap, in __page_check_anon_rmap()
1064 * in which case the page is already known to be setup. in __page_check_anon_rmap()
1070 BUG_ON(page_anon_vma(page)->root != vma->anon_vma->root); in __page_check_anon_rmap()
1071 BUG_ON(page_to_pgoff(page) != linear_page_index(vma, address)); in __page_check_anon_rmap()
1076 * page_add_anon_rmap - add pte mapping to an anonymous page
1077 * @page: the page to add the mapping to
1080 * @compound: charge the page as compound or small page
1082 * The caller needs to hold the pte lock, and the page must be locked in
1087 void page_add_anon_rmap(struct page *page, in page_add_anon_rmap() argument
1090 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0); in page_add_anon_rmap()
1098 void do_page_add_anon_rmap(struct page *page, in do_page_add_anon_rmap() argument
1106 VM_BUG_ON_PAGE(!PageLocked(page), page); in do_page_add_anon_rmap()
1107 VM_BUG_ON_PAGE(!PageTransHuge(page), page); in do_page_add_anon_rmap()
1108 mapcount = compound_mapcount_ptr(page); in do_page_add_anon_rmap()
1111 first = atomic_inc_and_test(&page->_mapcount); in do_page_add_anon_rmap()
1115 int nr = compound ? hpage_nr_pages(page) : 1; in do_page_add_anon_rmap()
1123 __inc_node_page_state(page, NR_ANON_THPS); in do_page_add_anon_rmap()
1124 __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr); in do_page_add_anon_rmap()
1126 if (unlikely(PageKsm(page))) in do_page_add_anon_rmap()
1129 VM_BUG_ON_PAGE(!PageLocked(page), page); in do_page_add_anon_rmap()
1133 __page_set_anon_rmap(page, vma, address, in do_page_add_anon_rmap()
1136 __page_check_anon_rmap(page, vma, address); in do_page_add_anon_rmap()
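
A hedged sketch of a fault-path caller mapping an anon page that already exists (swap-in style): the rmap is added and the PTE installed under the pte lock (the exact ordering varies by path). Charging, locking and error handling are omitted; the helper name is hypothetical.

static void my_map_existing_anon_page(struct vm_area_struct *vma,
                                      unsigned long address, pte_t *pte,
                                      struct page *page)
{
        pte_t entry = mk_pte(page, vma->vm_page_prot);

        /* pte lock and page lock are held by the caller */
        page_add_anon_rmap(page, vma, address, false);  /* small, not compound */
        set_pte_at(vma->vm_mm, address, pte, entry);
        update_mmu_cache(vma, address, pte);
}
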
1140 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
1141 * @page: the page to add the mapping to
1144 * @compound: charge the page as compound or small page
1148 * Page does not have to be locked.
1150 void page_add_new_anon_rmap(struct page *page, in page_add_new_anon_rmap() argument
1153 int nr = compound ? hpage_nr_pages(page) : 1; in page_add_new_anon_rmap()
1156 __SetPageSwapBacked(page); in page_add_new_anon_rmap()
1158 VM_BUG_ON_PAGE(!PageTransHuge(page), page); in page_add_new_anon_rmap()
1160 atomic_set(compound_mapcount_ptr(page), 0); in page_add_new_anon_rmap()
1161 __inc_node_page_state(page, NR_ANON_THPS); in page_add_new_anon_rmap()
1164 VM_BUG_ON_PAGE(PageTransCompound(page), page); in page_add_new_anon_rmap()
1166 atomic_set(&page->_mapcount, 0); in page_add_new_anon_rmap()
1168 __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, nr); in page_add_new_anon_rmap()
1169 __page_set_anon_rmap(page, vma, address, 1); in page_add_new_anon_rmap()
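
For a brand-new anonymous page the kernel-doc above applies: the page need not be locked, page_add_new_anon_rmap() initializes the mapcount and swap-backed state itself, and the page goes on the LRU before the PTE becomes visible. A hedged, simplified sketch (charging and error paths omitted; the helper name is hypothetical):

static void my_map_new_anon_page(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *pte,
                                 struct page *page)
{
        pte_t entry = mk_pte(page, vma->vm_page_prot);

        entry = pte_mkwrite(pte_mkdirty(entry));
        page_add_new_anon_rmap(page, vma, address, false);
        lru_cache_add_active_or_unevictable(page, vma);
        set_pte_at(vma->vm_mm, address, pte, entry);    /* pte lock held */
        update_mmu_cache(vma, address, pte);
}
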
1173 * page_add_file_rmap - add pte mapping to a file page
1174 * @page: the page to add the mapping to
1175 * @compound: charge the page as compound or small page
1179 void page_add_file_rmap(struct page *page, bool compound) in page_add_file_rmap() argument
1183 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); in page_add_file_rmap()
1184 lock_page_memcg(page); in page_add_file_rmap()
1185 if (compound && PageTransHuge(page)) { in page_add_file_rmap()
1187 if (atomic_inc_and_test(&page[i]._mapcount)) in page_add_file_rmap()
1190 if (!atomic_inc_and_test(compound_mapcount_ptr(page))) in page_add_file_rmap()
1192 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); in page_add_file_rmap()
1193 __inc_node_page_state(page, NR_SHMEM_PMDMAPPED); in page_add_file_rmap()
1195 if (PageTransCompound(page) && page_mapping(page)) { in page_add_file_rmap()
1196 VM_WARN_ON_ONCE(!PageLocked(page)); in page_add_file_rmap()
1198 SetPageDoubleMap(compound_head(page)); in page_add_file_rmap()
1199 if (PageMlocked(page)) in page_add_file_rmap()
1200 clear_page_mlock(compound_head(page)); in page_add_file_rmap()
1202 if (!atomic_inc_and_test(&page->_mapcount)) in page_add_file_rmap()
1205 __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); in page_add_file_rmap()
1207 unlock_page_memcg(page); in page_add_file_rmap()
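
The file-backed counterpart: a hedged sketch of a file-fault path installing a small (non-PMD) mapping and letting page_add_file_rmap() keep NR_FILE_MAPPED in step. Simplified; the helper name is hypothetical.

static void my_map_file_page(struct vm_area_struct *vma, unsigned long address,
                             pte_t *pte, struct page *page)
{
        pte_t entry = mk_pte(page, vma->vm_page_prot);

        inc_mm_counter(vma->vm_mm, mm_counter_file(page));
        page_add_file_rmap(page, false);        /* false: not a PMD mapping */
        set_pte_at(vma->vm_mm, address, pte, entry);
}
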
1210 static void page_remove_file_rmap(struct page *page, bool compound) in page_remove_file_rmap() argument
1214 VM_BUG_ON_PAGE(compound && !PageHead(page), page); in page_remove_file_rmap()
1215 lock_page_memcg(page); in page_remove_file_rmap()
1218 if (unlikely(PageHuge(page))) { in page_remove_file_rmap()
1220 atomic_dec(compound_mapcount_ptr(page)); in page_remove_file_rmap()
1224 /* page still mapped by someone else? */ in page_remove_file_rmap()
1225 if (compound && PageTransHuge(page)) { in page_remove_file_rmap()
1227 if (atomic_add_negative(-1, &page[i]._mapcount)) in page_remove_file_rmap()
1230 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) in page_remove_file_rmap()
1232 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); in page_remove_file_rmap()
1233 __dec_node_page_state(page, NR_SHMEM_PMDMAPPED); in page_remove_file_rmap()
1235 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_file_rmap()
1244 __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr); in page_remove_file_rmap()
1246 if (unlikely(PageMlocked(page))) in page_remove_file_rmap()
1247 clear_page_mlock(page); in page_remove_file_rmap()
1249 unlock_page_memcg(page); in page_remove_file_rmap()
1252 static void page_remove_anon_compound_rmap(struct page *page) in page_remove_anon_compound_rmap() argument
1256 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) in page_remove_anon_compound_rmap()
1260 if (unlikely(PageHuge(page))) in page_remove_anon_compound_rmap()
1266 __dec_node_page_state(page, NR_ANON_THPS); in page_remove_anon_compound_rmap()
1268 if (TestClearPageDoubleMap(page)) { in page_remove_anon_compound_rmap()
1274 if (atomic_add_negative(-1, &page[i]._mapcount)) in page_remove_anon_compound_rmap()
1281 if (unlikely(PageMlocked(page))) in page_remove_anon_compound_rmap()
1282 clear_page_mlock(page); in page_remove_anon_compound_rmap()
1285 __mod_node_page_state(page_pgdat(page), NR_ANON_MAPPED, -nr); in page_remove_anon_compound_rmap()
1286 deferred_split_huge_page(page); in page_remove_anon_compound_rmap()
1291 * page_remove_rmap - take down pte mapping from a page
1292 * @page: page to remove mapping from
1293 * @compound: uncharge the page as compound or small page
1297 void page_remove_rmap(struct page *page, bool compound) in page_remove_rmap() argument
1299 if (!PageAnon(page)) in page_remove_rmap()
1300 return page_remove_file_rmap(page, compound); in page_remove_rmap()
1303 return page_remove_anon_compound_rmap(page); in page_remove_rmap()
1305 /* page still mapped by someone else? */ in page_remove_rmap()
1306 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_rmap()
1314 __dec_node_page_state(page, NR_ANON_MAPPED); in page_remove_rmap()
1316 if (unlikely(PageMlocked(page))) in page_remove_rmap()
1317 clear_page_mlock(page); in page_remove_rmap()
1319 if (PageTransCompound(page)) in page_remove_rmap()
1320 deferred_split_huge_page(compound_head(page)); in page_remove_rmap()
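
The teardown side pairs with the page_add_*_rmap calls above. A hedged sketch of a zap-style path: clear the PTE, move the hardware dirty bit to the page, drop the rmap and then the page reference (the real path batches the put through the mmu_gather). The helper name is hypothetical.

static void my_zap_one_pte(struct vm_area_struct *vma, unsigned long address,
                           pte_t *pte, struct page *page)
{
        pte_t ptent = ptep_get_and_clear(vma->vm_mm, address, pte);

        if (pte_dirty(ptent))
                set_page_dirty(page);           /* dirty bit now lives on the page */

        dec_mm_counter(vma->vm_mm, mm_counter(page));
        page_remove_rmap(page, false);          /* pte lock held by the caller */
        put_page(page);
}
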
1336 static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, in try_to_unmap_one() argument
1341 .page = page, in try_to_unmap_one()
1346 struct page *subpage; in try_to_unmap_one()
1356 is_zone_device_page(page) && !is_device_private_page(page)) in try_to_unmap_one()
1361 flags & TTU_SPLIT_FREEZE, page); in try_to_unmap_one()
1369 * Note that the page cannot be freed in this function, as the caller of in try_to_unmap_one()
1370 * try_to_unmap() must hold a reference on the page. in try_to_unmap_one()
1372 end = min(vma->vm_end, start + (PAGE_SIZE << compound_order(page))); in try_to_unmap_one()
1373 if (PageHuge(page)) { in try_to_unmap_one()
1386 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); in try_to_unmap_one()
1388 set_pmd_migration_entry(&pvmw, page); in try_to_unmap_one()
1394 * If the page is mlock()d, we cannot swap it out. in try_to_unmap_one()
1401 if (!PageTransCompound(page)) { in try_to_unmap_one()
1406 mlock_vma_page(page); in try_to_unmap_one()
1417 VM_BUG_ON_PAGE(!pvmw.pte, page); in try_to_unmap_one()
1419 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); in try_to_unmap_one()
1422 if (PageHuge(page)) { in try_to_unmap_one()
1426 * page. There is no way of knowing exactly in try_to_unmap_one()
1436 * The ref count of the PMD page was dropped in try_to_unmap_one()
1441 * unmap the actual page and drop map count in try_to_unmap_one()
1451 is_zone_device_page(page)) { in try_to_unmap_one()
1458 * Store the pfn of the page in a special migration in try_to_unmap_one()
1462 entry = make_migration_entry(page, 0); in try_to_unmap_one()
1474 * migrated, just set it to page. This will need to be in try_to_unmap_one()
1478 subpage = page; in try_to_unmap_one()
1491 /* Nuke the page table entry. */ in try_to_unmap_one()
1496 * a remote CPU could still be writing to the page. in try_to_unmap_one()
1509 /* Move the dirty bit to the page. Now the pte is gone. */ in try_to_unmap_one()
1511 set_page_dirty(page); in try_to_unmap_one()
1516 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { in try_to_unmap_one()
1518 if (PageHuge(page)) { in try_to_unmap_one()
1519 int nr = 1 << compound_order(page); in try_to_unmap_one()
1525 dec_mm_counter(mm, mm_counter(page)); in try_to_unmap_one()
1531 * The guest indicated that the page content is of no in try_to_unmap_one()
1535 * page. When userfaultfd is active, we must not drop in try_to_unmap_one()
1536 * this page though, as its main user (postcopy in try_to_unmap_one()
1540 dec_mm_counter(mm, mm_counter(page)); in try_to_unmap_one()
1557 * Store the pfn of the page in a special migration in try_to_unmap_one()
1571 } else if (PageAnon(page)) { in try_to_unmap_one()
1578 if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) { in try_to_unmap_one()
1588 /* MADV_FREE page check */ in try_to_unmap_one()
1589 if (!PageSwapBacked(page)) { in try_to_unmap_one()
1590 if (!PageDirty(page)) { in try_to_unmap_one()
1599 * If the page was redirtied, it cannot be in try_to_unmap_one()
1600 * discarded. Remap the page to the page table. in try_to_unmap_one()
1603 SetPageSwapBacked(page); in try_to_unmap_one()
1638 * This is a locked file-backed page, thus it cannot in try_to_unmap_one()
1639 * be removed from the page cache and replaced by a new in try_to_unmap_one()
1640 * page before mmu_notifier_invalidate_range_end, so no in try_to_unmap_one()
1641 * concurrent thread might update its page table to in try_to_unmap_one()
1642 * point at new page while a device still is using this in try_to_unmap_one()
1643 * page. in try_to_unmap_one()
1647 dec_mm_counter(mm, mm_counter_file(page)); in try_to_unmap_one()
1652 * done above for all cases requiring it to happen under page in try_to_unmap_one()
1657 page_remove_rmap(subpage, PageHuge(page)); in try_to_unmap_one()
1658 put_page(page); in try_to_unmap_one()
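
Several branches of try_to_unmap_one() above replace the PTE with a "special migration entry" that encodes the page and its write permission, so a later fault waits for migration to finish and the entry can be turned back into a real PTE. A hedged, simplified sketch of that replacement (soft-dirty propagation shown, mmu-notifier and rmap bookkeeping omitted; the helper name is hypothetical):

static void my_install_migration_entry(struct mm_struct *mm, unsigned long address,
                                       pte_t *ptep, struct page *page, pte_t pteval)
{
        swp_entry_t entry = make_migration_entry(page, pte_write(pteval));
        pte_t swp_pte = swp_entry_to_pte(entry);

        if (pte_soft_dirty(pteval))
                swp_pte = pte_swp_mksoft_dirty(swp_pte);

        set_pte_at(mm, address, ptep, swp_pte);
        /* followed by page_remove_rmap() and put_page(), exactly as at the
         * end of try_to_unmap_one() above */
}
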
1685 static int page_mapcount_is_zero(struct page *page) in page_mapcount_is_zero() argument
1687 return !total_mapcount(page); in page_mapcount_is_zero()
1691 * try_to_unmap - try to remove all page table mappings to a page
1692 * @page: the page to get unmapped
1695 * Tries to remove all the page table entries which are mapping this
1696 * page, used in the pageout path. Caller must hold the page lock.
1700 bool try_to_unmap(struct page *page, enum ttu_flags flags) in try_to_unmap() argument
1712 * page tables leading to a race where migration cannot in try_to_unmap()
1718 && !PageKsm(page) && PageAnon(page)) in try_to_unmap()
1722 rmap_walk_locked(page, &rwc); in try_to_unmap()
1724 rmap_walk(page, &rwc); in try_to_unmap()
1726 return !page_mapcount(page) ? true : false; in try_to_unmap()
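
A hedged sketch of the migration-side caller of try_to_unmap(), using the flag set that drives the TTU_MIGRATION branches seen above; the helper name is hypothetical.

static bool my_unmap_for_migration(struct page *page)
{
        VM_BUG_ON_PAGE(!PageLocked(page), page);        /* caller holds the lock */

        if (!page_mapped(page))
                return true;

        return try_to_unmap(page,
                            TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
}
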
1729 static int page_not_mapped(struct page *page) in page_not_mapped() argument
1731 return !page_mapped(page); in page_not_mapped()
1735 * try_to_munlock - try to munlock a page
1736 * @page: the page to be munlocked
1738 * Called from munlock code. Checks all of the VMAs mapping the page
1739 * to make sure nobody else has this page mlocked. The page will be
1743 void try_to_munlock(struct page *page) in try_to_munlock() argument
1753 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); in try_to_munlock()
1754 VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); in try_to_munlock()
1756 rmap_walk(page, &rwc); in try_to_munlock()
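
A hedged sketch of the munlock-side use described above: when this process drops its VM_LOCKED mapping, the rmap walk checks whether some other VM_LOCKED vma still maps the page before it is allowed back onto a normal LRU list. Simplified from the munlock path; the helper name is hypothetical.

static void my_munlock_isolated_page(struct page *page)
{
        VM_BUG_ON_PAGE(!PageLocked(page), page);

        if (page_mapcount(page) > 1)
                try_to_munlock(page);   /* may re-mlock via another VM_LOCKED vma */

        /* ... the page is then returned to the appropriate LRU list ... */
}
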
1768 static struct anon_vma *rmap_walk_anon_lock(struct page *page, in rmap_walk_anon_lock() argument
1774 return rwc->anon_lock(page); in rmap_walk_anon_lock()
1782 anon_vma = page_anon_vma(page); in rmap_walk_anon_lock()
1791 * rmap_walk_anon - do something to anonymous page using the object-based
1793 * @page: the page to be handled
1796 * Find all the mappings of a page using the mapping pointer and the vma chains
1800 * where the page was found will be held for write. So, we won't recheck
1804 static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, in rmap_walk_anon() argument
1812 anon_vma = page_anon_vma(page); in rmap_walk_anon()
1814 VM_BUG_ON_PAGE(!anon_vma, page); in rmap_walk_anon()
1816 anon_vma = rmap_walk_anon_lock(page, rwc); in rmap_walk_anon()
1821 pgoff_start = page_to_pgoff(page); in rmap_walk_anon()
1822 pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; in rmap_walk_anon()
1826 unsigned long address = vma_address(page, vma); in rmap_walk_anon()
1833 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_anon()
1835 if (rwc->done && rwc->done(page)) in rmap_walk_anon()
1844 * rmap_walk_file - do something to file page using the object-based rmap method
1845 * @page: the page to be handled
1848 * Find all the mappings of a page using the mapping pointer and the vma chains
1852 * where the page was found will be held for write. So, we won't recheck
1856 static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, in rmap_walk_file() argument
1859 struct address_space *mapping = page_mapping(page); in rmap_walk_file()
1864 * The page lock not only makes sure that page->mapping cannot in rmap_walk_file()
1869 VM_BUG_ON_PAGE(!PageLocked(page), page); in rmap_walk_file()
1874 pgoff_start = page_to_pgoff(page); in rmap_walk_file()
1875 pgoff_end = pgoff_start + hpage_nr_pages(page) - 1; in rmap_walk_file()
1880 unsigned long address = vma_address(page, vma); in rmap_walk_file()
1887 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_file()
1889 if (rwc->done && rwc->done(page)) in rmap_walk_file()
1898 void rmap_walk(struct page *page, struct rmap_walk_control *rwc) in rmap_walk() argument
1900 if (unlikely(PageKsm(page))) in rmap_walk()
1901 rmap_walk_ksm(page, rwc); in rmap_walk()
1902 else if (PageAnon(page)) in rmap_walk()
1903 rmap_walk_anon(page, rwc, false); in rmap_walk()
1905 rmap_walk_file(page, rwc, false); in rmap_walk()
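
Everything above (page_referenced, page_mkclean, try_to_unmap, try_to_munlock) is a client of the same walker interface: fill in an rmap_walk_control and hand it to rmap_walk(), which dispatches to the KSM, anon, or file walker. A minimal hedged sketch of such a client; the callback and helper names are hypothetical, and for file pages the caller must hold the page lock.

static bool my_count_one(struct page *page, struct vm_area_struct *vma,
                         unsigned long address, void *arg)
{
        int *nr_vmas = arg;

        (*nr_vmas)++;           /* called once per vma whose range covers the
                                 * page; real callbacks run page_vma_mapped_walk()
                                 * here, as in the earlier sketch */
        return true;            /* returning false would stop the walk */
}

static int my_count_mapping_vmas(struct page *page)
{
        int nr_vmas = 0;
        struct rmap_walk_control rwc = {
                .rmap_one = my_count_one,
                .arg = &nr_vmas,
        };

        rmap_walk(page, &rwc);
        return nr_vmas;
}
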
1909 void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc) in rmap_walk_locked() argument
1912 VM_BUG_ON_PAGE(PageKsm(page), page); in rmap_walk_locked()
1913 if (PageAnon(page)) in rmap_walk_locked()
1914 rmap_walk_anon(page, rwc, true); in rmap_walk_locked()
1916 rmap_walk_file(page, rwc, true); in rmap_walk_locked()
1925 static void __hugepage_set_anon_rmap(struct page *page, in __hugepage_set_anon_rmap() argument
1932 if (PageAnon(page)) in __hugepage_set_anon_rmap()
1938 page->mapping = (struct address_space *) anon_vma; in __hugepage_set_anon_rmap()
1939 page->index = linear_page_index(vma, address); in __hugepage_set_anon_rmap()
1942 void hugepage_add_anon_rmap(struct page *page, in hugepage_add_anon_rmap() argument
1948 BUG_ON(!PageLocked(page)); in hugepage_add_anon_rmap()
1951 first = atomic_inc_and_test(compound_mapcount_ptr(page)); in hugepage_add_anon_rmap()
1953 __hugepage_set_anon_rmap(page, vma, address, 0); in hugepage_add_anon_rmap()
1956 void hugepage_add_new_anon_rmap(struct page *page, in hugepage_add_new_anon_rmap() argument
1960 atomic_set(compound_mapcount_ptr(page), 0); in hugepage_add_new_anon_rmap()
1961 __hugepage_set_anon_rmap(page, vma, address, 1); in hugepage_add_new_anon_rmap()