Lines matching refs: page
(Cross-reference hits for the identifier "page"; each entry gives the source line number, the matching code, and the enclosing function, in what appears to be an Android common kernel's mm/rmap.c.)
484 struct anon_vma *page_get_anon_vma(struct page *page) in page_get_anon_vma() argument
490 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma()
493 if (!page_mapped(page)) in page_get_anon_vma()
509 if (!page_mapped(page)) { in page_get_anon_vma()
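The four hits above trace the lockless pattern in page_get_anon_vma(): read page->mapping once under RCU, verify the anon tag, pin the anon_vma with a refcount, then re-check page_mapped() under that reference. A condensed sketch of the flow, reconstructed from these hits and the mainline mm/rmap.c of this era rather than quoted verbatim:

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>

struct anon_vma *page_get_anon_vma(struct page *page)
{
        struct anon_vma *anon_vma = NULL;
        unsigned long anon_mapping;

        rcu_read_lock();
        anon_mapping = (unsigned long)READ_ONCE(page->mapping);  /* 490 */
        /* low bits of page->mapping tag it as an anon_vma pointer */
        if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                goto out;
        if (!page_mapped(page))                                  /* 493 */
                goto out;

        anon_vma = (struct anon_vma *)(anon_mapping - PAGE_MAPPING_ANON);
        if (!atomic_inc_not_zero(&anon_vma->refcount)) {
                anon_vma = NULL;
                goto out;
        }

        /*
         * Re-check under the pinned reference (the hit at 509): if the
         * page was unmapped meanwhile, the anon_vma may be freed as soon
         * as our count drops, so fail rather than hand it out.
         */
        if (!page_mapped(page)) {
                rcu_read_unlock();
                put_anon_vma(anon_vma);
                return NULL;
        }
out:
        rcu_read_unlock();
        return anon_vma;
}

page_lock_anon_vma_read() (hits 528 to 575) follows the same shape but additionally takes anon_vma->root's rwsem for read, falling back from a trylock to a refcounted sleep.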
528 struct anon_vma *page_lock_anon_vma_read(struct page *page, in page_lock_anon_vma_read() argument
537 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read()
540 if (!page_mapped(page)) in page_lock_anon_vma_read()
551 if (!page_mapped(page)) { in page_lock_anon_vma_read()
557 trace_android_vh_do_page_trylock(page, NULL, NULL, &success); in page_lock_anon_vma_read()
575 if (!page_mapped(page)) { in page_lock_anon_vma_read()
721 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) in page_address_in_vma() argument
723 if (PageAnon(page)) { in page_address_in_vma()
724 struct anon_vma *page__anon_vma = page_anon_vma(page); in page_address_in_vma()
734 } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) { in page_address_in_vma()
738 return vma_address(page, vma); in page_address_in_vma()
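The hit at 738 delegates the final computation to vma_address(). Stripped of the bounds checks the real helper in mm/internal.h performs, the math is plain linear arithmetic on page offsets; the function name below is mine, not the kernel's:

/* Sketch only: the real vma_address() also rejects pages whose offset
 * falls outside the VMA and special-cases THP tail pages. */
static unsigned long vma_address_sketch(struct page *page,
                                        struct vm_area_struct *vma)
{
        pgoff_t pgoff = page_to_pgoff(page);

        /* file/anon offset -> user virtual address in this mapping */
        return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}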
784 static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, in page_referenced_one() argument
789 .page = page, in page_referenced_one()
805 trace_android_vh_look_around(&pvmw, page, vma, &referenced); in page_referenced_one()
832 clear_page_idle(page); in page_referenced_one()
833 if (test_and_clear_page_young(page)) in page_referenced_one()
841 trace_android_vh_page_referenced_one_end(vma, page, referenced); in page_referenced_one()
871 int page_referenced(struct page *page, in page_referenced() argument
878 .mapcount = total_mapcount(page), in page_referenced()
892 if (!page_rmapping(page)) in page_referenced()
895 if (!is_locked && (!PageAnon(page) || PageKsm(page))) { in page_referenced()
896 we_locked = trylock_page(page); in page_referenced()
910 rmap_walk(page, &rwc); in page_referenced()
914 unlock_page(page); in page_referenced()
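page_referenced() (hits 871 to 914) is a thin client of the generic reverse-map walker: it packs its state into a page_referenced_arg and lets rmap_walk() call page_referenced_one() for every VMA that maps the page. Approximately, per the mainline source of this period (kernel context assumed):

struct page_referenced_arg pra = {
        .mapcount = total_mapcount(page),       /* hit at 878 */
        .memcg = memcg,
};
struct rmap_walk_control rwc = {
        .rmap_one = page_referenced_one,        /* per-VMA callback, hit at 784 */
        .arg = (void *)&pra,
        .anon_lock = page_lock_anon_vma_read,   /* hit at 528 */
};

rmap_walk(page, &rwc);                          /* hit at 910 */

The trylock at 896 covers the non-anon and KSM cases, where rmap_walk() needs the page lock that the caller did not already hold.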
920 static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, in page_mkclean_one() argument
924 .page = page, in page_mkclean_one()
938 vma_address_end(page, vma)); in page_mkclean_one()
966 flush_cache_page(vma, address, page_to_pfn(page)); in page_mkclean_one()
1002 int page_mkclean(struct page *page) in page_mkclean() argument
1012 BUG_ON(!PageLocked(page)); in page_mkclean()
1014 if (!page_mapped(page)) in page_mkclean()
1017 mapping = page_mapping(page); in page_mkclean()
1021 rmap_walk(page, &rwc); in page_mkclean()
1037 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) in page_move_anon_rmap() argument
1041 page = compound_head(page); in page_move_anon_rmap()
1043 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_move_anon_rmap()
1052 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); in page_move_anon_rmap()
1062 static void __page_set_anon_rmap(struct page *page, in __page_set_anon_rmap() argument
1069 if (PageAnon(page)) in __page_set_anon_rmap()
1081 page->mapping = (struct address_space *) anon_vma; in __page_set_anon_rmap()
1082 page->index = linear_page_index(vma, address); in __page_set_anon_rmap()
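The stores at 1081 and 1082 are the heart of the anon rmap encoding: page->mapping does double duty, holding either a real address_space pointer (file pages) or an anon_vma pointer tagged with the low PAGE_MAPPING_ANON bit, while page->index holds the linear page offset. A sketch of the encode and decode sides; both helper names here are illustrative, not the kernel's:

static void set_anon_mapping_sketch(struct page *page,
                                    struct anon_vma *anon_vma,
                                    struct vm_area_struct *vma,
                                    unsigned long address)
{
        /* tag the pointer so readers can tell anon from file mappings */
        void *tagged = (void *)anon_vma + PAGE_MAPPING_ANON;

        page->mapping = (struct address_space *)tagged;         /* 1081 */
        page->index = linear_page_index(vma, address);          /* 1082 */
}

static bool anon_mapping_sketch(struct page *page)
{
        unsigned long m = (unsigned long)READ_ONCE(page->mapping);

        /* strict test, as in page_get_anon_vma() above; PageAnon()
         * itself only checks the PAGE_MAPPING_ANON bit */
        return (m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON;
}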
1091 static void __page_check_anon_rmap(struct page *page, in __page_check_anon_rmap() argument
1106 VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page); in __page_check_anon_rmap()
1107 VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), in __page_check_anon_rmap()
1108 page); in __page_check_anon_rmap()
1123 void page_add_anon_rmap(struct page *page, in page_add_anon_rmap() argument
1126 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0); in page_add_anon_rmap()
1134 void do_page_add_anon_rmap(struct page *page, in do_page_add_anon_rmap() argument
1141 if (unlikely(PageKsm(page))) in do_page_add_anon_rmap()
1142 lock_page_memcg(page); in do_page_add_anon_rmap()
1144 VM_BUG_ON_PAGE(!PageLocked(page), page); in do_page_add_anon_rmap()
1148 VM_BUG_ON_PAGE(!PageLocked(page), page); in do_page_add_anon_rmap()
1149 VM_BUG_ON_PAGE(!PageTransHuge(page), page); in do_page_add_anon_rmap()
1150 mapcount = compound_mapcount_ptr(page); in do_page_add_anon_rmap()
1153 trace_android_vh_update_page_mapcount(page, true, compound, in do_page_add_anon_rmap()
1156 first = atomic_inc_and_test(&page->_mapcount); in do_page_add_anon_rmap()
1160 int nr = compound ? thp_nr_pages(page) : 1; in do_page_add_anon_rmap()
1168 __inc_lruvec_page_state(page, NR_ANON_THPS); in do_page_add_anon_rmap()
1169 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); in do_page_add_anon_rmap()
1172 if (unlikely(PageKsm(page))) { in do_page_add_anon_rmap()
1173 unlock_page_memcg(page); in do_page_add_anon_rmap()
1179 __page_set_anon_rmap(page, vma, address, in do_page_add_anon_rmap()
1182 __page_check_anon_rmap(page, vma, address); in do_page_add_anon_rmap()
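The atomic_inc_and_test() at 1156 leans on the kernel's mapcount convention: page->_mapcount is biased to -1 while a page is unmapped, so the increment that lands on 0 is the first mapping, and only that call returns true. In miniature (demo function name is mine):

static void mapcount_first_map_demo(void)
{
        atomic_t mapcount = ATOMIC_INIT(-1);    /* unmapped: _mapcount == -1 */

        bool first = atomic_inc_and_test(&mapcount);    /* -1 -> 0: true  */
        bool again = atomic_inc_and_test(&mapcount);    /*  0 -> 1: false */
}

The compound path at 1150 applies the same convention to compound_mapcount_ptr(page), the mapcount slot kept in the first tail page of a THP.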
1196 void __page_add_new_anon_rmap(struct page *page, in __page_add_new_anon_rmap() argument
1199 int nr = compound ? thp_nr_pages(page) : 1; in __page_add_new_anon_rmap()
1201 __SetPageSwapBacked(page); in __page_add_new_anon_rmap()
1203 VM_BUG_ON_PAGE(!PageTransHuge(page), page); in __page_add_new_anon_rmap()
1205 atomic_set(compound_mapcount_ptr(page), 0); in __page_add_new_anon_rmap()
1206 if (hpage_pincount_available(page)) in __page_add_new_anon_rmap()
1207 atomic_set(compound_pincount_ptr(page), 0); in __page_add_new_anon_rmap()
1209 __inc_lruvec_page_state(page, NR_ANON_THPS); in __page_add_new_anon_rmap()
1212 VM_BUG_ON_PAGE(PageTransCompound(page), page); in __page_add_new_anon_rmap()
1214 atomic_set(&page->_mapcount, 0); in __page_add_new_anon_rmap()
1216 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); in __page_add_new_anon_rmap()
1217 __page_set_anon_rmap(page, vma, address, 1); in __page_add_new_anon_rmap()
1227 void page_add_file_rmap(struct page *page, bool compound) in page_add_file_rmap() argument
1233 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); in page_add_file_rmap()
1234 lock_page_memcg(page); in page_add_file_rmap()
1235 if (compound && PageTransHuge(page)) { in page_add_file_rmap()
1236 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { in page_add_file_rmap()
1237 trace_android_vh_update_page_mapcount(&page[i], true, in page_add_file_rmap()
1243 if (atomic_inc_and_test(&page[i]._mapcount)) in page_add_file_rmap()
1247 if (!atomic_inc_and_test(compound_mapcount_ptr(page))) in page_add_file_rmap()
1249 if (PageSwapBacked(page)) in page_add_file_rmap()
1250 __inc_node_page_state(page, NR_SHMEM_PMDMAPPED); in page_add_file_rmap()
1252 __inc_node_page_state(page, NR_FILE_PMDMAPPED); in page_add_file_rmap()
1254 if (PageTransCompound(page) && page_mapping(page)) { in page_add_file_rmap()
1255 VM_WARN_ON_ONCE(!PageLocked(page)); in page_add_file_rmap()
1257 SetPageDoubleMap(compound_head(page)); in page_add_file_rmap()
1258 if (PageMlocked(page)) in page_add_file_rmap()
1259 clear_page_mlock(compound_head(page)); in page_add_file_rmap()
1261 trace_android_vh_update_page_mapcount(page, true, in page_add_file_rmap()
1267 if (!atomic_inc_and_test(&page->_mapcount)) in page_add_file_rmap()
1271 __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); in page_add_file_rmap()
1273 unlock_page_memcg(page); in page_add_file_rmap()
1276 static void page_remove_file_rmap(struct page *page, bool compound) in page_remove_file_rmap() argument
1282 VM_BUG_ON_PAGE(compound && !PageHead(page), page); in page_remove_file_rmap()
1285 if (unlikely(PageHuge(page))) { in page_remove_file_rmap()
1287 atomic_dec(compound_mapcount_ptr(page)); in page_remove_file_rmap()
1292 if (compound && PageTransHuge(page)) { in page_remove_file_rmap()
1293 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { in page_remove_file_rmap()
1294 trace_android_vh_update_page_mapcount(&page[i], false, in page_remove_file_rmap()
1300 if (atomic_add_negative(-1, &page[i]._mapcount)) in page_remove_file_rmap()
1304 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) in page_remove_file_rmap()
1306 if (PageSwapBacked(page)) in page_remove_file_rmap()
1307 __dec_node_page_state(page, NR_SHMEM_PMDMAPPED); in page_remove_file_rmap()
1309 __dec_node_page_state(page, NR_FILE_PMDMAPPED); in page_remove_file_rmap()
1311 trace_android_vh_update_page_mapcount(page, false, in page_remove_file_rmap()
1317 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_file_rmap()
1327 __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr); in page_remove_file_rmap()
1329 if (unlikely(PageMlocked(page))) in page_remove_file_rmap()
1330 clear_page_mlock(page); in page_remove_file_rmap()
1333 static void page_remove_anon_compound_rmap(struct page *page) in page_remove_anon_compound_rmap() argument
1339 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) in page_remove_anon_compound_rmap()
1343 if (unlikely(PageHuge(page))) in page_remove_anon_compound_rmap()
1349 __dec_lruvec_page_state(page, NR_ANON_THPS); in page_remove_anon_compound_rmap()
1351 if (TestClearPageDoubleMap(page)) { in page_remove_anon_compound_rmap()
1356 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { in page_remove_anon_compound_rmap()
1357 trace_android_vh_update_page_mapcount(&page[i], false, in page_remove_anon_compound_rmap()
1363 if (atomic_add_negative(-1, &page[i]._mapcount)) in page_remove_anon_compound_rmap()
1373 if (nr && nr < thp_nr_pages(page)) in page_remove_anon_compound_rmap()
1374 deferred_split_huge_page(page); in page_remove_anon_compound_rmap()
1376 nr = thp_nr_pages(page); in page_remove_anon_compound_rmap()
1379 if (unlikely(PageMlocked(page))) in page_remove_anon_compound_rmap()
1380 clear_page_mlock(page); in page_remove_anon_compound_rmap()
1383 __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr); in page_remove_anon_compound_rmap()
1393 void page_remove_rmap(struct page *page, bool compound) in page_remove_rmap() argument
1397 lock_page_memcg(page); in page_remove_rmap()
1399 if (!PageAnon(page)) { in page_remove_rmap()
1400 page_remove_file_rmap(page, compound); in page_remove_rmap()
1405 page_remove_anon_compound_rmap(page); in page_remove_rmap()
1409 trace_android_vh_update_page_mapcount(page, false, in page_remove_rmap()
1416 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_rmap()
1424 __dec_lruvec_page_state(page, NR_ANON_MAPPED); in page_remove_rmap()
1426 if (unlikely(PageMlocked(page))) in page_remove_rmap()
1427 clear_page_mlock(page); in page_remove_rmap()
1429 if (PageTransCompound(page)) in page_remove_rmap()
1430 deferred_split_huge_page(compound_head(page)); in page_remove_rmap()
1442 unlock_page_memcg(page); in page_remove_rmap()
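The removal paths (hits 1300, 1304, 1317, 1339, 1416) use the mirror-image test: atomic_add_negative(-1, ...) is true only when the count drops back below zero, i.e. when the last mapping goes away; every earlier unmap returns false and the function backs out before touching the statistics. Continuing the miniature above (demo name again mine):

static void mapcount_last_unmap_demo(void)
{
        atomic_t mapcount = ATOMIC_INIT(0);     /* one mapping left */

        bool last = atomic_add_negative(-1, &mapcount); /* 0 -> -1: true */
}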
1448 static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, in try_to_unmap_one() argument
1453 .page = page, in try_to_unmap_one()
1458 struct page *subpage; in try_to_unmap_one()
1477 is_zone_device_page(page) && !is_device_private_page(page)) in try_to_unmap_one()
1482 flags & TTU_SPLIT_FREEZE, page); in try_to_unmap_one()
1493 range.end = PageKsm(page) ? in try_to_unmap_one()
1494 address + PAGE_SIZE : vma_address_end(page, vma); in try_to_unmap_one()
1497 if (PageHuge(page)) { in try_to_unmap_one()
1511 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); in try_to_unmap_one()
1513 set_pmd_migration_entry(&pvmw, page); in try_to_unmap_one()
1526 if (!PageTransCompound(page)) { in try_to_unmap_one()
1531 mlock_vma_page(page); in try_to_unmap_one()
1542 VM_BUG_ON_PAGE(!pvmw.pte, page); in try_to_unmap_one()
1544 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); in try_to_unmap_one()
1547 if (PageHuge(page) && !PageAnon(page)) { in try_to_unmap_one()
1583 is_zone_device_page(page)) { in try_to_unmap_one()
1594 entry = make_migration_entry(page, 0); in try_to_unmap_one()
1617 subpage = page; in try_to_unmap_one()
1641 set_page_dirty(page); in try_to_unmap_one()
1646 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { in try_to_unmap_one()
1648 if (PageHuge(page)) { in try_to_unmap_one()
1649 hugetlb_count_sub(compound_nr(page), mm); in try_to_unmap_one()
1654 dec_mm_counter(mm, mm_counter(page)); in try_to_unmap_one()
1669 dec_mm_counter(mm, mm_counter(page)); in try_to_unmap_one()
1702 } else if (PageAnon(page)) { in try_to_unmap_one()
1709 if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) { in try_to_unmap_one()
1720 if (!PageSwapBacked(page)) { in try_to_unmap_one()
1730 ref_count = page_ref_count(page); in try_to_unmap_one()
1731 map_count = page_mapcount(page); in try_to_unmap_one()
1744 !PageDirty(page)) { in try_to_unmap_one()
1757 SetPageSwapBacked(page); in try_to_unmap_one()
1803 dec_mm_counter(mm, mm_counter_file(page)); in try_to_unmap_one()
1813 page_remove_rmap(subpage, PageHuge(page)); in try_to_unmap_one()
1814 put_page(page); in try_to_unmap_one()
1818 trace_android_vh_try_to_unmap_one(vma, page, address, ret); in try_to_unmap_one()
1828 static int page_not_mapped(struct page *page) in page_not_mapped() argument
1830 return !page_mapped(page); in page_not_mapped()
1843 bool try_to_unmap(struct page *page, enum ttu_flags flags) in try_to_unmap() argument
1861 && !PageKsm(page) && PageAnon(page)) in try_to_unmap()
1865 rmap_walk_locked(page, &rwc); in try_to_unmap()
1867 rmap_walk(page, &rwc); in try_to_unmap()
1875 return !page_mapcount(page); in try_to_unmap()
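try_to_unmap() (hits 1843 to 1875) wires these pieces together; page_not_mapped() above exists purely to serve as its early-exit .done hook. The control structure, approximately as in mainline kernels of this vintage (kernel context assumed):

struct rmap_walk_control rwc = {
        .rmap_one = try_to_unmap_one,           /* hit at 1448 */
        .arg = (void *)flags,
        .done = page_not_mapped,                /* hit at 1828 */
        .anon_lock = page_lock_anon_vma_read,   /* hit at 528 */
};

if (flags & TTU_RMAP_LOCKED)
        rmap_walk_locked(page, &rwc);           /* hit at 1865 */
else
        rmap_walk(page, &rwc);                  /* hit at 1867 */

return !page_mapcount(page);                    /* hit at 1875 */

The condition fragment at 1861 belongs to the migration special case, which also installs an invalid_vma filter for anon, non-KSM pages.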
1887 void try_to_munlock(struct page *page) in try_to_munlock() argument
1897 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); in try_to_munlock()
1898 VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); in try_to_munlock()
1900 rmap_walk(page, &rwc); in try_to_munlock()
1912 static struct anon_vma *rmap_walk_anon_lock(struct page *page, in rmap_walk_anon_lock() argument
1918 return rwc->anon_lock(page, rwc); in rmap_walk_anon_lock()
1926 anon_vma = page_anon_vma(page); in rmap_walk_anon_lock()
1958 static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, in rmap_walk_anon() argument
1966 anon_vma = page_anon_vma(page); in rmap_walk_anon()
1968 VM_BUG_ON_PAGE(!anon_vma, page); in rmap_walk_anon()
1970 anon_vma = rmap_walk_anon_lock(page, rwc); in rmap_walk_anon()
1975 pgoff_start = page_to_pgoff(page); in rmap_walk_anon()
1976 pgoff_end = pgoff_start + thp_nr_pages(page) - 1; in rmap_walk_anon()
1980 unsigned long address = vma_address(page, vma); in rmap_walk_anon()
1988 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_anon()
1990 if (rwc->done && rwc->done(page)) in rmap_walk_anon()
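The loop body at 1980 to 1990 runs inside an interval-tree iteration over every anon_vma_chain whose VMA can map this offset range; condensed, with the invalid_vma filter and locking asserts elided:

pgoff_t pgoff_start = page_to_pgoff(page);                 /* hit at 1975 */
pgoff_t pgoff_end = pgoff_start + thp_nr_pages(page) - 1;  /* hit at 1976 */
struct anon_vma_chain *avc;

anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
                               pgoff_start, pgoff_end) {
        struct vm_area_struct *vma = avc->vma;
        unsigned long address = vma_address(page, vma);    /* hit at 1980 */

        if (!rwc->rmap_one(page, vma, address, rwc->arg))  /* hit at 1988 */
                break;
        if (rwc->done && rwc->done(page))                  /* hit at 1990 */
                break;
}

rmap_walk_file() at 2011 onward is the same loop on the file side, iterating mapping->i_mmap with vma_interval_tree_foreach() instead.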
2011 static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, in rmap_walk_file() argument
2014 struct address_space *mapping = page_mapping(page); in rmap_walk_file()
2025 VM_BUG_ON_PAGE(!PageLocked(page), page); in rmap_walk_file()
2030 pgoff_start = page_to_pgoff(page); in rmap_walk_file()
2031 pgoff_end = pgoff_start + thp_nr_pages(page) - 1; in rmap_walk_file()
2033 trace_android_vh_do_page_trylock(page, in rmap_walk_file()
2053 unsigned long address = vma_address(page, vma); in rmap_walk_file()
2061 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_file()
2063 if (rwc->done && rwc->done(page)) in rmap_walk_file()
2072 void rmap_walk(struct page *page, struct rmap_walk_control *rwc) in rmap_walk() argument
2074 if (unlikely(PageKsm(page))) in rmap_walk()
2075 rmap_walk_ksm(page, rwc); in rmap_walk()
2076 else if (PageAnon(page)) in rmap_walk()
2077 rmap_walk_anon(page, rwc, false); in rmap_walk()
2079 rmap_walk_file(page, rwc, false); in rmap_walk()
2083 void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc) in rmap_walk_locked() argument
2086 VM_BUG_ON_PAGE(PageKsm(page), page); in rmap_walk_locked()
2087 if (PageAnon(page)) in rmap_walk_locked()
2088 rmap_walk_anon(page, rwc, true); in rmap_walk_locked()
2090 rmap_walk_file(page, rwc, true); in rmap_walk_locked()
2099 void hugepage_add_anon_rmap(struct page *page, in hugepage_add_anon_rmap() argument
2105 BUG_ON(!PageLocked(page)); in hugepage_add_anon_rmap()
2108 first = atomic_inc_and_test(compound_mapcount_ptr(page)); in hugepage_add_anon_rmap()
2110 __page_set_anon_rmap(page, vma, address, 0); in hugepage_add_anon_rmap()
2113 void hugepage_add_new_anon_rmap(struct page *page, in hugepage_add_new_anon_rmap() argument
2117 atomic_set(compound_mapcount_ptr(page), 0); in hugepage_add_new_anon_rmap()
2118 if (hpage_pincount_available(page)) in hugepage_add_new_anon_rmap()
2119 atomic_set(compound_pincount_ptr(page), 0); in hugepage_add_new_anon_rmap()
2121 __page_set_anon_rmap(page, vma, address, 1); in hugepage_add_new_anon_rmap()