Lines matching refs:page — apparently mm/vmscan.c from an Android common kernel v5.10-series tree (the trace_android_vh_* lines are Android vendor hooks not present in mainline)
169 struct page *prev; \ in prefetch{,w}_prev_lru_page() macro (hence the line continuation)
754 static inline int is_page_cache_freeable(struct page *page) in is_page_cache_freeable() argument
761 int page_cache_pins = thp_nr_pages(page); in is_page_cache_freeable()
762 return page_count(page) - page_has_private(page) == 1 + page_cache_pins; in is_page_cache_freeable()
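
The three matches above are essentially the whole predicate. A commented reconstruction, following the v5.10 source:

    static inline int is_page_cache_freeable(struct page *page)
    {
        /*
         * A freeable page cache page is referenced only by the caller
         * that isolated it plus the page cache itself: one reference
         * per subpage for a THP (thp_nr_pages()), plus one more if
         * buffer heads are attached (page_has_private()).
         */
        int page_cache_pins = thp_nr_pages(page);

        return page_count(page) - page_has_private(page) == 1 + page_cache_pins;
    }
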
789 struct page *page, int error) in handle_write_error() argument
791 lock_page(page); in handle_write_error()
792 if (page_mapping(page) == mapping) in handle_write_error()
794 unlock_page(page); in handle_write_error()
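
The one unmatched line (793) contains no "page" token, which is why the search skipped it; in the v5.10 source it is mapping_set_error(). The helper records a writeback error against the mapping, taking the page lock so the mapping test is stable against truncation:

    static void handle_write_error(struct address_space *mapping,
                                   struct page *page, int error)
    {
        lock_page(page);
        if (page_mapping(page) == mapping)
            mapping_set_error(mapping, error);  /* line 793, not matched above */
        unlock_page(page);
    }
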
813 static pageout_t pageout(struct page *page, struct address_space *mapping) in pageout() argument
831 if (!is_page_cache_freeable(page)) in pageout()
838 if (page_has_private(page)) { in pageout()
839 if (try_to_free_buffers(page)) { in pageout()
840 ClearPageDirty(page); in pageout()
852 if (clear_page_dirty_for_io(page)) { in pageout()
862 SetPageReclaim(page); in pageout()
863 res = mapping->a_ops->writepage(page, &wbc); in pageout()
865 handle_write_error(mapping, page, res); in pageout()
867 ClearPageReclaim(page); in pageout()
871 if (!PageWriteback(page)) { in pageout()
873 ClearPageReclaim(page); in pageout()
875 trace_mm_vmscan_writepage(page); in pageout()
876 inc_node_page_state(page, NR_VMSCAN_WRITE); in pageout()
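
pageout() is the only place reclaim writes a dirty page itself. A condensed sketch assembled from the matched lines, with mainline v5.10 filling the gaps (the wbc initializer and return values come from that source and may differ slightly in this tree):

    static pageout_t pageout(struct page *page, struct address_space *mapping)
    {
        if (!is_page_cache_freeable(page))
            return PAGE_KEEP;                   /* racing users: keep it */
        if (!mapping) {
            /* Orphaned journaling buffers: free them, call the page clean */
            if (page_has_private(page) && try_to_free_buffers(page)) {
                ClearPageDirty(page);
                return PAGE_CLEAN;
            }
            return PAGE_KEEP;
        }
        if (mapping->a_ops->writepage == NULL)
            return PAGE_ACTIVATE;               /* cannot write: activate */

        if (clear_page_dirty_for_io(page)) {
            int res;
            struct writeback_control wbc = {
                .sync_mode   = WB_SYNC_NONE,
                .nr_to_write = SWAP_CLUSTER_MAX,
                .range_start = 0,
                .range_end   = LLONG_MAX,
                .for_reclaim = 1,
            };

            SetPageReclaim(page);               /* rotate to LRU tail on completion */
            res = mapping->a_ops->writepage(page, &wbc);
            if (res < 0)
                handle_write_error(mapping, page, res);
            if (res == AOP_WRITEPAGE_ACTIVATE) {
                ClearPageReclaim(page);
                return PAGE_ACTIVATE;
            }
            if (!PageWriteback(page))           /* write completed synchronously */
                ClearPageReclaim(page);
            trace_mm_vmscan_writepage(page);
            inc_node_page_state(page, NR_VMSCAN_WRITE);
            return PAGE_SUCCESS;
        }
        return PAGE_CLEAN;                      /* someone else wrote it meanwhile */
    }
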
887 static int __remove_mapping(struct address_space *mapping, struct page *page, in __remove_mapping() argument
894 BUG_ON(!PageLocked(page)); in __remove_mapping()
895 BUG_ON(mapping != page_mapping(page)); in __remove_mapping()
923 refcount = 1 + compound_nr(page); in __remove_mapping()
924 if (!page_ref_freeze(page, refcount)) in __remove_mapping()
927 if (unlikely(PageDirty(page))) { in __remove_mapping()
928 page_ref_unfreeze(page, refcount); in __remove_mapping()
932 if (PageSwapCache(page)) { in __remove_mapping()
933 swp_entry_t swap = { .val = page_private(page) }; in __remove_mapping()
934 mem_cgroup_swapout(page, swap); in __remove_mapping()
936 shadow = workingset_eviction(page, target_memcg); in __remove_mapping()
937 __delete_from_swap_cache(page, swap, shadow); in __remove_mapping()
939 put_swap_page(page, swap); in __remove_mapping()
941 void (*freepage)(struct page *); in __remove_mapping()
960 if (reclaimed && page_is_file_lru(page) && in __remove_mapping()
962 shadow = workingset_eviction(page, target_memcg); in __remove_mapping()
963 __delete_from_page_cache(page, shadow); in __remove_mapping()
967 freepage(page); in __remove_mapping()
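
__remove_mapping() detaches a page from its address_space under the xarray lock. The core trick is the refcount freeze: unless the refcount is exactly the expected 1 + compound_nr(page), someone else holds a reference and the page cannot be freed. Condensed sketch; the unmatched conditions (mapping_exiting(), dax_mapping()) are filled in from mainline v5.10:

    xa_lock_irqsave(&mapping->i_pages, flags);
    refcount = 1 + compound_nr(page);           /* caller + per-subpage cache refs */
    if (!page_ref_freeze(page, refcount))
        goto cannot_free;                       /* extra reference exists */
    if (unlikely(PageDirty(page))) {
        /* Dirtied after the last pagecache lookup: must not free */
        page_ref_unfreeze(page, refcount);
        goto cannot_free;
    }

    if (PageSwapCache(page)) {
        swp_entry_t swap = { .val = page_private(page) };

        mem_cgroup_swapout(page, swap);
        if (reclaimed && !mapping_exiting(mapping))
            shadow = workingset_eviction(page, target_memcg);
        __delete_from_swap_cache(page, swap, shadow);
        xa_unlock_irqrestore(&mapping->i_pages, flags);
        put_swap_page(page, swap);
    } else {
        void (*freepage)(struct page *) = mapping->a_ops->freepage;

        /* Leave a shadow entry so a later refault can be detected */
        if (reclaimed && page_is_file_lru(page) &&
            !mapping_exiting(mapping) && !dax_mapping(mapping))
            shadow = workingset_eviction(page, target_memcg);
        __delete_from_page_cache(page, shadow);
        xa_unlock_irqrestore(&mapping->i_pages, flags);
        if (freepage)
            freepage(page);
    }
    return 1;
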
983 int remove_mapping(struct address_space *mapping, struct page *page) in remove_mapping() argument
985 if (__remove_mapping(mapping, page, false, NULL)) { in remove_mapping()
991 page_ref_unfreeze(page, 1); in remove_mapping()
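
remove_mapping() is the public wrapper. On success it unfreezes the refcount to 1 rather than 2, which silently converts the former pagecache reference into one owned by the caller, saving an atomic op:

    int remove_mapping(struct address_space *mapping, struct page *page)
    {
        if (__remove_mapping(mapping, page, false, NULL)) {
            /* Unfreeze to 1: the pagecache ref is handed to the caller */
            page_ref_unfreeze(page, 1);
            return 1;
        }
        return 0;
    }
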
1006 void putback_lru_page(struct page *page) in putback_lru_page() argument
1008 lru_cache_add(page); in putback_lru_page()
1009 put_page(page); /* drop ref from isolate */ in putback_lru_page()
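
putback_lru_page() undoes an isolation: lru_cache_add() re-inserts the page via the pagevecs and put_page() drops the reference the isolation took. A typical (hypothetical) caller pairing for illustration:

    /* isolate_lru_page() returns 0 on success and takes a reference */
    if (!isolate_lru_page(page)) {
        /* ... operate on the page while it is off the LRU ... */
        putback_lru_page(page);     /* re-add to an LRU, drop the isolate ref */
    }
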
1019 static enum page_references page_check_references(struct page *page, in page_check_references() argument
1028 trace_android_vh_page_should_be_protected(page, &should_protect); in page_check_references()
1032 trace_android_vh_page_trylock_set(page); in page_check_references()
1033 trace_android_vh_check_page_look_around_ref(page, &ret); in page_check_references()
1036 referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup, in page_check_references()
1038 referenced_page = TestClearPageReferenced(page); in page_check_references()
1039 trace_android_vh_page_trylock_get_result(page, &trylock_fail); in page_check_references()
1068 SetPageReferenced(page); in page_check_references()
1076 if ((vm_flags & VM_EXEC) && !PageSwapBacked(page)) in page_check_references()
1083 if (referenced_page && !PageSwapBacked(page)) in page_check_references()
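
Stripped of the Android vendor hooks, the decision logic reduces to the following ladder (body excerpt, per the v5.10 source; the PAGEREF_* values come from the same file):

    referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup, &vm_flags);
    referenced_page = TestClearPageReferenced(page);

    if (vm_flags & VM_LOCKED)
        return PAGEREF_RECLAIM;             /* mlocked: let try_to_unmap() see it */

    if (referenced_ptes) {
        if (PageSwapBacked(page))
            return PAGEREF_ACTIVATE;        /* anon: one reference suffices */

        SetPageReferenced(page);            /* second-chance bit for file pages */
        if (referenced_page || referenced_ptes > 1)
            return PAGEREF_ACTIVATE;        /* referenced repeatedly: promote */
        if ((vm_flags & VM_EXEC) && !PageSwapBacked(page))
            return PAGEREF_ACTIVATE;        /* protect executable file pages */
        return PAGEREF_KEEP;
    }

    /* Reclaim if clean, defer dirty pages to writeback */
    if (referenced_page && !PageSwapBacked(page))
        return PAGEREF_RECLAIM_CLEAN;
    return PAGEREF_RECLAIM;
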
1090 static void page_check_dirty_writeback(struct page *page, in page_check_dirty_writeback() argument
1099 if (!page_is_file_lru(page) || in page_check_dirty_writeback()
1100 (PageAnon(page) && !PageSwapBacked(page))) { in page_check_dirty_writeback()
1107 *dirty = PageDirty(page); in page_check_dirty_writeback()
1108 *writeback = PageWriteback(page); in page_check_dirty_writeback()
1111 if (!page_has_private(page)) in page_check_dirty_writeback()
1114 mapping = page_mapping(page); in page_check_dirty_writeback()
1116 mapping->a_ops->is_dirty_writeback(page, dirty, writeback); in page_check_dirty_writeback()
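
A reconstruction with the v5.10 comments condensed; the only gaps in the matches are the early-return assignments and the a_ops NULL check:

    static void page_check_dirty_writeback(struct page *page,
                                           bool *dirty, bool *writeback)
    {
        struct address_space *mapping;

        /*
         * Anonymous pages are not handled by flushers, and MADV_FREE
         * pages (anon && !swapbacked) can be dropped without writeback:
         * report them clean so reclaim does not stall on them.
         */
        if (!page_is_file_lru(page) ||
            (PageAnon(page) && !PageSwapBacked(page))) {
            *dirty = false;
            *writeback = false;
            return;
        }

        /* By default assume the page flags are accurate */
        *dirty = PageDirty(page);
        *writeback = PageWriteback(page);

        /* Some filesystems track dirty/writeback state themselves */
        if (!page_has_private(page))
            return;

        mapping = page_mapping(page);
        if (mapping && mapping->a_ops->is_dirty_writeback)
            mapping->a_ops->is_dirty_writeback(page, dirty, writeback);
    }
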
1139 struct page *page; in shrink_page_list() local
1146 page = lru_to_page(page_list); in shrink_page_list()
1147 list_del(&page->lru); in shrink_page_list()
1149 if (!trylock_page(page)) in shrink_page_list()
1152 VM_BUG_ON_PAGE(PageActive(page), page); in shrink_page_list()
1154 nr_pages = compound_nr(page); in shrink_page_list()
1159 if (unlikely(!page_evictable(page))) in shrink_page_list()
1162 if (!sc->may_unmap && page_mapped(page)) in shrink_page_list()
1166 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); in shrink_page_list()
1174 page_check_dirty_writeback(page, &dirty, &writeback); in shrink_page_list()
1187 mapping = page_mapping(page); in shrink_page_list()
1190 (writeback && PageReclaim(page))) in shrink_page_list()
1235 if (PageWriteback(page)) { in shrink_page_list()
1238 PageReclaim(page) && in shrink_page_list()
1245 !PageReclaim(page) || !may_enter_fs) { in shrink_page_list()
1257 SetPageReclaim(page); in shrink_page_list()
1263 unlock_page(page); in shrink_page_list()
1264 wait_on_page_writeback(page); in shrink_page_list()
1266 list_add_tail(&page->lru, page_list); in shrink_page_list()
1272 references = page_check_references(page, sc); in shrink_page_list()
1290 if (PageAnon(page) && PageSwapBacked(page)) { in shrink_page_list()
1291 if (!PageSwapCache(page)) { in shrink_page_list()
1294 if (page_maybe_dma_pinned(page)) in shrink_page_list()
1296 if (PageTransHuge(page)) { in shrink_page_list()
1298 if (!can_split_huge_page(page, NULL)) in shrink_page_list()
1305 if (!compound_mapcount(page) && in shrink_page_list()
1306 split_huge_page_to_list(page, in shrink_page_list()
1310 if (!add_to_swap(page)) { in shrink_page_list()
1311 if (!PageTransHuge(page)) in shrink_page_list()
1314 if (split_huge_page_to_list(page, in shrink_page_list()
1320 if (!add_to_swap(page)) in shrink_page_list()
1327 mapping = page_mapping(page); in shrink_page_list()
1329 } else if (unlikely(PageTransHuge(page))) { in shrink_page_list()
1331 if (split_huge_page_to_list(page, page_list)) in shrink_page_list()
1342 if ((nr_pages > 1) && !PageTransHuge(page)) { in shrink_page_list()
1351 if (page_mapped(page)) { in shrink_page_list()
1353 bool was_swapbacked = PageSwapBacked(page); in shrink_page_list()
1355 if (unlikely(PageTransHuge(page))) in shrink_page_list()
1358 trace_android_vh_page_trylock_set(page); in shrink_page_list()
1359 if (!try_to_unmap(page, flags)) { in shrink_page_list()
1361 if (!was_swapbacked && PageSwapBacked(page)) in shrink_page_list()
1367 if (PageDirty(page)) { in shrink_page_list()
1378 if (page_is_file_lru(page) && in shrink_page_list()
1379 (!current_is_kswapd() || !PageReclaim(page) || in shrink_page_list()
1387 inc_node_page_state(page, NR_VMSCAN_IMMEDIATE); in shrink_page_list()
1388 SetPageReclaim(page); in shrink_page_list()
1406 switch (pageout(page, mapping)) { in shrink_page_list()
1412 stat->nr_pageout += thp_nr_pages(page); in shrink_page_list()
1414 if (PageWriteback(page)) in shrink_page_list()
1416 if (PageDirty(page)) in shrink_page_list()
1423 if (!trylock_page(page)) in shrink_page_list()
1425 if (PageDirty(page) || PageWriteback(page)) in shrink_page_list()
1427 mapping = page_mapping(page); in shrink_page_list()
1454 if (page_has_private(page)) { in shrink_page_list()
1455 if (!try_to_release_page(page, sc->gfp_mask)) in shrink_page_list()
1457 if (!mapping && page_count(page) == 1) { in shrink_page_list()
1458 unlock_page(page); in shrink_page_list()
1459 if (put_page_testzero(page)) in shrink_page_list()
1469 trace_android_vh_page_trylock_clear(page); in shrink_page_list()
1476 if (PageAnon(page) && !PageSwapBacked(page)) { in shrink_page_list()
1478 if (!page_ref_freeze(page, 1)) in shrink_page_list()
1480 if (PageDirty(page)) { in shrink_page_list()
1481 page_ref_unfreeze(page, 1); in shrink_page_list()
1486 count_memcg_page_event(page, PGLAZYFREED); in shrink_page_list()
1487 } else if (!mapping || !__remove_mapping(mapping, page, true, in shrink_page_list()
1491 unlock_page(page); in shrink_page_list()
1503 trace_android_vh_page_trylock_clear(page); in shrink_page_list()
1504 if (unlikely(PageTransHuge(page))) in shrink_page_list()
1505 destroy_compound_page(page); in shrink_page_list()
1507 list_add(&page->lru, &free_pages); in shrink_page_list()
1521 if (PageSwapCache(page) && (mem_cgroup_swap_full(page) || in shrink_page_list()
1522 PageMlocked(page))) in shrink_page_list()
1523 try_to_free_swap(page); in shrink_page_list()
1524 VM_BUG_ON_PAGE(PageActive(page), page); in shrink_page_list()
1525 if (!PageMlocked(page)) { in shrink_page_list()
1526 int type = page_is_file_lru(page); in shrink_page_list()
1527 SetPageActive(page); in shrink_page_list()
1529 count_memcg_page_event(page, PGACTIVATE); in shrink_page_list()
1543 trace_android_vh_page_trylock_get_result(page, &page_trylock_result); in shrink_page_list()
1544 unlock_page(page); in shrink_page_list()
1546 list_add(&page->lru, &ret_pages); in shrink_page_list()
1547 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page); in shrink_page_list()
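
shrink_page_list() is the heart of reclaim; the matches above span roughly 400 lines. A heavily condensed outline of the per-page state machine, paraphrased from the matched lines (counters, vendor hooks and several corner cases are elided; the label names follow the real function):

    while (!list_empty(page_list)) {
        page = lru_to_page(page_list);
        list_del(&page->lru);

        if (!trylock_page(page))
            goto keep;                      /* contended: retry next pass */
        if (unlikely(!page_evictable(page)))
            goto activate_locked;
        if (!sc->may_unmap && page_mapped(page))
            goto keep_locked;

        /*
         * Pages under writeback: kswapd may wait_on_page_writeback();
         * anyone else tags the page PageReclaim (rotate to the LRU tail
         * on I/O completion) and moves on, bounding reclaim latency.
         */

        switch (page_check_references(page, sc)) {
        case PAGEREF_ACTIVATE: goto activate_locked;
        case PAGEREF_KEEP:     goto keep_locked;
        case PAGEREF_RECLAIM:
        case PAGEREF_RECLAIM_CLEAN:
            break;                          /* try to reclaim it */
        }

        /* Anon needs swap space; a THP is split onto page_list if a
         * huge swap cluster cannot be allocated. */
        if (PageAnon(page) && PageSwapBacked(page) &&
            !PageSwapCache(page) && !add_to_swap(page))
            goto activate_locked;

        mapping = page_mapping(page);       /* refreshed after add_to_swap() */

        if (page_mapped(page) && !try_to_unmap(page, flags))
            goto activate_locked;           /* flags include TTU_SPLIT_HUGE_PMD for THP */

        if (PageDirty(page)) {
            /* File pages are only written back by kswapd, and only when
             * marked PageReclaim on a congested node. */
            switch (pageout(page, mapping)) {
            case PAGE_KEEP:     goto keep_locked;
            case PAGE_ACTIVATE: goto activate_locked;
            case PAGE_SUCCESS:  /* writepage unlocked it: re-trylock, re-check */
            case PAGE_CLEAN:
                break;
            }
        }

        if (page_has_private(page) &&
            !try_to_release_page(page, sc->gfp_mask))
            goto activate_locked;

        /* Lazyfree (anon && !swapbacked) pages are refcount-frozen and
         * freed directly; everything else goes via __remove_mapping(). */
        if (!mapping || !__remove_mapping(mapping, page, true,
                                          sc->target_mem_cgroup))
            goto keep_locked;

        unlock_page(page);
        list_add(&page->lru, &free_pages);  /* reclaimed */
        continue;

    activate_locked:
        if (!PageMlocked(page))
            SetPageActive(page);            /* rotate onto the active list */
    keep_locked:
        unlock_page(page);
    keep:
        list_add(&page->lru, &ret_pages);
    }
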
1572 struct page *page, *next; in reclaim_clean_pages_from_list() local
1575 list_for_each_entry_safe(page, next, page_list, lru) { in reclaim_clean_pages_from_list()
1576 if (page_is_file_lru(page) && !PageDirty(page) && in reclaim_clean_pages_from_list()
1577 !__PageMovable(page) && !PageUnevictable(page)) { in reclaim_clean_pages_from_list()
1578 ClearPageActive(page); in reclaim_clean_pages_from_list()
1579 list_move(&page->lru, &clean_pages); in reclaim_clean_pages_from_list()
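
reclaim_clean_pages_from_list() (used by alloc_contig_range()/CMA) filters the clean, unmapped-movable file pages out of page_list and feeds only those to shrink_page_list(), so freeing them needs no I/O. Sketch with the scan_control from v5.10:

    struct scan_control sc = {
        .gfp_mask  = GFP_KERNEL,
        .priority  = DEF_PRIORITY,
        .may_unmap = 1,
    };
    LIST_HEAD(clean_pages);

    list_for_each_entry_safe(page, next, page_list, lru) {
        if (page_is_file_lru(page) && !PageDirty(page) &&
            !__PageMovable(page) && !PageUnevictable(page)) {
            ClearPageActive(page);
            list_move(&page->lru, &clean_pages);
        }
    }

    nr_reclaimed = shrink_page_list(&clean_pages, zone->zone_pgdat, &sc,
                                    &stat, true);
    list_splice(&clean_pages, page_list);   /* survivors return to the caller */
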
1611 int __isolate_lru_page(struct page *page, isolate_mode_t mode) in __isolate_lru_page() argument
1616 if (!PageLRU(page)) in __isolate_lru_page()
1620 if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE)) in __isolate_lru_page()
1635 if (PageWriteback(page)) in __isolate_lru_page()
1638 if (PageDirty(page)) { in __isolate_lru_page()
1651 if (!trylock_page(page)) in __isolate_lru_page()
1654 mapping = page_mapping(page); in __isolate_lru_page()
1656 unlock_page(page); in __isolate_lru_page()
1662 if ((mode & ISOLATE_UNMAPPED) && page_mapped(page)) in __isolate_lru_page()
1665 if (likely(get_page_unless_zero(page))) { in __isolate_lru_page()
1671 ClearPageLRU(page); in __isolate_lru_page()
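
__isolate_lru_page() tries to claim one page off an LRU list; the isolate_mode_t bits let compaction ask only for pages it can migrate without blocking. Condensed sketch per v5.10:

    int __isolate_lru_page(struct page *page, isolate_mode_t mode)
    {
        int ret = -EINVAL;

        if (!PageLRU(page))                 /* only take pages on the LRU */
            return ret;
        if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
            return ret;

        ret = -EBUSY;
        if (mode & ISOLATE_ASYNC_MIGRATE) {
            if (PageWriteback(page))        /* could only block on it */
                return ret;
            if (PageDirty(page)) {
                struct address_space *mapping;
                bool migrate_dirty;

                /* Lock to stabilise the mapping against truncation */
                if (!trylock_page(page))
                    return ret;
                mapping = page_mapping(page);
                migrate_dirty = !mapping || mapping->a_ops->migratepage;
                unlock_page(page);
                if (!migrate_dirty)
                    return ret;
            }
        }

        if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
            return ret;

        if (likely(get_page_unless_zero(page))) {
            /* Don't clear PageLRU until the reference is pinned:
             * the page release code relies on it. */
            ClearPageLRU(page);
            ret = 0;
        }
        return ret;
    }
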
1733 struct page *page; in isolate_lru_pages() local
1735 page = lru_to_page(src); in isolate_lru_pages()
1736 prefetchw_prev_lru_page(page, src, flags); in isolate_lru_pages()
1738 VM_BUG_ON_PAGE(!PageLRU(page), page); in isolate_lru_pages()
1740 nr_pages = compound_nr(page); in isolate_lru_pages()
1743 if (page_zonenum(page) > sc->reclaim_idx) { in isolate_lru_pages()
1744 list_move(&page->lru, &pages_skipped); in isolate_lru_pages()
1745 nr_skipped[page_zonenum(page)] += nr_pages; in isolate_lru_pages()
1760 switch (__isolate_lru_page(page, mode)) { in isolate_lru_pages()
1763 nr_zone_taken[page_zonenum(page)] += nr_pages; in isolate_lru_pages()
1764 trace_android_vh_del_page_from_lrulist(page, false, lru); in isolate_lru_pages()
1765 list_move(&page->lru, dst); in isolate_lru_pages()
1770 list_move(&page->lru, src); in isolate_lru_pages()
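
isolate_lru_pages() batches the above under the lru_lock. Pages from zones above sc->reclaim_idx are parked on pages_skipped and spliced back afterwards, so reclaim targeting a low zone does not churn higher-zone pages. Condensed loop body, glue from v5.10:

    while (scan < nr_to_scan && !list_empty(src)) {
        page = lru_to_page(src);
        prefetchw_prev_lru_page(page, src, flags);
        VM_BUG_ON_PAGE(!PageLRU(page), page);

        nr_pages = compound_nr(page);

        if (page_zonenum(page) > sc->reclaim_idx) {
            list_move(&page->lru, &pages_skipped);
            nr_skipped[page_zonenum(page)] += nr_pages;
            continue;                       /* skipped pages are not counted */
        }
        scan += nr_pages;

        switch (__isolate_lru_page(page, mode)) {
        case 0:
            nr_taken += nr_pages;
            nr_zone_taken[page_zonenum(page)] += nr_pages;
            list_move(&page->lru, dst);
            break;
        case -EBUSY:
            list_move(&page->lru, src);     /* rotate; it is being freed elsewhere */
            break;
        default:
            BUG();
        }
    }
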
1830 int isolate_lru_page(struct page *page) in isolate_lru_page() argument
1834 VM_BUG_ON_PAGE(!page_count(page), page); in isolate_lru_page()
1835 WARN_RATELIMIT(PageTail(page), "trying to isolate tail page"); in isolate_lru_page()
1837 if (PageLRU(page)) { in isolate_lru_page()
1838 pg_data_t *pgdat = page_pgdat(page); in isolate_lru_page()
1842 lruvec = mem_cgroup_page_lruvec(page, pgdat); in isolate_lru_page()
1843 if (PageLRU(page)) { in isolate_lru_page()
1844 int lru = page_lru(page); in isolate_lru_page()
1845 get_page(page); in isolate_lru_page()
1846 ClearPageLRU(page); in isolate_lru_page()
1847 del_page_from_lru_list(page, lruvec, lru); in isolate_lru_page()
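
The single-page variant takes the node's lru_lock and re-tests PageLRU under it, since the flag may have been cleared between the unlocked check and the lock. Reconstruction per v5.10:

    int isolate_lru_page(struct page *page)
    {
        int ret = -EBUSY;

        VM_BUG_ON_PAGE(!page_count(page), page);
        WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");

        if (PageLRU(page)) {
            pg_data_t *pgdat = page_pgdat(page);
            struct lruvec *lruvec;

            spin_lock_irq(&pgdat->lru_lock);
            lruvec = mem_cgroup_page_lruvec(page, pgdat);
            if (PageLRU(page)) {            /* re-check under lru_lock */
                int lru = page_lru(page);

                get_page(page);             /* the "isolate" reference */
                ClearPageLRU(page);
                del_page_from_lru_list(page, lruvec, lru);
                ret = 0;
            }
            spin_unlock_irq(&pgdat->lru_lock);
        }
        return ret;
    }
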
1918 struct page *page; in move_pages_to_lru() local
1922 page = lru_to_page(list); in move_pages_to_lru()
1923 VM_BUG_ON_PAGE(PageLRU(page), page); in move_pages_to_lru()
1924 if (unlikely(!page_evictable(page))) { in move_pages_to_lru()
1925 list_del(&page->lru); in move_pages_to_lru()
1927 putback_lru_page(page); in move_pages_to_lru()
1931 lruvec = mem_cgroup_page_lruvec(page, pgdat); in move_pages_to_lru()
1933 SetPageLRU(page); in move_pages_to_lru()
1934 lru = page_lru(page); in move_pages_to_lru()
1936 nr_pages = thp_nr_pages(page); in move_pages_to_lru()
1937 update_lru_size(lruvec, lru, page_zonenum(page), nr_pages); in move_pages_to_lru()
1938 list_move(&page->lru, &lruvec->lists[lru]); in move_pages_to_lru()
1939 trace_android_vh_add_page_to_lrulist(page, false, lru); in move_pages_to_lru()
1941 if (put_page_testzero(page)) { in move_pages_to_lru()
1942 __ClearPageLRU(page); in move_pages_to_lru()
1943 __ClearPageActive(page); in move_pages_to_lru()
1944 del_page_from_lru_list(page, lruvec, lru); in move_pages_to_lru()
1946 if (unlikely(PageCompound(page))) { in move_pages_to_lru()
1948 destroy_compound_page(page); in move_pages_to_lru()
1951 list_add(&page->lru, &pages_to_free); in move_pages_to_lru()
1954 if (PageActive(page)) in move_pages_to_lru()
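
move_pages_to_lru() returns isolated pages to their lruvec and catches pages whose last reference died while they were isolated (put_page_testzero()); those are freed, with compound pages destroyed outside the lru_lock. Condensed loop, glue from v5.10:

    while (!list_empty(list)) {
        page = lru_to_page(list);
        VM_BUG_ON_PAGE(PageLRU(page), page);
        if (unlikely(!page_evictable(page))) {
            list_del(&page->lru);
            spin_unlock_irq(&pgdat->lru_lock);
            putback_lru_page(page);         /* goes to the unevictable LRU */
            spin_lock_irq(&pgdat->lru_lock);
            continue;
        }
        lruvec = mem_cgroup_page_lruvec(page, pgdat);

        SetPageLRU(page);
        lru = page_lru(page);

        nr_pages = thp_nr_pages(page);
        update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
        list_move(&page->lru, &lruvec->lists[lru]);

        if (put_page_testzero(page)) {
            /* We held the last reference: the page is being freed */
            __ClearPageLRU(page);
            __ClearPageActive(page);
            del_page_from_lru_list(page, lruvec, lru);

            if (unlikely(PageCompound(page))) {
                spin_unlock_irq(&pgdat->lru_lock);
                destroy_compound_page(page);
                spin_lock_irq(&pgdat->lru_lock);
            } else
                list_add(&page->lru, &pages_to_free);
        } else {
            nr_moved += nr_pages;
            if (PageActive(page))
                workingset_age_nonresident(lruvec, nr_pages);
        }
    }
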
2088 struct page *page; in shrink_active_list() local
2113 page = lru_to_page(&l_hold); in shrink_active_list()
2114 list_del(&page->lru); in shrink_active_list()
2116 if (unlikely(!page_evictable(page))) { in shrink_active_list()
2117 putback_lru_page(page); in shrink_active_list()
2122 if (page_has_private(page) && trylock_page(page)) { in shrink_active_list()
2123 if (page_has_private(page)) in shrink_active_list()
2124 try_to_release_page(page, 0); in shrink_active_list()
2125 unlock_page(page); in shrink_active_list()
2129 trace_android_vh_page_should_be_protected(page, &should_protect); in shrink_active_list()
2131 nr_rotated += thp_nr_pages(page); in shrink_active_list()
2132 list_add(&page->lru, &l_active); in shrink_active_list()
2136 trace_android_vh_page_referenced_check_bypass(page, nr_to_scan, lru, &bypass); in shrink_active_list()
2139 trace_android_vh_page_trylock_set(page); in shrink_active_list()
2141 if (page_referenced(page, 0, sc->target_mem_cgroup, in shrink_active_list()
2152 if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) { in shrink_active_list()
2153 trace_android_vh_page_trylock_clear(page); in shrink_active_list()
2154 nr_rotated += thp_nr_pages(page); in shrink_active_list()
2155 list_add(&page->lru, &l_active); in shrink_active_list()
2159 trace_android_vh_page_trylock_clear(page); in shrink_active_list()
2161 ClearPageActive(page); /* we are de-activating */ in shrink_active_list()
2162 SetPageWorkingset(page); in shrink_active_list()
2163 list_add(&page->lru, &l_inactive); in shrink_active_list()
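
The deactivation loop of shrink_active_list(): buffer heads are dropped opportunistically when over their limit, referenced executable file pages earn another trip around the active list, and everything else is deactivated with SetPageWorkingset() so a quick refault registers as thrashing. Condensed, vendor hooks removed:

    while (!list_empty(&l_hold)) {
        cond_resched();
        page = lru_to_page(&l_hold);
        list_del(&page->lru);

        if (unlikely(!page_evictable(page))) {
            putback_lru_page(page);
            continue;
        }

        if (unlikely(buffer_heads_over_limit)) {
            if (page_has_private(page) && trylock_page(page)) {
                if (page_has_private(page))
                    try_to_release_page(page, 0);
                unlock_page(page);
            }
        }

        if (page_referenced(page, 0, sc->target_mem_cgroup, &vm_flags)) {
            /* Rotate referenced executable file pages: protects code
             * pages from being reclaimed under streaming file I/O. */
            if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
                nr_rotated += thp_nr_pages(page);
                list_add(&page->lru, &l_active);
                continue;
            }
        }

        ClearPageActive(page);              /* we are de-activating */
        SetPageWorkingset(page);
        list_add(&page->lru, &l_inactive);
    }
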
2194 struct page *page; in reclaim_pages() local
2204 page = lru_to_page(page_list); in reclaim_pages()
2206 nid = page_to_nid(page); in reclaim_pages()
2210 if (nid == page_to_nid(page)) { in reclaim_pages()
2211 ClearPageActive(page); in reclaim_pages()
2212 list_move(&page->lru, &node_page_list); in reclaim_pages()
2220 page = lru_to_page(&node_page_list); in reclaim_pages()
2221 list_del(&page->lru); in reclaim_pages()
2222 putback_lru_page(page); in reclaim_pages()
2233 page = lru_to_page(&node_page_list); in reclaim_pages()
2234 list_del(&page->lru); in reclaim_pages()
2235 putback_lru_page(page); in reclaim_pages()
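
reclaim_pages() (the MADV_PAGEOUT backend in this era) walks a caller-supplied list, batching consecutive same-node pages so each batch can be passed to shrink_page_list() with that node's pgdat; survivors of each batch are put back. Condensed sketch:

    struct scan_control sc = {
        .gfp_mask      = GFP_KERNEL,
        .priority      = DEF_PRIORITY,
        .may_writepage = 1,
        .may_unmap     = 1,
        .may_swap      = 1,
    };
    int nid = NUMA_NO_NODE;
    LIST_HEAD(node_page_list);

    while (!list_empty(page_list)) {
        page = lru_to_page(page_list);
        if (nid == NUMA_NO_NODE)
            nid = page_to_nid(page);        /* start a new batch */

        if (nid == page_to_nid(page)) {
            ClearPageActive(page);
            list_move(&page->lru, &node_page_list);
            continue;
        }

        /* Node changed: reclaim the batch, put back what survives */
        nr_reclaimed += shrink_page_list(&node_page_list, NODE_DATA(nid),
                                         &sc, &dummy_stat, false);
        while (!list_empty(&node_page_list)) {
            page = lru_to_page(&node_page_list);
            list_del(&page->lru);
            putback_lru_page(page);
        }
        nid = NUMA_NO_NODE;
    }
    /* ... the final batch (lines 2233-2235 above) is handled the same way */
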
4438 struct page *page = pvec->pages[i]; in check_move_unevictable_pages() local
4439 struct pglist_data *pagepgdat = page_pgdat(page); in check_move_unevictable_pages()
4442 if (PageTransTail(page)) in check_move_unevictable_pages()
4445 nr_pages = thp_nr_pages(page); in check_move_unevictable_pages()
4454 lruvec = mem_cgroup_page_lruvec(page, pgdat); in check_move_unevictable_pages()
4456 if (!PageLRU(page) || !PageUnevictable(page)) in check_move_unevictable_pages()
4459 if (page_evictable(page)) { in check_move_unevictable_pages()
4460 enum lru_list lru = page_lru_base_type(page); in check_move_unevictable_pages()
4462 VM_BUG_ON_PAGE(PageActive(page), page); in check_move_unevictable_pages()
4463 ClearPageUnevictable(page); in check_move_unevictable_pages()
4464 del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE); in check_move_unevictable_pages()
4465 add_page_to_lru_list(page, lruvec, lru); in check_move_unevictable_pages()
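
check_move_unevictable_pages() rescues pages stranded on the unevictable LRU after whatever pinned them (e.g. SHM_LOCK) went away: each now-evictable page has PageUnevictable cleared and is moved back to its base LRU, with the lru_lock re-taken whenever the batch crosses a node boundary. Condensed loop, glue from v5.10:

    for (i = 0; i < pvec->nr; i++) {
        struct page *page = pvec->pages[i];
        struct pglist_data *pagepgdat = page_pgdat(page);

        if (PageTransTail(page))            /* the head carries its tails */
            continue;
        nr_pages = thp_nr_pages(page);
        pgscanned += nr_pages;

        if (pagepgdat != pgdat) {           /* lru_lock is per node */
            if (pgdat)
                spin_unlock_irq(&pgdat->lru_lock);
            pgdat = pagepgdat;
            spin_lock_irq(&pgdat->lru_lock);
        }
        lruvec = mem_cgroup_page_lruvec(page, pgdat);

        if (!PageLRU(page) || !PageUnevictable(page))
            continue;

        if (page_evictable(page)) {
            enum lru_list lru = page_lru_base_type(page);

            VM_BUG_ON_PAGE(PageActive(page), page);
            ClearPageUnevictable(page);
            del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
            add_page_to_lru_list(page, lruvec, lru);
            pgrescued += nr_pages;
        }
    }
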