Lines matching refs:page (every reference to the identifier "page" in mm/khugepaged.c; the inline numbers are source line numbers, and "argument"/"local" mark how the hit is used)
559 static void release_pte_page(struct page *page) in release_pte_page() argument
561 mod_node_page_state(page_pgdat(page), in release_pte_page()
562 NR_ISOLATED_ANON + page_is_file_lru(page), in release_pte_page()
563 -compound_nr(page)); in release_pte_page()
564 unlock_page(page); in release_pte_page()
565 putback_lru_page(page); in release_pte_page()
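
The three statements above are the entire helper. Reassembled as kernel-internal C from a v5.10-era tree (for reading, not standalone compilation):

static void release_pte_page(struct page *page)
{
    /* Undo the NR_ISOLATED_* accounting taken when the page was isolated. */
    mod_node_page_state(page_pgdat(page),
                NR_ISOLATED_ANON + page_is_file_lru(page),
                -compound_nr(page));
    unlock_page(page);
    /* Hand the page back to its LRU list. */
    putback_lru_page(page);
}

compound_nr() keeps the isolated-page counters correct whether a base page or a whole compound page was isolated as one unit.
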
571 struct page *page, *tmp; in release_pte_pages() local
576 page = pte_page(pteval); in release_pte_pages()
578 !PageCompound(page)) in release_pte_pages()
579 release_pte_page(page); in release_pte_pages()
582 list_for_each_entry_safe(page, tmp, compound_pagelist, lru) { in release_pte_pages()
583 list_del(&page->lru); in release_pte_pages()
584 release_pte_page(page); in release_pte_pages()
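
release_pte_pages() is the bail-out path: walk back over the PTEs whose pages were already isolated and release them, then drain the separate list of compound pages. A reconstruction; the loop heads and the pte_none()/zero-pfn test are filled in from context and may differ slightly from the exact tree this listing came from:

static void release_pte_pages(pte_t *pte, pte_t *_pte,
                  struct list_head *compound_pagelist)
{
    struct page *page, *tmp;

    while (--_pte >= pte) {
        pte_t pteval = *_pte;

        page = pte_page(pteval);
        if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
            !PageCompound(page))
            release_pte_page(page);
    }

    /* Compound pages were queued once per page, not once per PTE. */
    list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
        list_del(&page->lru);
        release_pte_page(page);
    }
}
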
588 static bool is_refcount_suitable(struct page *page) in is_refcount_suitable() argument
592 expected_refcount = total_mapcount(page); in is_refcount_suitable()
593 if (PageSwapCache(page)) in is_refcount_suitable()
594 expected_refcount += compound_nr(page); in is_refcount_suitable()
596 return page_count(page) == expected_refcount; in is_refcount_suitable()
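
These fragments are the whole predicate. Reassembled:

static bool is_refcount_suitable(struct page *page)
{
    int expected_refcount;

    /* One reference is expected per mapping... */
    expected_refcount = total_mapcount(page);
    /* ...plus one per subpage held by the swap cache. */
    if (PageSwapCache(page))
        expected_refcount += compound_nr(page);

    return page_count(page) == expected_refcount;
}

Any reference beyond that (a GUP pin, an in-flight speculative lookup) means someone else may be using the page, so collapse must not steal it.
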
604 struct page *page = NULL; in __collapse_huge_page_isolate() local
630 page = vm_normal_page(vma, address, pteval); in __collapse_huge_page_isolate()
631 if (unlikely(!page)) { in __collapse_huge_page_isolate()
636 VM_BUG_ON_PAGE(!PageAnon(page), page); in __collapse_huge_page_isolate()
638 if (page_mapcount(page) > 1 && in __collapse_huge_page_isolate()
644 if (PageCompound(page)) { in __collapse_huge_page_isolate()
645 struct page *p; in __collapse_huge_page_isolate()
646 page = compound_head(page); in __collapse_huge_page_isolate()
653 if (page == p) in __collapse_huge_page_isolate()
664 if (!trylock_page(page)) { in __collapse_huge_page_isolate()
680 if (!is_refcount_suitable(page)) { in __collapse_huge_page_isolate()
681 unlock_page(page); in __collapse_huge_page_isolate()
685 if (!pte_write(pteval) && PageSwapCache(page) && in __collapse_huge_page_isolate()
686 !reuse_swap_page(page, NULL)) { in __collapse_huge_page_isolate()
691 unlock_page(page); in __collapse_huge_page_isolate()
700 if (isolate_lru_page(page)) { in __collapse_huge_page_isolate()
701 unlock_page(page); in __collapse_huge_page_isolate()
705 mod_node_page_state(page_pgdat(page), in __collapse_huge_page_isolate()
706 NR_ISOLATED_ANON + page_is_file_lru(page), in __collapse_huge_page_isolate()
707 compound_nr(page)); in __collapse_huge_page_isolate()
708 VM_BUG_ON_PAGE(!PageLocked(page), page); in __collapse_huge_page_isolate()
709 VM_BUG_ON_PAGE(PageLRU(page), page); in __collapse_huge_page_isolate()
711 if (PageCompound(page)) in __collapse_huge_page_isolate()
712 list_add_tail(&page->lru, compound_pagelist); in __collapse_huge_page_isolate()
716 page_is_young(page) || PageReferenced(page) || in __collapse_huge_page_isolate()
730 trace_mm_collapse_huge_page_isolate(page, none_or_zero, in __collapse_huge_page_isolate()
736 trace_mm_collapse_huge_page_isolate(page, none_or_zero, in __collapse_huge_page_isolate()
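
The fragments between lines 604 and 736 are the per-PTE isolation loop of __collapse_huge_page_isolate(). A condensed sketch of the control flow; the none_or_zero bookkeeping, the distinct SCAN_* result codes, and the error-label plumbing are elided, and elided parts are reconstructed from context rather than quoted:

/* Sketch: one iteration per PTE under the candidate PMD. */
for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
     _pte++, address += PAGE_SIZE) {
    pte_t pteval = *_pte;

    /* (pte_none()/zero-pfn accounting against max_ptes_none elided) */

    page = vm_normal_page(vma, address, pteval);
    if (unlikely(!page))
        goto out;                /* special mapping */
    VM_BUG_ON_PAGE(!PageAnon(page), page);

    if (page_mapcount(page) > 1 &&
        ++shared > khugepaged_max_ptes_shared)
        goto out;                /* too much sharing */

    if (PageCompound(page)) {
        struct page *p;

        page = compound_head(page);
        /* Tail of a compound page we already isolated? Skip it. */
        list_for_each_entry(p, compound_pagelist, lru) {
            if (page == p)
                goto next;
        }
    }

    if (!trylock_page(page))
        goto out;                /* page is contended */
    if (!is_refcount_suitable(page)) {
        unlock_page(page);
        goto out;                /* unexpected extra references */
    }
    if (!pte_write(pteval) && PageSwapCache(page) &&
        !reuse_swap_page(page, NULL)) {
        unlock_page(page);
        goto out;        /* can't take it over from the swap cache */
    }

    if (isolate_lru_page(page)) {
        unlock_page(page);
        goto out;
    }
    mod_node_page_state(page_pgdat(page),
                NR_ISOLATED_ANON + page_is_file_lru(page),
                compound_nr(page));
    VM_BUG_ON_PAGE(!PageLocked(page), page);
    VM_BUG_ON_PAGE(PageLRU(page), page);

    if (PageCompound(page))
        list_add_tail(&page->lru, compound_pagelist);
next:
    /* Track whether the range looks "hot" enough to collapse. */
    if (pte_young(pteval) || page_is_young(page) ||
        PageReferenced(page) ||
        mmu_notifier_test_young(vma->vm_mm, address))
        referenced++;
}

On success the function returns with every page locked and isolated; on any failure it calls release_pte_pages() (above) to put everything back, and both exits fire trace_mm_collapse_huge_page_isolate().
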
741 static void __collapse_huge_page_copy(pte_t *pte, struct page *page, in __collapse_huge_page_copy() argument
747 struct page *src_page, *tmp; in __collapse_huge_page_copy()
750 _pte++, page++, address += PAGE_SIZE) { in __collapse_huge_page_copy()
754 clear_user_highpage(page, address); in __collapse_huge_page_copy()
770 copy_user_highpage(page, src_page, address, vma); in __collapse_huge_page_copy()
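
__collapse_huge_page_copy() walks the destination hugepage and the source PTEs in lock-step: page++ advances through the subpages of the new hugepage while _pte and address advance through the old mapping. Reconstructed, with unlisted statements filled in from context:

static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                      struct vm_area_struct *vma,
                      unsigned long address, spinlock_t *ptl,
                      struct list_head *compound_pagelist)
{
    struct page *src_page, *tmp;
    pte_t *_pte;

    for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
         _pte++, page++, address += PAGE_SIZE) {
        pte_t pteval = *_pte;

        if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
            /* No backing page: zero-fill the destination. */
            clear_user_highpage(page, address);
            add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
            if (is_zero_pfn(pte_pfn(pteval))) {
                spin_lock(ptl);
                pte_clear(vma->vm_mm, address, _pte);
                spin_unlock(ptl);
            }
        } else {
            src_page = pte_page(pteval);
            copy_user_highpage(page, src_page, address, vma);
            if (!PageCompound(src_page))
                release_pte_page(src_page);
            spin_lock(ptl);
            pte_clear(vma->vm_mm, address, _pte);
            page_remove_rmap(src_page, false);
            spin_unlock(ptl);
            free_page_and_swap_cache(src_page);
        }
    }

    /* Compound sources are released once, after the whole copy. */
    list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
        list_del(&src_page->lru);
        release_pte_page(src_page);
    }
}
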
864 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) in khugepaged_prealloc_page()
881 static struct page *
882 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) in khugepaged_alloc_page()
903 static inline struct page *alloc_khugepaged_hugepage(void) in alloc_khugepaged_hugepage()
905 struct page *page; in alloc_khugepaged_hugepage() local
907 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(), in alloc_khugepaged_hugepage()
909 if (page) in alloc_khugepaged_hugepage()
910 prep_transhuge_page(page); in alloc_khugepaged_hugepage()
911 return page; in alloc_khugepaged_hugepage()
914 static struct page *khugepaged_alloc_hugepage(bool *wait) in khugepaged_alloc_hugepage()
916 struct page *hpage; in khugepaged_alloc_hugepage()
934 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait) in khugepaged_prealloc_page()
957 static struct page *
958 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node) in khugepaged_alloc_page()
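
Lines 864-958 show two build variants, which is why khugepaged_prealloc_page() and khugepaged_alloc_page() each appear twice: with CONFIG_NUMA the hugepage is allocated at collapse time on the node the scan voted for; without it, a hugepage can be preallocated and reused across scans. Reconstructions of the two central allocators, with unlisted lines filled in from context:

static inline struct page *alloc_khugepaged_hugepage(void)
{
    struct page *page;

    page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
               HPAGE_PMD_ORDER);
    if (page)
        prep_transhuge_page(page);    /* set up compound/THP state */
    return page;
}

/* CONFIG_NUMA variant: allocate directly on the chosen node. */
static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
    VM_BUG_ON_PAGE(*hpage, *hpage);

    *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
    if (unlikely(!*hpage)) {
        count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
        *hpage = ERR_PTR(-ENOMEM);
        return NULL;
    }

    prep_transhuge_page(*hpage);
    count_vm_event(THP_COLLAPSE_ALLOC);
    return *hpage;
}
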
1065 struct page **hpage, in collapse_huge_page()
1072 struct page *new_page; in collapse_huge_page()
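
Only the declaration lines of collapse_huge_page() match refs:page, but its role is pinned down by the helpers above. An outline from context (the step names are the real helpers, the ordering follows upstream, everything else is elided):

/*
 * collapse_huge_page() outline:
 *   1. khugepaged_alloc_page()           allocate new_page on @node
 *   2. mmap_write_lock(), revalidate the VMA,
 *      __collapse_huge_page_swapin()     fault in swapped-out PTEs
 *   3. pmdp_collapse_flush()             detach the old PTE table
 *   4. __collapse_huge_page_isolate()    lock + isolate the small pages
 *   5. __collapse_huge_page_copy()       copy them into new_page
 *   6. set_pmd_at()                      map new_page with one huge PMD
 */
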
1238 struct page **hpage) in khugepaged_scan_pmd()
1244 struct page *page = NULL; in khugepaged_scan_pmd() local
1309 page = vm_normal_page(vma, _address, pteval); in khugepaged_scan_pmd()
1310 if (unlikely(!page)) { in khugepaged_scan_pmd()
1315 if (page_mapcount(page) > 1 && in khugepaged_scan_pmd()
1321 page = compound_head(page); in khugepaged_scan_pmd()
1329 node = page_to_nid(page); in khugepaged_scan_pmd()
1335 if (!PageLRU(page)) { in khugepaged_scan_pmd()
1339 if (PageLocked(page)) { in khugepaged_scan_pmd()
1343 if (!PageAnon(page)) { in khugepaged_scan_pmd()
1365 if (!is_refcount_suitable(page)) { in khugepaged_scan_pmd()
1370 page_is_young(page) || PageReferenced(page) || in khugepaged_scan_pmd()
1391 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced, in khugepaged_scan_pmd()
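
khugepaged_scan_pmd() runs the same checks as the isolation loop, but lockless and read-only; only if the whole range qualifies does it unmap the PTE table and call collapse_huge_page(). A condensed sketch, with the SCAN_* result bookkeeping folded into single branches:

for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR;
     _pte++, _address += PAGE_SIZE) {
    pte_t pteval = *_pte;

    /* (pte_none()/swap-entry accounting against the max_ptes_*
       thresholds elided) */

    page = vm_normal_page(vma, _address, pteval);
    if (unlikely(!page))
        goto out_unmap;

    if (page_mapcount(page) > 1 &&
        ++shared > khugepaged_max_ptes_shared)
        goto out_unmap;

    page = compound_head(page);

    /* Vote on the node the collapse allocation should target. */
    node = page_to_nid(page);
    if (khugepaged_scan_abort(node))
        goto out_unmap;
    khugepaged_node_load[node]++;

    /* Distinct SCAN_* failures upstream, one branch here. */
    if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
        goto out_unmap;

    if (!is_refcount_suitable(page))
        goto out_unmap;

    if (pte_write(pteval))
        writable = true;
    if (pte_young(pteval) || page_is_young(page) ||
        PageReferenced(page) ||
        mmu_notifier_test_young(vma->vm_mm, _address))
        referenced++;
}
if (writable && referenced)
    ret = 1;        /* simplified: SCAN_SUCCEED */
out_unmap:
pte_unmap_unlock(pte, ptl);
if (ret) {
    node = khugepaged_find_target_node();
    /* collapse_huge_page() returns with mmap_lock released */
    collapse_huge_page(mm, address, hpage, node,
               referenced, unmapped);
}
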
1450 struct page *hpage; in collapse_pte_mapped_thp()
1503 struct page *page; in collapse_pte_mapped_thp() local
1513 page = vm_normal_page(vma, addr, *pte); in collapse_pte_mapped_thp()
1519 if (hpage + i != page) in collapse_pte_mapped_thp()
1527 struct page *page; in collapse_pte_mapped_thp() local
1531 page = vm_normal_page(vma, addr, *pte); in collapse_pte_mapped_thp()
1532 page_remove_rmap(page, false); in collapse_pte_mapped_thp()
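
collapse_pte_mapped_thp() covers the case where a compound page already sits in the page cache but the VMA still maps it with 512 small PTEs; the hpage + i != page test at line 1519 checks that PTE i points at subpage i. The two passes, sketched from the fragments:

/* Pass 1: validate. Every present PTE must map the matching subpage. */
for (i = 0, addr = haddr, pte = start_pte;
     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
    struct page *page;

    if (pte_none(*pte))
        continue;
    page = vm_normal_page(vma, addr, *pte);
    if (hpage + i != page)
        goto abort;
    count++;
}

/* Pass 2: commit. Drop the rmap of each small mapping; the PTE table
 * is then freed and the range refaults through a huge PMD. */
for (i = 0, addr = haddr, pte = start_pte;
     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
    struct page *page;

    if (pte_none(*pte))
        continue;
    page = vm_normal_page(vma, addr, *pte);
    page_remove_rmap(page, false);
}
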
1688 struct page **hpage, int node) in collapse_file()
1692 struct page *new_page; in collapse_file()
1744 struct page *page = xas_next(&xas); in collapse_file() local
1748 if (!page) { in collapse_file()
1770 if (xa_is_value(page) || !PageUptodate(page)) { in collapse_file()
1773 if (shmem_getpage(mapping->host, index, &page, in collapse_file()
1778 } else if (trylock_page(page)) { in collapse_file()
1779 get_page(page); in collapse_file()
1786 if (!page || xa_is_value(page)) { in collapse_file()
1793 page = find_lock_page(mapping, index); in collapse_file()
1794 if (unlikely(page == NULL)) { in collapse_file()
1798 } else if (PageDirty(page)) { in collapse_file()
1816 } else if (PageWriteback(page)) { in collapse_file()
1820 } else if (trylock_page(page)) { in collapse_file()
1821 get_page(page); in collapse_file()
1833 VM_BUG_ON_PAGE(!PageLocked(page), page); in collapse_file()
1836 if (unlikely(!PageUptodate(page))) { in collapse_file()
1845 if (PageTransCompound(page)) { in collapse_file()
1850 if (page_mapping(page) != mapping) { in collapse_file()
1855 if (!is_shmem && (PageDirty(page) || in collapse_file()
1856 PageWriteback(page))) { in collapse_file()
1866 if (isolate_lru_page(page)) { in collapse_file()
1871 if (page_has_private(page) && in collapse_file()
1872 !try_to_release_page(page, GFP_KERNEL)) { in collapse_file()
1874 putback_lru_page(page); in collapse_file()
1878 if (page_mapped(page)) in collapse_file()
1884 VM_BUG_ON_PAGE(page != xas_load(&xas), page); in collapse_file()
1885 VM_BUG_ON_PAGE(page_mapped(page), page); in collapse_file()
1893 if (!page_ref_freeze(page, 3)) { in collapse_file()
1896 putback_lru_page(page); in collapse_file()
1904 list_add_tail(&page->lru, &pagelist); in collapse_file()
1910 unlock_page(page); in collapse_file()
1911 put_page(page); in collapse_file()
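
collapse_file() works in XArray slots instead of PTEs. The long run of fragments above is its first loop: for each of the HPAGE_PMD_NR indices it brings the old page into a known state, freezes its refcount, and points the slot at the new hugepage. A condensed sketch of the success path for one index; hole filling, shmem_getpage() swap-in, and the dirty/writeback checks are elided, the labels are illustrative, and every failure branch funnels into the unlock_page()/put_page() pair at lines 1910-1911:

struct page *page = xas_next(&xas);

if (!trylock_page(page))
    goto xa_locked;            /* contended: abort the collapse */
get_page(page);
xas_unlock_irq(&xas);

VM_BUG_ON_PAGE(!PageLocked(page), page);
if (unlikely(!PageUptodate(page)) || PageTransCompound(page) ||
    page_mapping(page) != mapping)
    goto out_unlock;

if (isolate_lru_page(page))
    goto out_unlock;
if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
    goto out_unlock;
if (page_mapped(page))
    unmap_mapping_pages(mapping, index, 1, false);

xas_lock_irq(&xas);
xas_set(&xas, index);
VM_BUG_ON_PAGE(page != xas_load(&xas), page);
VM_BUG_ON_PAGE(page_mapped(page), page);

/* Expected count of 3: our pin + page cache + LRU isolation. */
if (!page_ref_freeze(page, 3))
    goto out_unlock;

/* Keep the old page around so the collapse can be undone. */
list_add_tail(&page->lru, &pagelist);
/* The slot now points at the (not yet uptodate) hugepage. */
xas_store(&xas, new_page);
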
1946 struct page *page, *tmp; in collapse_file() local
1953 list_for_each_entry_safe(page, tmp, &pagelist, lru) { in collapse_file()
1954 while (index < page->index) { in collapse_file()
1958 copy_highpage(new_page + (page->index % HPAGE_PMD_NR), in collapse_file()
1959 page); in collapse_file()
1960 list_del(&page->lru); in collapse_file()
1961 page->mapping = NULL; in collapse_file()
1962 page_ref_unfreeze(page, 1); in collapse_file()
1963 ClearPageActive(page); in collapse_file()
1964 ClearPageUnevictable(page); in collapse_file()
1965 unlock_page(page); in collapse_file()
1966 put_page(page); in collapse_file()
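
If every index succeeded, the frozen small pages are copied into new_page in index order; any gap in the list is a hole that gets a zero-filled subpage. Reconstructed from the fragments (the clear_highpage() hole filling is supplied from context):

struct page *page, *tmp;

index = start;
list_for_each_entry_safe(page, tmp, &pagelist, lru) {
    while (index < page->index) {
        clear_highpage(new_page + (index % HPAGE_PMD_NR));
        index++;
    }
    copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
              page);
    list_del(&page->lru);
    page->mapping = NULL;        /* no longer in the page cache */
    page_ref_unfreeze(page, 1);  /* our pin is now the only ref */
    ClearPageActive(page);
    ClearPageUnevictable(page);
    unlock_page(page);
    put_page(page);              /* frees the small page */
    index++;
}

page_ref_unfreeze(page, 1) followed by the final put_page() is what actually frees each source page once its contents live in the hugepage.
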
1988 struct page *page; in collapse_file() local
1998 xas_for_each(&xas, page, end - 1) { in collapse_file()
1999 page = list_first_entry_or_null(&pagelist, in collapse_file()
2000 struct page, lru); in collapse_file()
2001 if (!page || xas.xa_index < page->index) { in collapse_file()
2010 VM_BUG_ON_PAGE(page->index != xas.xa_index, page); in collapse_file()
2013 list_del(&page->lru); in collapse_file()
2014 page_ref_unfreeze(page, 2); in collapse_file()
2015 xas_store(&xas, page); in collapse_file()
2018 unlock_page(page); in collapse_file()
2019 putback_lru_page(page); in collapse_file()
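
The fragments from line 1988 on are the rollback for a failed file collapse: walk the range again, put each small page back into its slot, unfreeze it to its steady-state count of 2 (page cache + LRU isolation), and return it to the LRU. Reconstructed, with the nr_none hole accounting filled in from context:

xas_set(&xas, start);
xas_for_each(&xas, page, end - 1) {
    page = list_first_entry_or_null(&pagelist,
            struct page, lru);
    if (!page || xas.xa_index < page->index) {
        if (!nr_none)
            break;
        nr_none--;
        xas_store(&xas, NULL);    /* put the hole back */
        continue;
    }

    VM_BUG_ON_PAGE(page->index != xas.xa_index, page);

    list_del(&page->lru);
    page_ref_unfreeze(page, 2);
    xas_store(&xas, page);        /* restore the small page */
    xas_pause(&xas);
    xas_unlock_irq(&xas);
    unlock_page(page);
    putback_lru_page(page);
    xas_lock_irq(&xas);           /* retake for the next slot */
}
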
2037 struct file *file, pgoff_t start, struct page **hpage) in khugepaged_scan_file()
2039 struct page *page = NULL; in khugepaged_scan_file() local
2050 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) { in khugepaged_scan_file()
2051 if (xas_retry(&xas, page)) in khugepaged_scan_file()
2054 if (xa_is_value(page)) { in khugepaged_scan_file()
2062 if (PageTransCompound(page)) { in khugepaged_scan_file()
2067 node = page_to_nid(page); in khugepaged_scan_file()
2074 if (!PageLRU(page)) { in khugepaged_scan_file()
2079 if (page_count(page) != in khugepaged_scan_file()
2080 1 + page_mapcount(page) + page_has_private(page)) { in khugepaged_scan_file()
2113 struct file *file, pgoff_t start, struct page **hpage) in khugepaged_scan_file()
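
khugepaged_scan_file() is the cheap read-side pass over the mapping (the second declaration, at line 2113, is the !CONFIG_SHMEM stub). Note its refcount test differs from is_refcount_suitable(): a clean file page is expected to hold one page-cache reference, one per mapping, and one more if private (buffer-head) data is attached. A condensed sketch of the RCU walk, with result-code bookkeeping and the xas_pause()/resched handling elided:

rcu_read_lock();
xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
    if (xas_retry(&xas, page))
        continue;

    if (xa_is_value(page)) {        /* swap entry */
        if (++swap > khugepaged_max_ptes_swap)
            break;                  /* range swaps too much */
        continue;
    }

    if (PageTransCompound(page))
        break;                      /* already huge */

    node = page_to_nid(page);
    if (khugepaged_scan_abort(node))
        break;
    khugepaged_node_load[node]++;

    if (!PageLRU(page))
        break;

    /* 1 (page cache) + mapcount + 1 if private data is attached. */
    if (page_count(page) !=
        1 + page_mapcount(page) + page_has_private(page))
        break;

    present++;
}
rcu_read_unlock();

If enough pages are present and nothing disqualified the range, collapse_file() is called with the node that collected the most votes (khugepaged_find_target_node()).
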
2125 struct page **hpage) in khugepaged_scan_mm_slot()
2264 struct page *hpage = NULL; in khugepaged_do_scan()
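
At the top of the call chain, khugepaged_do_scan() owns the hpage slot for a whole pass and hands it down through khugepaged_scan_mm_slot() to the pmd and file scanners. Reconstruction of the driver loop (matches a v5.10-era tree; later kernels dropped the prealloc scheme):

static void khugepaged_do_scan(void)
{
    struct page *hpage = NULL;
    unsigned int progress = 0, pass_through_head = 0;
    unsigned int pages = READ_ONCE(khugepaged_pages_to_scan);
    bool wait = true;

    lru_add_drain_all();

    while (progress < pages) {
        if (!khugepaged_prealloc_page(&hpage, &wait))
            break;        /* allocation failed: back off */

        cond_resched();

        if (unlikely(kthread_should_stop() || try_to_freeze()))
            break;

        spin_lock(&khugepaged_mm_lock);
        if (!khugepaged_scan.mm_slot)
            pass_through_head++;
        /* Scan up to (pages - progress) pages of one mm. */
        if (khugepaged_has_work() && pass_through_head < 2)
            progress += khugepaged_scan_mm_slot(pages - progress,
                                &hpage);
        else
            progress = pages;
        spin_unlock(&khugepaged_mm_lock);
    }

    if (!IS_ERR_OR_NULL(hpage))
        put_page(hpage);    /* drop an unused preallocation */
}
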