Lines Matching refs:page

115 struct page *page; in get_shadow_from_swap_cache() local
117 page = find_get_entry(address_space, idx); in get_shadow_from_swap_cache()
118 if (xa_is_value(page)) in get_shadow_from_swap_cache()
119 return page; in get_shadow_from_swap_cache()
120 if (page) in get_shadow_from_swap_cache()
121 put_page(page); in get_shadow_from_swap_cache()
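
Taken together, lines 115-121 form the body of get_shadow_from_swap_cache(); the matches throughout this listing line up with the kernel's mm/swap_state.c around v5.9-v5.10. A minimal reconstruction of the full helper, with the swap_address_space()/swp_offset() setup inferred from the call pattern rather than quoted from the file:

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = find_get_entry(address_space, idx);
	/* xa_is_value() means a workingset shadow entry, not a real page */
	if (xa_is_value(page))
		return page;
	/* A real page was found: drop the reference find_get_entry() took */
	if (page)
		put_page(page);
	return NULL;
}
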
129 int add_to_swap_cache(struct page *page, swp_entry_t entry, in add_to_swap_cache() argument
134 XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page)); in add_to_swap_cache()
135 unsigned long i, nr = thp_nr_pages(page); in add_to_swap_cache()
138 VM_BUG_ON_PAGE(!PageLocked(page), page); in add_to_swap_cache()
139 VM_BUG_ON_PAGE(PageSwapCache(page), page); in add_to_swap_cache()
140 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); in add_to_swap_cache()
142 page_ref_add(page, nr); in add_to_swap_cache()
143 SetPageSwapCache(page); in add_to_swap_cache()
153 VM_BUG_ON_PAGE(xas.xa_index != idx + i, page); in add_to_swap_cache()
160 set_page_private(page + i, entry.val + i); in add_to_swap_cache()
161 xas_store(&xas, page); in add_to_swap_cache()
166 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr); in add_to_swap_cache()
175 ClearPageSwapCache(page); in add_to_swap_cache()
176 page_ref_sub(page, nr); in add_to_swap_cache()
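
Lines 129-176 come from add_to_swap_cache(), which stores every subpage of a (possibly compound) page into the swap-cache XArray and accounts it as file-backed memory. A hedged sketch of the whole routine, assuming the usual xas_nomem() retry loop of that era and eliding the swap-cache hit/miss counters:

int add_to_swap_cache(struct page *page, swp_entry_t entry,
		      gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
	unsigned long i, nr = thp_nr_pages(page);
	void *old;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);		/* one reference per subpage */
	SetPageSwapCache(page);

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
			old = xas_load(&xas);
			if (xa_is_value(old) && shadowp)
				*shadowp = old;	/* hand the shadow entry back */
			set_page_private(page + i, entry.val + i);
			xas_store(&xas, page);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	/* Undo the speculative state on failure */
	ClearPageSwapCache(page);
	page_ref_sub(page, nr);
	return xas_error(&xas);
}
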
184 void __delete_from_swap_cache(struct page *page, in __delete_from_swap_cache() argument
188 int i, nr = thp_nr_pages(page); in __delete_from_swap_cache()
192 VM_BUG_ON_PAGE(!PageLocked(page), page); in __delete_from_swap_cache()
193 VM_BUG_ON_PAGE(!PageSwapCache(page), page); in __delete_from_swap_cache()
194 VM_BUG_ON_PAGE(PageWriteback(page), page); in __delete_from_swap_cache()
198 VM_BUG_ON_PAGE(entry != page, entry); in __delete_from_swap_cache()
199 set_page_private(page + i, 0); in __delete_from_swap_cache()
202 ClearPageSwapCache(page); in __delete_from_swap_cache()
206 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr); in __delete_from_swap_cache()
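
__delete_from_swap_cache() (lines 184-206) is the inverse: each XArray slot is overwritten with a workingset shadow entry (possibly NULL), page_private() is cleared per subpage, and the accounting from the insert path is reversed. Reconstructed under the same assumptions; the caller must already hold the i_pages lock:

void __delete_from_swap_cache(struct page *page,
			      swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i, nr = thp_nr_pages(page);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	for (i = 0; i < nr; i++) {
		/* Replace each slot with the shadow entry (may be NULL) */
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != page, entry);
		set_page_private(page + i, 0);
		xas_next(&xas);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
}
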
217 int add_to_swap(struct page *page) in add_to_swap() argument
222 VM_BUG_ON_PAGE(!PageLocked(page), page); in add_to_swap()
223 VM_BUG_ON_PAGE(!PageUptodate(page), page); in add_to_swap()
225 entry = get_swap_page(page); in add_to_swap()
240 err = add_to_swap_cache(page, entry, in add_to_swap()
258 set_page_dirty(page); in add_to_swap()
263 put_swap_page(page, entry); in add_to_swap()
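
add_to_swap() (lines 217-263) is the reclaim-side wrapper: allocate a swap slot, insert the page into the swap cache, and dirty it so it cannot be freed before its contents reach the swap device. A sketch; the exact GFP flags are taken to be the __GFP_NOMEMALLOC combination kernels of this vintage use to keep XArray node allocation from recursing into reclaim:

int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	/*
	 * Flags chosen so allocations from the reclaim path cannot
	 * deadlock by recursing back into reclaim.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN, NULL);
	if (err)
		goto fail;
	/*
	 * Dirty the page so it is written to swap before it can be
	 * reclaimed as clean.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}
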
273 void delete_from_swap_cache(struct page *page) in delete_from_swap_cache() argument
275 swp_entry_t entry = { .val = page_private(page) }; in delete_from_swap_cache()
279 __delete_from_swap_cache(page, entry, NULL); in delete_from_swap_cache()
282 put_swap_page(page, entry); in delete_from_swap_cache()
283 page_ref_sub(page, thp_nr_pages(page)); in delete_from_swap_cache()
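
delete_from_swap_cache() (lines 273-283) wraps the __-variant with the i_pages lock and then drops the swap slot and the per-subpage references taken at insertion. Reconstruction, again assuming the ~v5.10 shape:

void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry = { .val = page_private(page) };
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(page, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(page, entry);
	page_ref_sub(page, thp_nr_pages(page));
}
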
325 static inline void free_swap_cache(struct page *page) in free_swap_cache() argument
327 if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) { in free_swap_cache()
328 try_to_free_swap(page); in free_swap_cache()
329 unlock_page(page); in free_swap_cache()
337 void free_page_and_swap_cache(struct page *page) in free_page_and_swap_cache() argument
339 free_swap_cache(page); in free_page_and_swap_cache()
340 if (!is_huge_zero_page(page)) in free_page_and_swap_cache()
341 put_page(page); in free_page_and_swap_cache()
348 void free_pages_and_swap_cache(struct page **pages, int nr) in free_pages_and_swap_cache()
350 struct page **pagep = pages; in free_pages_and_swap_cache()
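
Lines 325-350 cover a trio of teardown helpers: free_swap_cache() opportunistically drops the swap-cache copy of an unmapped page, and the two free_page*() variants layer the final put_page()/release_pages() on top. The batched variant's lru_add_drain()/release_pages() calls are assumed from contemporaneous sources:

static inline void free_swap_cache(struct page *page)
{
	/* Only bother for unmapped pages we can lock without sleeping */
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}
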
370 struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma, in lookup_swap_cache()
373 struct page *page; in lookup_swap_cache() local
379 page = find_get_page(swap_address_space(entry), swp_offset(entry)); in lookup_swap_cache()
383 if (page) { in lookup_swap_cache()
392 if (unlikely(PageTransCompound(page))) in lookup_swap_cache()
393 return page; in lookup_swap_cache()
395 readahead = TestClearPageReadahead(page); in lookup_swap_cache()
416 return page; in lookup_swap_cache()
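
lookup_swap_cache() (lines 370-416) probes the swap cache on the fault fast path and feeds the readahead heuristics. A condensed sketch: the get_swap_device()/put_swap_device() guard against concurrent swapoff and the per-VMA hit bookkeeping (the SWAP_RA_* accounting, which uses addr) are real in the source but elided here:

struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	if (page) {
		bool readahead;

		/*
		 * PG_readahead is not used on anonymous THP, so bail out
		 * rather than skew the readahead statistics.
		 */
		if (unlikely(PageTransCompound(page)))
			return page;

		readahead = TestClearPageReadahead(page);
		if (readahead)
			count_vm_event(SWAP_RA_HIT);	/* per-VMA hit update elided */
	}
	return page;
}
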
429 struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index) in find_get_incore_page()
433 struct page *page = find_get_entry(mapping, index); in find_get_incore_page() local
435 if (!page) in find_get_incore_page()
436 return page; in find_get_incore_page()
437 if (!xa_is_value(page)) in find_get_incore_page()
438 return find_subpage(page, index); in find_get_incore_page()
442 swp = radix_to_swp_entry(page); in find_get_incore_page()
447 page = find_get_page(swap_address_space(swp), swp_offset(swp)); in find_get_incore_page()
449 return page; in find_get_incore_page()
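
find_get_incore_page() (lines 429-449) answers "is this page resident anywhere?" for mincore()-style callers: a value entry in a shmem mapping is translated into a swap entry and chased into the swap cache. Reconstruction, with the shmem_mapping() check and the swapoff guard assumed from kernels of this era:

struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct page *page = find_get_entry(mapping, index);

	if (!page)
		return page;
	if (!xa_is_value(page))
		return find_subpage(page, index);
	/* Only shmem mappings keep swap entries in the page cache */
	if (!shmem_mapping(mapping))
		return NULL;

	swp = radix_to_swp_entry(page);
	/* Prevent swapoff from releasing the swap device under us */
	si = get_swap_device(swp);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(swp), swp_offset(swp));
	put_swap_device(si);
	return page;
}
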
452 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in __read_swap_cache_async()
457 struct page *page; in __read_swap_cache_async() local
472 page = find_get_page(swap_address_space(entry), in __read_swap_cache_async()
475 if (page) in __read_swap_cache_async()
476 return page; in __read_swap_cache_async()
494 page = alloc_page_vma(gfp_mask, vma, addr); in __read_swap_cache_async()
495 if (!page) in __read_swap_cache_async()
505 put_page(page); in __read_swap_cache_async()
523 __SetPageLocked(page); in __read_swap_cache_async()
524 __SetPageSwapBacked(page); in __read_swap_cache_async()
527 if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) { in __read_swap_cache_async()
528 put_swap_page(page, entry); in __read_swap_cache_async()
532 if (mem_cgroup_charge(page, NULL, gfp_mask)) { in __read_swap_cache_async()
533 delete_from_swap_cache(page); in __read_swap_cache_async()
538 workingset_refault(page, shadow); in __read_swap_cache_async()
541 SetPageWorkingset(page); in __read_swap_cache_async()
542 lru_cache_add(page); in __read_swap_cache_async()
544 return page; in __read_swap_cache_async()
547 unlock_page(page); in __read_swap_cache_async()
548 put_page(page); in __read_swap_cache_async()
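
__read_swap_cache_async() (lines 452-548) is the heart of swapin: find the page, or allocate one, claim the swap entry, insert it into the swap cache, charge the memcg, replay the workingset shadow, and hand it back still locked so the caller can start the read. A condensed sketch; the swapoff guard and the free-slot short-circuit are elided, and the -EEXIST backoff is assumed from contemporaneous sources:

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *page;
	void *shadow = NULL;
	int err;

	*new_page_allocated = false;

	for (;;) {
		/* Fast path: somebody else already brought the page in */
		page = find_get_page(swap_address_space(entry),
				     swp_offset(entry));
		if (page)
			return page;

		page = alloc_page_vma(gfp_mask, vma, addr);
		if (!page)
			return NULL;

		/*
		 * Claim the swap entry; -EEXIST means a racing thread is
		 * mid-swapin on the same entry, so back off and retry.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;
		put_page(page);
		if (err != -EEXIST)
			return NULL;
		schedule_timeout_uninterruptible(1);
	}

	__SetPageLocked(page);
	__SetPageSwapBacked(page);

	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
		put_swap_page(page, entry);
		goto fail_unlock;
	}
	if (mem_cgroup_charge(page, NULL, gfp_mask)) {
		delete_from_swap_cache(page);
		goto fail_unlock;
	}
	if (shadow)
		workingset_refault(page, shadow);

	/* Caller will initiate the read into the locked page */
	SetPageWorkingset(page);
	lru_cache_add(page);
	*new_page_allocated = true;
	return page;

fail_unlock:
	unlock_page(page);
	put_page(page);
	return NULL;
}
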
558 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in read_swap_cache_async()
562 struct page *retpage = __read_swap_cache_async(entry, gfp_mask, in read_swap_cache_async()
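
read_swap_cache_async() (line 558 onward) is a thin synchronous wrapper over the function above; only the thread that actually allocated the page issues the device read:

struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma,
			unsigned long addr, bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	/* Only the allocating thread starts the read */
	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}
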
654 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, in swap_cluster_readahead()
657 struct page *page; in swap_cluster_readahead() local
691 page = __read_swap_cache_async( in swap_cluster_readahead()
694 if (!page) in swap_cluster_readahead()
697 swap_readpage(page, false); in swap_cluster_readahead()
699 SetPageReadahead(page); in swap_cluster_readahead()
703 put_page(page); in swap_cluster_readahead()
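
swap_cluster_readahead() (lines 654-703) reads a power-of-two window of swap slots around the faulting offset, plugging the block layer so the reads can merge. Condensed sketch: swapin_nr_pages() is the file-internal window heuristic, and the device-congestion check and si->max clamping are elided:

struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;	/* window size, power of two */
	if (!mask)
		goto skip;

	do_poll = false;
	/* Read an aligned cluster of slots around the faulting offset */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)
		start_offset++;		/* slot 0 is the swap header */

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset)
				SetPageReadahead(page);
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();	/* push new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}
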
837 static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask, in swap_vma_readahead()
842 struct page *page; in swap_vma_readahead() local
864 page = __read_swap_cache_async(entry, gfp_mask, vma, in swap_vma_readahead()
866 if (!page) in swap_vma_readahead()
869 swap_readpage(page, false); in swap_vma_readahead()
871 SetPageReadahead(page); in swap_vma_readahead()
875 put_page(page); in swap_vma_readahead()
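
swap_vma_readahead() (lines 837-875) walks nearby PTEs instead of nearby swap slots, so readahead follows the virtual-address access pattern. Sketch only: struct vma_swap_readahead and swap_ra_info() are file-internal helpers whose exact fields are assumed here:

static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {0,};

	/* Size the window from the per-VMA readahead hit history */
	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte; i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry) || pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (i != ra_info.offset)
				SetPageReadahead(page);
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();
skip:
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1);
}
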
896 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, in swapin_readahead()
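
swapin_readahead() (line 896) simply dispatches between the two policies above, depending on whether per-VMA readahead is enabled:

struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}
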