Lines matching refs: page (cross-reference hits in the swap cache code, mm/swap_state.c; the leading number on each line is its position in that file)

114 int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)  in add_to_swap_cache()  argument
118 XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page)); in add_to_swap_cache()
119 unsigned long i, nr = compound_nr(page); in add_to_swap_cache()
121 VM_BUG_ON_PAGE(!PageLocked(page), page); in add_to_swap_cache()
122 VM_BUG_ON_PAGE(PageSwapCache(page), page); in add_to_swap_cache()
123 VM_BUG_ON_PAGE(!PageSwapBacked(page), page); in add_to_swap_cache()
125 page_ref_add(page, nr); in add_to_swap_cache()
126 SetPageSwapCache(page); in add_to_swap_cache()
134 VM_BUG_ON_PAGE(xas.xa_index != idx + i, page); in add_to_swap_cache()
135 set_page_private(page + i, entry.val + i); in add_to_swap_cache()
136 xas_store(&xas, page); in add_to_swap_cache()
140 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr); in add_to_swap_cache()
149 ClearPageSwapCache(page); in add_to_swap_cache()
150 page_ref_sub(page, nr); in add_to_swap_cache()
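
The add_to_swap_cache() hits above are the insertion path: every subpage slot of a (possibly compound) page is stored into the XArray of the swap address space. A minimal sketch of that path, reconstructed from the matched lines and the ~v5.4-era API they come from (simplified, not the verbatim source; the real function also updates address_space->nrpages and the swap cache hit/miss counters):

    /* Add a locked, swap-backed page to the swap cache at 'entry'. */
    static int add_to_swap_cache_sketch(struct page *page, swp_entry_t entry, gfp_t gfp)
    {
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
        unsigned long i, nr = compound_nr(page);

        VM_BUG_ON_PAGE(!PageLocked(page), page);        /* caller holds the page lock */
        VM_BUG_ON_PAGE(PageSwapCache(page), page);      /* not already in the cache   */
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);    /* anon/shmem pages only      */

        page_ref_add(page, nr);         /* the cache holds one reference per subpage */
        SetPageSwapCache(page);

        do {
            xas_lock_irq(&xas);
            xas_create_range(&xas);
            if (!xas_error(&xas)) {
                for (i = 0; i < nr; i++) {
                    VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
                    set_page_private(page + i, entry.val + i);  /* remember each swap slot */
                    xas_store(&xas, page);      /* every slot points at the head page */
                    xas_next(&xas);
                }
                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
            }
            xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));     /* retry after allocating XArray nodes */

        if (!xas_error(&xas))
            return 0;

        ClearPageSwapCache(page);       /* allocation failed: undo the flag and the refs */
        page_ref_sub(page, nr);
        return xas_error(&xas);
    }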
158 void __delete_from_swap_cache(struct page *page, swp_entry_t entry) in __delete_from_swap_cache() argument
161 int i, nr = hpage_nr_pages(page); in __delete_from_swap_cache()
165 VM_BUG_ON_PAGE(!PageLocked(page), page); in __delete_from_swap_cache()
166 VM_BUG_ON_PAGE(!PageSwapCache(page), page); in __delete_from_swap_cache()
167 VM_BUG_ON_PAGE(PageWriteback(page), page); in __delete_from_swap_cache()
171 VM_BUG_ON_PAGE(entry != page, entry); in __delete_from_swap_cache()
172 set_page_private(page + i, 0); in __delete_from_swap_cache()
175 ClearPageSwapCache(page); in __delete_from_swap_cache()
177 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr); in __delete_from_swap_cache()
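
__delete_from_swap_cache() is the inverse: with the page lock and the XArray lock held, every slot is cleared, the per-subpage swap entry stored in page_private is dropped, and the NR_FILE_PAGES accounting is reversed. The odd-looking line 171 compares a local void pointer (also named entry in the source, shadowing the swp_entry_t argument) against the page, asserting that the old XArray value really was this page. A hedged sketch of the loop:

    /* Caller holds the page lock and the i_pages (XArray) lock, irqs off. */
    static void __delete_from_swap_cache_sketch(struct page *page, swp_entry_t entry)
    {
        struct address_space *address_space = swap_address_space(entry);
        int i, nr = hpage_nr_pages(page);   /* 1, or HPAGE_PMD_NR for a THP */
        pgoff_t idx = swp_offset(entry);
        XA_STATE(xas, &address_space->i_pages, idx);

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);  /* never drop a page mid-writeback */

        for (i = 0; i < nr; i++) {
            void *old = xas_store(&xas, NULL);  /* clear the slot, returns the old value */
            VM_BUG_ON_PAGE(old != page, old);   /* every slot must have held this page */
            set_page_private(page + i, 0);      /* forget the swap slot */
            xas_next(&xas);
        }
        ClearPageSwapCache(page);
        __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
    }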
188 int add_to_swap(struct page *page) in add_to_swap() argument
193 VM_BUG_ON_PAGE(!PageLocked(page), page); in add_to_swap()
194 VM_BUG_ON_PAGE(!PageUptodate(page), page); in add_to_swap()
196 entry = get_swap_page(page); in add_to_swap()
211 err = add_to_swap_cache(page, entry, in add_to_swap()
229 set_page_dirty(page); in add_to_swap()
234 put_swap_page(page, entry); in add_to_swap()
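
add_to_swap() ties slot allocation and cache insertion together: reserve a swap slot for the page, insert the page into the swap cache without touching memory reserves, and mark it dirty so reclaim actually writes it out; on failure the slot is handed back. A hedged sketch of that flow (error handling simplified):

    /* Returns 1 if the locked, up-to-date page now has swap space, 0 otherwise. */
    static int add_to_swap_sketch(struct page *page)
    {
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page(page);    /* reserve swap slot(s) sized for the page */
        if (!entry.val)
            return 0;

        /*
         * __GFP_NOMEMALLOC: the swap cache insertion must not dip into
         * memory reserves, or reclaim could recurse on itself.
         */
        err = add_to_swap_cache(page, entry,
                                __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN);
        if (err) {
            put_swap_page(page, entry);     /* give the slot(s) back */
            return 0;
        }

        /*
         * Dirty the page so shrink_page_list() writes it to swap rather than
         * dropping what is now the only up-to-date copy of the data.
         */
        set_page_dirty(page);
        return 1;
    }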
244 void delete_from_swap_cache(struct page *page) in delete_from_swap_cache() argument
246 swp_entry_t entry = { .val = page_private(page) }; in delete_from_swap_cache()
250 __delete_from_swap_cache(page, entry); in delete_from_swap_cache()
253 put_swap_page(page, entry); in delete_from_swap_cache()
254 page_ref_sub(page, hpage_nr_pages(page)); in delete_from_swap_cache()
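
delete_from_swap_cache() is the locking wrapper for the above: it recovers the swap entry from page_private(), removes the page under the XArray lock, then releases the swap slot and the references the cache was holding. A hedged sketch:

    static void delete_from_swap_cache_sketch(struct page *page)
    {
        swp_entry_t entry = { .val = page_private(page) };
        struct address_space *address_space = swap_address_space(entry);

        xa_lock_irq(&address_space->i_pages);
        __delete_from_swap_cache(page, entry);
        xa_unlock_irq(&address_space->i_pages);

        put_swap_page(page, entry);                 /* drop the swap slot(s)       */
        page_ref_sub(page, hpage_nr_pages(page));   /* drop the cache's references */
    }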
265 static inline void free_swap_cache(struct page *page) in free_swap_cache() argument
267 if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) { in free_swap_cache()
268 try_to_free_swap(page); in free_swap_cache()
269 unlock_page(page); in free_swap_cache()
277 void free_page_and_swap_cache(struct page *page) in free_page_and_swap_cache() argument
279 free_swap_cache(page); in free_page_and_swap_cache()
280 if (!is_huge_zero_page(page)) in free_page_and_swap_cache()
281 put_page(page); in free_page_and_swap_cache()
288 void free_pages_and_swap_cache(struct page **pages, int nr) in free_pages_and_swap_cache()
290 struct page **pagep = pages; in free_pages_and_swap_cache()
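
The free_*() helpers are the opportunistic teardown path used when pages are unmapped: if a page still sits in the swap cache but is no longer mapped anywhere, and its lock can be taken without blocking, the cached copy and its swap slot can be dropped on the spot. A hedged sketch of the per-page helper and the batched wrapper (names illustrative):

    static void free_swap_cache_sketch(struct page *page)
    {
        /* Only bother if the page is unmapped and the lock is uncontended. */
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
            try_to_free_swap(page);     /* drops the swap cache entry if it can */
            unlock_page(page);
        }
    }

    static void free_pages_and_swap_cache_sketch(struct page **pages, int nr)
    {
        int i;

        lru_add_drain();    /* flush per-CPU pagevecs before releasing */
        for (i = 0; i < nr; i++)
            free_swap_cache_sketch(pages[i]);
        release_pages(pages, nr);   /* batched put_page() over the whole array */
    }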
310 struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma, in lookup_swap_cache()
313 struct page *page; in lookup_swap_cache() local
319 page = find_get_page(swap_address_space(entry), swp_offset(entry)); in lookup_swap_cache()
323 if (page) { in lookup_swap_cache()
332 if (unlikely(PageTransCompound(page))) in lookup_swap_cache()
333 return page; in lookup_swap_cache()
335 readahead = TestClearPageReadahead(page); in lookup_swap_cache()
356 return page; in lookup_swap_cache()
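
lookup_swap_cache() is essentially a find_get_page() on the per-type swap address space, plus readahead bookkeeping: a page that still carries PG_readahead means the readahead window guessed right, which feeds back into the window sizing. A hedged sketch of the lookup core (the per-VMA window update is omitted):

    static struct page *lookup_swap_cache_sketch(swp_entry_t entry)
    {
        struct page *page;

        /* Swap cache spaces are sharded by swap type and indexed by swap offset. */
        page = find_get_page(swap_address_space(entry), swp_offset(entry));
        if (!page)
            return NULL;

        /* Readahead hinting is skipped for compound (THP) pages. */
        if (!PageTransCompound(page) && TestClearPageReadahead(page))
            count_vm_event(SWAP_RA_HIT);    /* the speculative read was useful */

        return page;
    }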
359 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in __read_swap_cache_async()
363 struct page *found_page = NULL, *new_page = NULL; in __read_swap_cache_async()
448 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in read_swap_cache_async()
452 struct page *retpage = __read_swap_cache_async(entry, gfp_mask, in read_swap_cache_async()
539 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, in swap_cluster_readahead()
542 struct page *page; in swap_cluster_readahead() local
576 page = __read_swap_cache_async( in swap_cluster_readahead()
579 if (!page) in swap_cluster_readahead()
582 swap_readpage(page, false); in swap_cluster_readahead()
584 SetPageReadahead(page); in swap_cluster_readahead()
588 put_page(page); in swap_cluster_readahead()
722 static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask, in swap_vma_readahead()
727 struct page *page; in swap_vma_readahead() local
749 page = __read_swap_cache_async(entry, gfp_mask, vma, in swap_vma_readahead()
751 if (!page) in swap_vma_readahead()
754 swap_readpage(page, false); in swap_vma_readahead()
756 SetPageReadahead(page); in swap_vma_readahead()
760 put_page(page); in swap_vma_readahead()
781 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask, in swapin_readahead()
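
The readahead matches share one shape: __read_swap_cache_async() either finds the page already in the swap cache or allocates and inserts a fresh one, and only in the latter case does the caller issue the read with swap_readpage(). swap_cluster_readahead() walks neighbouring swap offsets, swap_vma_readahead() walks neighbouring PTEs, and swapin_readahead() picks between the two policies. A hedged sketch of that shared loop over a fixed window (function name and window handling are illustrative; the kernel sizes the window from recent hit rates):

    static void swap_readahead_window_sketch(swp_entry_t target, unsigned long start,
                                             unsigned long nr, gfp_t gfp_mask,
                                             struct vm_area_struct *vma, unsigned long addr)
    {
        unsigned long offset;
        struct page *page;
        bool page_allocated;

        for (offset = start; offset < start + nr; offset++) {
            swp_entry_t entry = swp_entry(swp_type(target), offset);

            /* Find the page in the swap cache, or allocate and insert it. */
            page = __read_swap_cache_async(entry, gfp_mask, vma, addr,
                                           &page_allocated);
            if (!page)
                continue;
            if (page_allocated) {
                swap_readpage(page, false);     /* start the actual I/O */
                if (offset != swp_offset(target))
                    SetPageReadahead(page);     /* mark the speculative pages */
            }
            put_page(page);     /* readahead keeps no reference of its own */
        }
        lru_add_drain();    /* push the freshly added pages onto the LRU */
    }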