Lines Matching refs:page (reverse-mapping declarations, evidently from the kernel header include/linux/rmap.h; the leading number on each line is its line number in that file)
182 struct anon_vma *page_get_anon_vma(struct page *page);
191 void page_move_anon_rmap(struct page *, struct vm_area_struct *);
192 void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long, bool);
194 void do_page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long, int);
196 void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long, bool);
198 void page_add_file_rmap(struct page *, bool);
199 void page_remove_rmap(struct page *, bool);
201 void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
203 void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
206 static inline void page_dup_rmap(struct page *page, bool compound)
208 atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount);
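Taken together, the add/remove declarations above are the anonymous-page half of the rmap API. A minimal sketch of how they pair up follows (kernel-internal context assumed; the two wrapper names are illustrative, not from the header). page_dup_rmap() is the fork-time counterpart: duplicating a present PTE only bumps the existing mapcount.

#include <linux/mm.h>
#include <linux/rmap.h>

/* A brand-new anonymous page is announced to the reverse map the first
 * time it is mapped into a VMA (illustrative wrapper). */
static void map_new_anon_page(struct page *page, struct vm_area_struct *vma,
			      unsigned long addr)
{
	page_add_new_anon_rmap(page, vma, addr, false);	/* false: not compound */
}

/* Tearing the mapping down drops _mapcount again (illustrative wrapper). */
static void unmap_anon_page(struct page *page)
{
	page_remove_rmap(page, false);			/* false: not compound */
}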
214 int page_referenced(struct page *, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags);
217 bool try_to_unmap(struct page *, enum ttu_flags flags);
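page_referenced() and try_to_unmap() above are the two calls reclaim leans on. A hedged sketch of that pattern, assuming the caller holds the page lock (the function name and flag choice are illustrative):

#include <linux/rmap.h>

static bool try_reclaim_page(struct page *page)
{
	unsigned long vm_flags;

	/* Recently referenced in some mapping? Keep it this round. */
	if (page_referenced(page, 1 /* is_locked */, NULL /* any memcg */,
			    &vm_flags))
		return false;

	/* Unmap every PTE mapping the page; true means fully unmapped. */
	return try_to_unmap(page, TTU_BATCH_FLUSH);
}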
225 struct page *page;	/* member of struct page_vma_mapped_walk */
237 if (pvmw->pte && !PageHuge(pvmw->page))	/* in page_vma_mapped_walk_done() */
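Lines 225 and 237 are fragments of struct page_vma_mapped_walk and its _done() helper. The canonical loop looks roughly like the sketch below; page_vma_mapped_walk() itself is declared in the same header, and the writable-PTE predicate is only an example. _done() is needed only on early exit: once the walk returns false it has already unmapped the PTE and dropped the lock.

#include <linux/mm.h>
#include <linux/pgtable.h>
#include <linux/rmap.h>

static bool page_mapped_writably(struct page *page,
				 struct vm_area_struct *vma,
				 unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};

	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte && pte_write(*pvmw.pte)) {
			page_vma_mapped_walk_done(&pvmw);	/* early exit: clean up */
			return true;
		}
	}
	return false;	/* walk cleaned up after itself */
}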
248 unsigned long page_address_in_vma(struct page *, struct vm_area_struct *);
256 int page_mkclean(struct page *);
262 void try_to_munlock(struct page *);
264 void remove_migration_ptes(struct page *old, struct page *new, bool locked);
266 int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
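remove_migration_ptes() above is the back half of page migration. A hedged sketch of the overall shape, with locking, refcounting, and error handling omitted (the wrapper name is illustrative; mainline does this inside mm/migrate.c):

#include <linux/rmap.h>

static void migrate_mappings(struct page *old, struct page *new)
{
	/* 1. Replace every PTE mapping `old` with a migration entry. */
	try_to_unmap(old, TTU_MIGRATION | TTU_IGNORE_MLOCK);

	/* ... copy page contents and state from old to new ... */

	/* 2. Rewire those migration entries so they map `new` instead. */
	remove_migration_ptes(old, new, false /* rmap locks not held */);
}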
287 bool (*rmap_one)(struct page *page, struct vm_area_struct *vma, unsigned long addr, void *arg);
289 int (*done)(struct page *page);
290 struct anon_vma *(*anon_lock)(struct page *page, struct rmap_walk_control *rwc);
295 void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
296 void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
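rmap_walk() drives the rmap_walk_control callbacks declared above over every VMA that maps the page. A minimal usage sketch (the counting logic is illustrative; the field names and signatures match the declarations above):

#include <linux/rmap.h>

static bool count_one_mapping(struct page *page, struct vm_area_struct *vma,
			      unsigned long addr, void *arg)
{
	int *count = arg;

	(*count)++;
	return true;	/* keep walking; returning false stops the walk */
}

static int count_mappings(struct page *page)	/* caller holds the page lock */
{
	int count = 0;
	struct rmap_walk_control rwc = {
		.arg = &count,
		.rmap_one = count_one_mapping,
	};

	rmap_walk(page, &rwc);
	return count;
}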
301 struct anon_vma *page_lock_anon_vma_read(struct page *page, struct rmap_walk_control *rwc);
311 static inline int page_referenced(struct page *page, int is_locked, struct mem_cgroup *memcg, unsigned long *vm_flags)	/* !CONFIG_MMU stub */
319 #define try_to_unmap(page, refs) false
321 static inline int page_mkclean(struct page *page)