Lines Matching refs:page
 20  static inline void set_page_count(struct page *page, int v)
 22          atomic_set(&page->_count, v);
 43  static inline void set_page_refcounted(struct page *page)
 45          VM_BUG_ON_PAGE(PageTail(page), page);
 46          VM_BUG_ON_PAGE(atomic_read(&page->_count), page);
 47          set_page_count(page, 1);
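Read together, the matches at lines 43-47 are the whole body of set_page_refcounted(): it asserts that the page is not a tail page and that its count is still zero, then initialises the count to 1 through set_page_count(). A minimal sketch of how the two helpers fit together; the braces and the comments are filled in here and are not part of the match listing.

        /* Write the reference count directly; used only while the page is
         * not (yet) visible to any other reference holder. */
        static inline void set_page_count(struct page *page, int v)
        {
                atomic_set(&page->_count, v);
        }

        /* Set up a freshly allocated page: only head (or non-compound)
         * pages carry a reference count, and it must still be zero when
         * the allocator hands the page out. */
        static inline void set_page_refcounted(struct page *page)
        {
                VM_BUG_ON_PAGE(PageTail(page), page);
                VM_BUG_ON_PAGE(atomic_read(&page->_count), page);
                set_page_count(page, 1);
        }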
 50  static inline void __get_page_tail_foll(struct page *page,
 64          VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page);
 66          atomic_inc(&page->first_page->_count);
 67          get_huge_page_tail(page);
 75  static inline void get_page_foll(struct page *page)
 77          if (unlikely(PageTail(page)))
 83                  __get_page_tail_foll(page, true);
 89          VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
 90          atomic_inc(&page->_count);
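The matches at lines 50-90 belong to the follow_page ("foll") reference helpers of this kernel generation, in which a THP tail page has no reference count of its own and the pin is taken on the head page through page->first_page. The sketch below fills the matched statements back into a plausible get_page_foll(); the if/else structure is standard for this era, but the comments are added here and the exact body should be checked against the source.

        static inline void get_page_foll(struct page *page)
        {
                if (unlikely(PageTail(page)))
                        /*
                         * Tail page: pin the compound page through its head.
                         * Safe here because the caller holds the page table
                         * lock, so the huge page cannot be split under us.
                         */
                        __get_page_tail_foll(page, true);
                else {
                        /*
                         * Normal or head page: the caller already holds a
                         * reference, so the count can never be zero here.
                         */
                        VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
                        atomic_inc(&page->_count);
                }
        }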
 99  extern int isolate_lru_page(struct page *page);
100  extern void putback_lru_page(struct page *page);
135  extern int __isolate_free_page(struct page *page, unsigned int order);
136  extern void __free_pages_bootmem(struct page *page, unsigned long pfn,
138  extern void prep_compound_page(struct page *page, unsigned int order);
140  extern bool is_free_buddy_page(struct page *page);
196  static inline unsigned int page_order(struct page *page)
199          return page_private(page);
213  #define page_order_unsafe(page)         ACCESS_ONCE(page_private(page))
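page_order() simply returns the order stashed in page_private(), which is only meaningful while the page sits in the buddy allocator; page_order_unsafe() is the tolerated-racy variant for lockless readers. A hedged usage sketch follows; the wrapper function and variable names are illustrative and not from the listing: a locked reader checks PageBuddy() under zone->lock before trusting the order, while a lockless scanner (compaction-style) takes page_order_unsafe() only as a hint and range-checks it before acting on it.

        static void order_read_examples(struct zone *zone, struct page *page,
                                        unsigned long *scan_pfn)
        {
                unsigned long flags;

                /* Locked pattern: the order is stable while zone->lock is held. */
                spin_lock_irqsave(&zone->lock, flags);
                if (PageBuddy(page)) {
                        unsigned int order = page_order(page);

                        pr_debug("free buddy block of order %u\n", order);
                }
                spin_unlock_irqrestore(&zone->lock, flags);

                /* Lockless pattern: the value may be stale or garbage, so it
                 * is only a hint and must be sanity-checked before use. */
                if (PageBuddy(page)) {
                        unsigned long order = page_order_unsafe(page);

                        if (order > 0 && order < MAX_ORDER)
                                *scan_pfn += (1UL << order) - 1;
                }
        }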
237  extern void mlock_vma_page(struct page *page);
238  extern unsigned int munlock_vma_page(struct page *page);
249  extern void clear_page_mlock(struct page *page);
255  static inline void mlock_migrate_page(struct page *newpage, struct page *page)
257          if (TestClearPageMlocked(page)) {
259                  int nr_pages = hpage_nr_pages(page);
262                  __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
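mlock_migrate_page() moves the PG_mlocked state from a page to its migration target and keeps the NR_MLOCK counter of both zones consistent. A sketch of how the full helper plausibly reads; the irq-save pair and the lines handling the new page are assumptions filled in around the matched statements, with comments added here.

        static inline void mlock_migrate_page(struct page *newpage, struct page *page)
        {
                if (TestClearPageMlocked(page)) {
                        unsigned long flags;
                        int nr_pages = hpage_nr_pages(page);

                        /* Unaccount the old page from its zone... */
                        local_irq_save(flags);
                        __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
                        /* ...then mark and account the new page in its zone. */
                        SetPageMlocked(newpage);
                        __mod_zone_page_state(page_zone(newpage), NR_MLOCK, nr_pages);
                        local_irq_restore(flags);
                }
        }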
272  extern unsigned long vma_address(struct page *page,
276  static inline void clear_page_mlock(struct page *page) { }
277  static inline void mlock_vma_page(struct page *page) { }
278  static inline void mlock_migrate_page(struct page *new, struct page *old) { }
287  static inline struct page *mem_map_offset(struct page *base, int offset)
298  static inline struct page *mem_map_next(struct page *iter,
299                                  struct page *base, int offset)
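mem_map_offset() and mem_map_next() exist because a compound page larger than MAX_ORDER (a gigantic hugetlb page) can span mem_map sections that are not virtually contiguous, so plain pointer arithmetic on struct page is not always valid across a MAX_ORDER boundary. A sketch of the usual shape of these helpers, with comments added; treat it as an approximation of the real bodies rather than a verbatim copy.

        /* Return base + offset, falling back to nth_page() once the offset
         * can cross a MAX_ORDER boundary and the mem_map may be split. */
        static inline struct page *mem_map_offset(struct page *base, int offset)
        {
                if (unlikely(offset >= MAX_ORDER_NR_PAGES))
                        return nth_page(base, offset);
                return base + offset;
        }

        /* Iterator form: advance 'iter' by one page, re-deriving the pointer
         * from the pfn whenever we step onto a new MAX_ORDER block. */
        static inline struct page *mem_map_next(struct page *iter,
                                                struct page *base, int offset)
        {
                if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
                        unsigned long pfn = page_to_pfn(base) + offset;

                        if (!pfn_valid(pfn))
                                return NULL;
                        return pfn_to_page(pfn);
                }
                return iter + 1;
        }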
342  extern void mminit_verify_page_links(struct page *page,
357  static inline void mminit_verify_page_links(struct page *page,
383  extern int hwpoison_filter(struct page *p);