Lines matching refs:page
(Each match: source line number, matched text, and, where the indexer resolved it, the enclosing function and the matched symbol's role: argument, local, or member.)
152 static inline void __mm_zero_struct_page(struct page *page) in __mm_zero_struct_page() argument
154 unsigned long *_pp = (void *)page; in __mm_zero_struct_page()
157 BUILD_BUG_ON(sizeof(struct page) & 7); in __mm_zero_struct_page()
158 BUILD_BUG_ON(sizeof(struct page) < 56); in __mm_zero_struct_page()
159 BUILD_BUG_ON(sizeof(struct page) > 96); in __mm_zero_struct_page()
161 switch (sizeof(struct page)) { in __mm_zero_struct_page()
188 #define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page)))
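Lines 152-188 cover struct page zeroing: __mm_zero_struct_page() requires sizeof(struct page) to be a multiple of 8 and between 56 and 96 bytes, then zeroes it with unrolled word stores selected by the switch at line 161; when the optimization is not enabled, mm_zero_struct_page() (line 188) falls back to a plain memset(). A sketch of the switch body the match elides, using _pp from line 154 (case labels follow the 56..96 byte constraint; treat the exact unrolling as illustrative):

        switch (sizeof(struct page)) {
        case 96:
                _pp[11] = 0;
                fallthrough;
        case 88:
                _pp[10] = 0;
                fallthrough;
        case 80:
                _pp[9] = 0;
                fallthrough;
        case 72:
                _pp[8] = 0;
                fallthrough;
        case 64:
                _pp[7] = 0;
                fallthrough;
        case 56:
                _pp[6] = 0;
                _pp[5] = 0;
                _pp[4] = 0;
                _pp[3] = 0;
                _pp[2] = 0;
                _pp[1] = 0;
                _pp[0] = 0;
        }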
227 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n)) argument
230 #define nth_page(page,n) ((page) + (n)) argument
231 #define folio_page_idx(folio, p) ((p) - &(folio)->page)
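The two nth_page() definitions (227 and 230) differ because, with SPARSEMEM and no VMEMMAP, the memmap is not virtually contiguous across memory sections, so stepping to the n-th page must round-trip through the pfn; with a contiguous memmap, plain pointer arithmetic suffices. A usage sketch (touch_range() is a hypothetical caller):

        /* Walk a physically contiguous range that may cross a
         * SPARSEMEM section boundary - exactly the case where a
         * bare (page + i) would be wrong. */
        static void touch_range(struct page *page, unsigned long nr)
        {
                unsigned long i;

                for (i = 0; i < nr; i++)
                        clear_highpage(nth_page(page, i));
        }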
571 struct page *cow_page; /* Page handler may use for COW fault */
572 struct page *page; /* ->fault handlers should return a member
670 struct page *(*find_special_page)(struct vm_area_struct *vma,
1201 static inline unsigned int compound_order(struct page *page) in compound_order() argument
1203 struct folio *folio = (struct folio *)page; in compound_order()
1244 static inline int put_page_testzero(struct page *page) in put_page_testzero() argument
1248 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); in put_page_testzero()
1249 ret = page_ref_dec_and_test(page); in put_page_testzero()
1250 page_pinner_put_page(page); in put_page_testzero()
1257 return put_page_testzero(&folio->page); in folio_put_testzero()
1266 static inline bool get_page_unless_zero(struct page *page) in get_page_unless_zero() argument
1268 return page_ref_add_unless(page, 1, 0); in get_page_unless_zero()
1271 static inline struct folio *folio_get_nontail_page(struct page *page) in folio_get_nontail_page() argument
1273 if (unlikely(!get_page_unless_zero(page))) in folio_get_nontail_page()
1275 return (struct folio *)page; in folio_get_nontail_page()
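Lines 1244-1275 are the refcounting primitives: put_page_testzero() drops a reference and reports whether it was the last one (with a VM_BUG_ON if the count was already zero), get_page_unless_zero() takes a reference only if the count is still non-zero, and folio_get_nontail_page() builds on it to hand back the page as a folio. A sketch of the speculative-reference pattern these enable, assuming the caller found the page without holding a reference (e.g. under RCU); the helper name is hypothetical:

        static struct page *try_grab_speculative(struct page *page)
        {
                /* Fails iff the page is already on its way to being
                 * freed; the caller should then retry its lookup. */
                if (!get_page_unless_zero(page))
                        return NULL;
                return page;    /* caller now owns one reference */
        }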
1290 struct page *vmalloc_to_page(const void *addr);
1379 static inline bool page_mapped(const struct page *page) in page_mapped() argument
1381 return folio_mapped(page_folio(page)); in page_mapped()
1384 static inline struct page *virt_to_head_page(const void *x) in virt_to_head_page()
1386 struct page *page = virt_to_page(x); in virt_to_head_page() local
1388 return compound_head(page); in virt_to_head_page()
1393 struct page *page = virt_to_page(x); in virt_to_folio() local
1395 return page_folio(page); in virt_to_folio()
1402 void split_page(struct page *page, unsigned int order);
1409 static inline unsigned long page_size(struct page *page) in page_size() argument
1411 return PAGE_SIZE << compound_order(page); in page_size()
1415 static inline unsigned int page_shift(struct page *page) in page_shift() argument
1417 return PAGE_SHIFT + compound_order(page); in page_shift()
1424 static inline unsigned int thp_order(struct page *page) in thp_order() argument
1426 VM_BUG_ON_PGFLAGS(PageTail(page), page); in thp_order()
1427 return compound_order(page); in thp_order()
1436 static inline unsigned long thp_size(struct page *page) in thp_size() argument
1438 return PAGE_SIZE << thp_order(page); in thp_size()
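page_size(), page_shift(), thp_order() and thp_size() (lines 1409-1438) all reduce to compound_order(); the thp_* variants additionally assert the page is not a tail page. Worked numbers, assuming 4 KiB base pages (PAGE_SHIFT == 12):

        /* Base page:               compound_order() == 0
         *   page_size()  = 4096 << 0 = 4 KiB,  page_shift() = 12
         * PMD-sized THP head page:  compound_order() == 9 (x86-64)
         *   thp_size()   = 4096 << 9 = 2 MiB,  thp_order()  = 9,
         *   page_shift() = 12 + 9   = 21
         */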
1455 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
1457 struct page *page, unsigned int nr, unsigned long addr);
1559 static inline void get_page(struct page *page) in get_page() argument
1561 folio_get(page_folio(page)); in get_page()
1564 static inline __must_check bool try_get_page(struct page *page) in try_get_page() argument
1566 page = compound_head(page); in try_get_page()
1567 if (WARN_ON_ONCE(page_ref_count(page) <= 0)) in try_get_page()
1569 page_ref_inc(page); in try_get_page()
1626 struct page **pages;
1651 static inline void put_page(struct page *page) in put_page() argument
1653 struct folio *folio = page_folio(page); in put_page()
1696 void unpin_user_page(struct page *page);
1698 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
1700 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
1702 void unpin_user_pages(struct page **pages, unsigned long npages);
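Lines 1559-1702 mix the two reference disciplines: get_page()/put_page() manage ordinary references (both resolve to folio operations, per lines 1561 and 1653), while pages obtained with FOLL_PIN must be released through the unpin_user_page*() family, whose dirty_lock variants also mark the pages dirty. A sketch of the pairing, assuming a driver that pinned user pages for DMA via pin_user_pages():

        static void release_dma_pages(struct page **pages,
                                      unsigned long npages, bool dirtied)
        {
                /* FOLL_PIN references must not be dropped with
                 * put_page(); the unpin side keeps the pin counts
                 * balanced and optionally dirties the pages. */
                unpin_user_pages_dirty_lock(pages, npages, dirtied);
        }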
1738 static inline int page_zone_id(struct page *page) in page_zone_id() argument
1740 return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK; in page_zone_id()
1744 int page_to_nid(const struct page *page);
1746 static inline int page_to_nid(const struct page *page) in page_to_nid() argument
1748 return (PF_POISONED_CHECK(page)->flags >> NODES_PGSHIFT) & NODES_MASK; in page_to_nid()
1754 return page_to_nid(&folio->page); in folio_nid()
1816 static inline void page_cpupid_reset_last(struct page *page) in page_cpupid_reset_last() argument
1818 page->_last_cpupid = -1 & LAST_CPUPID_MASK; in page_cpupid_reset_last()
1828 static inline void page_cpupid_reset_last(struct page *page) in page_cpupid_reset_last() argument
1830 page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT; in page_cpupid_reset_last()
1895 static inline void page_cpupid_reset_last(struct page *page) in page_cpupid_reset_last() argument
1921 static inline u8 page_kasan_tag(const struct page *page) in page_kasan_tag() argument
1926 tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK; in page_kasan_tag()
1933 static inline void page_kasan_tag_set(struct page *page, u8 tag) in page_kasan_tag_set() argument
1941 old_flags = READ_ONCE(page->flags); in page_kasan_tag_set()
1946 } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags))); in page_kasan_tag_set()
1949 static inline void page_kasan_tag_reset(struct page *page) in page_kasan_tag_reset() argument
1952 page_kasan_tag_set(page, KASAN_TAG_KERNEL); in page_kasan_tag_reset()
1957 static inline u8 page_kasan_tag(const struct page *page) in page_kasan_tag() argument
1962 static inline void page_kasan_tag_set(struct page *page, u8 tag) { } in page_kasan_tag_set() argument
1963 static inline void page_kasan_tag_reset(struct page *page) { } in page_kasan_tag_reset() argument
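The KASAN helpers at 1921-1963 keep a memory tag in page->flags: page_kasan_tag_set() updates it with a try_cmpxchg() loop (line 1946) so concurrent flag updates are not lost, and the second set of definitions are the no-op stubs for kernels without tag-based KASAN. A small round-trip sketch (the tag value is arbitrary):

        page_kasan_tag_set(page, 0x2a & KASAN_TAG_MASK);
        tag = page_kasan_tag(page);     /* 0x2a with tag-based KASAN,
                                         * 0xff when KASAN is off */
        page_kasan_tag_reset(page);     /* back to KASAN_TAG_KERNEL */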
1967 static inline struct zone *page_zone(const struct page *page) in page_zone() argument
1969 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; in page_zone()
1972 static inline pg_data_t *page_pgdat(const struct page *page) in page_pgdat() argument
1974 return NODE_DATA(page_to_nid(page)); in page_pgdat()
1979 return page_zone(&folio->page); in folio_zone()
1984 return page_pgdat(&folio->page); in folio_pgdat()
1988 static inline void set_page_section(struct page *page, unsigned long section) in set_page_section() argument
1990 page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT); in set_page_section()
1991 page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT; in set_page_section()
1994 static inline unsigned long page_to_section(const struct page *page) in page_to_section() argument
1996 return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK; in page_to_section()
2011 return page_to_pfn(&folio->page); in folio_pfn()
2084 static inline bool is_zero_page(const struct page *page) in is_zero_page() argument
2086 return is_zero_pfn(page_to_pfn(page)); in is_zero_page()
2097 return is_zero_page(&folio->page); in is_zero_folio()
2138 static inline void set_page_zone(struct page *page, enum zone_type zone) in set_page_zone() argument
2140 page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT); in set_page_zone()
2141 page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT; in set_page_zone()
2144 static inline void set_page_node(struct page *page, unsigned long node) in set_page_node() argument
2146 page->flags &= ~(NODES_MASK << NODES_PGSHIFT); in set_page_node()
2147 page->flags |= (node & NODES_MASK) << NODES_PGSHIFT; in set_page_node()
2150 static inline void set_page_links(struct page *page, enum zone_type zone, in set_page_links() argument
2153 set_page_zone(page, zone); in set_page_links()
2154 set_page_node(page, node); in set_page_links()
2156 set_page_section(page, pfn_to_section_nr(pfn)); in set_page_links()
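set_page_zone(), set_page_node(), set_page_section() and set_page_links() (lines 1988-2156) are the encoders matching the decoders above; together they maintain the packed upper bits of page->flags. A sketch of the layout (field presence and widths are config-dependent; see the kernel's page-flags-layout comments):

        /* | [SECTION] | [NODE] | ZONE | ... | FLAGS |
         *
         * set_page_links(page, zone, node, pfn) writes the first
         * three fields; page_zonenum(), page_to_nid() and
         * page_to_section() read them back by shifting with
         * *_PGSHIFT and masking with *_MASK, as in the matches above.
         */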
2189 static inline unsigned long compound_nr(struct page *page) in compound_nr() argument
2191 struct folio *folio = (struct folio *)page; in compound_nr()
2206 static inline int thp_nr_pages(struct page *page) in thp_nr_pages() argument
2208 return folio_nr_pages((struct folio *)page); in thp_nr_pages()
2395 static inline void *page_address(const struct page *page) in page_address() argument
2397 return page->virtual; in page_address()
2399 static inline void set_page_address(struct page *page, void *address) in set_page_address() argument
2401 page->virtual = address; in set_page_address()
2407 void *page_address(const struct page *page);
2408 void set_page_address(struct page *page, void *virtual);
2412 static __always_inline void *lowmem_page_address(const struct page *page) in lowmem_page_address() argument
2414 return page_to_virt(page); in lowmem_page_address()
2418 #define page_address(page) lowmem_page_address(page) argument
2419 #define set_page_address(page, address) do { } while(0) argument
2425 return page_address(&folio->page); in folio_address()
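Lines 2395-2425 show the three page_address() strategies: a cached page->virtual field (WANT_PAGE_VIRTUAL), an out-of-line lookup (the prototypes at 2407-2408), or, when every page has a permanent kernel mapping, a direct page_to_virt() computation. A usage sketch; note that highmem pages without a permanent mapping need kmap_local_page() instead:

        void *va = page_address(page);  /* lowmem / permanently mapped only */

        memset(va, 0, PAGE_SIZE);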
2433 static inline bool page_is_pfmemalloc(const struct page *page) in page_is_pfmemalloc() argument
2440 return (uintptr_t)page->lru.next & BIT(1); in page_is_pfmemalloc()
2462 static inline void set_page_pfmemalloc(struct page *page) in set_page_pfmemalloc() argument
2464 page->lru.next = (void *)BIT(1); in set_page_pfmemalloc()
2467 static inline void clear_page_pfmemalloc(struct page *page) in clear_page_pfmemalloc() argument
2469 page->lru.next = NULL; in clear_page_pfmemalloc()
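The pfmemalloc helpers at 2433-2469 stash a boolean in bit 1 of page->lru.next, which works because real list pointers are word-aligned (so that bit is never set in a valid pointer) and lru is otherwise unused while the page is owned by the allocator's caller. A sketch of a typical consumer, e.g. a network driver deciding whether to recycle a received page:

        if (page_is_pfmemalloc(page)) {
                /* Emergency-reserve page: return it to the allocator
                 * rather than caching it for reuse. */
                put_page(page);
                return;
        }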
2478 #define offset_in_thp(page, p) ((unsigned long)(p) & (thp_size(page) - 1)) argument
2535 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
2539 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
2654 unsigned int gup_flags, struct page **pages,
2658 unsigned int gup_flags, struct page **pages,
2664 static inline struct page *get_user_page_vma_remote(struct mm_struct *mm, in get_user_page_vma_remote()
2669 struct page *page; in get_user_page_vma_remote() local
2676 got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL); in get_user_page_vma_remote()
2683 put_page(page); in get_user_page_vma_remote()
2688 return page; in get_user_page_vma_remote()
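get_user_page_vma_remote() (lines 2664-2688) wraps get_user_pages_remote() for the single-page case; the put_page() at 2683 is the error path where the page was obtained but the accompanying VMA lookup failed. A hedged calling sketch, assuming the caller holds mmap_lock on the foreign mm:

        struct vm_area_struct *vma;
        struct page *page;

        page = get_user_page_vma_remote(mm, addr, FOLL_WRITE, &vma);
        if (IS_ERR(page))
                return PTR_ERR(page);
        /* ... access the page, honouring vma->vm_flags ... */
        put_page(page);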
2692 unsigned int gup_flags, struct page **pages);
2694 unsigned int gup_flags, struct page **pages);
2696 struct page **pages, unsigned int gup_flags);
2698 struct page **pages, unsigned int gup_flags);
2704 unsigned int gup_flags, struct page **pages);
2706 unsigned int gup_flags, struct page **pages);
2714 struct page *get_dump_page(unsigned long addr);
2717 bool set_page_dirty(struct page *page);
2718 int set_page_dirty_lock(struct page *page);
2755 unsigned int gup_flags, struct page **pages);
2758 unsigned int gup_flags, struct page **pagep) in get_user_page_fast_only()
3066 struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order); in pagetable_alloc_noprof() local
3068 return page_ptdesc(page); in pagetable_alloc_noprof()
3081 struct page *page = ptdesc_page(pt); in pagetable_free() local
3083 __free_pages(page, compound_order(page)); in pagetable_free()
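pagetable_alloc_noprof()/pagetable_free() (lines 3066-3083) allocate page-table pages as compound pages (note the forced __GFP_COMP at 3066) wrapped in a ptdesc, and the free side unwraps and releases at the recorded compound order. A minimal sketch using the non-_noprof wrapper, assuming the usual alloc_hooks() macro layering:

        struct ptdesc *pt = pagetable_alloc(GFP_PGTABLE_KERNEL, 0);

        if (!pt)
                return -ENOMEM;
        /* ... install and use the table ... */
        pagetable_free(pt);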
3224 static inline struct page *pmd_pgtable_page(pmd_t *pmd) in pmd_pgtable_page()
3347 extern void adjust_managed_page_count(struct page *page, long count);
3353 void free_reserved_page(struct page *page);
3354 #define free_highmem_page(page) free_reserved_page(page) argument
3356 static inline void mark_page_reserved(struct page *page) in mark_page_reserved() argument
3358 SetPageReserved(page); in mark_page_reserved()
3359 adjust_managed_page_count(page, -1); in mark_page_reserved()
3711 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
3713 struct page **pages, unsigned long *num);
3714 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
3716 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
3729 unsigned long addr, struct page *page) in vmf_insert_page() argument
3731 int err = vm_insert_page(vma, addr, page); in vmf_insert_page()
3820 extern void __kernel_poison_pages(struct page *page, int numpages);
3821 extern void __kernel_unpoison_pages(struct page *page, int numpages);
3836 static inline void kernel_poison_pages(struct page *page, int numpages) in kernel_poison_pages() argument
3839 __kernel_poison_pages(page, numpages); in kernel_poison_pages()
3841 static inline void kernel_unpoison_pages(struct page *page, int numpages) in kernel_unpoison_pages() argument
3844 __kernel_unpoison_pages(page, numpages); in kernel_unpoison_pages()
3849 static inline void __kernel_poison_pages(struct page *page, int numpages) { } in __kernel_poison_pages() argument
3850 static inline void kernel_poison_pages(struct page *page, int numpages) { } in kernel_poison_pages() argument
3851 static inline void kernel_unpoison_pages(struct page *page, int numpages) { } in kernel_unpoison_pages() argument
3895 extern void __kernel_map_pages(struct page *page, int numpages, int enable);
3897 static inline void debug_pagealloc_map_pages(struct page *page, int numpages) in debug_pagealloc_map_pages() argument
3900 __kernel_map_pages(page, numpages, 1); in debug_pagealloc_map_pages()
3903 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) in debug_pagealloc_unmap_pages() argument
3906 __kernel_map_pages(page, numpages, 0); in debug_pagealloc_unmap_pages()
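Lines 3820-3906 are two build-conditional debugging facilities: page poisoning fills freed pages with a pattern (and verifies it on reallocation), while DEBUG_PAGEALLOC unmaps free pages so any use-after-free faults immediately; both compile to empty stubs when disabled. The map/unmap pair is meant to bracket a page's time on the free lists, roughly:

        debug_pagealloc_unmap_pages(page, 1 << order);  /* page freed */
        /* ... page sits unmapped on the free list ... */
        debug_pagealloc_map_pages(page, 1 << order);    /* reallocated */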
3922 static inline bool page_is_guard(struct page *page) in page_is_guard() argument
3927 return PageGuard(page); in page_is_guard()
3930 bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order);
3931 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
3936 return __set_page_guard(zone, page, order); in set_page_guard()
3939 void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order);
3940 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
3945 __clear_page_guard(zone, page, order); in clear_page_guard()
3949 static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {} in debug_pagealloc_map_pages() argument
3950 static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {} in debug_pagealloc_unmap_pages() argument
3953 static inline bool page_is_guard(struct page *page) { return false; } in page_is_guard() argument
3954 static inline bool set_page_guard(struct zone *zone, struct page *page, in set_page_guard() argument
3956 static inline void clear_page_guard(struct zone *zone, struct page *page, in clear_page_guard() argument
4002 struct page * __populate_section_memmap(unsigned long pfn,
4013 struct vmem_altmap *altmap, struct page *reuse);
4069 if (!pgmap || !is_power_of_2(sizeof(struct page))) in __vmemmap_can_optimize()
4073 nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT); in __vmemmap_can_optimize()
4095 void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
4239 extern int memcmp_pages(struct page *page1, struct page *page2);
4241 static inline int pages_identical(struct page *page1, struct page *page2) in pages_identical()
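memcmp_pages() compares two pages' contents; pages_identical() (line 4241, body truncated by the match) is presumably just the boolean wrapper:

        static inline int pages_identical(struct page *page1, struct page *page2)
        {
                return !memcmp_pages(page1, page2);
        }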
4377 void free_hpage(struct page *page, int __bitwise fpi_flags);
4378 void prep_new_hpage(struct page *page, gfp_t gfp_flags, unsigned int alloc_flags);
4379 void prep_compound_page(struct page *page, unsigned int order);
4391 static inline bool page_pool_page_is_pp(struct page *page) in page_pool_page_is_pp() argument
4393 return (page->pp_magic & PP_MAGIC_MASK) == PP_SIGNATURE; in page_pool_page_is_pp()
4396 static inline bool page_pool_page_is_pp(struct page *page) in page_pool_page_is_pp() argument
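page_pool_page_is_pp() identifies page_pool-owned pages by checking the signature bits stored in page->pp_magic against PP_SIGNATURE (line 4393); the second definition at 4396 is the stub for kernels without page_pool support, which presumably just reports false:

        static inline bool page_pool_page_is_pp(struct page *page)
        {
                return false;
        }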