/include/linux/
page_ref.h:

    /* CONFIG_DEBUG_PAGE_REF=y: out-of-line hooks that fire tracepoints */
    extern void __page_ref_set(struct page *page, int v);
    extern void __page_ref_mod(struct page *page, int v);
    extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
    extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
    extern void __page_ref_mod_unless(struct page *page, int v, int u);
    extern void __page_ref_freeze(struct page *page, int v, int ret);
    extern void __page_ref_unfreeze(struct page *page, int v);

    /* CONFIG_DEBUG_PAGE_REF=n: empty stubs the compiler discards */
    static inline void __page_ref_set(struct page *page, int v) { }
    static inline void __page_ref_mod(struct page *page, int v) { }
    static inline void __page_ref_mod_and_test(struct page *page, int v, int ret) { }
    …
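
Every refcount change on a page funnels through these hooks, so an ordinary get_page()/put_page() pair is enough to exercise them. A minimal sketch (demo_page_ref is a hypothetical helper, not part of the header):

    #include <linux/mm.h>

    /* Take and drop a short-lived page reference. With
     * CONFIG_DEBUG_PAGE_REF=y each step emits a page_ref tracepoint
     * through the __page_ref_* hooks declared above. */
    static void demo_page_ref(struct page *page)
    {
        get_page(page);     /* refcount++, routed via __page_ref_mod() */
        /* ... use the page ... */
        put_page(page);     /* refcount--, routed via __page_ref_mod_and_test() */
    }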
|
balloon_compaction.h:

        int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
                           struct page *page, enum migrate_mode mode);
    …
    extern struct page *balloon_page_alloc(void);
    extern void balloon_page_enqueue(struct balloon_dev_info *b_dev_info,
                                     struct page *page);
    extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
    …
    static inline void balloon_page_insert(struct balloon_dev_info *balloon,
                                           struct page *page)
    {
        __SetPageOffline(page);
        __SetPageMovable(page, &balloon_mops);
        set_page_private(page, (unsigned long)balloon);
        list_add(&page->lru, &balloon->pages);
    }
    …
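
A minimal inflate-side sketch of how a balloon driver uses these helpers, assuming a b_dev_info previously initialized with balloon_devinfo_init() (demo_inflate_one is hypothetical):

    #include <linux/balloon_compaction.h>

    /* Allocate one page for the balloon and queue it; the enqueue path
     * ends up in balloon_page_insert() above, which marks the page
     * offline and movable so compaction can migrate it later. */
    static int demo_inflate_one(struct balloon_dev_info *b_dev_info)
    {
        struct page *page = balloon_page_alloc();

        if (!page)
            return -ENOMEM;
        /* tell the hypervisor about the page here, then: */
        balloon_page_enqueue(b_dev_info, page);
        return 0;
    }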
|
page_owner.h:

    extern void __reset_page_owner(struct page *page, unsigned short order);
    extern void __set_page_owner(struct page *page,
                                 unsigned short order, gfp_t gfp_mask);
    extern void __split_page_owner(struct page *page, int old_order,
                                   int new_order);
    …
    extern void __set_page_owner_migrate_reason(struct page *page, int reason);
    extern void __dump_page_owner(const struct page *page);
    …
    static inline void reset_page_owner(struct page *page, unsigned short order)
    {
        if (static_branch_unlikely(&page_owner_inited))
            __reset_page_owner(page, order);
    }

    static inline void set_page_owner(struct page *page,
                                      unsigned short order, gfp_t gfp_mask)
    {
        if (static_branch_unlikely(&page_owner_inited))
            __set_page_owner(page, order, gfp_mask);
    }

    static inline void split_page_owner(struct page *page, int old_order,
                                        int new_order)
    …
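
The wrapper pattern here (and in page_pinner.h below) keeps the fast path to a single live-patched branch when the feature is compiled in but disabled at boot. A sketch of the same idiom for a hypothetical debug hook (all my_debug_* names are made up for illustration):

    #include <linux/jump_label.h>
    #include <linux/mm_types.h>

    DECLARE_STATIC_KEY_FALSE(my_debug_inited);  /* flipped once at boot */
    extern void __my_debug_hook(struct page *page);

    /* Callers pay one no-op branch until the key is enabled; enabling it
     * patches the branch to jump to the out-of-line slow path. */
    static inline void my_debug_hook(struct page *page)
    {
        if (static_branch_unlikely(&my_debug_inited))
            __my_debug_hook(page);
    }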
|
page_pinner.h:

    extern void __free_page_pinner(struct page *page, unsigned int order);
    void __page_pinner_failure_detect(struct page *page);
    void __page_pinner_put_page(struct page *page);

    static inline void free_page_pinner(struct page *page, unsigned int order)
    {
        …
        __free_page_pinner(page, order);
    }

    static inline void page_pinner_put_page(struct page *page)
    {
        …
        __page_pinner_put_page(page);
    }

    static inline void page_pinner_failure_detect(struct page *page)
    {
        …
        __page_pinner_failure_detect(page);
    }

    /* stub variant when the feature is compiled out */
    static inline void free_page_pinner(struct page *page, unsigned int order)
    …
|
highmem-internal.h:

    void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
    …
    void *kmap_high(struct page *page);
    void kunmap_high(struct page *page);
    struct page *__kmap_to_page(void *addr);

    static inline void *kmap(struct page *page)
    {
        void *addr;

        might_sleep();
        if (!PageHighMem(page))
            addr = page_address(page);
        else
            addr = kmap_high(page);
        kmap_flush_tlb((unsigned long)addr);
        return addr;
    }

    static inline void kunmap(struct page *page)
    {
        might_sleep();
        if (!PageHighMem(page))
    …
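
The split above is the whole point of the API: lowmem pages resolve to their permanent mapping via page_address(), and only highmem pages take the kmap_high() slow path. A short usage sketch (demo_fill_page is hypothetical); note that kmap() may sleep, so new code generally prefers kmap_local_page():

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Copy a buffer into a possibly-highmem page via the legacy API.
     * The mapping is global and must be balanced with kunmap(). */
    static void demo_fill_page(struct page *page, const void *src, size_t len)
    {
        void *vaddr = kmap(page);

        memcpy(vaddr, src, len);
        kunmap(page);
    }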
|
page-flags.h:

    static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
    {
        if (!hugetlb_optimize_vmemmap_enabled())
            return page;
        /*
         * Only a PAGE_SIZE-aligned struct page address can be a fake head:
         * its page[1] then lives on the shared vmemmap page and carries the
         * real head's compound_head encoding. The alignment check also
         * avoids touching a cold cacheline in the common case.
         */
        if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) &&
            test_bit(PG_head, &page->flags)) {
            unsigned long head = READ_ONCE(page[1].compound_head);

            if (likely(head & 1))
                return (const struct page *)(head - 1);
        }
        return page;
    }
    #else   /* !CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */
    static inline const struct page *page_fixed_fake_head(const struct page *page)
    {
        return page;
    }
    #endif

    static __always_inline int page_is_fake_head(const struct page *page)
    …
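
Everything downstream funnels through this check: compound_head(), PageHead() and friends consult page_is_fake_head()/page_fixed_fake_head(), so callers never observe a fake head. A hedged sketch of the safe pattern for arbitrary pfn walkers (demo_pfn_folio is hypothetical):

    #include <linux/mm.h>

    /* Resolve an arbitrary pfn to its folio. page_folio() goes through
     * compound_head(), which transparently rewrites a fake tail-page
     * "head" to the real head page when HVO is active. */
    static struct folio *demo_pfn_folio(unsigned long pfn)
    {
        return page_folio(pfn_to_page(pfn));
    }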
|
bootmem_info.h:

    void get_page_bootmem(unsigned long info, struct page *page,
                          unsigned long type);
    void put_page_bootmem(struct page *page);
    …
    static inline void free_bootmem_page(struct page *page)
    {
        unsigned long magic = page->index;

        /* bootmem pages hold exactly two references at this point */
        VM_BUG_ON_PAGE(page_ref_count(page) != 2, page);

        if (magic == SECTION_INFO || magic == MIX_SECTION_INFO)
            put_page_bootmem(page);
        else
            VM_BUG_ON_PAGE(1, page);
    }
    …
    static inline void put_page_bootmem(struct page *page)
    {
    }

    static inline void get_page_bootmem(unsigned long info, struct page *page,
                                        unsigned long type)
    {
    }

    static inline void free_bootmem_page(struct page *page)
    …
|
pageblock-flags.h:

    struct page;

    unsigned long get_pfnblock_flags_mask(const struct page *page,
                        unsigned long pfn,
                        unsigned long mask);

    void set_pfnblock_flags_mask(struct page *page,
                        unsigned long flags,
                        unsigned long pfn,
                        unsigned long mask);
    …
    #define get_pageblock_skip(page) \
        get_pfnblock_flags_mask(page, page_to_pfn(page), \
                (1 << (PB_migrate_skip)))
    #define clear_pageblock_skip(page) \
        set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \
                (1 << PB_migrate_skip))
    #define set_pageblock_skip(page) \
        set_pfnblock_flags_mask(page, (1 << PB_migrate_skip), \
                page_to_pfn(page), \
                (1 << PB_migrate_skip))
    …
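
These skip-bit macros are compaction's per-pageblock memo; they exist only under CONFIG_COMPACTION. A hedged sketch of the consumer side (demo_should_scan is hypothetical; the real logic lives in mm/compaction.c):

    #include <linux/mm.h>
    #include <linux/pageblock-flags.h>

    /* Skip pageblocks that a previous scan already found unprofitable;
     * the bits are cleared wholesale when compaction state is reset. */
    static bool demo_should_scan(struct page *pageblock_page)
    {
        return !get_pageblock_skip(pageblock_page);
    }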
|
highmem.h:

    /* kernel-doc declarations; the definitions live in highmem-internal.h */
    static inline void *kmap(struct page *page);
    static inline void kunmap(struct page *page);
    static inline struct page *kmap_to_page(void *addr);
    static inline void *kmap_local_page(struct page *page);
    static inline void *kmap_atomic(struct page *page);
    …
    static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page,
                                       unsigned long vmaddr)
    {
    }
    …
    static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
    {
        void *addr = kmap_local_page(page);
        clear_user_page(addr, vaddr, page);
        kunmap_local(addr);
    }
    …
        clear_user_highpage(&folio->page, vaddr);   /* in vma_alloc_zeroed_movable_folio() */
    …
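
clear_user_highpage() above is the canonical modern pattern: map, touch, unmap, strictly nested. A similar hedged sketch for zeroing a kernel page (demo_zero_page is hypothetical; this is essentially what clear_highpage() already does):

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* kmap_local_page() never sleeps and the mapping is CPU-local, so it
     * is usable in atomic context; unmaps must be LIFO-ordered. */
    static void demo_zero_page(struct page *page)
    {
        void *addr = kmap_local_page(page);

        memset(addr, 0, PAGE_SIZE);
        kunmap_local(addr);
    }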
|
cleancache.h:

        int (*get_page)(int, struct cleancache_filekey,
                pgoff_t, struct page *);
        void (*put_page)(int, struct cleancache_filekey,
                pgoff_t, struct page *);
    …
    extern int __cleancache_get_page(struct page *);
    extern void __cleancache_put_page(struct page *);
    extern void __cleancache_invalidate_page(struct address_space *, struct page *);
    …
    static inline bool cleancache_fs_enabled(struct page *page)
    {
        return cleancache_fs_enabled_mapping(page->mapping);
    }
    …
    static inline int cleancache_get_page(struct page *page)
    {
        if (cleancache_enabled && cleancache_fs_enabled(page))
            return __cleancache_get_page(page);
    …
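
Cleancache was removed from mainline in v5.17, so this header is assumed to come from a downstream tree that kept it. A hedged sketch of the classic hook placement in a filesystem read path (demo_readpage and demo_read_from_disk are hypothetical):

    #include <linux/cleancache.h>

    static int demo_read_from_disk(struct page *page);  /* hypothetical */

    /* Consult the transcendent cache first; touch the disk only on a
     * miss. cleancache_get_page() returns 0 when the backend filled the
     * page, -1 otherwise (including when cleancache is disabled). */
    static int demo_readpage(struct page *page)
    {
        if (cleancache_get_page(page) == 0)
            return 0;
        return demo_read_from_disk(page);
    }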
|
migrate.h:

    struct movable_operations {
        bool (*isolate_page)(struct page *, isolate_mode_t);
        int (*migrate_page)(struct page *dst, struct page *src,
                enum migrate_mode);
        void (*putback_page)(struct page *);
    };
    …
    bool isolate_movable_page(struct page *page, isolate_mode_t mode);
    …
    static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
    {
        return false;
    }
    …
    bool PageMovable(struct page *page);
    void __SetPageMovable(struct page *page, const struct movable_operations *ops);
    void __ClearPageMovable(struct page *page);
    #else
    static inline bool PageMovable(struct page *page) { return false; }
    static inline void __SetPageMovable(struct page *page,
            const struct movable_operations *ops)
    …
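
A hedged sketch of the three callbacks a driver wires up so its non-LRU pages can be migrated by compaction (all demo_* names are hypothetical; real implementations also take a driver lock and copy private state):

    #include <linux/migrate.h>

    static bool demo_isolate(struct page *page, isolate_mode_t mode)
    {
        /* pin driver state, ensure the page is not concurrently in use */
        return true;
    }

    static int demo_migrate(struct page *dst, struct page *src,
                            enum migrate_mode mode)
    {
        /* copy contents and driver metadata from src to dst */
        return MIGRATEPAGE_SUCCESS;
    }

    static void demo_putback(struct page *page)
    {
        /* return an isolated page to the driver's tracking list */
    }

    static const struct movable_operations demo_mops = {
        .isolate_page = demo_isolate,
        .migrate_page = demo_migrate,
        .putback_page = demo_putback,
    };

A driver then tags each of its pages with __SetPageMovable(page, &demo_mops) while holding the page lock.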
|
rmap.h:

    static __always_inline void __folio_rmap_sanity_checks(struct folio *folio,
            struct page *page, int nr_pages, enum rmap_level level)
    {
        …
        /* both the first and the last mapped page must sit in this folio */
        VM_WARN_ON_FOLIO(page_folio(page) != folio, folio);
        VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio);
        …
    }
    …
    void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
            struct vm_area_struct *, unsigned long address, rmap_t flags);
    #define folio_add_anon_rmap_pte(folio, page, vma, address, flags) \
        folio_add_anon_rmap_ptes(folio, page, 1, vma, address, flags)
    void folio_add_anon_rmap_pmd(struct folio *, struct page *,
            struct vm_area_struct *, unsigned long address, rmap_t flags);
    void folio_add_file_rmap_ptes(struct folio *, struct page *, int nr_pages,
            struct vm_area_struct *);
    #define folio_add_file_rmap_pte(folio, page, vma) \
        folio_add_file_rmap_ptes(folio, page, 1, vma)
    …
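
The _pte macros are just the nr_pages == 1 case of the batched _ptes calls. A hedged sketch of a fault-path call site (demo_map_anon_pte is hypothetical; it assumes the usual rmap preconditions of a locked folio and held page table lock):

    #include <linux/rmap.h>

    /* Account one page of an anon folio as mapped at a PTE; the caller
     * exclusively owns the page, hence RMAP_EXCLUSIVE. */
    static void demo_map_anon_pte(struct folio *folio, struct page *page,
                                  struct vm_area_struct *vma, unsigned long addr)
    {
        folio_add_anon_rmap_pte(folio, page, vma, addr, RMAP_EXCLUSIVE);
    }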
|
kmsan.h:

    struct page;
    …
    bool __init __must_check kmsan_memblock_free_pages(struct page *page,
                                                       unsigned int order);
    …
    void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
    …
    void kmsan_free_page(struct page *page, unsigned int order);
    …
    void kmsan_copy_page_meta(struct page *dst, struct page *src);
    …
            struct page **pages,
    …
    void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
                          enum dma_data_direction dir);
    …
    static inline bool __must_check kmsan_memblock_free_pages(struct page *page,
                                                              unsigned int order)
    {
        return true;
    }
    …
    static inline void kmsan_alloc_page(struct page *page, unsigned int order,
                                        gfp_t flags)
    {
    }

    static inline void kmsan_free_page(struct page *page, unsigned int order)
    {
    }
    …
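
A hedged sketch of the DMA hook's call-site shape (demo_dma_to_device is hypothetical; in mainline the call is made from the dma-mapping core rather than by individual drivers):

    #include <linux/kmsan.h>
    #include <linux/dma-direction.h>

    /* Let KMSAN verify the region handed to the device is fully
     * initialized; for DMA_FROM_DEVICE it instead marks the region
     * initialized once the device may have written it. */
    static void demo_dma_to_device(struct page *page, size_t offset, size_t size)
    {
        kmsan_handle_dma(page, offset, size, DMA_TO_DEVICE);
    }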
|
memremap.h:

        void (*page_free)(struct page *page);
    …
    static inline bool is_device_private_page(const struct page *page)
    {
        return IS_ENABLED(CONFIG_DEVICE_PRIVATE) &&
            is_zone_device_page(page) &&
            page->pgmap->type == MEMORY_DEVICE_PRIVATE;
    }

    static inline bool folio_is_device_private(const struct folio *folio)
    {
        return is_device_private_page(&folio->page);
    }

    static inline bool is_pci_p2pdma_page(const struct page *page)
    {
        return IS_ENABLED(CONFIG_PCI_P2PDMA) &&
            is_zone_device_page(page) &&
            page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA;
    }

    static inline bool is_device_coherent_page(const struct page *page)
    {
        return is_zone_device_page(page) &&
    …
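
These predicates are how generic MM code dispatches on the different ZONE_DEVICE flavors. A hedged sketch (demo_can_map_for_dma is hypothetical):

    #include <linux/memremap.h>

    /* Device-private pages have no CPU-addressable contents, so any path
     * about to touch or DMA-map a page must special-case them. */
    static bool demo_can_map_for_dma(const struct page *page)
    {
        return !is_device_private_page(page);
    }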
|
page-isolation.h:

    static inline bool is_migrate_isolate_page(struct page *page)
    {
        return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
    }
    …
    /* stub when CONFIG_MEMORY_ISOLATION is off */
    static inline bool is_migrate_isolate_page(struct page *page)
    {
        return false;
    }
    …
    void set_pageblock_migratetype(struct page *page, int migratetype);
    …
    bool move_freepages_block_isolate(struct zone *zone, struct page *page,
                                      int migratetype);
|
virtio_balloon.h:

    struct page;
    …
        void (*page_relinquish)(struct page *page, unsigned int nr);
    …
    static inline void page_relinquish(struct page *page, unsigned int nr)
    {
        …
        return virtio_balloon_hyp_ops->page_relinquish(page, nr);
    }
    …
    static inline void page_relinquish(struct page *page, unsigned int nr) { }
|
pgalloc_tag.h:

    union pgtag_ref_handle {
        union codetag_ref *ref;     /* reference in page extension */
        struct page *page;          /* reference in page flags */
    };
    …
    static inline bool get_page_tag_ref(struct page *page, union codetag_ref *ref,
                                        union pgtag_ref_handle *handle)
    {
        if (!page)
            return false;
        …
        /* compressed path: the tag index is carried in page->flags */
        idx = (page->flags >> alloc_tag_ref_offs) & alloc_tag_ref_mask;
        …
        handle->page = page;
        …
        /* uncompressed path: the reference lives in the page extension */
        page_ext = page_ext_get(page);
        …
    }

    static inline void update_page_tag_ref(union pgtag_ref_handle handle,
                                           union codetag_ref *ref)
    {
        struct page *page = handle.page;
        …
        if (WARN_ON(!page || !ref))
            return;
        …
        /* lock-free read-modify-write of the tag bits in page->flags */
        old_flags = READ_ONCE(page->flags);
        …
        } while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
    …
|
mm.h:

    static inline void __mm_zero_struct_page(struct page *page)
    {
        unsigned long *_pp = (void *)page;

        /* struct page must be a multiple of 8 bytes, 56..96 bytes long */
        BUILD_BUG_ON(sizeof(struct page) & 7);
        BUILD_BUG_ON(sizeof(struct page) < 56);
        BUILD_BUG_ON(sizeof(struct page) > 96);

        switch (sizeof(struct page)) {
        …
        }
    }
    …
    #define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
    …
    /* SPARSEMEM without VMEMMAP: mem_map is not virtually contiguous */
    #define nth_page(page,n)  pfn_to_page(page_to_pfn((page)) + (n))
    …
    #else
    #define nth_page(page,n)  ((page) + (n))
    #define folio_page_idx(folio, p)  ((p) - &(folio)->page)
    …
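
The two nth_page() definitions are exactly why iteration code must not do bare struct-page pointer arithmetic across section boundaries: without VMEMMAP, the memmap is only contiguous within one SPARSEMEM section. A hedged sketch (demo_touch_range is hypothetical):

    #include <linux/mm.h>
    #include <linux/highmem.h>

    /* Zero a physically contiguous range page by page. nth_page() routes
     * through the pfn on SPARSEMEM, so it stays correct even when the
     * range crosses a memory section. */
    static void demo_touch_range(struct page *first, unsigned long npages)
    {
        unsigned long i;

        for (i = 0; i < npages; i++)
            clear_highpage(nth_page(first, i));
    }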
|
async_tx.h:

    static inline struct dma_chan *
    async_tx_find_channel(struct async_submit_ctl *submit,
            enum dma_transaction_type tx_type, struct page **dst,
            int dst_count, struct page **src, int src_count,
            size_t len)
    {
        return NULL;
    }
    …
        struct page *page;
    …
    struct dma_async_tx_descriptor *
    async_xor(struct page *dest, struct page **src_list, unsigned int offset,
          int src_cnt, size_t len, struct async_submit_ctl *submit);

    struct dma_async_tx_descriptor *
    async_xor_offs(struct page *dest, unsigned int offset,
            struct page **src_list, unsigned int *src_offset,
            int src_cnt, size_t len, struct async_submit_ctl *submit);

    struct dma_async_tx_descriptor *
    async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
              int src_cnt, size_t len, enum sum_check_flags *result,
              struct async_submit_ctl *submit);

    struct dma_async_tx_descriptor *
    async_xor_val_offs(struct page *dest, unsigned int offset,
            struct page **src_list, unsigned int *src_offset,
            int src_cnt, size_t len, enum sum_check_flags *result,
            struct async_submit_ctl *submit);

    struct dma_async_tx_descriptor *
    async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
             unsigned int src_offset, size_t len,
             struct async_submit_ctl *submit);
    …
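
A hedged sketch of a parity computation over these primitives, assuming the submit descriptor was prepared with init_async_submit() (demo_parity is hypothetical):

    #include <linux/async_tx.h>

    /* XOR src_cnt source pages into dest, RAID5-parity style. The work
     * completes asynchronously via the callback in *submit when a DMA
     * channel is available, else synchronously on the CPU. */
    static struct dma_async_tx_descriptor *
    demo_parity(struct page *dest, struct page **srcs, int src_cnt,
                size_t len, struct async_submit_ctl *submit)
    {
        return async_xor(dest, srcs, 0, src_cnt, len, submit);
    }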
|
/include/trace/events/
page_ref.h:

    DECLARE_EVENT_CLASS(page_ref_mod_template,

        TP_PROTO(struct page *page, int v),

        TP_ARGS(page, v),
        …
        TP_fast_assign(
            __entry->pfn = page_to_pfn(page);
            __entry->flags = page->flags;
            __entry->count = page_ref_count(page);
            __entry->mapcount = atomic_read(&page->_mapcount);
            __entry->mapping = page->mapping;
            __entry->mt = get_pageblock_migratetype(page);
            …
        ),
        …
    );

    DEFINE_EVENT(page_ref_mod_template, page_ref_set,

        TP_PROTO(struct page *page, int v),

        TP_ARGS(page, v)
    );
    …
|
cma.h:

        TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
                 unsigned long count),

        TP_ARGS(name, pfn, page, count),

        TP_STRUCT__entry(
            …
            __field(const struct page *, page)
            …
        ),

        TP_fast_assign(
            …
            __entry->page = page;
            …
        ),

        TP_printk(…
            __entry->page,
            …)
    …
        TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
                 unsigned long count, unsigned int align, int errorno),

        TP_ARGS(name, pfn, page, count, align, errorno),
        …  /* same __field/__entry->page plumbing as above */
    …
|
/include/net/page_pool/
helpers.h:

    static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
    {
        gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

        return page_pool_alloc_pages(pool, gfp);
    }
    …
    static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
                                                        unsigned int *offset,
                                                        unsigned int size)
    …
    static inline struct page *page_pool_alloc(struct page_pool *pool,
                                               unsigned int *offset,
                                               unsigned int *size, gfp_t gfp)
    {
        unsigned int max_size = PAGE_SIZE << pool->p.order;
        struct page *page;

        if ((*size << 1) > max_size) {
            *size = max_size;
            *offset = 0;
            return page_pool_alloc_pages(pool, gfp);
        }

        page = page_pool_alloc_frag(pool, offset, *size, gfp);
        if (unlikely(!page))
            return NULL;
        …
        return page;
    }
    …
    static inline struct page *page_pool_dev_alloc(struct page_pool *pool,
                                                   unsigned int *offset,
                                                   unsigned int *size)
    …
    static inline void *page_pool_alloc_va(struct page_pool *pool,
                                           unsigned int *size, gfp_t gfp)
    {
        unsigned int offset;
        struct page *page;

        /* Mask off __GFP_HIGHMEM to ensure we can use page_address() */
        page = page_pool_alloc(pool, &offset, size, gfp & ~__GFP_HIGHMEM);
    …
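
A hedged sketch of the driver side, assuming a pool created earlier with page_pool_create() (demo_rx_refill is hypothetical):

    #include <net/page_pool/helpers.h>

    /* Grab an RX buffer fragment; *size is in/out (requested length in,
     * granted length out). The _dev_ wrappers supply GFP_ATOMIC
     * internally, so this is safe from softirq context. */
    static struct page *demo_rx_refill(struct page_pool *pool,
                                       unsigned int *offset, unsigned int *size)
    {
        return page_pool_dev_alloc(pool, offset, size);
    }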
|
/include/xen/
grant_table.h:

        struct page **pages;
    …
    void gnttab_end_foreign_access(grant_ref_t ref, struct page *page);
    …
    static inline void gnttab_page_grant_foreign_access_ref_one(
        grant_ref_t ref, domid_t domid,
        struct page *page, int readonly)
    {
        gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page),
                                        readonly);
    }
    …
    int gnttab_alloc_pages(int nr_pages, struct page **pages);
    void gnttab_free_pages(int nr_pages, struct page **pages);
    …
        struct page *pages;
    …
    int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
    void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
                               unsigned int num);
    …
        struct page **pages;
    …
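
A hedged sketch of a backend-style buffer setup using the batch allocator (demo_grant_buffer is hypothetical; the actual granting step depends on the protocol being implemented):

    #include <xen/grant_table.h>

    /* Allocate nr pages suitable for granting to a foreign domain and
     * release them again; real code would grant each page with
     * gnttab_grant_foreign_access(), run the I/O, then end access. */
    static int demo_grant_buffer(int nr, struct page **pages)
    {
        int err = gnttab_alloc_pages(nr, pages);

        if (err)
            return err;
        /* ... grant, use, and un-grant the pages here ... */
        gnttab_free_pages(nr, pages);
        return 0;
    }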
|
mem-reservation.h:

    static inline void xenmem_reservation_scrub_page(struct page *page)
    {
        if (xen_scrub_pages)
            clear_highpage(page);
    }
    …
    void __xenmem_reservation_va_mapping_update(unsigned long count,
                                                struct page **pages,
                                                xen_pfn_t *frames);

    void __xenmem_reservation_va_mapping_reset(unsigned long count,
                                               struct page **pages);
    …
    static inline void xenmem_reservation_va_mapping_update(unsigned long count,
                                                            struct page **pages,
                                                            xen_pfn_t *frames)
    {
    …
    static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
                                                           struct page **pages)
    {
    …
|
/include/asm-generic/
memory_model.h:

    /* CONFIG_FLATMEM */
    #define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
                     ARCH_PFN_OFFSET)
    …
    /* CONFIG_SPARSEMEM_VMEMMAP */
    #define __page_to_pfn(page) (unsigned long)((page) - vmemmap)
    …
    /* CONFIG_SPARSEMEM: each section carries its own memmap */
    #define __page_to_pfn(pg)                   \
    ({  const struct page *__pg = (pg);             \
        …
    })
    …
    #ifdef CONFIG_DEBUG_VIRTUAL
    #define page_to_phys(page)                  \
    ({                              \
        unsigned long __pfn = page_to_pfn(page);        \
        …
    })
    #else
    #define page_to_phys(page)  PFN_PHYS(page_to_pfn(page))
    #endif
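
Whatever the memory model, the conversion pair must invert each other. A trivial hedged check (demo_round_trip is hypothetical):

    #include <linux/mm.h>

    /* For any pfn backed by a valid struct page, converting to the page
     * and back is the identity, regardless of FLATMEM/SPARSEMEM layout. */
    static bool demo_round_trip(unsigned long pfn)
    {
        return page_to_pfn(pfn_to_page(pfn)) == pfn;
    }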
|