/include/linux/ |
D | page_ref.h |
    extern void __page_ref_set(struct page *page, int v);
    extern void __page_ref_mod(struct page *page, int v);
    extern void __page_ref_mod_and_test(struct page *page, int v, int ret);
    extern void __page_ref_mod_and_return(struct page *page, int v, int ret);
    extern void __page_ref_mod_unless(struct page *page, int v, int u);
    extern void __page_ref_freeze(struct page *page, int v, int ret);
    extern void __page_ref_unfreeze(struct page *page, int v);
    static inline void __page_ref_set(struct page *page, int v)
    static inline void __page_ref_mod(struct page *page, int v)
    static inline void __page_ref_mod_and_test(struct page *page, int v, int ret)
    [all …]
|
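The __page_ref_* hooks listed above are the tracepoint backends for the page reference-count helpers this header exports. As a hedged illustration (the example_page_ref() helper is hypothetical; page_ref_inc(), page_ref_count() and page_ref_dec_and_test() are public helpers from page_ref.h that the truncated excerpt does not show):

    #include <linux/page_ref.h>
    #include <linux/gfp.h>
    #include <linux/printk.h>

    /* Hypothetical helper: hold an extra reference on a page while inspecting it. */
    static void example_page_ref(struct page *page)
    {
        page_ref_inc(page);                     /* pin: the page cannot be freed under us */

        pr_info("page refcount is now %d\n", page_ref_count(page));

        if (page_ref_dec_and_test(page))        /* drop: free if that was the last ref */
            __free_page(page);
    }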
D | balloon_compaction.h |
    int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
                       struct page *page, enum migrate_mode mode);
    extern struct page *balloon_page_alloc(void);
    struct page *page);
    extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info);
    struct page *page) in balloon_page_insert()
    __SetPageOffline(page); in balloon_page_insert()
    __SetPageMovable(page, &balloon_mops); in balloon_page_insert()
    set_page_private(page, (unsigned long)balloon); in balloon_page_insert()
    list_add(&page->lru, &balloon->pages); in balloon_page_insert()
    [all …]
|
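balloon_page_alloc() and balloon_page_dequeue() above are the driver-facing side of the balloon allocator. A minimal inflate/deflate sketch, assuming a balloon_dev_info initialised with balloon_devinfo_init() and the balloon_page_enqueue() helper whose declaration is truncated in the excerpt (the example_* names are hypothetical):

    #include <linux/balloon_compaction.h>
    #include <linux/errno.h>
    #include <linux/mm.h>

    /* Hypothetical inflate step: move one page from the buddy allocator into the balloon. */
    static int example_inflate_one(struct balloon_dev_info *b_dev_info)
    {
        struct page *page = balloon_page_alloc();

        if (!page)
            return -ENOMEM;

        /* A real driver would report pfn(page) to the hypervisor here. */
        balloon_page_enqueue(b_dev_info, page);
        return 0;
    }

    /* Hypothetical deflate step: take one page back and return it to the system. */
    static int example_deflate_one(struct balloon_dev_info *b_dev_info)
    {
        struct page *page = balloon_page_dequeue(b_dev_info);

        if (!page)
            return -EAGAIN;

        put_page(page);     /* dequeue cleared the balloon state; drop the last reference */
        return 0;
    }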
D | page_owner.h |
    extern void __reset_page_owner(struct page *page, unsigned short order);
    extern void __set_page_owner(struct page *page,
    extern void __split_page_owner(struct page *page, unsigned int nr);
    extern void __set_page_owner_migrate_reason(struct page *page, int reason);
    extern void __dump_page_owner(const struct page *page);
    static inline void reset_page_owner(struct page *page, unsigned short order)
    __reset_page_owner(page, order); in reset_page_owner()
    static inline void set_page_owner(struct page *page,
    __set_page_owner(page, order, gfp_mask); in set_page_owner()
    static inline void split_page_owner(struct page *page, unsigned int nr)
    [all …]
|
D | page_pinner.h |
    extern void __free_page_pinner(struct page *page, unsigned int order);
    void __page_pinner_failure_detect(struct page *page);
    void __page_pinner_put_page(struct page *page);
    static inline void free_page_pinner(struct page *page, unsigned int order)
    __free_page_pinner(page, order); in free_page_pinner()
    static inline void page_pinner_put_page(struct page *page)
    __page_pinner_put_page(page); in page_pinner_put_page()
    static inline void page_pinner_failure_detect(struct page *page)
    __page_pinner_failure_detect(page); in page_pinner_failure_detect()
    static inline void free_page_pinner(struct page *page, unsigned int order)
    [all …]
|
D | page-flags.h |
    static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
    return page; in page_fixed_fake_head()
    if (IS_ALIGNED((unsigned long)page, PAGE_SIZE) && in page_fixed_fake_head()
    test_bit(PG_head, &page->flags)) { in page_fixed_fake_head()
    unsigned long head = READ_ONCE(page[1].compound_head); in page_fixed_fake_head()
    return (const struct page *)(head - 1); in page_fixed_fake_head()
    return page; in page_fixed_fake_head()
    static inline const struct page *page_fixed_fake_head(const struct page *page)
    return page; in page_fixed_fake_head()
    static __always_inline int page_is_fake_head(struct page *page)
    [all …]
|
D | hugetlb_cgroup.h |
    __hugetlb_cgroup_from_page(struct page *page, bool rsvd)
    VM_BUG_ON_PAGE(!PageHuge(page), page); in __hugetlb_cgroup_from_page()
    if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER) in __hugetlb_cgroup_from_page()
    return (void *)page_private(page + SUBPAGE_INDEX_CGROUP_RSVD); in __hugetlb_cgroup_from_page()
    return (void *)page_private(page + SUBPAGE_INDEX_CGROUP); in __hugetlb_cgroup_from_page()
    static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
    return __hugetlb_cgroup_from_page(page, false); in hugetlb_cgroup_from_page()
    hugetlb_cgroup_from_page_rsvd(struct page *page)
    return __hugetlb_cgroup_from_page(page, true); in hugetlb_cgroup_from_page_rsvd()
    static inline void __set_hugetlb_cgroup(struct page *page,
    [all …]
|
D | highmem-internal.h |
    void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
    void *kmap_high(struct page *page);
    void kunmap_high(struct page *page);
    struct page *__kmap_to_page(void *addr);
    static inline void *kmap(struct page *page)
    if (!PageHighMem(page)) in kmap()
    addr = page_address(page); in kmap()
    addr = kmap_high(page); in kmap()
    static inline void kunmap(struct page *page)
    if (!PageHighMem(page)) in kunmap()
    [all …]
|
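kmap() above hands back a long-lived kernel virtual address for a (possibly highmem) page and may sleep, so it is only usable from process context; kunmap() releases the mapping. A hedged sketch (the helper name is hypothetical):

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Hypothetical helper: zero a possibly-highmem page through a long-lived mapping. */
    static void example_kmap_zero(struct page *page)
    {
        void *vaddr = kmap(page);   /* may sleep; mapping stays valid until kunmap() */

        memset(vaddr, 0, PAGE_SIZE);
        kunmap(page);
    }

For short, CPU-local accesses, the kmap_local_page() interface declared in highmem.h below is preferred.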
D | highmem.h |
    static inline void *kmap(struct page *page);
    static inline void kunmap(struct page *page);
    static inline struct page *kmap_to_page(void *addr);
    static inline void *kmap_local_page(struct page *page);
    static inline void *kmap_atomic(struct page *page);
    static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma…
    static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
    void *addr = kmap_local_page(page); in clear_user_highpage()
    clear_user_page(addr, vaddr, page); in clear_user_highpage()
    static inline struct page *
    [all …]
|
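kmap_local_page(), used by clear_user_highpage() in the excerpt above, creates a short-lived, CPU-local mapping and is the preferred way to touch page contents briefly. A minimal sketch, assuming a caller that copies a buffer into a possibly-highmem page (the helper name is hypothetical):

    #include <linux/highmem.h>
    #include <linux/minmax.h>
    #include <linux/string.h>

    /* Hypothetical helper: copy up to one page of data into a possibly-highmem page. */
    static void example_copy_to_page(struct page *page, const void *src, size_t len)
    {
        void *dst = kmap_local_page(page);  /* cheap, non-sleeping, CPU-local */

        memcpy(dst, src, min_t(size_t, len, PAGE_SIZE));
        kunmap_local(dst);                  /* unmap using the address kmap returned */
    }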
D | bootmem_info.h |
    void get_page_bootmem(unsigned long info, struct page *page,
    void put_page_bootmem(struct page *page);
    static inline void free_bootmem_page(struct page *page)
    unsigned long magic = page->index; in free_bootmem_page()
    VM_BUG_ON_PAGE(page_ref_count(page) != 2, page); in free_bootmem_page()
    put_page_bootmem(page); in free_bootmem_page()
    VM_BUG_ON_PAGE(1, page); in free_bootmem_page()
    static inline void put_page_bootmem(struct page *page)
    static inline void get_page_bootmem(unsigned long info, struct page *page,
    static inline void free_bootmem_page(struct page *page)
    [all …]
|
D | migrate.h |
    typedef struct page *new_page_t(struct page *page, unsigned long private);
    typedef void free_page_t(struct page *page, unsigned long private);
    bool (*isolate_page)(struct page *, isolate_mode_t);
    int (*migrate_page)(struct page *dst, struct page *src,
    void (*putback_page)(struct page *);
    extern struct page *alloc_migration_target(struct page *page, unsigned long private);
    extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
    static inline struct page *alloc_migration_target(struct page *page,
    static inline int isolate_movable_page(struct page *page, isolate_mode_t mode)
    bool PageMovable(struct page *page);
    [all …]
|
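The isolate_page/migrate_page/putback_page members above form the movable-page operations a driver provides so that its non-LRU pages can be migrated during compaction. A hedged sketch of how a hypothetical driver could fill them in (names and bodies are illustrative only):

    #include <linux/migrate.h>
    #include <linux/page-flags.h>

    static bool mydrv_isolate_page(struct page *page, isolate_mode_t mode)
    {
        /* Detach the page from the driver's own lists; true means "isolated". */
        return true;
    }

    static int mydrv_migrate_page(struct page *dst, struct page *src,
                                  enum migrate_mode mode)
    {
        /* Move the driver's state from src to dst; 0 means MIGRATEPAGE_SUCCESS. */
        return 0;
    }

    static void mydrv_putback_page(struct page *page)
    {
        /* Migration aborted: reinsert the page into the driver's lists. */
    }

    static const struct movable_operations mydrv_movable_ops = {
        .isolate_page   = mydrv_isolate_page,
        .migrate_page   = mydrv_migrate_page,
        .putback_page   = mydrv_putback_page,
    };

Each page the driver wants treated this way is then tagged with __SetPageMovable(page, &mydrv_movable_ops), just as balloon_page_insert() does with &balloon_mops in balloon_compaction.h above.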
D | pageblock-flags.h |
    struct page;
    unsigned long get_pfnblock_flags_mask(const struct page *page,
    void set_pfnblock_flags_mask(struct page *page,
    #define get_pageblock_skip(page) \
        get_pfnblock_flags_mask(page, page_to_pfn(page), \
    #define clear_pageblock_skip(page) \
        set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \
    #define set_pageblock_skip(page) \
        set_pfnblock_flags_mask(page, (1 << PB_migrate_skip), \
            page_to_pfn(page), \
    [all …]
|
D | page_idle.h |
    struct page_ext *page_ext = page_ext_get(&folio->page); in folio_test_young()
    struct page_ext *page_ext = page_ext_get(&folio->page); in folio_set_young()
    struct page_ext *page_ext = page_ext_get(&folio->page); in folio_test_clear_young()
    struct page_ext *page_ext = page_ext_get(&folio->page); in folio_test_idle()
    struct page_ext *page_ext = page_ext_get(&folio->page); in folio_set_idle()
    struct page_ext *page_ext = page_ext_get(&folio->page); in folio_clear_idle()
    static inline bool page_is_young(struct page *page)
    return folio_test_young(page_folio(page)); in page_is_young()
    static inline void set_page_young(struct page *page)
    folio_set_young(page_folio(page)); in set_page_young()
    [all …]
|
D | mm.h |
    static inline void __mm_zero_struct_page(struct page *page)
    unsigned long *_pp = (void *)page; in __mm_zero_struct_page()
    BUILD_BUG_ON(sizeof(struct page) & 7); in __mm_zero_struct_page()
    BUILD_BUG_ON(sizeof(struct page) < 56); in __mm_zero_struct_page()
    BUILD_BUG_ON(sizeof(struct page) > 96); in __mm_zero_struct_page()
    switch (sizeof(struct page)) { in __mm_zero_struct_page()
    #define mm_zero_struct_page(pp) ((void)memset((pp), 0, sizeof(struct page)))
    #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
    #define nth_page(page,n) ((page) + (n))
    #define folio_page_idx(folio, p) ((p) - &(folio)->page)
    [all …]
|
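nth_page() above is how callers step through the pages of a physically contiguous range: with SPARSEMEM but without VMEMMAP the struct pages of one allocation are not guaranteed to be virtually contiguous, so plain pointer arithmetic on struct page * is not always safe. A small hedged sketch (the helper name is hypothetical):

    #include <linux/mm.h>
    #include <linux/highmem.h>

    /* Hypothetical helper: zero every subpage of an order-N contiguous allocation. */
    static void example_clear_pages(struct page *head, unsigned int order)
    {
        unsigned long i;

        for (i = 0; i < (1UL << order); i++)
            clear_highpage(nth_page(head, i));  /* valid even across memory sections */
    }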
D | page-isolation.h |
    static inline bool is_migrate_isolate_page(struct page *page)
    return get_pageblock_migratetype(page) == MIGRATE_ISOLATE; in is_migrate_isolate_page()
    static inline bool is_migrate_isolate_page(struct page *page)
    void set_pageblock_migratetype(struct page *page, int migratetype);
    int move_freepages_block(struct zone *zone, struct page *page,
    struct page *alloc_migrate_target(struct page *page, unsigned long private);
|
D | cleancache.h |
    pgoff_t, struct page *);
    pgoff_t, struct page *);
    extern int __cleancache_get_page(struct page *);
    extern void __cleancache_put_page(struct page *);
    extern void __cleancache_invalidate_page(struct address_space *, struct page *);
    static inline bool cleancache_fs_enabled(struct page *page)
    return cleancache_fs_enabled_mapping(page->mapping); in cleancache_fs_enabled()
    static inline int cleancache_get_page(struct page *page)
    if (cleancache_enabled && cleancache_fs_enabled(page)) in cleancache_get_page()
    return __cleancache_get_page(page); in cleancache_get_page()
    [all …]
|
D | kmsan.h |
    struct page;
    bool __init kmsan_memblock_free_pages(struct page *page, unsigned int order);
    void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
    void kmsan_free_page(struct page *page, unsigned int order);
    void kmsan_copy_page_meta(struct page *dst, struct page *src);
    pgprot_t prot, struct page **pages,
    void kmsan_handle_dma(struct page *page, size_t offset, size_t size,
    static inline bool kmsan_memblock_free_pages(struct page *page,
    static inline int kmsan_alloc_page(struct page *page, unsigned int order,
    static inline void kmsan_free_page(struct page *page, unsigned int order)
    [all …]
|
D | rmap.h |
    void page_move_anon_rmap(struct page *, struct vm_area_struct *);
    void page_add_anon_rmap(struct page *, struct vm_area_struct *,
    void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
    void page_add_file_rmap(struct page *, struct vm_area_struct *,
    void page_remove_rmap(struct page *, struct vm_area_struct *,
    void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
    void hugepage_add_new_anon_rmap(struct page *, struct vm_area_struct *,
    static inline void __page_dup_rmap(struct page *page, bool compound)
    atomic_inc(compound ? compound_mapcount_ptr(page) : &page->_mapcount); in __page_dup_rmap()
    static inline void page_dup_file_rmap(struct page *page, bool compound)
    [all …]
|
D | frontswap.h |
    int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
    int (*load)(unsigned, pgoff_t, struct page *); /* load a page */
    extern int __frontswap_store(struct page *page);
    extern int __frontswap_load(struct page *page);
    static inline int frontswap_store(struct page *page)
    return __frontswap_store(page); in frontswap_store()
    static inline int frontswap_load(struct page *page)
    return __frontswap_load(page); in frontswap_load()
|
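The store/load members above are two of the callbacks a frontswap backend implements so that pages being swapped out can be intercepted before they reach the swap device. A hedged, deliberately incomplete sketch (all names hypothetical; a real backend such as zswap also supplies the init and invalidate callbacks and registers the structure with frontswap_register_ops()):

    #include <linux/frontswap.h>

    static int mybackend_store(unsigned type, pgoff_t offset, struct page *page)
    {
        /* Copy the page into backend storage; return 0 on success, -1 to refuse it. */
        return -1;
    }

    static int mybackend_load(unsigned type, pgoff_t offset, struct page *page)
    {
        /* Fill @page from backend storage; return 0 on success, -1 if not present. */
        return -1;
    }

    static const struct frontswap_ops mybackend_ops = {
        .store  = mybackend_store,
        .load   = mybackend_load,
        /* .init and the invalidate callbacks are omitted in this sketch */
    };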
D | secretmem.h |
    static inline bool page_is_secretmem(struct page *page)
    if (PageCompound(page) || !PageLRU(page)) in page_is_secretmem()
    ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS); in page_is_secretmem()
    if (!mapping || mapping != page->mapping) in page_is_secretmem()
    static inline bool page_is_secretmem(struct page *page)
|
D | pagemap.h |
    struct address_space *page_mapping(struct page *);
    static inline struct address_space *page_file_mapping(struct page *page)
    return folio_file_mapping(page_folio(page)); in page_file_mapping()
    static inline struct address_space *page_mapping_file(struct page *page)
    struct folio *folio = page_folio(page); in page_mapping_file()
    static inline void attach_page_private(struct page *page, void *data)
    folio_attach_private(page_folio(page), data); in attach_page_private()
    static inline void *detach_page_private(struct page *page)
    return folio_detach_private(page_folio(page)); in detach_page_private()
    static inline struct page *__page_cache_alloc(gfp_t gfp)
    [all …]
|
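attach_page_private() and detach_page_private() above are the folio-backed wrappers filesystems use to hang per-page state off page->private; attaching also sets PG_private and takes a page reference, which detaching drops. A hedged sketch with a made-up per-page structure:

    #include <linux/pagemap.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    /* Hypothetical per-page bookkeeping a filesystem might want to attach. */
    struct example_page_state {
        unsigned long dirty_bits;
    };

    static int example_attach_state(struct page *page)
    {
        struct example_page_state *state = kzalloc(sizeof(*state), GFP_NOFS);

        if (!state)
            return -ENOMEM;

        attach_page_private(page, state);   /* sets PG_private and grabs a page ref */
        return 0;
    }

    static void example_detach_state(struct page *page)
    {
        /* Returns whatever was attached (or NULL) and drops the extra reference. */
        kfree(detach_page_private(page));
    }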
D | memremap.h |
    void (*page_free)(struct page *page);
    static inline bool is_device_private_page(const struct page *page)
    is_zone_device_page(page) && in is_device_private_page()
    page->pgmap->type == MEMORY_DEVICE_PRIVATE; in is_device_private_page()
    return is_device_private_page(&folio->page); in folio_is_device_private()
    static inline bool is_pci_p2pdma_page(const struct page *page)
    is_zone_device_page(page) && in is_pci_p2pdma_page()
    page->pgmap->type == MEMORY_DEVICE_PCI_P2PDMA; in is_pci_p2pdma_page()
    static inline bool is_device_coherent_page(const struct page *page)
    return is_zone_device_page(page) && in is_device_coherent_page()
    [all …]
|
/include/trace/events/ |
D | page_ref.h |
    TP_PROTO(struct page *page, int v),
    TP_ARGS(page, v),
    __entry->pfn = page_to_pfn(page);
    __entry->flags = page->flags;
    __entry->count = page_ref_count(page);
    __entry->mapcount = page_mapcount(page);
    __entry->mapping = page->mapping;
    __entry->mt = get_pageblock_migratetype(page);
    TP_PROTO(struct page *page, int v),
    TP_ARGS(page, v)
    [all …]
|
D | cma.h |
    TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
    TP_ARGS(name, pfn, page, count, align),
    __field(const struct page *, page)
    __entry->page = page;
    __entry->page,
    TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
    TP_ARGS(name, pfn, page, count),
    __field(const struct page *, page)
    __entry->page = page;
    __entry->page,
    [all …]
|
D | page_pool.h |
    const struct page *page, u32 release),
    TP_ARGS(pool, page, release),
    __field(const struct page *, page)
    __entry->page = page;
    __entry->pfn = page_to_pfn(page);
    __entry->pool, __entry->page, __entry->pfn, __entry->release)
    const struct page *page, u32 hold),
    TP_ARGS(pool, page, hold),
    __field(const struct page *, page)
    __entry->page = page;
    [all …]
|
/include/net/ |
D | page_pool.h |
    struct page *cache[PP_ALLOC_CACHE_SIZE];
    void (*init_callback)(struct page *page, void *arg);
    struct page *frag_page;
    struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
    static inline struct page *page_pool_dev_alloc_pages(struct page_pool *pool)
    struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset,
    static inline struct page *page_pool_dev_alloc_frag(struct page_pool *pool,
    bool page_pool_return_skb_page(struct page *page);
    void page_pool_release_page(struct page_pool *pool, struct page *page);
    struct page *page) in page_pool_release_page()
    [all …]
|
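page_pool_alloc_pages() and its page_pool_dev_alloc_pages() wrapper above are the allocation side of the network page-pool recycler that drivers use for Rx buffers. A hedged sketch of a driver wiring one up (parameter values and helper names are illustrative; a real driver typically also sets PP_FLAG_DMA_MAP and recycles pages from its NAPI poll loop):

    #include <net/page_pool.h>
    #include <linux/numa.h>

    /* Hypothetical setup: one order-0 page per Rx descriptor, ring of 256 entries. */
    static struct page_pool *example_create_rx_pool(struct device *dev)
    {
        struct page_pool_params pp = {
            .order      = 0,
            .pool_size  = 256,
            .nid        = NUMA_NO_NODE,
            .dev        = dev,
        };

        return page_pool_create(&pp);       /* ERR_PTR() on failure */
    }

    static struct page *example_refill_one(struct page_pool *pool)
    {
        /* Fast path used from softirq context when refilling the Rx ring. */
        return page_pool_dev_alloc_pages(pool);
    }

    static void example_recycle_one(struct page_pool *pool, struct page *page)
    {
        /* Hand the buffer back to the pool instead of freeing it outright. */
        page_pool_put_full_page(pool, page, false);
    }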