
Searched refs:folio (Results 1 – 25 of 70) sorted by relevance


/include/linux/
rmap.h
174 struct anon_vma *folio_get_anon_vma(struct folio *folio);
197 static inline void __folio_rmap_sanity_checks(struct folio *folio, in __folio_rmap_sanity_checks() argument
201 VM_WARN_ON_FOLIO(folio_test_hugetlb(folio), folio); in __folio_rmap_sanity_checks()
204 VM_WARN_ON_FOLIO(is_zero_folio(folio), folio); in __folio_rmap_sanity_checks()
216 VM_WARN_ON_FOLIO(page_folio(page) != folio, folio); in __folio_rmap_sanity_checks()
217 VM_WARN_ON_FOLIO(page_folio(page + nr_pages - 1) != folio, folio); in __folio_rmap_sanity_checks()
228 VM_WARN_ON_FOLIO(folio_nr_pages(folio) != HPAGE_PMD_NR, folio); in __folio_rmap_sanity_checks()
229 VM_WARN_ON_FOLIO(nr_pages != HPAGE_PMD_NR, folio); in __folio_rmap_sanity_checks()
239 void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
240 void folio_add_anon_rmap_ptes(struct folio *, struct page *, int nr_pages,
[all …]
hugetlb_cgroup.h
61 __hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd) in __hugetlb_cgroup_from_folio() argument
63 VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio); in __hugetlb_cgroup_from_folio()
65 return folio->_hugetlb_cgroup_rsvd; in __hugetlb_cgroup_from_folio()
67 return folio->_hugetlb_cgroup; in __hugetlb_cgroup_from_folio()
70 static inline struct hugetlb_cgroup *hugetlb_cgroup_from_folio(struct folio *folio) in hugetlb_cgroup_from_folio() argument
72 return __hugetlb_cgroup_from_folio(folio, false); in hugetlb_cgroup_from_folio()
76 hugetlb_cgroup_from_folio_rsvd(struct folio *folio) in hugetlb_cgroup_from_folio_rsvd() argument
78 return __hugetlb_cgroup_from_folio(folio, true); in hugetlb_cgroup_from_folio_rsvd()
81 static inline void __set_hugetlb_cgroup(struct folio *folio, in __set_hugetlb_cgroup() argument
84 VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio); in __set_hugetlb_cgroup()
[all …]
mm_inline.h
31 static inline int folio_is_file_lru(struct folio *folio) in folio_is_file_lru() argument
33 return !folio_test_swapbacked(folio); in folio_is_file_lru()
69 static __always_inline void __folio_clear_lru_flags(struct folio *folio) in __folio_clear_lru_flags() argument
71 VM_BUG_ON_FOLIO(!folio_test_lru(folio), folio); in __folio_clear_lru_flags()
73 __folio_clear_lru(folio); in __folio_clear_lru_flags()
76 if (folio_test_active(folio) && folio_test_unevictable(folio)) in __folio_clear_lru_flags()
79 __folio_clear_active(folio); in __folio_clear_lru_flags()
80 __folio_clear_unevictable(folio); in __folio_clear_lru_flags()
90 static __always_inline enum lru_list folio_lru_list(struct folio *folio) in folio_lru_list() argument
94 VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio); in folio_lru_list()
[all …]
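
A tiny usage sketch of folio_is_file_lru() from the mm_inline.h hits above; the two counters are hypothetical and only illustrate routing a folio to file-backed vs. anonymous accounting.

#include <linux/mm.h>
#include <linux/mm_inline.h>

static void example_count_folio(struct folio *folio, unsigned long *file_pages,
				unsigned long *anon_pages)
{
	/* folio_is_file_lru() is simply !folio_test_swapbacked(folio). */
	if (folio_is_file_lru(folio))
		*file_pages += folio_nr_pages(folio);
	else
		*anon_pages += folio_nr_pages(folio);
}
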
migrate.h
10 typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
11 typedef void free_folio_t(struct folio *folio, unsigned long private);
66 int migrate_folio(struct address_space *mapping, struct folio *dst,
67 struct folio *src, enum migrate_mode mode);
71 struct folio *alloc_migration_target(struct folio *src, unsigned long private);
73 bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
76 struct folio *dst, struct folio *src);
79 void folio_migrate_flags(struct folio *newfolio, struct folio *folio);
81 struct folio *newfolio, struct folio *folio, int extra_count);
90 static inline struct folio *alloc_migration_target(struct folio *src, in alloc_migration_target()
[all …]
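
A hedged sketch of an allocation callback matching the new_folio_t typedef above; the same-order, GFP_HIGHUSER_MOVABLE policy is an illustrative assumption, not what any particular caller does (many use alloc_migration_target() instead).

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/migrate.h>

/* Allocate a same-order replacement folio for migration (illustrative policy). */
static struct folio *example_new_folio(struct folio *src, unsigned long private)
{
	return folio_alloc(GFP_HIGHUSER_MOVABLE, folio_order(src));
}
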
page_ref.h
87 static inline int folio_ref_count(const struct folio *folio) in folio_ref_count() argument
89 return page_ref_count(&folio->page); in folio_ref_count()
104 static inline void folio_set_count(struct folio *folio, int v) in folio_set_count() argument
106 set_page_count(&folio->page, v); in folio_set_count()
125 static inline void folio_ref_add(struct folio *folio, int nr) in folio_ref_add() argument
127 page_ref_add(&folio->page, nr); in folio_ref_add()
137 static inline void folio_ref_sub(struct folio *folio, int nr) in folio_ref_sub() argument
139 page_ref_sub(&folio->page, nr); in folio_ref_sub()
142 static inline int folio_ref_sub_return(struct folio *folio, int nr) in folio_ref_sub_return() argument
144 int ret = atomic_sub_return(nr, &folio->_refcount); in folio_ref_sub_return()
[all …]
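
A small sketch of the folio_ref_* accessors above: pinning a folio across some hypothetical asynchronous work with an extra reference. It assumes the caller already holds its own reference, so the count never drops to zero here.

#include <linux/mm.h>
#include <linux/page_ref.h>

static void example_pin_folio(struct folio *folio)
{
	/* The caller's existing reference keeps @folio alive across this call. */
	VM_WARN_ON_ONCE(folio_ref_count(folio) == 0);
	folio_ref_add(folio, 1);	/* extra reference for the async user */
}

static void example_unpin_folio(struct folio *folio)
{
	folio_ref_sub(folio, 1);	/* drop the extra reference taken above */
}
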
pagemap.h
541 struct address_space *folio_mapping(struct folio *);
542 struct address_space *swapcache_mapping(struct folio *);
556 static inline struct address_space *folio_file_mapping(struct folio *folio) in folio_file_mapping() argument
558 if (unlikely(folio_test_swapcache(folio))) in folio_file_mapping()
559 return swapcache_mapping(folio); in folio_file_mapping()
561 return folio->mapping; in folio_file_mapping()
576 static inline struct address_space *folio_flush_mapping(struct folio *folio) in folio_flush_mapping() argument
578 if (unlikely(folio_test_swapcache(folio))) in folio_flush_mapping()
581 return folio_mapping(folio); in folio_flush_mapping()
598 static inline struct inode *folio_inode(struct folio *folio) in folio_inode() argument
[all …]
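
A sketch of the mapping accessors listed above: resolve the inode behind a folio, returning NULL where no file inode exists. Treating swap-cache and anonymous folios as the "no inode" cases is an assumption made for this example.

#include <linux/fs.h>
#include <linux/pagemap.h>

static struct inode *example_folio_to_inode(struct folio *folio)
{
	/* Swap-cache and anonymous folios have no file inode behind them. */
	if (folio_test_swapcache(folio) || !folio_mapping(folio))
		return NULL;

	return folio_inode(folio);	/* i.e. folio->mapping->host */
}
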
page_idle.h
14 static inline bool folio_test_young(const struct folio *folio) in folio_test_young() argument
16 struct page_ext *page_ext = page_ext_get(&folio->page); in folio_test_young()
28 static inline void folio_set_young(struct folio *folio) in folio_set_young() argument
30 struct page_ext *page_ext = page_ext_get(&folio->page); in folio_set_young()
39 static inline bool folio_test_clear_young(struct folio *folio) in folio_test_clear_young() argument
41 struct page_ext *page_ext = page_ext_get(&folio->page); in folio_test_clear_young()
53 static inline bool folio_test_idle(const struct folio *folio) in folio_test_idle() argument
55 struct page_ext *page_ext = page_ext_get(&folio->page); in folio_test_idle()
67 static inline void folio_set_idle(struct folio *folio) in folio_set_idle() argument
69 struct page_ext *page_ext = page_ext_get(&folio->page); in folio_set_idle()
[all …]
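
A deliberately minimal sketch of the idle-flag accessors above. Real idle-page tracking also clears PTE accessed bits through an rmap walk; only the flag side is shown here.

#include <linux/page_idle.h>

static void example_rearm_idle_tracking(struct folio *folio)
{
	/* Arm (or re-arm) the idle flag; accesses noticed by the MM clear it again. */
	if (!folio_test_idle(folio))
		folio_set_idle(folio);
}
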
zswap.h
28 bool zswap_store(struct folio *folio);
29 bool zswap_load(struct folio *folio);
35 void zswap_folio_swapin(struct folio *folio);
42 static inline bool zswap_store(struct folio *folio) in zswap_store() argument
47 static inline bool zswap_load(struct folio *folio) in zswap_load() argument
60 static inline void zswap_folio_swapin(struct folio *folio) {} in zswap_folio_swapin() argument
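
A hedged sketch of the zswap_store() entry point above, showing only the decision shape a swap-out path takes; the writeback bookkeeping real swap code performs around this call is omitted.

#include <linux/zswap.h>

static bool example_try_zswap_store(struct folio *folio)
{
	/* true: a compressed copy is kept in memory, no device I/O is needed. */
	if (zswap_store(folio))
		return true;

	/* false: the caller writes the folio out to the swap device instead. */
	return false;
}
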
ksm.h
89 struct folio *ksm_might_need_to_copy(struct folio *folio,
92 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
93 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
94 void collect_procs_ksm(struct folio *folio, struct page *page,
126 static inline void collect_procs_ksm(struct folio *folio, struct page *page, in collect_procs_ksm() argument
138 static inline struct folio *ksm_might_need_to_copy(struct folio *folio, in ksm_might_need_to_copy() argument
141 return folio; in ksm_might_need_to_copy()
144 static inline void rmap_walk_ksm(struct folio *folio, in rmap_walk_ksm() argument
149 static inline void folio_migrate_ksm(struct folio *newfolio, struct folio *old) in folio_migrate_ksm()
swap.h
356 struct folio *folio = page_folio(page); in page_swap_entry() local
357 swp_entry_t entry = folio->swap; in page_swap_entry()
359 entry.val += folio_page_idx(folio, page); in page_swap_entry()
367 void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
368 void workingset_refault(struct folio *folio, void *shadow);
369 void workingset_activation(struct folio *folio);
381 void lru_note_cost_refault(struct folio *);
382 void folio_add_lru(struct folio *);
383 void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
385 void folio_mark_accessed(struct folio *);
[all …]
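
A sketch of the LRU helpers in the swap.h hits above: once a freshly populated folio is in place, it is typically put on the LRU; marking it accessed as well is an illustrative choice here, not a rule.

#include <linux/swap.h>

static void example_publish_new_folio(struct folio *folio)
{
	folio_add_lru(folio);		/* queue onto the LRU via the per-CPU batch */
	folio_mark_accessed(folio);	/* treat it as recently used */
}
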
memcontrol.h
372 static inline bool folio_memcg_kmem(struct folio *folio);
379 int mem_cgroup_move_account(struct folio *folio,
406 static inline struct mem_cgroup *__folio_memcg(struct folio *folio) in __folio_memcg() argument
408 unsigned long memcg_data = folio->memcg_data; in __folio_memcg()
410 VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); in __folio_memcg()
411 VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_OBJEXTS, folio); in __folio_memcg()
412 VM_BUG_ON_FOLIO(memcg_data & MEMCG_DATA_KMEM, folio); in __folio_memcg()
427 static inline struct obj_cgroup *__folio_objcg(struct folio *folio) in __folio_objcg() argument
429 unsigned long memcg_data = folio->memcg_data; in __folio_objcg()
431 VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); in __folio_objcg()
[all …]
vmstat.h
422 static inline void __zone_stat_mod_folio(struct folio *folio, in __zone_stat_mod_folio() argument
425 __mod_zone_page_state(folio_zone(folio), item, nr); in __zone_stat_mod_folio()
428 static inline void __zone_stat_add_folio(struct folio *folio, in __zone_stat_add_folio() argument
431 __mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio)); in __zone_stat_add_folio()
434 static inline void __zone_stat_sub_folio(struct folio *folio, in __zone_stat_sub_folio() argument
437 __mod_zone_page_state(folio_zone(folio), item, -folio_nr_pages(folio)); in __zone_stat_sub_folio()
440 static inline void zone_stat_mod_folio(struct folio *folio, in zone_stat_mod_folio() argument
443 mod_zone_page_state(folio_zone(folio), item, nr); in zone_stat_mod_folio()
446 static inline void zone_stat_add_folio(struct folio *folio, in zone_stat_add_folio() argument
449 mod_zone_page_state(folio_zone(folio), item, folio_nr_pages(folio)); in zone_stat_add_folio()
[all …]
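
A sketch of the per-zone counters above: account a folio into a zone stat item and back out again. NR_MLOCK is used purely as a familiar example item.

#include <linux/mm.h>
#include <linux/vmstat.h>

static void example_zone_accounting(struct folio *folio)
{
	/* Add folio_nr_pages(folio) to the counter of the folio's zone... */
	zone_stat_add_folio(folio, NR_MLOCK);

	/* ...and later take the same amount off again. */
	zone_stat_mod_folio(folio, NR_MLOCK, -folio_nr_pages(folio));
}
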
page-flags.h
275 const struct page *: (const struct folio *)_compound_head(p), \
276 struct page *: (struct folio *)_compound_head(p)))
287 #define folio_page(folio, n) nth_page(&(folio)->page, n) argument
314 static const unsigned long *const_folio_flags(const struct folio *folio, in const_folio_flags() argument
317 const struct page *page = &folio->page; in const_folio_flags()
324 static unsigned long *folio_flags(struct folio *folio, unsigned n) in folio_flags() argument
326 struct page *page = &folio->page; in folio_flags()
385 static __always_inline bool folio_test_##name(const struct folio *folio) \
386 { return test_bit(PG_##name, const_folio_flags(folio, page)); }
389 static __always_inline void folio_set_##name(struct folio *folio) \
[all …]
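
The macro machinery above generates the per-flag accessors such as folio_test_dirty() and folio_test_locked(). A trivial sketch using two of them:

#include <linux/page-flags.h>
#include <linux/printk.h>

static void example_report_flags(struct folio *folio)
{
	if (folio_test_dirty(folio) && !folio_test_locked(folio))
		pr_debug("dirty folio %p is not currently locked\n", folio);
}
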
highmem.h
132 static inline void *kmap_local_folio(struct folio *folio, size_t offset);
224 struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, in vma_alloc_zeroed_movable_folio()
227 struct folio *folio; in vma_alloc_zeroed_movable_folio() local
229 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE | __GFP_CMA, 0, vma, vaddr, false); in vma_alloc_zeroed_movable_folio()
230 if (folio) in vma_alloc_zeroed_movable_folio()
231 clear_user_highpage(&folio->page, vaddr); in vma_alloc_zeroed_movable_folio()
233 return folio; in vma_alloc_zeroed_movable_folio()
455 static inline void memcpy_from_folio(char *to, struct folio *folio, in memcpy_from_folio() argument
458 VM_BUG_ON(offset + len > folio_size(folio)); in memcpy_from_folio()
461 const char *from = kmap_local_folio(folio, offset); in memcpy_from_folio()
[all …]
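
A sketch of the helpers in the highmem.h hits above: copy a few bytes out of a folio into a stack buffer. memcpy_from_folio() handles the kmap_local mapping internally; the offset is assumed to leave at least sizeof(buf) bytes inside the folio.

#include <linux/highmem.h>
#include <linux/printk.h>

static void example_peek_folio(struct folio *folio, size_t offset)
{
	char buf[16];

	memcpy_from_folio(buf, folio, offset, sizeof(buf));
	pr_debug("first byte at offset %zu: %#x\n", offset, (unsigned char)buf[0]);
}
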
huge_mm.h
349 bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
352 int min_order_for_split(struct folio *folio);
353 int split_folio_to_list(struct folio *folio, struct list_head *list);
356 struct folio *folio = page_folio(page); in split_huge_page() local
357 int ret = min_order_for_split(folio); in split_huge_page()
371 void deferred_split_folio(struct folio *folio, bool partially_mapped);
374 unsigned long address, bool freeze, struct folio *folio);
387 bool freeze, struct folio *folio);
448 static inline bool folio_test_pmd_mappable(struct folio *folio) in folio_test_pmd_mappable() argument
450 return folio_order(folio) >= HPAGE_PMD_ORDER; in folio_test_pmd_mappable()
[all …]
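
A hedged sketch around the split helpers above: try to split a large folio, roughly what split_huge_page() in the listing does after consulting min_order_for_split(). Locking and refcount requirements are summarized as comments only.

#include <linux/huge_mm.h>

static int example_try_split(struct folio *folio)
{
	if (!folio_test_large(folio))
		return 0;	/* nothing to split */

	/* Caller must hold a folio reference and have the folio locked. */
	return split_folio_to_list(folio, NULL);
}
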
cacheflush.h
7 struct folio;
11 void flush_dcache_folio(struct folio *folio);
14 static inline void flush_dcache_folio(struct folio *folio) in flush_dcache_folio() argument
mm.h
228 #define folio_page_idx(folio, p) (page_to_pfn(p) - folio_pfn(folio)) argument
231 #define folio_page_idx(folio, p) ((p) - &(folio)->page) argument
243 static inline struct folio *lru_to_folio(struct list_head *head) in lru_to_folio()
245 return list_entry((head)->prev, struct folio, lru); in lru_to_folio()
1203 struct folio *folio = (struct folio *)page; in compound_order() local
1205 if (!test_bit(PG_head, &folio->flags)) in compound_order()
1207 return folio->_flags_1 & 0xff; in compound_order()
1219 static inline unsigned int folio_order(const struct folio *folio) in folio_order() argument
1221 if (!folio_test_large(folio)) in folio_order()
1223 return folio->_flags_1 & 0xff; in folio_order()
[all …]
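
A small sketch around folio_order() and folio_page_idx() from the mm.h hits above: report a folio's geometry and where a constituent page sits inside it. It assumes @page really is one of @folio's pages.

#include <linux/mm.h>
#include <linux/printk.h>

static void example_folio_geometry(struct folio *folio, struct page *page)
{
	pr_debug("order %u, %ld pages, %zu bytes, page index %lu\n",
		 folio_order(folio), folio_nr_pages(folio), folio_size(folio),
		 (unsigned long)folio_page_idx(folio, page));
}
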
hugetlb.h
23 void free_huge_folio(struct folio *folio);
149 struct folio **foliop);
156 bool isolate_hugetlb(struct folio *folio, struct list_head *list);
157 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
160 void folio_putback_active_hugetlb(struct folio *folio);
161 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
172 struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
293 struct folio *folio) in hugetlb_folio_mapping_lock_write() argument
405 struct folio **foliop) in hugetlb_mfill_atomic_pte()
418 static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list) in isolate_hugetlb() argument
[all …]
folio_queue.h
213 static inline unsigned int __folio_order(struct folio *folio) in __folio_order() argument
215 if (!folio_test_large(folio)) in __folio_order()
217 return folio->_flags_1 & 0xff; in __folio_order()
233 static inline unsigned int folioq_append(struct folio_queue *folioq, struct folio *folio) in folioq_append() argument
237 folioq->vec.folios[slot] = folio; in folioq_append()
238 folioq->orders[slot] = __folio_order(folio); in folioq_append()
255 static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct folio *folio) in folioq_append_mark() argument
259 folioq->vec.folios[slot] = folio; in folioq_append_mark()
260 folioq->orders[slot] = __folio_order(folio); in folioq_append_mark()
275 static inline struct folio *folioq_folio(const struct folio_queue *folioq, unsigned int slot) in folioq_folio()
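
A sketch of the folio_queue accessors above: append a folio to an already-initialised queue segment and read it back by slot. Initialisation and chaining of further segments are left to the caller.

#include <linux/folio_queue.h>
#include <linux/mm.h>
#include <linux/printk.h>

static unsigned int example_folioq_add(struct folio_queue *folioq,
				       struct folio *folio)
{
	unsigned int slot;

	/* Assumes @folioq was initialised by the caller and still has a free slot. */
	slot = folioq_append(folioq, folio);
	pr_debug("slot %u now holds a folio of %ld pages\n", slot,
		 folio_nr_pages(folioq_folio(folioq, slot)));
	return slot;
}
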
buffer_head.h
64 struct folio *b_folio; /* the folio this bh is mapped to */
186 #define folio_buffers(folio) folio_get_private(folio) argument
188 void buffer_check_dirty_writeback(struct folio *folio,
198 void folio_set_bh(struct buffer_head *bh, struct folio *folio,
200 struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
203 struct buffer_head *create_empty_buffers(struct folio *folio,
254 void block_invalidate_folio(struct folio *folio, size_t offset, size_t length);
255 int block_write_full_folio(struct folio *folio, struct writeback_control *wbc,
257 int __block_write_full_folio(struct inode *inode, struct folio *folio,
259 int block_read_full_folio(struct folio *, get_block_t *);
[all …]
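
A sketch of the buffer-head helpers above, following the common pattern in block-based filesystems: make sure a locked page-cache folio has buffers attached, creating empty ones when it does not.

#include <linux/buffer_head.h>

static struct buffer_head *example_folio_buffers(struct folio *folio,
						 unsigned long blocksize)
{
	struct buffer_head *head = folio_buffers(folio);

	if (!head)
		head = create_empty_buffers(folio, blocksize, 0);
	return head;
}
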
writeback.h
215 void __inode_attach_wb(struct inode *inode, struct folio *folio);
220 void wbc_account_cgroup_owner(struct writeback_control *wbc, struct folio *folio,
236 static inline void inode_attach_wb(struct inode *inode, struct folio *folio) in inode_attach_wb() argument
239 __inode_attach_wb(inode, folio); in inode_attach_wb()
298 static inline void inode_attach_wb(struct inode *inode, struct folio *folio) in inode_attach_wb() argument
327 struct folio *folio, size_t bytes) in wbc_account_cgroup_owner() argument
375 struct folio *writeback_iter(struct address_space *mapping,
376 struct writeback_control *wbc, struct folio *folio, int *error);
378 typedef int (*writepage_t)(struct folio *folio, struct writeback_control *wbc,
389 bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio);
[all …]
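
A hedged sketch of the loop shape suggested by the writeback_iter() declaration above. The per-folio writeout helper is hypothetical; a real one starts I/O on the folio and makes sure it ends up unlocked.

#include <linux/pagemap.h>
#include <linux/writeback.h>

/* Hypothetical per-folio writeout: here it only unlocks the folio again. */
static int example_write_one_folio(struct folio *folio,
				   struct writeback_control *wbc)
{
	folio_unlock(folio);
	return 0;
}

static int example_writepages(struct address_space *mapping,
			      struct writeback_control *wbc)
{
	struct folio *folio = NULL;
	int error = 0;

	while ((folio = writeback_iter(mapping, wbc, folio, &error)))
		error = example_write_one_folio(folio, wbc);

	return error;
}
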
nfs_page.h
47 struct folio *wb_folio;
133 struct folio *folio,
174 static inline struct folio *nfs_page_to_folio(const struct nfs_page *req) in nfs_page_to_folio()
193 struct folio *folio = nfs_page_to_folio(req); in nfs_page_to_page() local
195 if (folio == NULL) in nfs_page_to_page()
197 return folio_page(folio, pgbase >> PAGE_SHIFT); in nfs_page_to_page()
206 struct folio *folio = nfs_page_to_folio(req); in nfs_page_to_inode() local
208 if (folio == NULL) in nfs_page_to_inode()
210 return folio->mapping->host; in nfs_page_to_inode()
221 struct folio *folio = nfs_page_to_folio(req); in nfs_page_max_length() local
[all …]
pagevec.h
17 struct folio;
32 struct folio *folios[PAGEVEC_SIZE];
75 struct folio *folio) in folio_batch_add() argument
77 fbatch->folios[fbatch->nr++] = folio; in folio_batch_add()
89 static inline struct folio *folio_batch_next(struct folio_batch *fbatch) in folio_batch_next()
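
A sketch of the folio_batch API above: gather caller-supplied folios into a fixed-size batch and walk it afterwards. folio_batch_init() is assumed from the same header; folio references stay with the caller here.

#include <linux/mm.h>
#include <linux/pagevec.h>
#include <linux/printk.h>

static void example_batch_walk(struct folio **src, unsigned int n)
{
	struct folio_batch fbatch;
	struct folio *folio;
	unsigned int i;

	folio_batch_init(&fbatch);

	for (i = 0; i < n; i++) {
		/* Returns the free slots left; 0 means the batch is now full. */
		if (!folio_batch_add(&fbatch, src[i]))
			break;
	}

	while ((folio = folio_batch_next(&fbatch)))
		pr_debug("batched folio of %ld pages\n", folio_nr_pages(folio));
}
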
/include/trace/events/
pagemap.h
19 #define trace_pagemap_flags(folio) ( \ argument
20 (folio_test_anon(folio) ? PAGEMAP_ANONYMOUS : PAGEMAP_FILE) | \
21 (folio_mapped(folio) ? PAGEMAP_MAPPED : 0) | \
22 (folio_test_swapcache(folio) ? PAGEMAP_SWAPCACHE : 0) | \
23 (folio_test_swapbacked(folio) ? PAGEMAP_SWAPBACKED : 0) | \
24 (folio_test_mappedtodisk(folio) ? PAGEMAP_MAPPEDDISK : 0) | \
25 (folio_test_private(folio) ? PAGEMAP_BUFFERS : 0) \
30 TP_PROTO(struct folio *folio),
32 TP_ARGS(folio),
35 __field(struct folio *, folio )
[all …]
/include/trace/hooks/
mm.h
15 struct folio;
21 TP_PROTO(struct shmem_inode_info *info, struct folio **folio, int order),
22 TP_ARGS(info, folio, order), 3);
53 TP_PROTO(struct vm_area_struct *vma, struct folio *folio, bool pageout, bool *need_skip),
54 TP_ARGS(vma, folio, pageout, need_skip));
150 int fgp_flags, gfp_t gfp_mask, struct folio *folio),
151 TP_ARGS(mapping, index, fgp_flags, gfp_mask, folio));
183 TP_PROTO(struct folio *old_folio, struct folio *new_folio),
186 TP_PROTO(struct page_vma_mapped_walk *pvmw, struct folio *folio,
188 TP_ARGS(pvmw, folio, vma, referenced));
[all …]
