Lines Matching refs:folio
541 struct address_space *folio_mapping(struct folio *);
542 struct address_space *swapcache_mapping(struct folio *);
556 static inline struct address_space *folio_file_mapping(struct folio *folio) in folio_file_mapping() argument
558 if (unlikely(folio_test_swapcache(folio))) in folio_file_mapping()
559 return swapcache_mapping(folio); in folio_file_mapping()
561 return folio->mapping; in folio_file_mapping()
576 static inline struct address_space *folio_flush_mapping(struct folio *folio) in folio_flush_mapping() argument
578 if (unlikely(folio_test_swapcache(folio))) in folio_flush_mapping()
581 return folio_mapping(folio); in folio_flush_mapping()
598 static inline struct inode *folio_inode(struct folio *folio) in folio_inode() argument
600 return folio->mapping->host; in folio_inode()
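
As a rough illustration of the mapping accessors above (folio_mapping() and folio_inode()), here is a minimal sketch; the example_* helper is hypothetical and assumes a plain pagecache folio:

#include <linux/pagemap.h>

/* Hypothetical helper: find the inode that owns a pagecache folio. */
static struct inode *example_folio_to_inode(struct folio *folio)
{
	struct address_space *mapping = folio_mapping(folio);

	/* NULL for anonymous, slab or just-truncated folios. */
	if (!mapping)
		return NULL;

	/* For an ordinary pagecache folio this is mapping->host. */
	return folio_inode(folio);
}
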
611 static inline void folio_attach_private(struct folio *folio, void *data) in folio_attach_private() argument
613 folio_get(folio); in folio_attach_private()
614 folio->private = data; in folio_attach_private()
615 folio_set_private(folio); in folio_attach_private()
629 static inline void *folio_change_private(struct folio *folio, void *data) in folio_change_private() argument
631 void *old = folio_get_private(folio); in folio_change_private()
633 folio->private = data; in folio_change_private()
646 static inline void *folio_detach_private(struct folio *folio) in folio_detach_private() argument
648 void *data = folio_get_private(folio); in folio_detach_private()
650 if (!folio_test_private(folio)) in folio_detach_private()
652 folio_clear_private(folio); in folio_detach_private()
653 folio->private = NULL; in folio_detach_private()
654 folio_put(folio); in folio_detach_private()
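
The private-data helpers above pair an attach with a detach around the folio's ->private field. A minimal sketch with a hypothetical per-folio struct and example_* helpers; note that folio_attach_private() takes a folio reference and folio_detach_private() drops it:

#include <linux/pagemap.h>
#include <linux/slab.h>

/* Hypothetical per-folio state a filesystem might hang off ->private. */
struct example_fstate {
	unsigned long flags;
};

static int example_attach_state(struct folio *folio, gfp_t gfp)
{
	struct example_fstate *st = kzalloc(sizeof(*st), gfp);

	if (!st)
		return -ENOMEM;
	/* Takes a folio reference and sets the private flag. */
	folio_attach_private(folio, st);
	return 0;
}

static void example_release_state(struct folio *folio)
{
	/* Returns NULL if no private data; otherwise drops the reference. */
	struct example_fstate *st = folio_detach_private(folio);

	kfree(st);
}
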
670 struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order);
672 static inline struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order) in filemap_alloc_folio_noprof()
693 typedef int filler_t(struct file *, struct folio *);
761 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
777 static inline struct folio *filemap_get_folio(struct address_space *mapping, in filemap_get_folio()
795 static inline struct folio *filemap_lock_folio(struct address_space *mapping, in filemap_lock_folio()
813 static inline struct folio *filemap_grab_folio(struct address_space *mapping, in filemap_grab_folio()
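
For the lookup family above, a small hedged sketch assuming the current convention that filemap_get_folio() returns ERR_PTR(-ENOENT) on a cache miss; the helper name is made up:

#include <linux/pagemap.h>
#include <linux/err.h>

/* Hypothetical lookup: peek at a cached folio without creating one. */
static bool example_index_is_cached_uptodate(struct address_space *mapping,
					     pgoff_t index)
{
	struct folio *folio = filemap_get_folio(mapping, index);
	bool ret;

	if (IS_ERR(folio))		/* -ENOENT: nothing cached here */
		return false;

	ret = folio_test_uptodate(folio);
	folio_put(folio);		/* drop the lookup reference */
	return ret;
}
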
910 extern pgoff_t __folio_swap_cache_index(struct folio *folio);
923 static inline pgoff_t folio_index(struct folio *folio) in folio_index() argument
925 if (unlikely(folio_test_swapcache(folio))) in folio_index()
926 return __folio_swap_cache_index(folio); in folio_index()
927 return folio->index; in folio_index()
936 static inline pgoff_t folio_next_index(struct folio *folio) in folio_next_index() argument
938 return folio->index + folio_nr_pages(folio); in folio_next_index()
951 static inline struct page *folio_file_page(struct folio *folio, pgoff_t index) in folio_file_page() argument
953 return folio_page(folio, index & (folio_nr_pages(folio) - 1)); in folio_file_page()
966 static inline bool folio_contains(struct folio *folio, pgoff_t index) in folio_contains() argument
968 return index - folio_index(folio) < folio_nr_pages(folio); in folio_contains()
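
The index helpers above make it easy to step over large folios. A hypothetical walk of an index range, one folio at a time, using folio_next_index() and folio_contains():

#include <linux/pagemap.h>
#include <linux/err.h>

/*
 * Hypothetical walk: visit every cached folio covering [start, end].
 * A large folio is skipped in one step via folio_next_index().
 */
static void example_walk_range(struct address_space *mapping,
			       pgoff_t start, pgoff_t end)
{
	pgoff_t index = start;

	while (index <= end) {
		struct folio *folio = filemap_get_folio(mapping, index);

		if (IS_ERR(folio)) {
			index++;	/* hole: try the next index */
			continue;
		}
		WARN_ON_ONCE(!folio_contains(folio, index));
		index = folio_next_index(folio); /* jump past the whole folio */
		folio_put(folio);
	}
}
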
1003 struct folio *read_cache_folio(struct address_space *, pgoff_t index,
1005 struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
1018 static inline struct folio *read_mapping_folio(struct address_space *mapping, in read_mapping_folio()
1054 static inline loff_t folio_pos(struct folio *folio) in folio_pos() argument
1056 return page_offset(&folio->page); in folio_pos()
1062 static inline pgoff_t folio_pgoff(struct folio *folio) in folio_pgoff() argument
1064 return folio->index; in folio_pgoff()
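
A brief sketch of read_mapping_folio() together with folio_pos() and folio_pgoff(); the helper and the debug message are illustrative only:

#include <linux/pagemap.h>
#include <linux/err.h>

/* Hypothetical read: fetch an uptodate folio and report its byte offset. */
static struct folio *example_read_folio(struct address_space *mapping,
					pgoff_t index, struct file *file)
{
	struct folio *folio = read_mapping_folio(mapping, index, file);

	if (IS_ERR(folio))
		return folio;		/* I/O or allocation error */

	/* folio_pos() is the file offset of the first byte in the folio. */
	pr_debug("folio at byte %lld covers index %lu\n",
		 folio_pos(folio), folio_pgoff(folio));
	return folio;
}
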
1077 struct folio *folio; member
1083 struct folio *folio; member
1091 if (wait_page->folio != key->folio) in wake_page_match()
1101 void __folio_lock(struct folio *folio);
1102 int __folio_lock_killable(struct folio *folio);
1103 vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
1105 void folio_unlock(struct folio *folio);
1119 static inline bool folio_trylock(struct folio *folio) in folio_trylock() argument
1121 return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0))); in folio_trylock()
1154 static inline void folio_lock(struct folio *folio) in folio_lock() argument
1157 if (!folio_trylock(folio)) in folio_lock()
1158 __folio_lock(folio); in folio_lock()
1174 struct folio *folio; in lock_page() local
1177 folio = page_folio(page); in lock_page()
1178 if (!folio_trylock(folio)) in lock_page()
1179 __folio_lock(folio); in lock_page()
1192 static inline int folio_lock_killable(struct folio *folio) in folio_lock_killable() argument
1195 if (!folio_trylock(folio)) in folio_lock_killable()
1196 return __folio_lock_killable(folio); in folio_lock_killable()
1207 static inline vm_fault_t folio_lock_or_retry(struct folio *folio, in folio_lock_or_retry() argument
1211 if (!folio_trylock(folio)) in folio_lock_or_retry()
1212 return __folio_lock_or_retry(folio, vmf); in folio_lock_or_retry()
1220 void folio_wait_bit(struct folio *folio, int bit_nr);
1221 int folio_wait_bit_killable(struct folio *folio, int bit_nr);
1230 static inline void folio_wait_locked(struct folio *folio) in folio_wait_locked() argument
1232 if (folio_test_locked(folio)) in folio_wait_locked()
1233 folio_wait_bit(folio, PG_locked); in folio_wait_locked()
1236 static inline int folio_wait_locked_killable(struct folio *folio) in folio_wait_locked_killable() argument
1238 if (!folio_test_locked(folio)) in folio_wait_locked_killable()
1240 return folio_wait_bit_killable(folio, PG_locked); in folio_wait_locked_killable()
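
The lock helpers above follow the usual trylock-then-sleep pattern. A minimal, hypothetical caller using the killable variant:

#include <linux/pagemap.h>

/* Hypothetical: lock a folio, bail out cleanly if the task is killed. */
static int example_with_folio_locked(struct folio *folio)
{
	int err = folio_lock_killable(folio);

	if (err)			/* -EINTR: fatal signal while sleeping */
		return err;

	/* ... operate on the folio under PG_locked ... */

	folio_unlock(folio);
	return 0;
}
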
1248 void folio_end_read(struct folio *folio, bool success);
1250 void folio_wait_writeback(struct folio *folio);
1251 int folio_wait_writeback_killable(struct folio *folio);
1253 void folio_end_writeback(struct folio *folio);
1255 void folio_wait_stable(struct folio *folio);
1256 void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
1257 void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
1258 void __folio_cancel_dirty(struct folio *folio);
1259 static inline void folio_cancel_dirty(struct folio *folio) in folio_cancel_dirty() argument
1262 if (folio_test_dirty(folio)) in folio_cancel_dirty()
1263 __folio_cancel_dirty(folio); in folio_cancel_dirty()
1265 bool folio_clear_dirty_for_io(struct folio *folio);
1267 void folio_invalidate(struct folio *folio, size_t offset, size_t length);
1268 bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
1271 int filemap_migrate_folio(struct address_space *mapping, struct folio *dst,
1272 struct folio *src, enum migrate_mode mode);
1276 void folio_end_private_2(struct folio *folio);
1277 void folio_wait_private_2(struct folio *folio);
1278 int folio_wait_private_2_killable(struct folio *folio);
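
The dirty and writeback helpers above are normally used in this order: clear dirty for I/O, start writeback, unlock, then end writeback from the completion path. A compressed, synchronous sketch; the caller is assumed to hold the folio lock and the actual I/O submission is elided:

#include <linux/pagemap.h>

/* Hypothetical writeback step for one locked folio. */
static void example_write_one_folio(struct folio *folio)
{
	if (!folio_clear_dirty_for_io(folio)) {
		folio_unlock(folio);
		return;			/* raced: no longer dirty */
	}

	folio_start_writeback(folio);
	folio_unlock(folio);

	/* ... submit I/O; done synchronously here for the sketch ... */
	folio_end_writeback(folio);
}
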
1283 void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);
1295 int filemap_add_folio(struct address_space *mapping, struct folio *folio,
1297 void filemap_remove_folio(struct folio *folio);
1298 void __filemap_remove_folio(struct folio *folio, void *shadow);
1299 void replace_page_cache_folio(struct folio *old, struct folio *new);
1302 bool filemap_release_folio(struct folio *folio, gfp_t gfp);
1307 int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
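
The add/remove entry points above pair with filemap_alloc_folio(). A hypothetical insertion of a new order-0 folio; on success filemap_add_folio() leaves the folio locked and in the cache:

#include <linux/pagemap.h>
#include <linux/err.h>

/* Hypothetical insertion of a freshly allocated order-0 folio at @index. */
static struct folio *example_add_new_folio(struct address_space *mapping,
					   pgoff_t index, gfp_t gfp)
{
	struct folio *folio = filemap_alloc_folio(gfp, 0);
	int err;

	if (!folio)
		return ERR_PTR(-ENOMEM);

	err = filemap_add_folio(mapping, folio, index, gfp);
	if (err) {
		folio_put(folio);	/* drop our allocation reference */
		return ERR_PTR(err);
	}
	/* On success the folio is locked and visible in the cache. */
	return folio;
}
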
1382 void page_cache_async_ra(struct readahead_control *, struct folio *,
1425 struct folio *folio, unsigned long req_count) in page_cache_async_readahead() argument
1427 DEFINE_READAHEAD(ractl, file, ra, mapping, folio->index); in page_cache_async_readahead()
1428 page_cache_async_ra(&ractl, folio, req_count); in page_cache_async_readahead()
1431 static inline struct folio *__readahead_folio(struct readahead_control *ractl) in __readahead_folio()
1433 struct folio *folio; in __readahead_folio() local
1444 folio = xa_load(&ractl->mapping->i_pages, ractl->_index); in __readahead_folio()
1445 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); in __readahead_folio()
1446 ractl->_batch_count = folio_nr_pages(folio); in __readahead_folio()
1448 return folio; in __readahead_folio()
1462 struct folio *folio = __readahead_folio(ractl); in readahead_page() local
1464 return &folio->page; in readahead_page()
1475 static inline struct folio *readahead_folio(struct readahead_control *ractl) in readahead_folio()
1477 struct folio *folio = __readahead_folio(ractl); in readahead_folio() local
1479 if (folio) in readahead_folio()
1480 folio_put(folio); in readahead_folio()
1481 return folio; in readahead_folio()
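
A skeleton ->readahead() implementation built on readahead_folio(); as the helper above shows, each returned folio has already had its reference dropped but is still locked, so the implementation only needs to fill it, mark it uptodate and unlock it. Everything besides the listed helpers is hypothetical:

#include <linux/pagemap.h>

/* Hypothetical ->readahead() skeleton. */
static void example_readahead(struct readahead_control *ractl)
{
	struct folio *folio;

	while ((folio = readahead_folio(ractl)) != NULL) {
		/* ... read folio_size(folio) bytes at folio_pos(folio) ... */
		folio_mark_uptodate(folio);
		folio_unlock(folio);
	}
}
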
1586 static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio, in folio_mkwrite_check_truncate() argument
1591 size_t offset = offset_in_folio(folio, size); in folio_mkwrite_check_truncate()
1593 if (!folio->mapping) in folio_mkwrite_check_truncate()
1597 if (folio_next_index(folio) - 1 < index) in folio_mkwrite_check_truncate()
1598 return folio_size(folio); in folio_mkwrite_check_truncate()
1600 if (folio->index > index || !offset) in folio_mkwrite_check_truncate()
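
folio_mkwrite_check_truncate() is meant for ->page_mkwrite() handlers. A hedged sketch of that pattern, with the filesystem-specific parts stubbed out:

#include <linux/pagemap.h>
#include <linux/mm.h>

/* Hypothetical ->page_mkwrite(): revalidate the folio against truncate. */
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);
	ssize_t len;

	folio_lock(folio);
	len = folio_mkwrite_check_truncate(folio, inode);
	if (len < 0) {
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;	/* folio was truncated under us */
	}

	/* ... make the first @len bytes writable for this filesystem ... */
	folio_mark_dirty(folio);
	return VM_FAULT_LOCKED;		/* return with the folio still locked */
}
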
1646 unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio) in i_blocks_per_folio() argument
1648 return folio_size(folio) >> inode->i_blkbits; in i_blocks_per_folio()
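
Finally, a tiny illustrative use of i_blocks_per_folio() to iterate the filesystem blocks covered by one folio; the helper is hypothetical:

#include <linux/pagemap.h>

/* Hypothetical per-block walk over a folio. */
static void example_for_each_block(struct inode *inode, struct folio *folio)
{
	unsigned int nr = i_blocks_per_folio(inode, folio);
	unsigned int i;

	for (i = 0; i < nr; i++) {
		loff_t pos = folio_pos(folio) + ((loff_t)i << inode->i_blkbits);
		/* ... handle the block at byte offset @pos ... */
	}
}
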