// SPDX-License-Identifier: GPL-2.0-only
 * Copyright (C) 1994-1999  Linus Torvalds

#include <linux/error-injection.h>
#include <linux/backing-dev.h>

 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 *  ->i_mmap_rwsem			(truncate_pagecache)
 *    ->private_lock			(__free_pte->block_dirty_folio)
 *      ->swap_lock			(exclusive_swap_page, others)
 *        ->i_pages lock
 *
 *  ->i_rwsem
 *    ->invalidate_lock			(acquired by fs in truncate path)
 *      ->i_mmap_rwsem			(truncate->unmap_mapping_range)
 *
 *  ->mmap_lock
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->i_pages lock		(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_lock
 *    ->invalidate_lock			(filemap_fault)
 *      ->lock_page			(filemap_fault, access_process_vm)
 *
 *  ->i_rwsem				(generic_perform_write)
 *    ->mmap_lock			(fault_in_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock				(fs/fs-writeback.c)
 *    ->i_pages lock			(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock			(vma_merge)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock			(try_to_unmap_one)
 *    ->private_lock			(try_to_unmap_one)
 *    ->i_pages lock			(try_to_unmap_one)
 *    ->lruvec->lru_lock		(follow_page->mark_page_accessed)
 *    ->lruvec->lru_lock		(check_pte_range->isolate_lru_page)
 *    ->private_lock			(page_remove_rmap->set_page_dirty)
 *    ->i_pages lock			(page_remove_rmap->set_page_dirty)
 *    bdi.wb->list_lock			(page_remove_rmap->set_page_dirty)
 *    ->inode->i_lock			(page_remove_rmap->set_page_dirty)
 *    ->memcg->move_lock		(page_remove_rmap->folio_memcg_lock)
 *    bdi.wb->list_lock			(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock			(zap_pte_range->set_page_dirty)
 *    ->private_lock			(zap_pte_range->block_dirty_folio)
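/*
 * Illustrative sketch (not part of this file): a filesystem truncate path
 * that honours the ordering documented above.  It takes ->i_rwsem first,
 * then ->invalidate_lock; truncate_setsize() then acquires ->i_mmap_rwsem
 * and the ->i_pages lock internally via unmap_mapping_range() and the
 * pagecache teardown.  The function name is hypothetical; the locking
 * helpers are the real ones.
 */
static void example_truncate(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;

	inode_lock(inode);			/* ->i_rwsem */
	filemap_invalidate_lock(mapping);	/* ->invalidate_lock */
	truncate_setsize(inode, newsize);	/* ->i_mmap_rwsem, ->i_pages */
	filemap_invalidate_unlock(mapping);
	inode_unlock(inode);
}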
static void page_cache_delete(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, folio->index);

	mapping_set_update(&xas, mapping);
	xas_set_order(&xas, folio->index, folio_order(folio));

	folio->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */
	mapping->nrpages -= nr;
static void filemap_unaccount_folio(struct address_space *mapping,
			current->comm, folio_pfn(folio));
		dump_page(&folio->page, "still mapped when deleted");

	if (mapping_exiting(mapping) && !folio_test_large(folio)) {
		int mapcount = page_mapcount(&folio->page);
			page_mapcount_reset(&folio->page);

	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
		__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
			__lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
			filemap_nr_thps_dec(mapping);

	 * unwritten data - on ordinary filesystems.
	 * But it's harmless on in-memory filesystems like tmpfs; and can
			mapping_can_writeback(mapping)))
		folio_account_cleaned(folio, inode_to_wb(mapping->host));
 * sure the page is locked and that nobody else uses it - or that usage

/* in __filemap_remove_folio(): */
	struct address_space *mapping = folio->mapping;

	filemap_unaccount_folio(mapping, folio);
	page_cache_delete(mapping, folio, shadow);

void filemap_free_folio(struct address_space *mapping, struct folio *folio)
	free_folio = mapping->a_ops->free_folio;

 * filemap_remove_folio - Remove folio from page cache.

/* in filemap_remove_folio(): */
	struct address_space *mapping = folio->mapping;

	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	filemap_free_folio(mapping, folio);
 * page_cache_delete_batch - delete several folios from page cache
 * @mapping: the mapping to which folios belong
 * The function walks over mapping->i_pages and removes folios passed in
 * @fbatch from the mapping. The function expects @fbatch to be sorted
 * It tolerates holes in @fbatch (mapping entries at those indices are not

static void page_cache_delete_batch(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);

	mapping_set_update(&xas, mapping);
		if (folio != fbatch->folios[i]) {
			VM_BUG_ON_FOLIO(folio->index >
					fbatch->folios[i]->index, folio);
		folio->mapping = NULL;
		/* Leave folio->index set: truncation lookup relies on it */
	mapping->nrpages -= total_pages;

void delete_from_page_cache_batch(struct address_space *mapping,
	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
		struct folio *folio = fbatch->folios[i];

		filemap_unaccount_folio(mapping, folio);
	page_cache_delete_batch(mapping, fbatch);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

		filemap_free_folio(mapping, fbatch->folios[i]);
int filemap_check_errors(struct address_space *mapping)
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

static int filemap_check_and_keep_errors(struct address_space *mapping)
	if (test_bit(AS_EIO, &mapping->flags))
		return -EIO;
	if (test_bit(AS_ENOSPC, &mapping->flags))
		return -ENOSPC;
 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * Call writepages on the mapping using the provided wbc to control the

int filemap_fdatawrite_wbc(struct address_space *mapping,
	if (!mapping_can_writeback(mapping) ||
	    !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))

	wbc_attach_fdatawrite_inode(wbc, mapping->host);
	ret = do_writepages(mapping, wbc);

 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * Start writeback against all of a mapping's dirty pages that lie
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as

int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
	return filemap_fdatawrite_wbc(mapping, &wbc);

static inline int __filemap_fdatawrite(struct address_space *mapping,
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);

int filemap_fdatawrite(struct address_space *mapping)
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);

 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 * This is a mostly non-blocking flush. Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.

int filemap_flush(struct address_space *mapping)
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
 * filemap_range_has_page - check if a page exists in range.
 * @mapping:           address space within which to check

bool filemap_range_has_page(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);

static void __filemap_fdatawait_range(struct address_space *mapping,
		nr_folios = filemap_get_folios_tag(mapping, &index, end,

 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * Walk the list of under-writeback pages of the given address space

int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_errors(mapping);

 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
 * @mapping:		address space structure to wait for
 * Walk the list of under-writeback pages of the given address space in the
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),

int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_and_keep_errors(mapping);

 * file_fdatawait_range - wait for writeback to complete
 * Walk the list of under-writeback pages of the address space that file
 * status of the address space vs. the file->f_wb_err cursor and return it.
 * Return: error status of the address space vs. the file->f_wb_err cursor.

/* in file_fdatawait_range(): */
	struct address_space *mapping = file->f_mapping;

	__filemap_fdatawait_range(mapping, start_byte, end_byte);

 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 * Walk the list of under-writeback pages of the given address space
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),

int filemap_fdatawait_keep_errors(struct address_space *mapping)
	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
	return filemap_check_and_keep_errors(mapping);

static bool mapping_needs_writeback(struct address_space *mapping)
	return mapping->nrpages;

bool filemap_range_has_writeback(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * Write out and wait upon file offsets lstart->lend, inclusive.
 * that this function can be used to write to the very end-of-file (end = -1).

int filemap_write_and_wait_range(struct address_space *mapping,
	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	err2 = filemap_check_errors(mapping);

void __filemap_set_wb_err(struct address_space *mapping, int err)
	errseq_t eseq = errseq_set(&mapping->wb_err, err);

	trace_filemap_set_wb_err(mapping, eseq);
 * file_check_and_advance_wb_err - report wb error (if any) that was previously
 * Grab the wb_err from the mapping. If it matches what we have in the file,
 * If it doesn't match, then take the mapping value, set the "seen" flag in
 * While we handle mapping->wb_err with atomic operations, the f_wb_err

/* in file_check_and_advance_wb_err(): */
	errseq_t old = READ_ONCE(file->f_wb_err);
	struct address_space *mapping = file->f_mapping;

	if (errseq_check(&mapping->wb_err, old)) {
		spin_lock(&file->f_lock);
		old = file->f_wb_err;
		err = errseq_check_and_advance(&mapping->wb_err,
						&file->f_wb_err);
		spin_unlock(&file->f_lock);

	clear_bit(AS_EIO, &mapping->flags);
	clear_bit(AS_ENOSPC, &mapping->flags);

 * file_write_and_wait_range - write out & wait on a file range
 * Write out and wait upon file offsets lstart->lend, inclusive.
 * that this function can be used to write to the very end-of-file (end = -1).
 * After writing out and waiting on the data, we check and advance the

/* in file_write_and_wait_range(): */
	struct address_space *mapping = file->f_mapping;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
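/*
 * Illustrative sketch (not part of this file): the common shape of a
 * simple ->fsync() built on the helper above.  file_write_and_wait_range()
 * starts writeback, waits for it, and checks/advances the per-file
 * errseq_t cursor so each opener sees a writeback error exactly once.
 * The function name is hypothetical.
 */
static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	int err = file_write_and_wait_range(file, start, end);

	/* a real filesystem would also flush inode metadata here */
	return err;
}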
 * replace_page_cache_folio - replace a pagecache folio with a new one

/* in replace_page_cache_folio(): */
	struct address_space *mapping = old->mapping;
	void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
	pgoff_t offset = old->index;
	XA_STATE(xas, &mapping->i_pages, offset);

	VM_BUG_ON_FOLIO(new->mapping, new);

	new->mapping = mapping;
	new->index = offset;
	old->mapping = NULL;

noinline int __filemap_add_folio(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, index);

	mapping_set_update(&xas, mapping);
	VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
	folio->mapping = mapping;
	folio->index = xas.xa_index;
		int order = -1, split_order = 0;
				xas_set_err(&xas, -EEXIST);
			if (order == -1)
		/* entry may have changed before we re-acquire the lock */
			BUG_ON(shmem_mapping(mapping));
		mapping->nrpages += nr;
	folio->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */

int filemap_add_folio(struct address_space *mapping, struct folio *folio,
	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
		 * data from the working set, only to cache data that will
 * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
 * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
 * @mapping1: the first mapping to lock
 * @mapping2: the second mapping to lock

		down_write(&mapping1->invalidate_lock);
		down_write_nested(&mapping2->invalidate_lock, 1);

 * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
 * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
 * @mapping1: the first mapping to unlock
 * @mapping2: the second mapping to unlock

		up_write(&mapping1->invalidate_lock);
		up_write(&mapping2->invalidate_lock);
 * The page wait code treats the "wait->flags" somewhat unusually, because

/* in wake_page_function(): */
	flags = wait->flags;
		if (test_bit(key->bit_nr, &key->folio->flags))
			return -1;
		if (test_and_set_bit(key->bit_nr, &key->folio->flags))
			return -1;
	 * We are holding the wait-queue lock, but the waiter that
	 * afterwards to avoid any races. This store-release pairs
	 * with the load-acquire in folio_wait_bit_common().
	smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
	wake_up_state(wait->private, mode);
	 * After this list_del_init(&wait->entry) the wait entry
	 * might be de-allocated and the process might even have
	list_del_init_careful(&wait->entry);

/* in folio_wake_bit(): */
	spin_lock_irqsave(&q->lock, flags);
	spin_unlock_irqrestore(&q->lock, flags);
	spin_lock_irqsave(&q->lock, flags);
	spin_unlock_irqrestore(&q->lock, flags);

/* in folio_trylock_flag(): */
	if (wait->flags & WQ_FLAG_EXCLUSIVE) {
		if (test_and_set_bit(bit_nr, &folio->flags))
	} else if (test_bit(bit_nr, &folio->flags))
	wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;

/* in folio_wait_bit_common(): */
	wait->func = wake_page_function;
	wait->flags = 0;
		wait->flags = WQ_FLAG_EXCLUSIVE;
		if (--unfairness < 0)
			wait->flags |= WQ_FLAG_CUSTOM;
		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
		 * be very careful with the 'wait->flags', because
		flags = smp_load_acquire(&wait->flags);
		/* If we were non-exclusive, we're done */
		wait->flags |= WQ_FLAG_DONE;
	 * waiter from the wait-queues, but the folio waiters bit will remain
	 * NOTE! The wait->flags weren't stable until we've done the
	 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
		return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;

 * migration_entry_wait_on_locked - Wait for a migration entry to be removed
	wait->func = wake_page_function;
	wait->flags = 0;
	spin_lock_irq(&q->lock);
	spin_unlock_irq(&q->lock);
		flags = smp_load_acquire(&wait->flags);

 * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
 * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.

 * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue
	spin_lock_irqsave(&q->lock, flags);
	spin_unlock_irqrestore(&q->lock, flags);
 * folio_unlock - Unlock a locked folio.

 * folio_end_private_2 - Clear PG_private_2 and wake any waiters.

 * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.

 * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
 * - 0 if successful.
 * - -EINTR if a fatal signal was encountered.

 * folio_end_writeback - End writeback against a folio.

 * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.
/* in __folio_lock_async(): */
	wait->folio = folio;
	wait->bit_nr = PG_locked;

	spin_lock_irq(&q->lock);
	__add_wait_queue_entry_tail(q, &wait->wait);
		__remove_wait_queue(q, &wait->wait);
		ret = -EIOCBQUEUED;
	spin_unlock_irq(&q->lock);

 * 0 - folio is locked.
 * non-zero - folio is not locked.
 *     mmap_lock or per-VMA lock has been released (mmap_read_unlock() or
 * with the folio locked and the mmap_lock/per-VMA lock is left unperturbed.

/* in __folio_lock_or_retry(): */
	unsigned int flags = vmf->flags;
		 * CAUTION! In this case, mmap_lock/per-VMA lock is not
 * page_cache_next_miss() - Find the next gap in the page cache.
 * @mapping: Mapping.
 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
 * range specified (in which case 'return - index >= max_scan' will be true).
 * In the rare case of index wrap-around, 0 will be returned.

pgoff_t page_cache_next_miss(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, index);

	while (max_scan--) {

 * page_cache_prev_miss() - Find the previous gap in the page cache.
 * @mapping: Mapping.
 * Search the range [max(index - max_scan + 1, 0), index] for the
 * range specified (in which case 'index - return >= max_scan' will be true).
 * In the rare case of wrap-around, ULONG_MAX will be returned.

pgoff_t page_cache_prev_miss(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, index);

	while (max_scan--) {
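/*
 * Illustrative sketch (not part of this file): how readahead-style code
 * can bound a window with page_cache_next_miss().  The function and
 * variable names are hypothetical.
 */
static unsigned long example_cached_run(struct address_space *mapping,
					pgoff_t index, unsigned long max_pages)
{
	/* first gap at or after index + 1, scanning at most max_pages */
	pgoff_t gap = page_cache_next_miss(mapping, index + 1, max_pages);

	return gap - (index + 1);	/* contiguously cached pages after index */
}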
 * filemap_get_entry - Get a page cache entry.
 * @mapping: the address_space to search
 * Looks up the page cache entry at @mapping & @index. If it is a folio,

void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
	XA_STATE(xas, &mapping->i_pages, index);

 * __filemap_get_folio - Find and get a reference to a folio.
 * @mapping: The address_space to search.
 * Looks up the page cache entry at @mapping & @index.

struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
	folio = filemap_get_entry(mapping, index);
			return ERR_PTR(-EAGAIN);
		if (unlikely(folio->mapping != mapping)) {
	if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
		if (!mapping_large_folio_support(mapping))
		if (index & ((1UL << order) - 1))
			err = -ENOMEM;
			err = filemap_add_folio(mapping, folio, index, gfp);
		} while (order-- > 0);
		if (err == -EEXIST)
		 * Return -EAGAIN so that the caller retries in a
		 * blocking fashion instead of propagating -ENOMEM
		if ((fgp_flags & FGP_NOWAIT) && err == -ENOMEM)
			err = -EAGAIN;
		return ERR_PTR(-ENOENT);
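/*
 * Illustrative sketch (not part of this file): the find-or-create idiom
 * with __filemap_get_folio().  FGP_CREAT allocates on a miss, FGP_LOCK
 * returns the folio locked, and failures come back as ERR_PTR() values.
 * The function name is hypothetical.
 */
static int example_touch_index(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index, FGP_LOCK | FGP_CREAT,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	/* ... operate on the locked folio ... */
	folio_unlock(folio);
	folio_put(folio);
	return 0;
}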
 * find_get_entries - gang pagecache lookup
 * @mapping:	The address_space to search
 * the mapping. The entries are placed in @fbatch. find_get_entries()
 * due to not-present entries or large folios.

unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
	XA_STATE(xas, &mapping->i_pages, *start);
		indices[fbatch->nr] = xas.xa_index;
		int idx = folio_batch_count(fbatch) - 1;

		folio = fbatch->folios[idx];

 * find_lock_entries - Find a batch of pagecache entries.
 * @mapping:	The address_space to search.
 * find_lock_entries() will return a batch of entries from @mapping.
 * due to not-present entries, large folios, folios which could not be

unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
	XA_STATE(xas, &mapping->i_pages, *start);
			if (folio->index < *start)
			if (folio_next_index(folio) - 1 > end)
			if (folio->mapping != mapping ||
		indices[fbatch->nr] = xas.xa_index;
		int idx = folio_batch_count(fbatch) - 1;

		folio = fbatch->folios[idx];

 * filemap_get_folios - Get a batch of folios
 * @mapping:	The address_space to search
 * Search for and return a batch of folios in the mapping starting at

unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
	XA_STATE(xas, &mapping->i_pages, *start);
		*start = folio->index + nr;
	 * breaks the iteration when there is a page at index -1 but that is
	if (end == (pgoff_t)-1)
		*start = (pgoff_t)-1;
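/*
 * Illustrative sketch (not part of this file): walking a range with
 * filemap_get_folios().  The helper advances *start past each batch, so
 * the loop simply repeats until a batch comes back empty.  examine() is
 * a hypothetical per-folio callback.
 */
static void example_walk_range(struct address_space *mapping,
			       pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios(mapping, &start, end, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++)
			examine(fbatch.folios[i]);
		folio_batch_release(&fbatch);
		cond_resched();
	}
}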
 * filemap_get_folios_contig - Get a batch of contiguous folios
 * @mapping:	The address_space to search

unsigned filemap_get_folios_contig(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, *start);
			*start = folio->index + nr;
		folio = fbatch->folios[nr - 1];
		*start = folio->index + 1;

 * filemap_get_folios_tag - Get a batch of folios matching @tag
 * @mapping:	The address_space to search

unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
	XA_STATE(xas, &mapping->i_pages, *start);
			*start = folio->index + nr;
	 * breaks the iteration when there is a page at index -1 but that is
	if (end == (pgoff_t)-1)
		*start = (pgoff_t)-1;
 *	---R__________________________________________B__________
 *		^ reading here			^ bad block (assume 4k)

/* in shrink_readahead_size_eio(): */
	ra->ra_pages /= 4;

 * filemap_get_read_batch - Get a batch of folios for read

static void filemap_get_read_batch(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, index);
		xas_advance(&xas, folio_next_index(folio) - 1);

/* in filemap_read_folio(): */
	shrink_readahead_size_eio(&file->f_ra);
	return -EIO;
static bool filemap_range_uptodate(struct address_space *mapping,
	if (!mapping->a_ops->is_partially_uptodate)
	if (mapping->host->i_blkbits >= folio_shift(folio))

	count -= folio_pos(folio) - pos;
		pos -= folio_pos(folio);

	return mapping->a_ops->is_partially_uptodate(folio, pos, count);

/* in filemap_update_page(): */
			struct address_space *mapping, size_t count,
	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!filemap_invalidate_trylock_shared(mapping))
			return -EAGAIN;
		filemap_invalidate_lock_shared(mapping);

		error = -EAGAIN;
		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
		if (!(iocb->ki_flags & IOCB_WAITQ)) {
			filemap_invalidate_unlock_shared(mapping);
		error = __folio_lock_async(folio, iocb->ki_waitq);

	if (!folio->mapping)
	if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio,

	error = -EAGAIN;
	if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
	error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
	filemap_invalidate_unlock_shared(mapping);
/* in filemap_create_folio(): */
		struct address_space *mapping, pgoff_t index,
	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0);
		return -ENOMEM;
	 * pages or ->readahead() that need to hold invalidate_lock
	 * while mapping blocks for IO so let's hold the lock here as
	filemap_invalidate_lock_shared(mapping);
	error = filemap_add_folio(mapping, folio, index,
			mapping_gfp_constraint(mapping, GFP_KERNEL));
	if (error == -EEXIST)

	error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);

	filemap_invalidate_unlock_shared(mapping);
	filemap_invalidate_unlock_shared(mapping);

/* in filemap_readahead(): */
		struct address_space *mapping, struct folio *folio,
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);

	if (iocb->ki_flags & IOCB_NOIO)
		return -EAGAIN;
	page_cache_async_ra(&ractl, folio, last_index - folio->index);
/* in filemap_get_pages(): */
	struct file *filp = iocb->ki_filp;
	struct address_space *mapping = filp->f_mapping;
	struct file_ra_state *ra = &filp->f_ra;
	pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;

	last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE);
		return -EINTR;

	filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
		if (iocb->ki_flags & IOCB_NOIO)
			return -EAGAIN;
		page_cache_sync_readahead(mapping, ra, filp, index,
				last_index - index);
		filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
			return -EAGAIN;
		err = filemap_create_folio(filp, mapping,
				iocb->ki_pos >> PAGE_SHIFT, fbatch);

	folio = fbatch->folios[folio_batch_count(fbatch) - 1];
		err = filemap_readahead(iocb, filp, mapping, folio, last_index);
		if ((iocb->ki_flags & IOCB_WAITQ) &&
			iocb->ki_flags |= IOCB_NOWAIT;
		err = filemap_update_page(iocb, mapping, count, folio,
	if (likely(--fbatch->nr))
 * filemap_read - Read data from the page cache.
 * @iter: Destination for the data.
 * Copies data from the page cache. If the data is not currently present,

/* in filemap_read(): */
	struct file *filp = iocb->ki_filp;
	struct file_ra_state *ra = &filp->f_ra;
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t last_pos = ra->prev_pos;

	if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
	iov_iter_truncate(iter, inode->i_sb->s_maxbytes - iocb->ki_pos);
		 * If we've already successfully copied some data, then we
		 * can no longer safely return -EIOCBQUEUED. Hence mark
		if ((iocb->ki_flags & IOCB_WAITQ) && already_read)
			iocb->ki_flags |= IOCB_NOWAIT;

		if (unlikely(iocb->ki_pos >= i_size_read(inode)))
		error = filemap_get_pages(iocb, iter->count, &fbatch, false);
		 * the correct value for "nr", which means the zero-filled
		 * another truncate extends the file - this is desired though).
		if (unlikely(iocb->ki_pos >= isize))
		end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
		 * block_write_end()->mark_buffer_dirty() or other page
		 * Once we start copying data, we don't want to be touching any
		writably_mapped = mapping_writably_mapped(mapping);
		if (!pos_same_folio(iocb->ki_pos, last_pos - 1,
			size_t offset = iocb->ki_pos & (fsize - 1);
			size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
					     fsize - offset);
			iocb->ki_pos += copied;
			last_pos = iocb->ki_pos;
				error = -EFAULT;
	} while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);

	ra->prev_pos = last_pos;
/* in kiocb_write_and_wait(): */
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	loff_t pos = iocb->ki_pos;
	loff_t end = pos + count - 1;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_needs_writeback(mapping, pos, end))
			return -EAGAIN;

	return filemap_write_and_wait_range(mapping, pos, end);

/* in kiocb_invalidate_pages(): */
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	loff_t pos = iocb->ki_pos;
	loff_t end = pos + count - 1;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_has_page(mapping, pos, end))
			return -EAGAIN;
		ret = filemap_write_and_wait_range(mapping, pos, end);
	 * the new data. We invalidate clean cached page from the region we're
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
 * generic_file_read_iter - generic filesystem read routine
 * @iter:	destination for the data read
 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
 * be returned when no data can be read without waiting for I/O requests
 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
 * requests shall be made for the read or for readahead. When no data
 * can be read, -EAGAIN shall be returned. When readahead would be

/* in generic_file_read_iter(): */
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct file *file = iocb->ki_filp;
		struct address_space *mapping = file->f_mapping;
		struct inode *inode = mapping->host;

		retval = mapping->a_ops->direct_IO(iocb, iter);
			iocb->ki_pos += retval;
			count -= retval;
		if (retval != -EIOCBQUEUED)
			iov_iter_revert(iter, count - iov_iter_count(iter));
		if (iocb->ki_pos >= i_size_read(inode))
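/*
 * Illustrative userspace counterpart (not part of this file): RWF_NOWAIT
 * on preadv2(2) is what sets IOCB_NOWAIT on this path, so an uncached
 * read fails fast with EAGAIN instead of blocking on I/O:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	ssize_t n = preadv2(fd, &iov, 1, offset, RWF_NOWAIT);
 *	if (n < 0 && errno == EAGAIN)
 *		;	// data not cached; fall back to a blocking read
 */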
/* in splice_folio_into_pipe(): */
	size = min(size, folio_size(folio) - offset);
	       !pipe_full(pipe->head, pipe->tail, pipe->max_usage)) {
		size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced);
		pipe->head++;

 * filemap_splice_read - Splice data from a file's pagecache into a pipe
 * will be updated if appropriate; 0 will be returned if there is no more data
 * to be read; -EAGAIN will be returned if the pipe had no space, and some
 * if the pipe has insufficient space, we reach the end of the data or we hit a

/* in filemap_splice_read(): */
	if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes))

	/* Work out how much data we can actually add into the pipe */
	used = pipe_occupancy(pipe->head, pipe->tail);
	npages = max_t(ssize_t, pipe->max_usage - used, 0);
		if (*ppos >= i_size_read(in->f_mapping->host))
		 * the correct value for "nr", which means the zero-filled
		 * another truncate extends the file - this is desired though).
		isize = i_size_read(in->f_mapping->host);
		 * Once we start copying data, we don't want to be touching any
		writably_mapped = mapping_writably_mapped(in->f_mapping);
			n = min_t(loff_t, len, isize - *ppos);
			len -= n;
			in->f_ra.prev_pos = *ppos;
			if (pipe_full(pipe->head, pipe->tail, pipe->max_usage))
/* in folio_seek_hole_data(): */
		struct address_space *mapping, struct folio *folio,
	const struct address_space_operations *ops = mapping->a_ops;
	size_t offset, bsz = i_blocksize(mapping->host);

	if (!ops->is_partially_uptodate)
	if (unlikely(folio->mapping != mapping))

	offset = offset_in_folio(folio, start) & ~(bsz - 1);
		if (ops->is_partially_uptodate(folio, offset, bsz) ==
		start = (start + bsz) & ~((u64)bsz - 1);

/* in seek_folio_size(): */
	return PAGE_SIZE << xa_get_order(xas->xa, xas->xa_index);

 * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.
 * @mapping: Address space to search.
 * contain data, your filesystem can use this function to implement
 * entirely memory-based such as tmpfs, and filesystems which support
 * Return: The requested offset on success, or -ENXIO if @whence specifies
 * SEEK_DATA and there is no data after @start. There is an implicit hole
 * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
 * and @end contain data.

loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
	XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
	pgoff_t max = (end - 1) >> PAGE_SHIFT;

		return -ENXIO;
			start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
		start = -ENXIO;
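/*
 * Illustrative sketch (not part of this file): a pagecache-backed
 * filesystem (tmpfs-like) can implement SEEK_HOLE/SEEK_DATA directly on
 * top of mapping_seek_hole_data().  The function name is hypothetical.
 */
static loff_t example_llseek_hole_data(struct file *file, loff_t offset,
				       int whence)
{
	struct inode *inode = file_inode(file);

	if (whence != SEEK_HOLE && whence != SEEK_DATA)
		return generic_file_llseek(file, offset, whence);

	return mapping_seek_hole_data(inode->i_mapping, offset,
				      i_size_read(inode), whence);
}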
 * lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
 * @vmf: the vm_fault for this fault.
 * @folio: the folio to lock.
 * @fpin: the pointer to the file we may pin (or is already pinned).

/* in lock_folio_maybe_drop_mmap(): */
	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
	if (vmf->flags & FAULT_FLAG_KILLABLE) {
			mmap_read_unlock(vmf->vma->vm_mm);
/* in do_sync_mmap_readahead(): */
	struct file *file = vmf->vma->vm_file;
	struct file_ra_state *ra = &file->f_ra;
	struct address_space *mapping = file->f_mapping;
	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
	unsigned long vm_flags = vmf->vma->vm_flags;

		ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
		ra->size = HPAGE_PMD_NR;
			ra->size *= 2;
		ra->async_size = HPAGE_PMD_NR;

	/* If we don't want any read-ahead, don't bother */
	if (!ra->ra_pages)
		page_cache_sync_ra(&ractl, ra->ra_pages);

	mmap_miss = READ_ONCE(ra->mmap_miss);
		WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
	 * stop bothering with read-ahead. It will only hurt.
	 * mmap read-around
	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
	ra->size = ra->ra_pages;
	ra->async_size = ra->ra_pages / 4;
	ractl._index = ra->start;

/* in do_async_mmap_readahead(): */
	struct file *file = vmf->vma->vm_file;
	struct file_ra_state *ra = &file->f_ra;
	DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);

	/* If we don't want any read-ahead, don't bother */
	if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
	mmap_miss = READ_ONCE(ra->mmap_miss);
		WRITE_ONCE(ra->mmap_miss, --mmap_miss);
	page_cache_async_ra(&ractl, folio, ra->ra_pages);

 * filemap_fault - read in file data for page fault handling
 * mapped memory region to read in file data during a page fault.
 * vma->vm_mm->mmap_lock must be held on entry.
 * Return: bitwise-OR of %VM_FAULT_ codes.
/* in filemap_fault(): */
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t max_idx, index = vmf->pgoff;

	folio = filemap_get_folio(mapping, index);
		if (!(vmf->flags & FAULT_FLAG_TRIED))
			filemap_invalidate_lock_shared(mapping);
		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
		filemap_invalidate_lock_shared(mapping);
		folio = __filemap_get_folio(mapping, index,
					    vmf->gfp_mask);
			filemap_invalidate_unlock_shared(mapping);

	if (unlikely(folio->mapping != mapping)) {
	 * that it's up-to-date. If not, it is going to be due to an error.
	 * time to return to the upper layer and have it re-find the vma and
		filemap_invalidate_unlock_shared(mapping);

	vmf->page = folio_file_page(folio, index);

	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
	filemap_invalidate_unlock_shared(mapping);
	 * re-find the vma and come back and find our hopefully still populated
	filemap_invalidate_unlock_shared(mapping);
/* in filemap_map_pmd(): */
	struct mm_struct *mm = vmf->vma->vm_mm;

	if (pmd_trans_huge(*vmf->pmd)) {
	if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
	if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);

/* in next_uptodate_folio(): */
		struct address_space *mapping, pgoff_t end_pgoff)
		if (folio->mapping != mapping)
		max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
		if (xas->xa_index >= max_idx)

/* in filemap_map_folio_range(): */
	pte_t *old_ptep = vmf->pte;
		 * fault-around logic.
		if (!pte_none(vmf->pte[count]))
			if (in_range(vmf->address, addr, count * PAGE_SIZE))
			vmf->pte += count;
	} while (--nr_pages > 0);
		if (in_range(vmf->address, addr, count * PAGE_SIZE))
	vmf->pte = old_ptep;

/* in filemap_map_order0_folio(): */
	struct page *page = &folio->page;
	 * the fault-around logic.
	if (!pte_none(ptep_get(vmf->pte)))
	if (vmf->address == addr)
/* in filemap_map_pages(): */
	struct vm_area_struct *vma = vmf->vma;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start_pgoff);

	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
	if (!vmf->pte) {
		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
		vmf->pte += xas.xa_index - last_pgoff;
		end = folio->index + folio_nr_pages(folio) - 1;
		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
					xas.xa_index - folio->index, addr,
	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
	pte_unmap_unlock(vmf->pte, vmf->ptl);

	mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
		WRITE_ONCE(file->f_ra.mmap_miss, 0);
		WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);
/* in filemap_page_mkwrite(): */
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct folio *folio = page_folio(vmf->page);

	sb_start_pagefault(mapping->host->i_sb);
	file_update_time(vmf->vma->vm_file);
	if (folio->mapping != mapping) {
	sb_end_pagefault(mapping->host->i_sb);

/* in generic_file_mmap(): */
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->read_folio)
		return -ENOEXEC;
	vma->vm_ops = &generic_file_vm_ops;

 * This is for filesystems which do not implement ->writepage.

/* in generic_file_readonly_mmap(): */
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;

/* !CONFIG_MMU stubs for generic_file_mmap() / generic_file_readonly_mmap(): */
	return -ENOSYS;
	return -ENOSYS;
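/*
 * Illustrative sketch (not part of this file): how a simple filesystem
 * wires the generic pagecache paths together.  example_fops is a
 * hypothetical name; every helper referenced is a real exported one.
 */
static const struct file_operations example_fops = {
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= generic_file_write_iter,
	.mmap		= generic_file_mmap,	/* installs generic_file_vm_ops */
	.splice_read	= filemap_splice_read,
	.fsync		= generic_file_fsync,
};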
static struct folio *do_read_cache_folio(struct address_space *mapping,
		filler = mapping->a_ops->read_folio;
	folio = filemap_get_folio(mapping, index);
			return ERR_PTR(-ENOMEM);
		err = filemap_add_folio(mapping, folio, index, gfp);
			if (err == -EEXIST)
		/* Folio was truncated from mapping */
		if (!folio->mapping) {

 * read_cache_folio - Read into page cache, fill it if needed.
 * @mapping: The address_space to read from.
 * @filler: Function to perform the read, or NULL to use aops->read_folio().
 * Context: May sleep. Expects mapping->invalidate_lock to be held.

struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
	return do_read_cache_folio(mapping, index, filler, file,
			mapping_gfp_mask(mapping));
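/*
 * Illustrative sketch (not part of this file): reading one folio the way
 * callers typically use read_cache_folio().  A NULL filler means the
 * mapping's ->read_folio() is used.  The function name is hypothetical.
 */
static int example_peek_byte(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio = read_cache_folio(mapping, index, NULL, NULL);
	unsigned char first, *p;

	if (IS_ERR(folio))
		return PTR_ERR(folio);

	p = kmap_local_folio(folio, 0);
	first = *p;
	kunmap_local(p);

	folio_put(folio);
	return first;
}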
 * mapping_read_folio_gfp - Read into page cache, using specified allocation flags.
 * @mapping: The address_space for the folio.
 * This is the same as "read_cache_folio(mapping, index, NULL, NULL)", but with
 * possible and so is EINTR. If ->read_folio returns another error,
 * The function expects mapping->invalidate_lock to be already held.

struct folio *mapping_read_folio_gfp(struct address_space *mapping,
	return do_read_cache_folio(mapping, index, NULL, NULL, gfp);

static struct page *do_read_cache_page(struct address_space *mapping,
	folio = do_read_cache_folio(mapping, index, filler, file, gfp);
	return &folio->page;

struct page *read_cache_page(struct address_space *mapping,
	return do_read_cache_page(mapping, index, filler, file,
			mapping_gfp_mask(mapping));

 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * If the page does not get brought uptodate, return -EIO.
 * The function expects mapping->invalidate_lock to be already held.

struct page *read_cache_page_gfp(struct address_space *mapping,
	return do_read_cache_page(mapping, index, NULL, NULL, gfp);
/* in dio_warn_stale_pagecache(): */
	errseq_set(&filp->f_mapping->wb_err, -EIO);
		pr_crit("Page cache invalidation failure on direct I/O.  Possible data corruption due to collision with a direct write!\n");
		pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
			current->comm);

/* in kiocb_invalidate_post_direct_write(): */
	struct address_space *mapping = iocb->ki_filp->f_mapping;

	if (mapping->nrpages &&
	    invalidate_inode_pages2_range(mapping,
			iocb->ki_pos >> PAGE_SHIFT,
			(iocb->ki_pos + count - 1) >> PAGE_SHIFT))
		dio_warn_stale_pagecache(iocb->ki_filp);

/* in generic_file_direct_write(): */
	struct address_space *mapping = iocb->ki_filp->f_mapping;
		if (written == -EBUSY)

	written = mapping->a_ops->direct_IO(iocb, from);
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * Skip invalidation for async writes or if mapping has no pages.
		struct inode *inode = mapping->host;
		loff_t pos = iocb->ki_pos;

		write_len -= written;
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
		iocb->ki_pos = pos;
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, write_len - iov_iter_count(from));
/* in generic_perform_write(): */
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;

		offset = (pos & (PAGE_SIZE - 1));
		bytes = min_t(unsigned long, PAGE_SIZE - offset,
		 * up-to-date.
			status = -EFAULT;
			status = -EINTR;
		status = a_ops->write_begin(file, mapping, pos, bytes,
		if (mapping_writably_mapped(mapping))
		status = a_ops->write_end(file, mapping, pos, bytes, copied,
			iov_iter_revert(i, copied - max(status, 0L));
			 * A short copy made ->write_end() reject the
		balance_dirty_pages_ratelimited(mapping);

	iocb->ki_pos += written;

 * __generic_file_write_iter - write data to a file
 * @from:	iov_iter with data to write
 * This function does all the work needed for actually writing data to a
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * * negative error code if no data has been written at all

/* in __generic_file_write_iter(): */
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;

	if (iocb->ki_flags & IOCB_DIRECT) {
		 * page-cache pages correctly).

 * generic_file_write_iter - write data to a file
 * @from:	iov_iter with data to write
 * * negative error code if no data has been written at all or

/* in generic_file_write_iter(): */
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
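/*
 * Illustrative sketch (not part of this file): what generic_file_write_iter()
 * itself adds around __generic_file_write_iter() — inode locking, the
 * generic checks, and the O_SYNC handling that the __ variant, per the
 * comment above, deliberately leaves to its caller.  The function name is
 * hypothetical.
 */
static ssize_t example_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	ssize_t ret;

	inode_lock(inode);
	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = __generic_file_write_iter(iocb, from);
	inode_unlock(inode);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);	/* O_SYNC/O_DSYNC */
	return ret;
}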
 * filemap_release_folio() - Release fs-specific metadata on a folio.
 * The address_space is trying to release any data attached to a folio
 * (presumably at folio->private).

/* in filemap_release_folio(): */
	struct address_space * const mapping = folio->mapping;

	if (mapping && mapping->a_ops->release_folio)
		return mapping->a_ops->release_folio(folio, gfp);

 * filemap_cachestat() - compute the page cache statistics of a mapping
 * @mapping:	The mapping to compute the statistics for.
 * This will query the page cache statistics of a mapping in the

static void filemap_cachestat(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, first_index);

		 * the rcu-protected xarray.
		folio_last_index = folio_first_index + nr_pages - 1;
			nr_pages -= first_index - folio_first_index;
			nr_pages -= folio_last_index - last_index;
			cs->nr_evicted += nr_pages;
			if (shmem_mapping(mapping)) {
				/* shmem file - in swap cache */
				cs->nr_recently_evicted += nr_pages;
		cs->nr_cache += nr_pages;
			cs->nr_dirty += nr_pages;
			cs->nr_writeback += nr_pages;
/* in can_do_cachestat(): */
	if (f->f_mode & FMODE_WRITE)

 * `off` and `len` must be non-negative integers. If `len` > 0,
 *  zero        - success
 *  -EFAULT     - cstat or cstat_range points to an illegal address
 *  -EINVAL     - invalid flags
 *  -EBADF      - invalid file descriptor
 *  -EOPNOTSUPP - file descriptor is of a hugetlbfs file

/* in SYSCALL_DEFINE4(cachestat, ...): */
	struct address_space *mapping;

		return -EBADF;
		return -EFAULT;
		return -EOPNOTSUPP;
		return -EPERM;
		return -EINVAL;
		csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;
	mapping = f.file->f_mapping;
	filemap_cachestat(mapping, first_index, last_index, &cs);
		return -EFAULT;
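/*
 * Illustrative userspace counterpart (not part of this file): probing a
 * file's cache residency via cachestat(2).  An off/len of 0/0 covers the
 * whole file; the field names match struct cachestat in the uapi headers:
 *
 *	struct cachestat_range range = { .off = 0, .len = 0 };
 *	struct cachestat cs;
 *
 *	if (syscall(__NR_cachestat, fd, &range, &cs, 0) == 0)
 *		printf("cached %llu dirty %llu writeback %llu\n",
 *		       (unsigned long long)cs.nr_cache,
 *		       (unsigned long long)cs.nr_dirty,
 *		       (unsigned long long)cs.nr_writeback);
 */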