
Lines matching "page" (full-word matches) in mm/filemap.c, the Linux kernel's page-cache implementation

63  * finished 'unifying' the page and buffer cache and SMP-threaded the
64 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
121 struct page *page, void *shadow) in page_cache_delete() argument
123 XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
129 if (!PageHuge(page)) { in page_cache_delete()
130 xas_set_order(&xas, page->index, compound_order(page)); in page_cache_delete()
131 nr = compound_nr(page); in page_cache_delete()
134 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_cache_delete()
135 VM_BUG_ON_PAGE(PageTail(page), page); in page_cache_delete()
136 VM_BUG_ON_PAGE(nr != 1 && shadow, page); in page_cache_delete()
141 page->mapping = NULL; in page_cache_delete()
142 /* Leave page->index set: truncation lookup relies upon it */ in page_cache_delete()
158 struct page *page) in unaccount_page_cache_page() argument
165 * stale data around in the cleancache once our page is gone in unaccount_page_cache_page()
167 if (PageUptodate(page) && PageMappedToDisk(page)) in unaccount_page_cache_page()
168 cleancache_put_page(page); in unaccount_page_cache_page()
170 cleancache_invalidate_page(mapping, page); in unaccount_page_cache_page()
172 VM_BUG_ON_PAGE(PageTail(page), page); in unaccount_page_cache_page()
173 VM_BUG_ON_PAGE(page_mapped(page), page); in unaccount_page_cache_page()
174 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) { in unaccount_page_cache_page()
177 pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n", in unaccount_page_cache_page()
178 current->comm, page_to_pfn(page)); in unaccount_page_cache_page()
179 dump_page(page, "still mapped when deleted"); in unaccount_page_cache_page()
183 mapcount = page_mapcount(page); in unaccount_page_cache_page()
185 page_count(page) >= mapcount + 2) { in unaccount_page_cache_page()
188 * a good bet that actually the page is unmapped, in unaccount_page_cache_page()
190 * some other bad page check should catch it later. in unaccount_page_cache_page()
192 page_mapcount_reset(page); in unaccount_page_cache_page()
193 page_ref_sub(page, mapcount); in unaccount_page_cache_page()
197 /* hugetlb pages do not participate in page cache accounting. */ in unaccount_page_cache_page()
198 if (PageHuge(page)) in unaccount_page_cache_page()
201 nr = thp_nr_pages(page); in unaccount_page_cache_page()
203 __mod_lruvec_page_state(page, NR_FILE_PAGES, -nr); in unaccount_page_cache_page()
204 if (PageSwapBacked(page)) { in unaccount_page_cache_page()
205 __mod_lruvec_page_state(page, NR_SHMEM, -nr); in unaccount_page_cache_page()
206 if (PageTransHuge(page)) in unaccount_page_cache_page()
207 __dec_node_page_state(page, NR_SHMEM_THPS); in unaccount_page_cache_page()
208 } else if (PageTransHuge(page)) { in unaccount_page_cache_page()
209 __dec_node_page_state(page, NR_FILE_THPS); in unaccount_page_cache_page()
214 * At this point page must be either written or cleaned by in unaccount_page_cache_page()
215 * truncate. Dirty page here signals a bug and loss of in unaccount_page_cache_page()
218 * This fixes dirty accounting after removing the page entirely in unaccount_page_cache_page()
220 * page and anyway will be cleared before returning page into in unaccount_page_cache_page()
223 if (WARN_ON_ONCE(PageDirty(page))) in unaccount_page_cache_page()
224 account_page_cleaned(page, mapping, inode_to_wb(mapping->host)); in unaccount_page_cache_page()
228 * Delete a page from the page cache and free it. Caller has to make
229 * sure the page is locked and that nobody else uses it - or that usage
232 void __delete_from_page_cache(struct page *page, void *shadow) in __delete_from_page_cache() argument
234 struct address_space *mapping = page->mapping; in __delete_from_page_cache()
236 trace_mm_filemap_delete_from_page_cache(page); in __delete_from_page_cache()
238 unaccount_page_cache_page(mapping, page); in __delete_from_page_cache()
239 page_cache_delete(mapping, page, shadow); in __delete_from_page_cache()
243 struct page *page) in page_cache_free_page() argument
245 void (*freepage)(struct page *); in page_cache_free_page()
249 freepage(page); in page_cache_free_page()
251 if (PageTransHuge(page) && !PageHuge(page)) { in page_cache_free_page()
252 page_ref_sub(page, thp_nr_pages(page)); in page_cache_free_page()
253 VM_BUG_ON_PAGE(page_count(page) <= 0, page); in page_cache_free_page()
255 put_page(page); in page_cache_free_page()
260 * delete_from_page_cache - delete page from page cache
261 * @page: the page which the kernel is trying to remove from page cache
263 * This must be called only on pages that have been verified to be in the page
264 * cache and locked. It will never put the page into the free list, the caller
265 * has a reference on the page.
267 void delete_from_page_cache(struct page *page) in delete_from_page_cache() argument
269 struct address_space *mapping = page_mapping(page); in delete_from_page_cache()
272 BUG_ON(!PageLocked(page)); in delete_from_page_cache()
274 __delete_from_page_cache(page, NULL); in delete_from_page_cache()
277 page_cache_free_page(mapping, page); in delete_from_page_cache()
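
As a usage illustration (not part of the listed file): a caller that already holds a reference can drop a page from the cache with the pattern below. The helper name my_drop_page_from_cache is hypothetical; the locking rule follows the comment above __delete_from_page_cache().

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* Hypothetical helper: @page is pinned by the caller and believed to be
     * in the page cache. delete_from_page_cache() requires the page lock. */
    static void my_drop_page_from_cache(struct page *page)
    {
            lock_page(page);
            if (page->mapping)                      /* skip if already truncated */
                    delete_from_page_cache(page);   /* unlink from i_pages, drop the cache ref */
            unlock_page(page);
            put_page(page);                         /* drop the caller's own reference */
    }
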
282 * page_cache_delete_batch - delete several pages from page cache
287 * from the mapping. The function expects @pvec to be sorted by page index
301 struct page *page; in page_cache_delete_batch() local
304 xas_for_each(&xas, page, ULONG_MAX) { in page_cache_delete_batch()
309 if (xa_is_value(page)) in page_cache_delete_batch()
312 * A page got inserted in our range? Skip it. We have our in page_cache_delete_batch()
314 * If we see a page whose index is higher than ours, it in page_cache_delete_batch()
315 * means our page has been removed, which shouldn't be in page_cache_delete_batch()
318 if (page != pvec->pages[i]) { in page_cache_delete_batch()
319 VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index, in page_cache_delete_batch()
320 page); in page_cache_delete_batch()
324 WARN_ON_ONCE(!PageLocked(page)); in page_cache_delete_batch()
326 if (page->index == xas.xa_index) in page_cache_delete_batch()
327 page->mapping = NULL; in page_cache_delete_batch()
328 /* Leave page->index set: truncation lookup relies on it */ in page_cache_delete_batch()
331 * Move to the next page in the vector if this is a regular in page_cache_delete_batch()
332 * page or the index is of the last sub-page of this compound in page_cache_delete_batch()
333 * page. in page_cache_delete_batch()
335 if (page->index + compound_nr(page) - 1 == xas.xa_index) in page_cache_delete_batch()
401 * these two operations is that if a dirty page/buffer is encountered, it must
462 * filemap_range_has_page - check if a page exists in range.
467 * Find at least one page in the range supplied, usually used to check if
470 * Return: %true if at least one page exists in the specified range,
476 struct page *page; in filemap_range_has_page() local
485 page = xas_find(&xas, max); in filemap_range_has_page()
486 if (xas_retry(&xas, page)) in filemap_range_has_page()
489 if (xa_is_value(page)) in filemap_range_has_page()
492 * We don't need to try to pin this page; we're about to in filemap_range_has_page()
494 * there was a page here recently. in filemap_range_has_page()
500 return page != NULL; in filemap_range_has_page()
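
A sketch of the use case the comment above describes, checking for cached pages before forcing writeback; my_flush_if_cached and its parameters are illustrative, not taken from this file.

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    /* Hypothetical helper: write back a byte range only if something is cached there. */
    static int my_flush_if_cached(struct file *file, loff_t pos, loff_t count)
    {
            struct address_space *mapping = file->f_mapping;

            if (!filemap_range_has_page(mapping, pos, pos + count - 1))
                    return 0;       /* no cached pages, nothing to flush */
            return filemap_write_and_wait_range(mapping, pos, pos + count - 1);
    }
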
525 struct page *page = pvec.pages[i]; in __filemap_fdatawait_range() local
527 wait_on_page_writeback(page); in __filemap_fdatawait_range()
528 ClearPageError(page); in __filemap_fdatawait_range()
775 * replace_page_cache_page - replace a pagecache page with a new one
776 * @old: page to be replaced
777 * @new: page to replace with
780 * This function replaces a page in the pagecache with a new one. On
781 * success it acquires the pagecache reference for the new page and
782 * drops it for the old page. Both the old and new pages must be
783 * locked. This function does not add the new page to the LRU, the
790 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) in replace_page_cache_page()
793 void (*freepage)(struct page *) = mapping->a_ops->freepage; in replace_page_cache_page()
812 /* hugetlb pages do not participate in page cache accounting. */ in replace_page_cache_page()
830 noinline int __add_to_page_cache_locked(struct page *page, in __add_to_page_cache_locked() argument
836 int huge = PageHuge(page); in __add_to_page_cache_locked()
840 VM_BUG_ON_PAGE(!PageLocked(page), page); in __add_to_page_cache_locked()
841 VM_BUG_ON_PAGE(PageSwapBacked(page), page); in __add_to_page_cache_locked()
844 get_page(page); in __add_to_page_cache_locked()
845 page->mapping = mapping; in __add_to_page_cache_locked()
846 page->index = offset; in __add_to_page_cache_locked()
849 error = mem_cgroup_charge(page, current->mm, gfp); in __add_to_page_cache_locked()
861 if (order > thp_order(page)) in __add_to_page_cache_locked()
878 if (order > thp_order(page)) { in __add_to_page_cache_locked()
884 xas_store(&xas, page); in __add_to_page_cache_locked()
892 /* hugetlb pages do not participate in page cache accounting */ in __add_to_page_cache_locked()
894 __inc_lruvec_page_state(page, NR_FILE_PAGES); in __add_to_page_cache_locked()
902 mem_cgroup_uncharge(page); in __add_to_page_cache_locked()
906 trace_mm_filemap_add_to_page_cache(page); in __add_to_page_cache_locked()
909 page->mapping = NULL; in __add_to_page_cache_locked()
910 /* Leave page->index set: truncation relies upon it */ in __add_to_page_cache_locked()
911 put_page(page); in __add_to_page_cache_locked()
917 * add_to_page_cache_locked - add a locked page to the pagecache
918 * @page: page to add
919 * @mapping: the page's address_space
920 * @offset: page index
921 * @gfp_mask: page allocation mode
923 * This function is used to add a page to the pagecache. It must be locked.
924 * This function does not add the page to the LRU. The caller must do that.
928 int add_to_page_cache_locked(struct page *page, struct address_space *mapping, in add_to_page_cache_locked() argument
931 return __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_locked()
936 int add_to_page_cache_lru(struct page *page, struct address_space *mapping, in add_to_page_cache_lru() argument
942 __SetPageLocked(page); in add_to_page_cache_lru()
943 ret = __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_lru()
946 __ClearPageLocked(page); in add_to_page_cache_lru()
949 * The page might have been evicted from cache only in add_to_page_cache_lru()
951 * any other repeatedly accessed page. in add_to_page_cache_lru()
956 WARN_ON_ONCE(PageActive(page)); in add_to_page_cache_lru()
958 workingset_refault(page, shadow); in add_to_page_cache_lru()
959 lru_cache_add(page); in add_to_page_cache_lru()
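
A sketch of the allocate-and-insert pattern these two functions implement together (the same shape appears later in do_read_cache_page()); the helper name is hypothetical and error handling is reduced to the essentials.

    #include <linux/err.h>
    #include <linux/gfp.h>
    #include <linux/pagemap.h>

    /* Hypothetical helper: allocate a page and insert it into @mapping at @index. */
    static struct page *my_add_new_page(struct address_space *mapping, pgoff_t index)
    {
            struct page *page = __page_cache_alloc(mapping_gfp_mask(mapping));
            int err;

            if (!page)
                    return ERR_PTR(-ENOMEM);

            err = add_to_page_cache_lru(page, mapping, index,
                                        mapping_gfp_constraint(mapping, GFP_KERNEL));
            if (err) {              /* e.g. -EEXIST when another task raced us */
                    put_page(page);
                    return ERR_PTR(err);
            }
            /* Page is now locked, on the LRU and referenced by the cache. */
            return page;            /* caller must unlock_page() and later put_page() */
    }
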
966 struct page *__page_cache_alloc(gfp_t gfp) in __page_cache_alloc()
969 struct page *page; in __page_cache_alloc() local
976 page = __alloc_pages_node(n, gfp, 0); in __page_cache_alloc()
977 } while (!page && read_mems_allowed_retry(cpuset_mems_cookie)); in __page_cache_alloc()
979 return page; in __page_cache_alloc()
992 * sure the appropriate page became available, this saves space
1000 static wait_queue_head_t *page_waitqueue(struct page *page) in page_waitqueue() argument
1002 return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)]; in page_waitqueue()
1016 * The page wait code treats the "wait->flags" somewhat unusually, because
1065 if (test_bit(key->bit_nr, &key->page->flags)) in wake_page_function()
1068 if (test_and_set_bit(key->bit_nr, &key->page->flags)) in wake_page_function()
1100 static void wake_up_page_bit(struct page *page, int bit_nr) in wake_up_page_bit() argument
1102 wait_queue_head_t *q = page_waitqueue(page); in wake_up_page_bit()
1107 key.page = page; in wake_up_page_bit()
1134 * hash, so in that case check for a page match. That prevents a long- in wake_up_page_bit()
1137 * It is still possible to miss a case here, when we woke page waiters in wake_up_page_bit()
1139 * page waiters. in wake_up_page_bit()
1142 ClearPageWaiters(page); in wake_up_page_bit()
1145 * our page waiters, but the hashed waitqueue has waiters for in wake_up_page_bit()
1154 static void wake_up_page(struct page *page, int bit) in wake_up_page() argument
1156 if (!PageWaiters(page)) in wake_up_page()
1158 wake_up_page_bit(page, bit); in wake_up_page()
1165 EXCLUSIVE, /* Hold ref to page and take the bit when woken, like
1168 SHARED, /* Hold ref to page and check the bit when woken, like
1171 DROP, /* Drop ref to page before wait, no check when woken,
1177 * Attempt to check (or get) the page bit, and mark us done
1180 static inline bool trylock_page_bit_common(struct page *page, int bit_nr, in trylock_page_bit_common() argument
1184 if (test_and_set_bit(bit_nr, &page->flags)) in trylock_page_bit_common()
1186 } else if (test_bit(bit_nr, &page->flags)) in trylock_page_bit_common()
1197 struct page *page, int bit_nr, int state, enum behavior behavior) in wait_on_page_bit_common() argument
1207 !PageUptodate(page) && PageWorkingset(page)) { in wait_on_page_bit_common()
1208 if (!PageSwapBacked(page)) { in wait_on_page_bit_common()
1218 wait_page.page = page; in wait_on_page_bit_common()
1231 * page bit synchronously. in wait_on_page_bit_common()
1237 * page queue), and add ourselves to the wait in wait_on_page_bit_common()
1244 SetPageWaiters(page); in wait_on_page_bit_common()
1245 if (!trylock_page_bit_common(page, bit_nr, wait)) in wait_on_page_bit_common()
1252 * see whether the page bit testing has already in wait_on_page_bit_common()
1255 * We can drop our reference to the page. in wait_on_page_bit_common()
1258 put_page(page); in wait_on_page_bit_common()
1295 if (unlikely(test_and_set_bit(bit_nr, &page->flags))) in wait_on_page_bit_common()
1335 void wait_on_page_bit(struct page *page, int bit_nr) in wait_on_page_bit() argument
1337 wait_queue_head_t *q = page_waitqueue(page); in wait_on_page_bit()
1338 wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); in wait_on_page_bit()
1342 int wait_on_page_bit_killable(struct page *page, int bit_nr) in wait_on_page_bit_killable() argument
1344 wait_queue_head_t *q = page_waitqueue(page); in wait_on_page_bit_killable()
1345 return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED); in wait_on_page_bit_killable()
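
These two waiters are the building blocks for the higher-level helpers; as a sketch, waiting for writeback to finish is just a wait on PG_writeback. This mirrors the wait_on_page_writeback() wrapper and is shown only as an illustration.

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* Sketch: block until writeback of @page completes. */
    static void my_wait_for_writeback(struct page *page)
    {
            if (PageWriteback(page))
                    wait_on_page_bit(page, PG_writeback);
    }
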
1349 static int __wait_on_page_locked_async(struct page *page, in __wait_on_page_locked_async() argument
1352 struct wait_queue_head *q = page_waitqueue(page); in __wait_on_page_locked_async()
1355 wait->page = page; in __wait_on_page_locked_async()
1360 SetPageWaiters(page); in __wait_on_page_locked_async()
1362 ret = !trylock_page(page); in __wait_on_page_locked_async()
1364 ret = PageLocked(page); in __wait_on_page_locked_async()
1379 static int wait_on_page_locked_async(struct page *page, in wait_on_page_locked_async() argument
1382 if (!PageLocked(page)) in wait_on_page_locked_async()
1384 return __wait_on_page_locked_async(compound_head(page), wait, false); in wait_on_page_locked_async()
1389 * @page: The page to wait for.
1391 * The caller should hold a reference on @page. They expect the page to
1393 * (for example) by holding the reference while waiting for the page to
1395 * dereference @page.
1397 void put_and_wait_on_page_locked(struct page *page) in put_and_wait_on_page_locked() argument
1401 page = compound_head(page); in put_and_wait_on_page_locked()
1402 q = page_waitqueue(page); in put_and_wait_on_page_locked()
1403 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP); in put_and_wait_on_page_locked()
1407 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
1408 * @page: Page defining the wait queue of interest
1411 * Add an arbitrary @waiter to the wait queue for the nominated @page.
1413 void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter) in add_page_wait_queue() argument
1415 wait_queue_head_t *q = page_waitqueue(page); in add_page_wait_queue()
1420 SetPageWaiters(page); in add_page_wait_queue()
1449 * unlock_page - unlock a locked page
1450 * @page: the page
1452 * Unlocks the page and wakes up sleepers in wait_on_page_locked().
1463 void unlock_page(struct page *page) in unlock_page() argument
1466 page = compound_head(page); in unlock_page()
1467 VM_BUG_ON_PAGE(!PageLocked(page), page); in unlock_page()
1468 if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags)) in unlock_page()
1469 wake_up_page_bit(page, PG_locked); in unlock_page()
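
The lock/recheck pattern that unlock_page() pairs with shows up repeatedly below (find_lock_entry(), pagecache_get_page(), filemap_fault()): after sleeping for the lock, the page may have been truncated, so its mapping must be rechecked. A condensed sketch with a hypothetical helper name:

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* Sketch: lock a pinned page and confirm it still belongs to @mapping. */
    static struct page *my_lock_and_validate(struct address_space *mapping,
                                             struct page *page)
    {
            lock_page(page);
            if (unlikely(page->mapping != mapping)) {       /* truncated while we slept */
                    unlock_page(page);
                    put_page(page);
                    return NULL;                            /* caller should redo the lookup */
            }
            return page;
    }
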
1474 * end_page_writeback - end writeback against a page
1475 * @page: the page
1477 void end_page_writeback(struct page *page) in end_page_writeback() argument
1482 * shuffle a page marked for immediate reclaim is too mild to in end_page_writeback()
1484 * ever page writeback. in end_page_writeback()
1486 if (PageReclaim(page)) { in end_page_writeback()
1487 ClearPageReclaim(page); in end_page_writeback()
1488 rotate_reclaimable_page(page); in end_page_writeback()
1492 * Writeback does not hold a page reference of its own, relying in end_page_writeback()
1494 * But here we must make sure that the page is not freed and in end_page_writeback()
1497 get_page(page); in end_page_writeback()
1498 if (!test_clear_page_writeback(page)) in end_page_writeback()
1502 wake_up_page(page, PG_writeback); in end_page_writeback()
1503 put_page(page); in end_page_writeback()
1508 * After completing I/O on a page, call this routine to update the page
1511 void page_endio(struct page *page, bool is_write, int err) in page_endio() argument
1515 SetPageUptodate(page); in page_endio()
1517 ClearPageUptodate(page); in page_endio()
1518 SetPageError(page); in page_endio()
1520 unlock_page(page); in page_endio()
1525 SetPageError(page); in page_endio()
1526 mapping = page_mapping(page); in page_endio()
1530 end_page_writeback(page); in page_endio()
1536 * __lock_page - get a lock on the page, assuming we need to sleep to get it
1537 * @__page: the page to lock
1539 void __lock_page(struct page *__page) in __lock_page()
1541 struct page *page = compound_head(__page); in __lock_page() local
1542 wait_queue_head_t *q = page_waitqueue(page); in __lock_page()
1543 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, in __lock_page()
1548 int __lock_page_killable(struct page *__page) in __lock_page_killable()
1550 struct page *page = compound_head(__page); in __lock_page_killable() local
1551 wait_queue_head_t *q = page_waitqueue(page); in __lock_page_killable()
1552 return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, in __lock_page_killable()
1557 int __lock_page_async(struct page *page, struct wait_page_queue *wait) in __lock_page_async() argument
1559 return __wait_on_page_locked_async(page, wait, true); in __lock_page_async()
1564 * 1 - page is locked; mmap_lock is still held.
1565 * 0 - page is not locked.
1571 * with the page locked and the mmap_lock unperturbed.
1573 int __lock_page_or_retry(struct page *page, struct mm_struct *mm, in __lock_page_or_retry() argument
1586 wait_on_page_locked_killable(page); in __lock_page_or_retry()
1588 wait_on_page_locked(page); in __lock_page_or_retry()
1594 ret = __lock_page_killable(page); in __lock_page_or_retry()
1600 __lock_page(page); in __lock_page_or_retry()
1606 * page_cache_next_miss() - Find the next gap in the page cache.
1642 * page_cache_prev_miss() - Find the previous gap in the page cache.
1678 * find_get_entry - find and get a page cache entry
1680 * @index: The page cache index.
1682 * Looks up the page cache slot at @mapping & @offset. If there is a
1683 * page cache page, the head page is returned with an increased refcount.
1685 * If the slot holds a shadow entry of a previously evicted page, or a
1688 * Return: The head page or shadow entry, %NULL if nothing is found.
1690 struct page *find_get_entry(struct address_space *mapping, pgoff_t index) in find_get_entry()
1693 struct page *page; in find_get_entry() local
1698 page = xas_load(&xas); in find_get_entry()
1699 if (xas_retry(&xas, page)) in find_get_entry()
1702 * A shadow entry of a recently evicted page, or a swap entry from in find_get_entry()
1703 * shmem/tmpfs. Return it without attempting to raise page count. in find_get_entry()
1705 if (!page || xa_is_value(page)) in find_get_entry()
1708 if (!page_cache_get_speculative(page)) in find_get_entry()
1712 * Has the page moved or been split? in find_get_entry()
1716 if (unlikely(page != xas_reload(&xas))) { in find_get_entry()
1717 put_page(page); in find_get_entry()
1723 return page; in find_get_entry()
1727 * find_lock_entry - Locate and lock a page cache entry.
1729 * @index: The page cache index.
1731 * Looks up the page at @mapping & @index. If there is a page in the
1732 * cache, the head page is returned locked and with an increased refcount.
1734 * If the slot holds a shadow entry of a previously evicted page, or a
1738 * Return: The head page or shadow entry, %NULL if nothing is found.
1740 struct page *find_lock_entry(struct address_space *mapping, pgoff_t index) in find_lock_entry()
1742 struct page *page; in find_lock_entry() local
1745 page = find_get_entry(mapping, index); in find_lock_entry()
1746 if (page && !xa_is_value(page)) { in find_lock_entry()
1747 lock_page(page); in find_lock_entry()
1748 /* Has the page been truncated? */ in find_lock_entry()
1749 if (unlikely(page->mapping != mapping)) { in find_lock_entry()
1750 unlock_page(page); in find_lock_entry()
1751 put_page(page); in find_lock_entry()
1754 VM_BUG_ON_PAGE(!thp_contains(page, index), page); in find_lock_entry()
1756 return page; in find_lock_entry()
1760 * pagecache_get_page - Find and get a reference to a page.
1762 * @index: The page index.
1763 * @fgp_flags: %FGP flags modify how the page is returned.
1766 * Looks up the page cache entry at @mapping & @index.
1770 * * %FGP_ACCESSED - The page will be marked accessed.
1771 * * %FGP_LOCK - The page is returned locked.
1772 * * %FGP_HEAD - If the page is present and a THP, return the head page
1773 * rather than the exact page specified by the index.
1774 * * %FGP_CREAT - If no page is present then a new page is allocated using
1775 * @gfp_mask and added to the page cache and the VM's LRU list.
1776 * The page is returned locked and with an increased refcount.
1778 * page is already in cache. If the page was allocated, unlock it before
1780 * * %FGP_WRITE - The page will be written
1782 * * %FGP_NOWAIT - Don't get blocked by page lock
1787 * If there is a page cache page, it is returned with an increased refcount.
1789 * Return: The found page or %NULL otherwise.
1791 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, in pagecache_get_page()
1794 struct page *page; in pagecache_get_page() local
1797 page = find_get_entry(mapping, index); in pagecache_get_page()
1798 if (xa_is_value(page)) in pagecache_get_page()
1799 page = NULL; in pagecache_get_page()
1800 if (!page) in pagecache_get_page()
1805 if (!trylock_page(page)) { in pagecache_get_page()
1806 put_page(page); in pagecache_get_page()
1810 lock_page(page); in pagecache_get_page()
1813 /* Has the page been truncated? */ in pagecache_get_page()
1814 if (unlikely(page->mapping != mapping)) { in pagecache_get_page()
1815 unlock_page(page); in pagecache_get_page()
1816 put_page(page); in pagecache_get_page()
1819 VM_BUG_ON_PAGE(!thp_contains(page, index), page); in pagecache_get_page()
1823 mark_page_accessed(page); in pagecache_get_page()
1826 if (page_is_idle(page)) in pagecache_get_page()
1827 clear_page_idle(page); in pagecache_get_page()
1830 page = find_subpage(page, index); in pagecache_get_page()
1833 if (!page && (fgp_flags & FGP_CREAT)) { in pagecache_get_page()
1840 page = __page_cache_alloc(gfp_mask); in pagecache_get_page()
1841 if (!page) in pagecache_get_page()
1849 __SetPageReferenced(page); in pagecache_get_page()
1851 err = add_to_page_cache_lru(page, mapping, index, gfp_mask); in pagecache_get_page()
1853 put_page(page); in pagecache_get_page()
1854 page = NULL; in pagecache_get_page()
1860 * add_to_page_cache_lru locks the page, and for mmap we expect in pagecache_get_page()
1861 * an unlocked page. in pagecache_get_page()
1863 if (page && (fgp_flags & FGP_FOR_MMAP)) in pagecache_get_page()
1864 unlock_page(page); in pagecache_get_page()
1867 return page; in pagecache_get_page()
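
A sketch of the FGP flags documented above: FGP_LOCK | FGP_ACCESSED | FGP_CREAT is effectively what the find_or_create_page() wrapper passes; the helper name here is illustrative.

    #include <linux/pagemap.h>

    /* Sketch: return a locked page at @index, allocating one if necessary. */
    static struct page *my_find_or_create(struct address_space *mapping, pgoff_t index)
    {
            return pagecache_get_page(mapping, index,
                                      FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                      mapping_gfp_mask(mapping));
    }
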
1874 * @start: The starting page cache index
1884 * The search returns a group of mapping-contiguous page cache entries
1891 * If it finds a Transparent Huge Page, head or tail, find_get_entries()
1892 * stops at that page: the caller is likely to have a better way to handle
1893 * the compound page as a whole, and then skip its extent, than repeatedly
1900 struct page **entries, pgoff_t *indices) in find_get_entries()
1903 struct page *page; in find_get_entries() local
1910 xas_for_each(&xas, page, ULONG_MAX) { in find_get_entries()
1911 if (xas_retry(&xas, page)) in find_get_entries()
1914 * A shadow entry of a recently evicted page, a swap in find_get_entries()
1916 * without attempting to raise page count. in find_get_entries()
1918 if (xa_is_value(page)) in find_get_entries()
1921 if (!page_cache_get_speculative(page)) in find_get_entries()
1924 /* Has the page moved or been split? */ in find_get_entries()
1925 if (unlikely(page != xas_reload(&xas))) in find_get_entries()
1932 if (PageTransHuge(page) && !PageHuge(page)) { in find_get_entries()
1933 page = find_subpage(page, xas.xa_index); in find_get_entries()
1938 entries[ret] = page; in find_get_entries()
1943 put_page(page); in find_get_entries()
1954 * @start: The starting page index
1955 * @end: The final page index (inclusive)
1966 * We also update @start to index the next page for the traversal.
1974 struct page **pages) in find_get_pages_range()
1977 struct page *page; in find_get_pages_range() local
1984 xas_for_each(&xas, page, end) { in find_get_pages_range()
1985 if (xas_retry(&xas, page)) in find_get_pages_range()
1988 if (xa_is_value(page)) in find_get_pages_range()
1991 if (!page_cache_get_speculative(page)) in find_get_pages_range()
1994 /* Has the page moved or been split? */ in find_get_pages_range()
1995 if (unlikely(page != xas_reload(&xas))) in find_get_pages_range()
1998 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_range()
2005 put_page(page); in find_get_pages_range()
2011 * We come here when there is no page beyond @end. We take care to not in find_get_pages_range()
2013 * breaks the iteration when there is a page at index -1 but that is in find_get_pages_range()
2029 * @index: The starting page index
2039 unsigned int nr_pages, struct page **pages) in find_get_pages_contig()
2042 struct page *page; in find_get_pages_contig() local
2049 for (page = xas_load(&xas); page; page = xas_next(&xas)) { in find_get_pages_contig()
2050 if (xas_retry(&xas, page)) in find_get_pages_contig()
2056 if (xa_is_value(page)) in find_get_pages_contig()
2059 if (!page_cache_get_speculative(page)) in find_get_pages_contig()
2062 /* Has the page moved or been split? */ in find_get_pages_contig()
2063 if (unlikely(page != xas_reload(&xas))) in find_get_pages_contig()
2066 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_contig()
2071 put_page(page); in find_get_pages_contig()
2083 * @index: the starting page index
2084 * @end: The final page index (inclusive)
2090 * @tag. We update @index to index the next page for the traversal.
2096 struct page **pages) in find_get_pages_range_tag()
2099 struct page *page; in find_get_pages_range_tag() local
2106 xas_for_each_marked(&xas, page, end, tag) { in find_get_pages_range_tag()
2107 if (xas_retry(&xas, page)) in find_get_pages_range_tag()
2111 * is lockless so there is a window for page reclaim to evict in find_get_pages_range_tag()
2112 * a page we saw tagged. Skip over it. in find_get_pages_range_tag()
2114 if (xa_is_value(page)) in find_get_pages_range_tag()
2117 if (!page_cache_get_speculative(page)) in find_get_pages_range_tag()
2120 /* Has the page moved or been split? */ in find_get_pages_range_tag()
2121 if (unlikely(page != xas_reload(&xas))) in find_get_pages_range_tag()
2124 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_range_tag()
2131 put_page(page); in find_get_pages_range_tag()
2139 * iteration when there is a page at index -1 but that is already in find_get_pages_range_tag()
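
A sketch of how a writeback-style caller walks the pages returned by find_get_pages_range_tag(); real callers normally go through pagevec_lookup_range_tag(), so the helper below is only an illustration.

    #include <linux/pagemap.h>
    #include <linux/pagevec.h>
    #include <linux/sched.h>

    /* Sketch: visit every dirty page of @mapping in [start, end]. */
    static void my_walk_dirty_pages(struct address_space *mapping,
                                    pgoff_t start, pgoff_t end)
    {
            struct page *pages[PAGEVEC_SIZE];
            pgoff_t index = start;
            unsigned int i, nr;

            while ((nr = find_get_pages_range_tag(mapping, &index, end,
                                                  PAGECACHE_TAG_DIRTY,
                                                  PAGEVEC_SIZE, pages))) {
                    for (i = 0; i < nr; i++) {
                            /* lock, write back or inspect pages[i] here */
                            put_page(pages[i]);     /* each returned page carries a ref */
                    }
                    cond_resched();
            }
    }
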
2200 unsigned long offset; /* offset into pagecache page */ in generic_file_buffered_read()
2223 struct page *page; in generic_file_buffered_read() local
2235 page = find_get_page(mapping, index); in generic_file_buffered_read()
2236 if (!page) { in generic_file_buffered_read()
2242 page = find_get_page(mapping, index); in generic_file_buffered_read()
2243 if (unlikely(page == NULL)) in generic_file_buffered_read()
2246 if (PageReadahead(page)) { in generic_file_buffered_read()
2248 put_page(page); in generic_file_buffered_read()
2252 ra, filp, page, in generic_file_buffered_read()
2255 if (!PageUptodate(page)) { in generic_file_buffered_read()
2263 put_page(page); in generic_file_buffered_read()
2266 error = wait_on_page_locked_async(page, in generic_file_buffered_read()
2270 put_page(page); in generic_file_buffered_read()
2273 error = wait_on_page_locked_killable(page); in generic_file_buffered_read()
2277 if (PageUptodate(page)) in generic_file_buffered_read()
2286 if (!trylock_page(page)) in generic_file_buffered_read()
2289 if (!page->mapping) in generic_file_buffered_read()
2291 if (!mapping->a_ops->is_partially_uptodate(page, in generic_file_buffered_read()
2294 unlock_page(page); in generic_file_buffered_read()
2298 * i_size must be checked after we know the page is Uptodate. in generic_file_buffered_read()
2302 * part of the page is not copied back to userspace (unless in generic_file_buffered_read()
2309 put_page(page); in generic_file_buffered_read()
2313 /* nr is the maximum number of bytes to copy from this page */ in generic_file_buffered_read()
2318 put_page(page); in generic_file_buffered_read()
2324 /* If users can be writing to this page using arbitrary in generic_file_buffered_read()
2326 * before reading the page on the kernel side. in generic_file_buffered_read()
2329 flush_dcache_page(page); in generic_file_buffered_read()
2332 * When a sequential read accesses a page several times, in generic_file_buffered_read()
2336 mark_page_accessed(page); in generic_file_buffered_read()
2340 * Ok, we have the page, and it's up-to-date, so in generic_file_buffered_read()
2344 ret = copy_page_to_iter(page, offset, nr, iter); in generic_file_buffered_read()
2350 put_page(page); in generic_file_buffered_read()
2361 /* Get exclusive access to the page ... */ in generic_file_buffered_read()
2364 put_page(page); in generic_file_buffered_read()
2367 error = lock_page_async(page, iocb->ki_waitq); in generic_file_buffered_read()
2369 error = lock_page_killable(page); in generic_file_buffered_read()
2376 if (!page->mapping) { in generic_file_buffered_read()
2377 unlock_page(page); in generic_file_buffered_read()
2378 put_page(page); in generic_file_buffered_read()
2383 if (PageUptodate(page)) { in generic_file_buffered_read()
2384 unlock_page(page); in generic_file_buffered_read()
2390 unlock_page(page); in generic_file_buffered_read()
2391 put_page(page); in generic_file_buffered_read()
2399 ClearPageError(page); in generic_file_buffered_read()
2400 /* Start the actual read. The read will unlock the page. */ in generic_file_buffered_read()
2401 error = mapping->a_ops->readpage(filp, page); in generic_file_buffered_read()
2405 put_page(page); in generic_file_buffered_read()
2412 if (!PageUptodate(page)) { in generic_file_buffered_read()
2415 put_page(page); in generic_file_buffered_read()
2418 error = lock_page_async(page, iocb->ki_waitq); in generic_file_buffered_read()
2420 error = lock_page_killable(page); in generic_file_buffered_read()
2425 if (!PageUptodate(page)) { in generic_file_buffered_read()
2426 if (page->mapping == NULL) { in generic_file_buffered_read()
2430 unlock_page(page); in generic_file_buffered_read()
2431 put_page(page); in generic_file_buffered_read()
2434 unlock_page(page); in generic_file_buffered_read()
2439 unlock_page(page); in generic_file_buffered_read()
2446 put_page(page); in generic_file_buffered_read()
2452 * page.. in generic_file_buffered_read()
2454 page = page_cache_alloc(mapping); in generic_file_buffered_read()
2455 if (!page) { in generic_file_buffered_read()
2459 error = add_to_page_cache_lru(page, mapping, index, in generic_file_buffered_read()
2462 put_page(page); in generic_file_buffered_read()
2491 * that can use the page cache directly.
2566 * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
2568 * @page - the page to lock.
2572 * It differs in that it actually returns the page locked if it returns 1 and 0
2573 * if it couldn't lock the page. If we did have to drop the mmap_lock then fpin
2576 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page, in lock_page_maybe_drop_mmap() argument
2579 if (trylock_page(page)) in lock_page_maybe_drop_mmap()
2592 if (__lock_page_killable(page)) { in lock_page_maybe_drop_mmap()
2604 __lock_page(page); in lock_page_maybe_drop_mmap()
2610 * Synchronous readahead happens when we don't even find a page in the page
2662 * Asynchronous readahead happens when we find the page and PG_readahead,
2667 struct page *page) in do_async_mmap_readahead() argument
2682 if (PageReadahead(page)) { in do_async_mmap_readahead()
2685 page, offset, ra->ra_pages); in do_async_mmap_readahead()
2691 * filemap_fault - read in file data for page fault handling
2695 * mapped memory region to read in file data during a page fault.
2698 * it in the page cache, and handles the special cases reasonably without
2723 struct page *page; in filemap_fault() local
2731 * Do we have something in the page cache already? in filemap_fault()
2733 page = find_get_page(mapping, offset); in filemap_fault()
2734 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { in filemap_fault()
2736 * We found the page, so try async readahead before in filemap_fault()
2739 fpin = do_async_mmap_readahead(vmf, page); in filemap_fault()
2740 } else if (!page) { in filemap_fault()
2741 /* No page in the page cache at all */ in filemap_fault()
2747 page = pagecache_get_page(mapping, offset, in filemap_fault()
2750 if (!page) { in filemap_fault()
2757 if (!lock_page_maybe_drop_mmap(vmf, page, &fpin)) in filemap_fault()
2761 if (unlikely(compound_head(page)->mapping != mapping)) { in filemap_fault()
2762 unlock_page(page); in filemap_fault()
2763 put_page(page); in filemap_fault()
2766 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); in filemap_fault()
2769 * We have a locked page in the page cache, now we need to check in filemap_fault()
2772 if (unlikely(!PageUptodate(page))) in filemap_fault()
2781 unlock_page(page); in filemap_fault()
2786 * Found the page and have a reference on it. in filemap_fault()
2787 * We must recheck i_size under page lock. in filemap_fault()
2791 unlock_page(page); in filemap_fault()
2792 put_page(page); in filemap_fault()
2796 vmf->page = page; in filemap_fault()
2801 * Umm, take care of errors if the page isn't up-to-date. in filemap_fault()
2806 ClearPageError(page); in filemap_fault()
2808 error = mapping->a_ops->readpage(file, page); in filemap_fault()
2810 wait_on_page_locked(page); in filemap_fault()
2811 if (!PageUptodate(page)) in filemap_fault()
2816 put_page(page); in filemap_fault()
2828 * page. in filemap_fault()
2830 if (page) in filemap_fault()
2831 put_page(page); in filemap_fault()
2846 struct page *head, *page; in filemap_map_pages() local
2857 * Check for a locked page first, as a speculative in filemap_map_pages()
2858 * reference may adversely influence page migration. in filemap_map_pages()
2865 /* Has the page moved or been split? */ in filemap_map_pages()
2868 page = find_subpage(head, xas.xa_index); in filemap_map_pages()
2871 PageReadahead(page) || in filemap_map_pages()
2872 PageHWPoison(page)) in filemap_map_pages()
2891 if (alloc_set_pte(vmf, page)) in filemap_map_pages()
2900 /* Huge page is mapped? No need to proceed. */ in filemap_map_pages()
2911 struct page *page = vmf->page; in filemap_page_mkwrite() local
2917 lock_page(page); in filemap_page_mkwrite()
2918 if (page->mapping != inode->i_mapping) { in filemap_page_mkwrite()
2919 unlock_page(page); in filemap_page_mkwrite()
2924 * We mark the page dirty already here so that when freeze is in in filemap_page_mkwrite()
2926 * see the dirty page and writeprotect it again. in filemap_page_mkwrite()
2928 set_page_dirty(page); in filemap_page_mkwrite()
2929 wait_for_stable_page(page); in filemap_page_mkwrite()
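
These three handlers are what filesystems usually wire into their mmap implementation; the sketch below is modelled on the generic pattern (cf. generic_file_mmap()), with hypothetical names for the ops table and mmap method.

    #include <linux/fs.h>
    #include <linux/mm.h>

    static const struct vm_operations_struct my_file_vm_ops = {
            .fault          = filemap_fault,
            .map_pages      = filemap_map_pages,
            .page_mkwrite   = filemap_page_mkwrite,
    };

    /* Sketch: a file_operations->mmap method using the generic fault handlers. */
    static int my_file_mmap(struct file *file, struct vm_area_struct *vma)
    {
            file_accessed(file);
            vma->vm_ops = &my_file_vm_ops;
            return 0;
    }
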
2982 static struct page *wait_on_page_read(struct page *page) in wait_on_page_read() argument
2984 if (!IS_ERR(page)) { in wait_on_page_read()
2985 wait_on_page_locked(page); in wait_on_page_read()
2986 if (!PageUptodate(page)) { in wait_on_page_read()
2987 put_page(page); in wait_on_page_read()
2988 page = ERR_PTR(-EIO); in wait_on_page_read()
2991 return page; in wait_on_page_read()
2994 static struct page *do_read_cache_page(struct address_space *mapping, in do_read_cache_page()
2996 int (*filler)(void *, struct page *), in do_read_cache_page() argument
3000 struct page *page; in do_read_cache_page() local
3003 page = find_get_page(mapping, index); in do_read_cache_page()
3004 if (!page) { in do_read_cache_page()
3005 page = __page_cache_alloc(gfp); in do_read_cache_page()
3006 if (!page) in do_read_cache_page()
3008 err = add_to_page_cache_lru(page, mapping, index, gfp); in do_read_cache_page()
3010 put_page(page); in do_read_cache_page()
3019 err = filler(data, page); in do_read_cache_page()
3021 err = mapping->a_ops->readpage(data, page); in do_read_cache_page()
3024 put_page(page); in do_read_cache_page()
3028 page = wait_on_page_read(page); in do_read_cache_page()
3029 if (IS_ERR(page)) in do_read_cache_page()
3030 return page; in do_read_cache_page()
3033 if (PageUptodate(page)) in do_read_cache_page()
3037 * Page is not up to date and may be locked due to one of the following in do_read_cache_page()
3038 * case a: Page is being filled and the page lock is held in do_read_cache_page()
3039 * case b: Read/write error clearing the page uptodate status in do_read_cache_page()
3040 * case c: Truncation in progress (page locked) in do_read_cache_page()
3043 * Case a, the page will be up to date when the page is unlocked. in do_read_cache_page()
3044 * There is no need to serialise on the page lock here as the page in do_read_cache_page()
3046 * page is truncated, the data is still valid if PageUptodate as in do_read_cache_page()
3048 * Case b, the page will not be up to date in do_read_cache_page()
3049 * Case c, the page may be truncated but in itself, the data may still in do_read_cache_page()
3051 * operation must restart if the page is not uptodate on unlock but in do_read_cache_page()
3052 * otherwise serialising on page lock to stabilise the mapping gives in do_read_cache_page()
3053 * no additional guarantees to the caller as the page lock is in do_read_cache_page()
3055 * Case d, similar to truncation. If reclaim holds the page lock, it in do_read_cache_page()
3058 * no need to serialise with page lock. in do_read_cache_page()
3060 * As the page lock gives no additional guarantee, we optimistically in do_read_cache_page()
3061 * wait on the page to be unlocked and check if it's up to date and in do_read_cache_page()
3062 * use the page if it is. Otherwise, the page lock is required to in do_read_cache_page()
3065 * wait on the same page for IO to complete. in do_read_cache_page()
3067 wait_on_page_locked(page); in do_read_cache_page()
3068 if (PageUptodate(page)) in do_read_cache_page()
3072 lock_page(page); in do_read_cache_page()
3075 if (!page->mapping) { in do_read_cache_page()
3076 unlock_page(page); in do_read_cache_page()
3077 put_page(page); in do_read_cache_page()
3081 /* Someone else locked and filled the page in a very small window */ in do_read_cache_page()
3082 if (PageUptodate(page)) { in do_read_cache_page()
3083 unlock_page(page); in do_read_cache_page()
3090 * Clear page error before actual read, PG_error will be in do_read_cache_page()
3091 * set again if read page fails. in do_read_cache_page()
3093 ClearPageError(page); in do_read_cache_page()
3097 mark_page_accessed(page); in do_read_cache_page()
3098 return page; in do_read_cache_page()
3102 * read_cache_page - read into page cache, fill it if needed
3103 * @mapping: the page's address_space
3104 * @index: the page index
3106 * @data: first arg to filler(data, page) function, often left as NULL
3108 * Read into the page cache. If a page already exists, and PageUptodate() is
3109 * not set, try to fill the page and wait for it to become unlocked.
3111 * If the page does not get brought uptodate, return -EIO.
3113 * Return: up to date page on success, ERR_PTR() on failure.
3115 struct page *read_cache_page(struct address_space *mapping, in read_cache_page()
3117 int (*filler)(void *, struct page *), in read_cache_page() argument
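
A sketch of calling read_cache_page() with a NULL filler, which makes do_read_cache_page() above fall back to a_ops->readpage() (the same thing the read_mapping_page() wrapper does); the helper name is hypothetical.

    #include <linux/err.h>
    #include <linux/pagemap.h>

    /* Sketch: return an up-to-date, unlocked page at @index or an ERR_PTR(). */
    static struct page *my_read_page(struct address_space *mapping, pgoff_t index)
    {
            struct page *page = read_cache_page(mapping, index, NULL, NULL);

            if (IS_ERR(page))
                    return page;    /* typically ERR_PTR(-EIO) */
            /* caller uses the page and drops it with put_page() when done */
            return page;
    }
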
3126 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3127 * @mapping: the page's address_space
3128 * @index: the page index
3129 * @gfp: the page allocator flags to use if allocating
3132 * any new page allocations done using the specified allocation flags.
3134 * If the page does not get brought uptodate, return -EIO.
3136 * Return: up to date page on success, ERR_PTR() on failure.
3138 struct page *read_cache_page_gfp(struct address_space *mapping, in read_cache_page_gfp()
3148 struct page **pagep, void **fsdata) in pagecache_write_begin()
3159 struct page *page, void *fsdata) in pagecache_write_end() argument
3163 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); in pagecache_write_end()
3168 * Warn about a page cache invalidation failure during a direct I/O write.
3182 …pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision… in dio_warn_stale_pagecache()
3216 * the new data. We invalidate clean cached page from the region we're in generic_file_direct_write()
3223 * If a page can not be invalidated, return 0 to fall back in generic_file_direct_write()
3271 * Find or create a page at the given pagecache position. Return the locked
3272 * page. This function is specifically for buffered writes.
3274 struct page *grab_cache_page_write_begin(struct address_space *mapping, in grab_cache_page_write_begin()
3277 struct page *page; in grab_cache_page_write_begin() local
3283 page = pagecache_get_page(mapping, index, fgp_flags, in grab_cache_page_write_begin()
3285 if (page) in grab_cache_page_write_begin()
3286 wait_for_stable_page(page); in grab_cache_page_write_begin()
3288 return page; in grab_cache_page_write_begin()
3302 struct page *page; in generic_perform_write() local
3303 unsigned long offset; /* Offset into pagecache page */ in generic_perform_write()
3304 unsigned long bytes; /* Bytes to write to page */ in generic_perform_write()
3314 * Bring in the user page that we will copy from _first_. in generic_perform_write()
3316 * same page as we're writing to, without it being marked in generic_perform_write()
3334 &page, &fsdata); in generic_perform_write()
3339 flush_dcache_page(page); in generic_perform_write()
3341 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); in generic_perform_write()
3342 flush_dcache_page(page); in generic_perform_write()
3345 page, fsdata); in generic_perform_write()
3406 /* We can write back this queue in page reclaim */ in __generic_file_write_iter()
3425 * page-cache pages correctly). in __generic_file_write_iter()
3443 * We need to ensure that the page cache pages are written to in __generic_file_write_iter()
3504 * try_to_release_page() - release old fs-specific metadata on a page
3506 * @page: the page which the kernel is trying to free
3509 * The address_space is to try to release any data against the page
3510 * (presumably at page->private).
3512 * This may also be called if PG_fscache is set on a page, indicating that the
3513 * page is known to the local caching routines.
3516 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
3520 int try_to_release_page(struct page *page, gfp_t gfp_mask) in try_to_release_page() argument
3522 struct address_space * const mapping = page->mapping; in try_to_release_page()
3524 BUG_ON(!PageLocked(page)); in try_to_release_page()
3525 if (PageWriteback(page)) in try_to_release_page()
3529 return mapping->a_ops->releasepage(page, gfp_mask); in try_to_release_page()
3530 return try_to_free_buffers(page); in try_to_release_page()
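
A sketch of the caller side (truncation and invalidation paths use this shape): filesystem-private data must be released before a locked page can leave the cache; the helper name is illustrative.

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* Sketch: strip fs-private data (e.g. buffer heads) from a locked page. */
    static bool my_release_private(struct page *page)
    {
            if (!page_has_private(page))
                    return true;
            /* Fails if the fs still needs its private data or I/O is in flight. */
            return try_to_release_page(page, GFP_KERNEL);
    }
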