Lines Matching full:page

57 * finished 'unifying' the page and buffer cache and SMP-threaded the
58 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
115 struct page *page, void **shadowp) in page_cache_tree_insert() argument
121 error = __radix_tree_create(&mapping->i_pages, page->index, 0, in page_cache_tree_insert()
137 __radix_tree_replace(&mapping->i_pages, node, slot, page, in page_cache_tree_insert()
144 struct page *page, void *shadow) in page_cache_tree_delete() argument
149 nr = PageHuge(page) ? 1 : hpage_nr_pages(page); in page_cache_tree_delete()
151 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_cache_tree_delete()
152 VM_BUG_ON_PAGE(PageTail(page), page); in page_cache_tree_delete()
153 VM_BUG_ON_PAGE(nr != 1 && shadow, page); in page_cache_tree_delete()
159 __radix_tree_lookup(&mapping->i_pages, page->index + i, in page_cache_tree_delete()
162 VM_BUG_ON_PAGE(!node && nr != 1, page); in page_cache_tree_delete()
169 page->mapping = NULL; in page_cache_tree_delete()
170 /* Leave page->index set: truncation lookup relies upon it */ in page_cache_tree_delete()
186 struct page *page) in unaccount_page_cache_page() argument
193 * stale data around in the cleancache once our page is gone in unaccount_page_cache_page()
195 if (PageUptodate(page) && PageMappedToDisk(page)) in unaccount_page_cache_page()
196 cleancache_put_page(page); in unaccount_page_cache_page()
198 cleancache_invalidate_page(mapping, page); in unaccount_page_cache_page()
200 VM_BUG_ON_PAGE(PageTail(page), page); in unaccount_page_cache_page()
201 VM_BUG_ON_PAGE(page_mapped(page), page); in unaccount_page_cache_page()
202 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) { in unaccount_page_cache_page()
205 pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n", in unaccount_page_cache_page()
206 current->comm, page_to_pfn(page)); in unaccount_page_cache_page()
207 dump_page(page, "still mapped when deleted"); in unaccount_page_cache_page()
211 mapcount = page_mapcount(page); in unaccount_page_cache_page()
213 page_count(page) >= mapcount + 2) { in unaccount_page_cache_page()
216 * a good bet that actually the page is unmapped, in unaccount_page_cache_page()
218 * some other bad page check should catch it later. in unaccount_page_cache_page()
220 page_mapcount_reset(page); in unaccount_page_cache_page()
221 page_ref_sub(page, mapcount); in unaccount_page_cache_page()
225 /* hugetlb pages do not participate in page cache accounting. */ in unaccount_page_cache_page()
226 if (PageHuge(page)) in unaccount_page_cache_page()
229 nr = hpage_nr_pages(page); in unaccount_page_cache_page()
231 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr); in unaccount_page_cache_page()
232 if (PageSwapBacked(page)) { in unaccount_page_cache_page()
233 __mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr); in unaccount_page_cache_page()
234 if (PageTransHuge(page)) in unaccount_page_cache_page()
235 __dec_node_page_state(page, NR_SHMEM_THPS); in unaccount_page_cache_page()
237 VM_BUG_ON_PAGE(PageTransHuge(page), page); in unaccount_page_cache_page()
241 * At this point page must be either written or cleaned by in unaccount_page_cache_page()
242 * truncate. Dirty page here signals a bug and loss of in unaccount_page_cache_page()
245 * This fixes dirty accounting after removing the page entirely in unaccount_page_cache_page()
247 * page and anyway will be cleared before returning page into in unaccount_page_cache_page()
250 if (WARN_ON_ONCE(PageDirty(page))) in unaccount_page_cache_page()
251 account_page_cleaned(page, mapping, inode_to_wb(mapping->host)); in unaccount_page_cache_page()
255 * Delete a page from the page cache and free it. Caller has to make
256 * sure the page is locked and that nobody else uses it - or that usage
259 void __delete_from_page_cache(struct page *page, void *shadow) in __delete_from_page_cache() argument
261 struct address_space *mapping = page->mapping; in __delete_from_page_cache()
263 trace_mm_filemap_delete_from_page_cache(page); in __delete_from_page_cache()
265 unaccount_page_cache_page(mapping, page); in __delete_from_page_cache()
266 page_cache_tree_delete(mapping, page, shadow); in __delete_from_page_cache()
270 struct page *page) in page_cache_free_page() argument
272 void (*freepage)(struct page *); in page_cache_free_page()
276 freepage(page); in page_cache_free_page()
278 if (PageTransHuge(page) && !PageHuge(page)) { in page_cache_free_page()
279 page_ref_sub(page, HPAGE_PMD_NR); in page_cache_free_page()
280 VM_BUG_ON_PAGE(page_count(page) <= 0, page); in page_cache_free_page()
282 put_page(page); in page_cache_free_page()
287 * delete_from_page_cache - delete page from page cache
288 * @page: the page which the kernel is trying to remove from page cache
290 * This must be called only on pages that have been verified to be in the page
291 * cache and locked. It will never put the page into the free list, the caller
292 * has a reference on the page.
294 void delete_from_page_cache(struct page *page) in delete_from_page_cache() argument
296 struct address_space *mapping = page_mapping(page); in delete_from_page_cache()
299 BUG_ON(!PageLocked(page)); in delete_from_page_cache()
301 __delete_from_page_cache(page, NULL); in delete_from_page_cache()
304 page_cache_free_page(mapping, page); in delete_from_page_cache()
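
As the delete_from_page_cache() comment above says, the page must already be locked and known to be in the page cache, and the function never frees the page itself; the caller keeps its own reference. A minimal caller-side sketch, with example_remove_page() as a hypothetical helper name:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical helper: detach a page we hold locked and referenced. */
static void example_remove_page(struct page *page)
{
	if (page->mapping)			/* not already truncated? */
		delete_from_page_cache(page);	/* drops the pagecache reference */
	unlock_page(page);
	put_page(page);				/* drop our own reference */
}
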
309 * page_cache_tree_delete_batch - delete several pages from page cache
314 * from the mapping. The function expects @pvec to be sorted by page index.
330 struct page *page; in page_cache_tree_delete_batch() local
337 page = radix_tree_deref_slot_protected(slot, in page_cache_tree_delete_batch()
339 if (radix_tree_exceptional_entry(page)) in page_cache_tree_delete_batch()
343 * Some page got inserted in our range? Skip it. We in page_cache_tree_delete_batch()
347 if (page != pvec->pages[i]) in page_cache_tree_delete_batch()
349 WARN_ON_ONCE(!PageLocked(page)); in page_cache_tree_delete_batch()
350 if (PageTransHuge(page) && !PageHuge(page)) in page_cache_tree_delete_batch()
352 page->mapping = NULL; in page_cache_tree_delete_batch()
354 * Leave page->index set: truncation lookup relies in page_cache_tree_delete_batch()
427 * these two operations is that if a dirty page/buffer is encountered, it must
484 * filemap_range_has_page - check if a page exists in range.
489 * Find at least one page in the range supplied, usually used to check if
497 struct page *page; in filemap_range_has_page() local
505 if (!find_get_pages_range(mapping, &index, end, 1, &page)) in filemap_range_has_page()
507 put_page(page); in filemap_range_has_page()
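
filemap_range_has_page() above is the cheap existence check a caller can use before deciding whether a range needs writeback or invalidation. A hedged usage sketch; everything except filemap_range_has_page() itself is a made-up name:

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical: is any part of [pos, pos + count) present in the page cache? */
static bool example_range_is_cached(struct address_space *mapping,
				    loff_t pos, size_t count)
{
	if (!count)
		return false;
	return filemap_range_has_page(mapping, pos, pos + count - 1);
}
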
533 struct page *page = pvec.pages[i]; in __filemap_fdatawait_range() local
535 wait_on_page_writeback(page); in __filemap_fdatawait_range()
536 ClearPageError(page); in __filemap_fdatawait_range()
790 * replace_page_cache_page - replace a pagecache page with a new one
791 * @old: page to be replaced
792 * @new: page to replace with
795 * This function replaces a page in the pagecache with a new one. On
796 * success it acquires the pagecache reference for the new page and
797 * drops it for the old page. Both the old and new pages must be
798 * locked. This function does not add the new page to the LRU, the
804 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) in replace_page_cache_page()
815 void (*freepage)(struct page *); in replace_page_cache_page()
831 * hugetlb pages do not participate in page cache accounting. in replace_page_cache_page()
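
Per the replace_page_cache_page() comment above, both pages must be locked and the new page is not put on the LRU; the caller does that itself afterwards. A hedged sketch of that calling convention (example_swap_pages() is a placeholder name, and the lru_cache_add_file() follow-up assumes a file-backed page):

#include <linux/pagemap.h>
#include <linux/swap.h>		/* lru_cache_add_file() */

/* Both @old and @new must be locked by the caller. */
static int example_swap_pages(struct page *old, struct page *new)
{
	int err = replace_page_cache_page(old, new, GFP_KERNEL);

	if (!err)
		lru_cache_add_file(new);	/* caller adds @new to the LRU */
	return err;
}
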
849 static int __add_to_page_cache_locked(struct page *page, in __add_to_page_cache_locked() argument
854 int huge = PageHuge(page); in __add_to_page_cache_locked()
858 VM_BUG_ON_PAGE(!PageLocked(page), page); in __add_to_page_cache_locked()
859 VM_BUG_ON_PAGE(PageSwapBacked(page), page); in __add_to_page_cache_locked()
862 error = mem_cgroup_try_charge(page, current->mm, in __add_to_page_cache_locked()
871 mem_cgroup_cancel_charge(page, memcg, false); in __add_to_page_cache_locked()
875 get_page(page); in __add_to_page_cache_locked()
876 page->mapping = mapping; in __add_to_page_cache_locked()
877 page->index = offset; in __add_to_page_cache_locked()
880 error = page_cache_tree_insert(mapping, page, shadowp); in __add_to_page_cache_locked()
885 /* hugetlb pages do not participate in page cache accounting. */ in __add_to_page_cache_locked()
887 __inc_node_page_state(page, NR_FILE_PAGES); in __add_to_page_cache_locked()
890 mem_cgroup_commit_charge(page, memcg, false, false); in __add_to_page_cache_locked()
891 trace_mm_filemap_add_to_page_cache(page); in __add_to_page_cache_locked()
894 page->mapping = NULL; in __add_to_page_cache_locked()
895 /* Leave page->index set: truncation relies upon it */ in __add_to_page_cache_locked()
898 mem_cgroup_cancel_charge(page, memcg, false); in __add_to_page_cache_locked()
899 put_page(page); in __add_to_page_cache_locked()
904 * add_to_page_cache_locked - add a locked page to the pagecache
905 * @page: page to add
906 * @mapping: the page's address_space
907 * @offset: page index
908 * @gfp_mask: page allocation mode
910 * This function is used to add a page to the pagecache. It must be locked.
911 * This function does not add the page to the LRU. The caller must do that.
913 int add_to_page_cache_locked(struct page *page, struct address_space *mapping, in add_to_page_cache_locked() argument
916 return __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_locked()
921 int add_to_page_cache_lru(struct page *page, struct address_space *mapping, in add_to_page_cache_lru() argument
927 __SetPageLocked(page); in add_to_page_cache_lru()
928 ret = __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_lru()
931 __ClearPageLocked(page); in add_to_page_cache_lru()
934 * The page might have been evicted from cache only in add_to_page_cache_lru()
936 * any other repeatedly accessed page. in add_to_page_cache_lru()
943 SetPageActive(page); in add_to_page_cache_lru()
944 workingset_activation(page); in add_to_page_cache_lru()
946 ClearPageActive(page); in add_to_page_cache_lru()
947 lru_cache_add(page); in add_to_page_cache_lru()
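
The usual way a read path populates the cache is to allocate a page, insert it locked with add_to_page_cache_lru(), and let ->readpage() unlock it once the I/O finishes; page_cache_read() further down in this file follows the same shape. A minimal sketch with a hypothetical example_read_one():

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical: read the page at @index of @mapping via ->readpage(). */
static int example_read_one(struct file *file, struct address_space *mapping,
			    pgoff_t index, gfp_t gfp)
{
	struct page *page = __page_cache_alloc(gfp);
	int err;

	if (!page)
		return -ENOMEM;

	err = add_to_page_cache_lru(page, mapping, index, gfp);
	if (!err)
		err = mapping->a_ops->readpage(file, page);	/* unlocks the page */
	else if (err == -EEXIST)
		err = 0;	/* raced with another insertion; not an error */

	put_page(page);
	return err;
}
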
954 struct page *__page_cache_alloc(gfp_t gfp) in __page_cache_alloc()
957 struct page *page; in __page_cache_alloc() local
964 page = __alloc_pages_node(n, gfp, 0); in __page_cache_alloc()
965 } while (!page && read_mems_allowed_retry(cpuset_mems_cookie)); in __page_cache_alloc()
967 return page; in __page_cache_alloc()
980 * sure the appropriate page became available, this saves space
988 static wait_queue_head_t *page_waitqueue(struct page *page) in page_waitqueue() argument
990 return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)]; in page_waitqueue()
1005 struct page *page; member
1011 struct page *page; member
1022 if (wait_page->page != key->page) in wake_page_function()
1030 if (test_bit(key->bit_nr, &key->page->flags)) in wake_page_function()
1036 static void wake_up_page_bit(struct page *page, int bit_nr) in wake_up_page_bit() argument
1038 wait_queue_head_t *q = page_waitqueue(page); in wake_up_page_bit()
1043 key.page = page; in wake_up_page_bit()
1070 * hash, so in that case check for a page match. That prevents a long- in wake_up_page_bit()
1073 * It is still possible to miss a case here, when we woke page waiters in wake_up_page_bit()
1075 * page waiters. in wake_up_page_bit()
1078 ClearPageWaiters(page); in wake_up_page_bit()
1081 * our page waiters, but the hashed waitqueue has waiters for in wake_up_page_bit()
1090 static void wake_up_page(struct page *page, int bit) in wake_up_page() argument
1092 if (!PageWaiters(page)) in wake_up_page()
1094 wake_up_page_bit(page, bit); in wake_up_page()
1098 struct page *page, int bit_nr, int state, bool lock) in wait_on_page_bit_common() argument
1107 wait_page.page = page; in wait_on_page_bit_common()
1115 SetPageWaiters(page); in wait_on_page_bit_common()
1122 if (likely(test_bit(bit_nr, &page->flags))) { in wait_on_page_bit_common()
1127 if (!test_and_set_bit_lock(bit_nr, &page->flags)) in wait_on_page_bit_common()
1130 if (!test_bit(bit_nr, &page->flags)) in wait_on_page_bit_common()
1153 void wait_on_page_bit(struct page *page, int bit_nr) in wait_on_page_bit() argument
1155 wait_queue_head_t *q = page_waitqueue(page); in wait_on_page_bit()
1156 wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, false); in wait_on_page_bit()
1160 int wait_on_page_bit_killable(struct page *page, int bit_nr) in wait_on_page_bit_killable() argument
1162 wait_queue_head_t *q = page_waitqueue(page); in wait_on_page_bit_killable()
1163 return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, false); in wait_on_page_bit_killable()
1168 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
1169 * @page: Page defining the wait queue of interest
1172 * Add an arbitrary @waiter to the wait queue for the nominated @page.
1174 void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter) in add_page_wait_queue() argument
1176 wait_queue_head_t *q = page_waitqueue(page); in add_page_wait_queue()
1181 SetPageWaiters(page); in add_page_wait_queue()
1210 * unlock_page - unlock a locked page
1211 * @page: the page
1213 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
1224 void unlock_page(struct page *page) in unlock_page() argument
1227 page = compound_head(page); in unlock_page()
1228 VM_BUG_ON_PAGE(!PageLocked(page), page); in unlock_page()
1229 if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags)) in unlock_page()
1230 wake_up_page_bit(page, PG_locked); in unlock_page()
1235 * end_page_writeback - end writeback against a page
1236 * @page: the page
1238 void end_page_writeback(struct page *page) in end_page_writeback() argument
1243 * shuffle a page marked for immediate reclaim is too mild to in end_page_writeback()
1245 * every page writeback. in end_page_writeback()
1247 if (PageReclaim(page)) { in end_page_writeback()
1248 ClearPageReclaim(page); in end_page_writeback()
1249 rotate_reclaimable_page(page); in end_page_writeback()
1252 if (!test_clear_page_writeback(page)) in end_page_writeback()
1256 wake_up_page(page, PG_writeback); in end_page_writeback()
1261 * After completing I/O on a page, call this routine to update the page
1264 void page_endio(struct page *page, bool is_write, int err) in page_endio() argument
1268 SetPageUptodate(page); in page_endio()
1270 ClearPageUptodate(page); in page_endio()
1271 SetPageError(page); in page_endio()
1273 unlock_page(page); in page_endio()
1278 SetPageError(page); in page_endio()
1279 mapping = page_mapping(page); in page_endio()
1283 end_page_writeback(page); in page_endio()
1289 * __lock_page - get a lock on the page, assuming we need to sleep to get it
1290 * @__page: the page to lock
1292 void __lock_page(struct page *__page) in __lock_page()
1294 struct page *page = compound_head(__page); in __lock_page() local
1295 wait_queue_head_t *q = page_waitqueue(page); in __lock_page()
1296 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, true); in __lock_page()
1300 int __lock_page_killable(struct page *__page) in __lock_page_killable()
1302 struct page *page = compound_head(__page); in __lock_page_killable() local
1303 wait_queue_head_t *q = page_waitqueue(page); in __lock_page_killable()
1304 return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, true); in __lock_page_killable()
1310 * 1 - page is locked; mmap_sem is still held.
1311 * 0 - page is not locked.
1317 * with the page locked and the mmap_sem unperturbed.
1319 int __lock_page_or_retry(struct page *page, struct mm_struct *mm, in __lock_page_or_retry() argument
1332 wait_on_page_locked_killable(page); in __lock_page_or_retry()
1334 wait_on_page_locked(page); in __lock_page_or_retry()
1340 ret = __lock_page_killable(page); in __lock_page_or_retry()
1346 __lock_page(page); in __lock_page_or_retry()
1378 struct page *page; in page_cache_next_hole() local
1380 page = radix_tree_lookup(&mapping->i_pages, index); in page_cache_next_hole()
1381 if (!page || radix_tree_exceptional_entry(page)) in page_cache_next_hole()
1419 struct page *page; in page_cache_prev_hole() local
1421 page = radix_tree_lookup(&mapping->i_pages, index); in page_cache_prev_hole()
1422 if (!page || radix_tree_exceptional_entry(page)) in page_cache_prev_hole()
1434 * find_get_entry - find and get a page cache entry
1436 * @offset: the page cache index
1438 * Looks up the page cache slot at @mapping & @offset. If there is a
1439 * page cache page, it is returned with an increased refcount.
1441 * If the slot holds a shadow entry of a previously evicted page, or a
1446 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) in find_get_entry()
1449 struct page *head, *page; in find_get_entry() local
1453 page = NULL; in find_get_entry()
1456 page = radix_tree_deref_slot(pagep); in find_get_entry()
1457 if (unlikely(!page)) in find_get_entry()
1459 if (radix_tree_exception(page)) { in find_get_entry()
1460 if (radix_tree_deref_retry(page)) in find_get_entry()
1463 * A shadow entry of a recently evicted page, in find_get_entry()
1465 * it without attempting to raise page count. in find_get_entry()
1470 head = compound_head(page); in find_get_entry()
1474 /* The page was split under us? */ in find_get_entry()
1475 if (compound_head(page) != head) { in find_get_entry()
1481 * Has the page moved? in find_get_entry()
1485 if (unlikely(page != *pagep)) { in find_get_entry()
1493 return page; in find_get_entry()
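
A caller of find_get_entry() has to cope with the exceptional (shadow/DAX/swap) entries the comment above mentions: they are not struct pages and carry no reference. A hedged sketch with a hypothetical example_page_is_resident():

#include <linux/pagemap.h>
#include <linux/radix-tree.h>

/* Hypothetical: does @offset currently hold a real, resident page? */
static bool example_page_is_resident(struct address_space *mapping,
				     pgoff_t offset)
{
	struct page *entry = find_get_entry(mapping, offset);

	if (!entry)
		return false;
	if (radix_tree_exceptional_entry(entry))
		return false;	/* shadow/DAX entry, not a struct page */

	put_page(entry);	/* find_get_entry() elevated the refcount */
	return true;
}
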
1498 * find_lock_entry - locate, pin and lock a page cache entry
1500 * @offset: the page cache index
1502 * Looks up the page cache slot at @mapping & @offset. If there is a
1503 * page cache page, it is returned locked and with an increased
1506 * If the slot holds a shadow entry of a previously evicted page, or a
1513 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset) in find_lock_entry()
1515 struct page *page; in find_lock_entry() local
1518 page = find_get_entry(mapping, offset); in find_lock_entry()
1519 if (page && !radix_tree_exception(page)) { in find_lock_entry()
1520 lock_page(page); in find_lock_entry()
1521 /* Has the page been truncated? */ in find_lock_entry()
1522 if (unlikely(page_mapping(page) != mapping)) { in find_lock_entry()
1523 unlock_page(page); in find_lock_entry()
1524 put_page(page); in find_lock_entry()
1527 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); in find_lock_entry()
1529 return page; in find_lock_entry()
1534 * pagecache_get_page - find and get a page reference
1536 * @offset: the page index
1538 * @gfp_mask: gfp mask to use for the page cache data page allocation
1540 * Looks up the page cache slot at @mapping & @offset.
1542 * FGP flags modify how the page is returned. in pagecache_get_page()
1546 * - FGP_ACCESSED: the page will be marked accessed
1547 * - FGP_LOCK: Page is returned locked in pagecache_get_page()
1548 * - FGP_CREAT: If page is not present then a new page is allocated using
1549 * @gfp_mask and added to the page cache and the VM's LRU
1550 * list. The page is returned locked and with an increased
1556 * If there is a page cache page, it is returned with an increased refcount.
1558 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, in pagecache_get_page()
1561 struct page *page; in pagecache_get_page() local
1564 page = find_get_entry(mapping, offset); in pagecache_get_page()
1565 if (radix_tree_exceptional_entry(page)) in pagecache_get_page()
1566 page = NULL; in pagecache_get_page()
1567 if (!page) in pagecache_get_page()
1572 if (!trylock_page(page)) { in pagecache_get_page()
1573 put_page(page); in pagecache_get_page()
1577 lock_page(page); in pagecache_get_page()
1580 /* Has the page been truncated? */ in pagecache_get_page()
1581 if (unlikely(page->mapping != mapping)) { in pagecache_get_page()
1582 unlock_page(page); in pagecache_get_page()
1583 put_page(page); in pagecache_get_page()
1586 VM_BUG_ON_PAGE(page->index != offset, page); in pagecache_get_page()
1589 if (page && (fgp_flags & FGP_ACCESSED)) in pagecache_get_page()
1590 mark_page_accessed(page); in pagecache_get_page()
1593 if (!page && (fgp_flags & FGP_CREAT)) { in pagecache_get_page()
1600 page = __page_cache_alloc(gfp_mask); in pagecache_get_page()
1601 if (!page) in pagecache_get_page()
1609 __SetPageReferenced(page); in pagecache_get_page()
1611 err = add_to_page_cache_lru(page, mapping, offset, gfp_mask); in pagecache_get_page()
1613 put_page(page); in pagecache_get_page()
1614 page = NULL; in pagecache_get_page()
1620 return page; in pagecache_get_page()
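
Taken together the FGP flags let one pagecache_get_page() call express find-or-create-locked semantics; grab_cache_page_write_begin() near the end of this file is the in-tree example for buffered writes. A small sketch with a hypothetical wrapper name:

#include <linux/pagemap.h>

/* Hypothetical: find or create the page at @index, returned locked. */
static struct page *example_get_locked_page(struct address_space *mapping,
					    pgoff_t index)
{
	return pagecache_get_page(mapping, index,
				  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				  mapping_gfp_mask(mapping));
}
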
1627 * @start: The starting page cache index
1637 * The search returns a group of mapping-contiguous page cache entries
1649 struct page **entries, pgoff_t *indices) in find_get_entries()
1660 struct page *head, *page; in find_get_entries() local
1662 page = radix_tree_deref_slot(slot); in find_get_entries()
1663 if (unlikely(!page)) in find_get_entries()
1665 if (radix_tree_exception(page)) { in find_get_entries()
1666 if (radix_tree_deref_retry(page)) { in find_get_entries()
1671 * A shadow entry of a recently evicted page, a swap in find_get_entries()
1673 * without attempting to raise page count. in find_get_entries()
1678 head = compound_head(page); in find_get_entries()
1682 /* The page was split under us? */ in find_get_entries()
1683 if (compound_head(page) != head) { in find_get_entries()
1688 /* Has the page moved? */ in find_get_entries()
1689 if (unlikely(page != *slot)) { in find_get_entries()
1695 entries[ret] = page; in find_get_entries()
1706 * @start: The starting page index
1707 * @end: The final page index (inclusive)
1718 * We also update @start to index the next page for the traversal.
1726 struct page **pages) in find_get_pages_range()
1737 struct page *head, *page; in find_get_pages_range() local
1742 page = radix_tree_deref_slot(slot); in find_get_pages_range()
1743 if (unlikely(!page)) in find_get_pages_range()
1746 if (radix_tree_exception(page)) { in find_get_pages_range()
1747 if (radix_tree_deref_retry(page)) { in find_get_pages_range()
1752 * A shadow entry of a recently evicted page, in find_get_pages_range()
1759 head = compound_head(page); in find_get_pages_range()
1763 /* The page was split under us? */ in find_get_pages_range()
1764 if (compound_head(page) != head) { in find_get_pages_range()
1769 /* Has the page moved? */ in find_get_pages_range()
1770 if (unlikely(page != *slot)) { in find_get_pages_range()
1775 pages[ret] = page; in find_get_pages_range()
1783 * We come here when there is no page beyond @end. We take care to not in find_get_pages_range()
1785 * breaks the iteration when there is page at index -1 but that is in find_get_pages_range()
1801 * @index: The starting page index
1811 unsigned int nr_pages, struct page **pages) in find_get_pages_contig()
1822 struct page *head, *page; in find_get_pages_contig() local
1824 page = radix_tree_deref_slot(slot); in find_get_pages_contig()
1826 if (unlikely(!page)) in find_get_pages_contig()
1829 if (radix_tree_exception(page)) { in find_get_pages_contig()
1830 if (radix_tree_deref_retry(page)) { in find_get_pages_contig()
1835 * A shadow entry of a recently evicted page, in find_get_pages_contig()
1842 head = compound_head(page); in find_get_pages_contig()
1846 /* The page was split under us? */ in find_get_pages_contig()
1847 if (compound_head(page) != head) { in find_get_pages_contig()
1852 /* Has the page moved? */ in find_get_pages_contig()
1853 if (unlikely(page != *slot)) { in find_get_pages_contig()
1863 if (page->mapping == NULL || page_to_pgoff(page) != iter.index) { in find_get_pages_contig()
1864 put_page(page); in find_get_pages_contig()
1868 pages[ret] = page; in find_get_pages_contig()
1880 * @index: the starting page index
1881 * @end: The final page index (inclusive)
1887 * @tag. We update @index to index the next page for the traversal.
1891 struct page **pages) in find_get_pages_range_tag()
1902 struct page *head, *page; in find_get_pages_range_tag() local
1907 page = radix_tree_deref_slot(slot); in find_get_pages_range_tag()
1908 if (unlikely(!page)) in find_get_pages_range_tag()
1911 if (radix_tree_exception(page)) { in find_get_pages_range_tag()
1912 if (radix_tree_deref_retry(page)) { in find_get_pages_range_tag()
1917 * A shadow entry of a recently evicted page. in find_get_pages_range_tag()
1922 * time, so there is a sizable window for page in find_get_pages_range_tag()
1923 * reclaim to evict a page we saw tagged. in find_get_pages_range_tag()
1930 head = compound_head(page); in find_get_pages_range_tag()
1934 /* The page was split under us? */ in find_get_pages_range_tag()
1935 if (compound_head(page) != head) { in find_get_pages_range_tag()
1940 /* Has the page moved? */ in find_get_pages_range_tag()
1941 if (unlikely(page != *slot)) { in find_get_pages_range_tag()
1946 pages[ret] = page; in find_get_pages_range_tag()
1956 * iteration when there is page at index -1 but that is already broken in find_get_pages_range_tag()
1973 * @start: the starting page cache index
1984 struct page **entries, pgoff_t *indices) in find_get_entries_tag()
1995 struct page *head, *page; in find_get_entries_tag() local
1997 page = radix_tree_deref_slot(slot); in find_get_entries_tag()
1998 if (unlikely(!page)) in find_get_entries_tag()
2000 if (radix_tree_exception(page)) { in find_get_entries_tag()
2001 if (radix_tree_deref_retry(page)) { in find_get_entries_tag()
2007 * A shadow entry of a recently evicted page, a swap in find_get_entries_tag()
2009 * without attempting to raise page count. in find_get_entries_tag()
2014 head = compound_head(page); in find_get_entries_tag()
2018 /* The page was split under us? */ in find_get_entries_tag()
2019 if (compound_head(page) != head) { in find_get_entries_tag()
2024 /* Has the page moved? */ in find_get_entries_tag()
2025 if (unlikely(page != *slot)) { in find_get_entries_tag()
2031 entries[ret] = page; in find_get_entries_tag()
2084 unsigned long offset; /* offset into pagecache page */ in generic_file_buffered_read()
2099 struct page *page; in generic_file_buffered_read() local
2111 page = find_get_page(mapping, index); in generic_file_buffered_read()
2112 if (!page) { in generic_file_buffered_read()
2118 page = find_get_page(mapping, index); in generic_file_buffered_read()
2119 if (unlikely(page == NULL)) in generic_file_buffered_read()
2122 if (PageReadahead(page)) { in generic_file_buffered_read()
2124 ra, filp, page, in generic_file_buffered_read()
2127 if (!PageUptodate(page)) { in generic_file_buffered_read()
2129 put_page(page); in generic_file_buffered_read()
2138 error = wait_on_page_locked_killable(page); in generic_file_buffered_read()
2141 if (PageUptodate(page)) in generic_file_buffered_read()
2150 if (!trylock_page(page)) in generic_file_buffered_read()
2153 if (!page->mapping) in generic_file_buffered_read()
2155 if (!mapping->a_ops->is_partially_uptodate(page, in generic_file_buffered_read()
2158 unlock_page(page); in generic_file_buffered_read()
2162 * i_size must be checked after we know the page is Uptodate. in generic_file_buffered_read()
2166 * part of the page is not copied back to userspace (unless in generic_file_buffered_read()
2173 put_page(page); in generic_file_buffered_read()
2177 /* nr is the maximum number of bytes to copy from this page */ in generic_file_buffered_read()
2182 put_page(page); in generic_file_buffered_read()
2188 /* If users can be writing to this page using arbitrary in generic_file_buffered_read()
2190 * before reading the page on the kernel side. in generic_file_buffered_read()
2193 flush_dcache_page(page); in generic_file_buffered_read()
2196 * When a sequential read accesses a page several times, in generic_file_buffered_read()
2200 mark_page_accessed(page); in generic_file_buffered_read()
2204 * Ok, we have the page, and it's up-to-date, so in generic_file_buffered_read()
2208 ret = copy_page_to_iter(page, offset, nr, iter); in generic_file_buffered_read()
2214 put_page(page); in generic_file_buffered_read()
2225 /* Get exclusive access to the page ... */ in generic_file_buffered_read()
2226 error = lock_page_killable(page); in generic_file_buffered_read()
2232 if (!page->mapping) { in generic_file_buffered_read()
2233 unlock_page(page); in generic_file_buffered_read()
2234 put_page(page); in generic_file_buffered_read()
2239 if (PageUptodate(page)) { in generic_file_buffered_read()
2240 unlock_page(page); in generic_file_buffered_read()
2250 ClearPageError(page); in generic_file_buffered_read()
2251 /* Start the actual read. The read will unlock the page. */ in generic_file_buffered_read()
2252 error = mapping->a_ops->readpage(filp, page); in generic_file_buffered_read()
2256 put_page(page); in generic_file_buffered_read()
2263 if (!PageUptodate(page)) { in generic_file_buffered_read()
2264 error = lock_page_killable(page); in generic_file_buffered_read()
2267 if (!PageUptodate(page)) { in generic_file_buffered_read()
2268 if (page->mapping == NULL) { in generic_file_buffered_read()
2272 unlock_page(page); in generic_file_buffered_read()
2273 put_page(page); in generic_file_buffered_read()
2276 unlock_page(page); in generic_file_buffered_read()
2281 unlock_page(page); in generic_file_buffered_read()
2288 put_page(page); in generic_file_buffered_read()
2294 * page.. in generic_file_buffered_read()
2296 page = page_cache_alloc(mapping); in generic_file_buffered_read()
2297 if (!page) { in generic_file_buffered_read()
2301 error = add_to_page_cache_lru(page, mapping, index, in generic_file_buffered_read()
2304 put_page(page); in generic_file_buffered_read()
2332 * that can use the page cache directly.
2393 * page_cache_read - adds requested page to the page cache if not already there
2395 * @offset: page index
2398 * This adds the requested page to the page cache if it isn't already there,
2404 struct page *page; in page_cache_read() local
2408 page = __page_cache_alloc(gfp_mask); in page_cache_read()
2409 if (!page) in page_cache_read()
2412 ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask); in page_cache_read()
2414 ret = mapping->a_ops->readpage(file, page); in page_cache_read()
2418 put_page(page); in page_cache_read()
2429 * a page in the page cache at all.
2471 * Asynchronous readahead happens when we find the page and PG_readahead,
2477 struct page *page, in do_async_mmap_readahead() argument
2487 if (PageReadahead(page)) in do_async_mmap_readahead()
2489 page, offset, ra->ra_pages); in do_async_mmap_readahead()
2493 * filemap_fault - read in file data for page fault handling
2497 * mapped memory region to read in file data during a page fault.
2500 * it in the page cache, and handles the special cases reasonably without
2524 struct page *page; in filemap_fault() local
2532 * Do we have something in the page cache already? in filemap_fault()
2534 page = find_get_page(mapping, offset); in filemap_fault()
2535 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { in filemap_fault()
2537 * We found the page, so try async readahead before in filemap_fault()
2540 do_async_mmap_readahead(vmf->vma, ra, file, page, offset); in filemap_fault()
2541 } else if (!page) { in filemap_fault()
2542 /* No page in the page cache at all */ in filemap_fault()
2548 page = find_get_page(mapping, offset); in filemap_fault()
2549 if (!page) in filemap_fault()
2553 if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) { in filemap_fault()
2554 put_page(page); in filemap_fault()
2559 if (unlikely(page->mapping != mapping)) { in filemap_fault()
2560 unlock_page(page); in filemap_fault()
2561 put_page(page); in filemap_fault()
2564 VM_BUG_ON_PAGE(page->index != offset, page); in filemap_fault()
2567 * We have a locked page in the page cache, now we need to check in filemap_fault()
2570 if (unlikely(!PageUptodate(page))) in filemap_fault()
2574 * Found the page and have a reference on it. in filemap_fault()
2575 * We must recheck i_size under page lock. in filemap_fault()
2579 unlock_page(page); in filemap_fault()
2580 put_page(page); in filemap_fault()
2584 vmf->page = page; in filemap_fault()
2595 * The page we want has now been added to the page cache. in filemap_fault()
2613 * Umm, take care of errors if the page isn't up-to-date. in filemap_fault()
2618 ClearPageError(page); in filemap_fault()
2619 error = mapping->a_ops->readpage(file, page); in filemap_fault()
2621 wait_on_page_locked(page); in filemap_fault()
2622 if (!PageUptodate(page)) in filemap_fault()
2625 put_page(page); in filemap_fault()
2645 struct page *head, *page; in filemap_map_pages() local
2652 page = radix_tree_deref_slot(slot); in filemap_map_pages()
2653 if (unlikely(!page)) in filemap_map_pages()
2655 if (radix_tree_exception(page)) { in filemap_map_pages()
2656 if (radix_tree_deref_retry(page)) { in filemap_map_pages()
2663 head = compound_head(page); in filemap_map_pages()
2667 /* The page was split under us? */ in filemap_map_pages()
2668 if (compound_head(page) != head) { in filemap_map_pages()
2673 /* Has the page moved? */ in filemap_map_pages()
2674 if (unlikely(page != *slot)) { in filemap_map_pages()
2679 if (!PageUptodate(page) || in filemap_map_pages()
2680 PageReadahead(page) || in filemap_map_pages()
2681 PageHWPoison(page)) in filemap_map_pages()
2683 if (!trylock_page(page)) in filemap_map_pages()
2686 if (page->mapping != mapping || !PageUptodate(page)) in filemap_map_pages()
2690 if (page->index >= max_idx) in filemap_map_pages()
2700 if (alloc_set_pte(vmf, NULL, page)) in filemap_map_pages()
2702 unlock_page(page); in filemap_map_pages()
2705 unlock_page(page); in filemap_map_pages()
2707 put_page(page); in filemap_map_pages()
2709 /* Huge page is mapped? No need to proceed. */ in filemap_map_pages()
2721 struct page *page = vmf->page; in filemap_page_mkwrite() local
2727 lock_page(page); in filemap_page_mkwrite()
2728 if (page->mapping != inode->i_mapping) { in filemap_page_mkwrite()
2729 unlock_page(page); in filemap_page_mkwrite()
2734 * We mark the page dirty already here so that when freeze is in in filemap_page_mkwrite()
2736 * see the dirty page and writeprotect it again. in filemap_page_mkwrite()
2738 set_page_dirty(page); in filemap_page_mkwrite()
2739 wait_for_stable_page(page); in filemap_page_mkwrite()
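
filemap_fault(), filemap_map_pages() and filemap_page_mkwrite() are normally wired into a file's vm_operations the way generic_file_mmap() does it. A sketch of that wiring; example_file_vm_ops and example_mmap() are placeholder names:

#include <linux/fs.h>
#include <linux/mm.h>

static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};

/* Hypothetical ->mmap(): mirrors generic_file_mmap() for a cache-backed file. */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!file->f_mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &example_file_vm_ops;
	return 0;
}
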
2792 static struct page *wait_on_page_read(struct page *page) in wait_on_page_read() argument
2794 if (!IS_ERR(page)) { in wait_on_page_read()
2795 wait_on_page_locked(page); in wait_on_page_read()
2796 if (!PageUptodate(page)) { in wait_on_page_read()
2797 put_page(page); in wait_on_page_read()
2798 page = ERR_PTR(-EIO); in wait_on_page_read()
2801 return page; in wait_on_page_read()
2804 static struct page *do_read_cache_page(struct address_space *mapping, in do_read_cache_page()
2806 int (*filler)(void *, struct page *), in do_read_cache_page() argument
2810 struct page *page; in do_read_cache_page() local
2813 page = find_get_page(mapping, index); in do_read_cache_page()
2814 if (!page) { in do_read_cache_page()
2815 page = __page_cache_alloc(gfp); in do_read_cache_page()
2816 if (!page) in do_read_cache_page()
2818 err = add_to_page_cache_lru(page, mapping, index, gfp); in do_read_cache_page()
2820 put_page(page); in do_read_cache_page()
2828 err = filler(data, page); in do_read_cache_page()
2830 put_page(page); in do_read_cache_page()
2834 page = wait_on_page_read(page); in do_read_cache_page()
2835 if (IS_ERR(page)) in do_read_cache_page()
2836 return page; in do_read_cache_page()
2839 if (PageUptodate(page)) in do_read_cache_page()
2843 * Page is not up to date and may be locked due to one of the following in do_read_cache_page()
2844 * case a: Page is being filled and the page lock is held in do_read_cache_page()
2845 * case b: Read/write error clearing the page uptodate status in do_read_cache_page()
2846 * case c: Truncation in progress (page locked) in do_read_cache_page()
2849 * Case a, the page will be up to date when the page is unlocked. in do_read_cache_page()
2850 * There is no need to serialise on the page lock here as the page in do_read_cache_page()
2852 * the page is truncated, the data is still valid if PageUptodate as in do_read_cache_page()
2854 * Case b, the page will not be up to date in do_read_cache_page()
2855 * Case c, the page may be truncated but in itself, the data may still in do_read_cache_page()
2857 * operation must restart if the page is not uptodate on unlock but in do_read_cache_page()
2858 * otherwise serialising on page lock to stabilise the mapping gives in do_read_cache_page()
2859 * no additional guarantees to the caller as the page lock is in do_read_cache_page()
2861 * Case d, similar to truncation. If reclaim holds the page lock, it in do_read_cache_page()
2864 * no need to serialise with page lock. in do_read_cache_page()
2866 * As the page lock gives no additional guarantee, we optimistically in do_read_cache_page()
2867 * wait on the page to be unlocked and check if it's up to date and in do_read_cache_page()
2868 * use the page if it is. Otherwise, the page lock is required to in do_read_cache_page()
2871 * wait on the same page for IO to complete. in do_read_cache_page()
2873 wait_on_page_locked(page); in do_read_cache_page()
2874 if (PageUptodate(page)) in do_read_cache_page()
2878 lock_page(page); in do_read_cache_page()
2881 if (!page->mapping) { in do_read_cache_page()
2882 unlock_page(page); in do_read_cache_page()
2883 put_page(page); in do_read_cache_page()
2887 /* Someone else locked and filled the page in a very small window */ in do_read_cache_page()
2888 if (PageUptodate(page)) { in do_read_cache_page()
2889 unlock_page(page); in do_read_cache_page()
2896 * Clear page error before actual read, PG_error will be in do_read_cache_page()
2897 * set again if read page fails. in do_read_cache_page()
2899 ClearPageError(page); in do_read_cache_page()
2903 mark_page_accessed(page); in do_read_cache_page()
2904 return page; in do_read_cache_page()
2908 * read_cache_page - read into page cache, fill it if needed
2909 * @mapping: the page's address_space
2910 * @index: the page index
2912 * @data: first arg to filler(data, page) function, often left as NULL
2914 * Read into the page cache. If a page already exists, and PageUptodate() is
2915 * not set, try to fill the page and wait for it to become unlocked.
2917 * If the page does not get brought uptodate, return -EIO.
2919 struct page *read_cache_page(struct address_space *mapping, in read_cache_page()
2921 int (*filler)(void *, struct page *), in read_cache_page() argument
2929 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
2930 * @mapping: the page's address_space
2931 * @index: the page index
2932 * @gfp: the page allocator flags to use if allocating
2935 * any new page allocations done using the specified allocation flags.
2937 * If the page does not get brought uptodate, return -EIO.
2939 struct page *read_cache_page_gfp(struct address_space *mapping, in read_cache_page_gfp()
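
Most callers reach read_cache_page() through the read_mapping_page() wrapper in <linux/pagemap.h>, which passes the mapping's ->readpage() as the filler and hands back an uptodate, referenced page or an ERR_PTR. A hedged sketch; example_read_byte() is a made-up helper and @offset is assumed to be below PAGE_SIZE:

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Hypothetical: copy one byte out of page @index of @mapping. */
static int example_read_byte(struct address_space *mapping, pgoff_t index,
			     unsigned int offset, u8 *out)
{
	struct page *page = read_mapping_page(mapping, index, NULL);
	u8 *kaddr;

	if (IS_ERR(page))
		return PTR_ERR(page);	/* -EIO if it never became uptodate */

	kaddr = kmap_atomic(page);
	*out = kaddr[offset];
	kunmap_atomic(kaddr);

	put_page(page);			/* drop the reference from the lookup */
	return 0;
}
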
3010 struct page **pagep, void **fsdata) in pagecache_write_begin()
3021 struct page *page, void *fsdata) in pagecache_write_end() argument
3025 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); in pagecache_write_end()
3057 * the new data. We invalidate clean cached page from the region we're in generic_file_direct_write()
3064 * If a page can not be invalidated, return 0 to fall back in generic_file_direct_write()
3108 * Find or create a page at the given pagecache position. Return the locked
3109 * page. This function is specifically for buffered writes.
3111 struct page *grab_cache_page_write_begin(struct address_space *mapping, in grab_cache_page_write_begin()
3114 struct page *page; in grab_cache_page_write_begin() local
3120 page = pagecache_get_page(mapping, index, fgp_flags, in grab_cache_page_write_begin()
3122 if (page) in grab_cache_page_write_begin()
3123 wait_for_stable_page(page); in grab_cache_page_write_begin()
3125 return page; in grab_cache_page_write_begin()
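
A simple filesystem's ->write_begin() often just wraps grab_cache_page_write_begin() so generic_perform_write() below gets a locked page to copy into; simple_write_begin() in fs/libfs.c is roughly this plus partial-page zeroing. A sketch with a hypothetical name:

#include <linux/fs.h>
#include <linux/pagemap.h>

/* Hypothetical ->write_begin(): hand back a locked pagecache page. */
static int example_write_begin(struct file *file, struct address_space *mapping,
			       loff_t pos, unsigned len, unsigned flags,
			       struct page **pagep, void **fsdata)
{
	pgoff_t index = pos >> PAGE_SHIFT;
	struct page *page;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	*pagep = page;		/* generic_perform_write() copies into it */
	return 0;
}
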
3139 struct page *page; in generic_perform_write() local
3140 unsigned long offset; /* Offset into pagecache page */ in generic_perform_write()
3141 unsigned long bytes; /* Bytes to write to page */ in generic_perform_write()
3151 * Bring in the user page that we will copy from _first_. in generic_perform_write()
3153 * same page as we're writing to, without it being marked in generic_perform_write()
3171 &page, &fsdata); in generic_perform_write()
3176 flush_dcache_page(page); in generic_perform_write()
3178 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); in generic_perform_write()
3179 flush_dcache_page(page); in generic_perform_write()
3182 page, fsdata); in generic_perform_write()
3239 /* We can write back this queue in page reclaim */ in __generic_file_write_iter()
3258 * page-cache pages correctly). in __generic_file_write_iter()
3276 * We need to ensure that the page cache pages are written to in __generic_file_write_iter()
3333 * try_to_release_page() - release old fs-specific metadata on a page
3335 * @page: the page which the kernel is trying to free
3338 * The address_space is to try to release any data against the page
3339 * (presumably at page->private). If the release was successful, return '1'.
3342 * This may also be called if PG_fscache is set on a page, indicating that the
3343 * page is known to the local caching routines.
3346 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
3349 int try_to_release_page(struct page *page, gfp_t gfp_mask) in try_to_release_page() argument
3351 struct address_space * const mapping = page->mapping; in try_to_release_page()
3353 BUG_ON(!PageLocked(page)); in try_to_release_page()
3354 if (PageWriteback(page)) in try_to_release_page()
3358 return mapping->a_ops->releasepage(page, gfp_mask); in try_to_release_page()
3359 return try_to_free_buffers(page); in try_to_release_page()
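
Invalidation and reclaim paths use try_to_release_page() on a locked page to ask the filesystem to drop page->private (buffer heads and the like) before the page can go away. A hedged sketch of that check; example_can_drop_page() is a hypothetical helper and the caller is assumed to hold the page lock:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical: can this locked page be freed from the cache right now? */
static bool example_can_drop_page(struct page *page)
{
	if (PageWriteback(page))
		return false;		/* still under I/O */
	if (page_has_private(page))
		return try_to_release_page(page, GFP_KERNEL) != 0;
	return true;
}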