Lines Matching +full:data +full:- +full:mapping

4  * Copyright (C) 1994-1999  Linus Torvalds
29 #include <linux/backing-dev.h>
57 * finished 'unifying' the page and buffer cache and SMP-threaded the
58 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
60 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
66 * ->i_mmap_rwsem (truncate_pagecache)
67 * ->private_lock (__free_pte->__set_page_dirty_buffers)
68 * ->swap_lock (exclusive_swap_page, others)
69 * ->i_pages lock
71 * ->i_mutex
72 * ->i_mmap_rwsem (truncate->unmap_mapping_range)
74 * ->mmap_sem
75 * ->i_mmap_rwsem
76 * ->page_table_lock or pte_lock (various, mainly in memory.c)
77 * ->i_pages lock (arch-dependent flush_dcache_mmap_lock)
79 * ->mmap_sem
80 * ->lock_page (access_process_vm)
82 * ->i_mutex (generic_perform_write)
83 * ->mmap_sem (fault_in_pages_readable->do_page_fault)
85 * bdi->wb.list_lock
86 * sb_lock (fs/fs-writeback.c)
87 * ->i_pages lock (__sync_single_inode)
89 * ->i_mmap_rwsem
90 * ->anon_vma.lock (vma_adjust)
92 * ->anon_vma.lock
93 * ->page_table_lock or pte_lock (anon_vma_prepare and various)
95 * ->page_table_lock or pte_lock
96 * ->swap_lock (try_to_unmap_one)
97 * ->private_lock (try_to_unmap_one)
98 * ->i_pages lock (try_to_unmap_one)
99 * ->zone_lru_lock(zone) (follow_page->mark_page_accessed)
100 * ->zone_lru_lock(zone) (check_pte_range->isolate_lru_page)
101 * ->private_lock (page_remove_rmap->set_page_dirty)
102 * ->i_pages lock (page_remove_rmap->set_page_dirty)
103 * bdi.wb->list_lock (page_remove_rmap->set_page_dirty)
104 * ->inode->i_lock (page_remove_rmap->set_page_dirty)
105 * ->memcg->move_lock (page_remove_rmap->lock_page_memcg)
106 * bdi.wb->list_lock (zap_pte_range->set_page_dirty)
107 * ->inode->i_lock (zap_pte_range->set_page_dirty)
108 * ->private_lock (zap_pte_range->__set_page_dirty_buffers)
110 * ->i_mmap_rwsem
111 * ->tasklist_lock (memory_failure, collect_procs_ao)
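A minimal sketch of the ->i_mutex before ->mmap_sem nesting noted in the ordering comment above, assuming a hypothetical foo filesystem (foo_file_write_iter is not part of filemap.c): the write path takes the inode lock first, and copying data in from user space may then fault and take mmap_sem underneath it.

static ssize_t foo_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
        struct inode *inode = file_inode(iocb->ki_filp);
        ssize_t ret;

        inode_lock(inode);                      /* ->i_mutex, taken first */
        ret = generic_write_checks(iocb, from);
        if (ret > 0)
                /* may fault in user pages and take ->mmap_sem underneath */
                ret = __generic_file_write_iter(iocb, from);
        inode_unlock(inode);
        return ret;
}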
114 static int page_cache_tree_insert(struct address_space *mapping, in page_cache_tree_insert() argument
121 error = __radix_tree_create(&mapping->i_pages, page->index, 0, in page_cache_tree_insert()
129 &mapping->i_pages.xa_lock); in page_cache_tree_insert()
131 return -EEXIST; in page_cache_tree_insert()
133 mapping->nrexceptional--; in page_cache_tree_insert()
137 __radix_tree_replace(&mapping->i_pages, node, slot, page, in page_cache_tree_insert()
138 workingset_lookup_update(mapping)); in page_cache_tree_insert()
139 mapping->nrpages++; in page_cache_tree_insert()
143 static void page_cache_tree_delete(struct address_space *mapping, in page_cache_tree_delete() argument
159 __radix_tree_lookup(&mapping->i_pages, page->index + i, in page_cache_tree_delete()
164 radix_tree_clear_tags(&mapping->i_pages, node, slot); in page_cache_tree_delete()
165 __radix_tree_replace(&mapping->i_pages, node, slot, shadow, in page_cache_tree_delete()
166 workingset_lookup_update(mapping)); in page_cache_tree_delete()
169 page->mapping = NULL; in page_cache_tree_delete()
170 /* Leave page->index set: truncation lookup relies upon it */ in page_cache_tree_delete()
173 mapping->nrexceptional += nr; in page_cache_tree_delete()
182 mapping->nrpages -= nr; in page_cache_tree_delete()
185 static void unaccount_page_cache_page(struct address_space *mapping, in unaccount_page_cache_page() argument
193 * stale data around in the cleancache once our page is gone in unaccount_page_cache_page()
198 cleancache_invalidate_page(mapping, page); in unaccount_page_cache_page()
206 current->comm, page_to_pfn(page)); in unaccount_page_cache_page()
212 if (mapping_exiting(mapping) && in unaccount_page_cache_page()
231 __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr); in unaccount_page_cache_page()
233 __mod_node_page_state(page_pgdat(page), NR_SHMEM, -nr); in unaccount_page_cache_page()
243 * unwritten data. in unaccount_page_cache_page()
251 account_page_cleaned(page, mapping, inode_to_wb(mapping->host)); in unaccount_page_cache_page()
256 * sure the page is locked and that nobody else uses it - or that usage
261 struct address_space *mapping = page->mapping; in __delete_from_page_cache() local
265 unaccount_page_cache_page(mapping, page); in __delete_from_page_cache()
266 page_cache_tree_delete(mapping, page, shadow); in __delete_from_page_cache()
269 static void page_cache_free_page(struct address_space *mapping, in page_cache_free_page() argument
274 freepage = mapping->a_ops->freepage; in page_cache_free_page()
287 * delete_from_page_cache - delete page from page cache
296 struct address_space *mapping = page_mapping(page); in delete_from_page_cache() local
300 xa_lock_irqsave(&mapping->i_pages, flags); in delete_from_page_cache()
302 xa_unlock_irqrestore(&mapping->i_pages, flags); in delete_from_page_cache()
304 page_cache_free_page(mapping, page); in delete_from_page_cache()
309 * page_cache_tree_delete_batch - delete several pages from page cache
310 * @mapping: the mapping to which pages belong
313 * The function walks over mapping->i_pages and removes pages passed in @pvec
314 * from the mapping. The function expects @pvec to be sorted by page index.
315 * It tolerates holes in @pvec (mapping entries at those indices are not
318 * mapping as well.
323 page_cache_tree_delete_batch(struct address_space *mapping, in page_cache_tree_delete_batch() argument
333 start = pvec->pages[0]->index; in page_cache_tree_delete_batch()
334 radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) { in page_cache_tree_delete_batch()
338 &mapping->i_pages.xa_lock); in page_cache_tree_delete_batch()
347 if (page != pvec->pages[i]) in page_cache_tree_delete_batch()
351 tail_pages = HPAGE_PMD_NR - 1; in page_cache_tree_delete_batch()
352 page->mapping = NULL; in page_cache_tree_delete_batch()
354 * Leave page->index set: truncation lookup relies in page_cache_tree_delete_batch()
359 tail_pages--; in page_cache_tree_delete_batch()
361 radix_tree_clear_tags(&mapping->i_pages, iter.node, slot); in page_cache_tree_delete_batch()
362 __radix_tree_replace(&mapping->i_pages, iter.node, slot, NULL, in page_cache_tree_delete_batch()
363 workingset_lookup_update(mapping)); in page_cache_tree_delete_batch()
366 mapping->nrpages -= total_pages; in page_cache_tree_delete_batch()
369 void delete_from_page_cache_batch(struct address_space *mapping, in delete_from_page_cache_batch() argument
378 xa_lock_irqsave(&mapping->i_pages, flags); in delete_from_page_cache_batch()
380 trace_mm_filemap_delete_from_page_cache(pvec->pages[i]); in delete_from_page_cache_batch()
382 unaccount_page_cache_page(mapping, pvec->pages[i]); in delete_from_page_cache_batch()
384 page_cache_tree_delete_batch(mapping, pvec); in delete_from_page_cache_batch()
385 xa_unlock_irqrestore(&mapping->i_pages, flags); in delete_from_page_cache_batch()
388 page_cache_free_page(mapping, pvec->pages[i]); in delete_from_page_cache_batch()
391 int filemap_check_errors(struct address_space *mapping) in filemap_check_errors() argument
395 if (test_bit(AS_ENOSPC, &mapping->flags) && in filemap_check_errors()
396 test_and_clear_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_errors()
397 ret = -ENOSPC; in filemap_check_errors()
398 if (test_bit(AS_EIO, &mapping->flags) && in filemap_check_errors()
399 test_and_clear_bit(AS_EIO, &mapping->flags)) in filemap_check_errors()
400 ret = -EIO; in filemap_check_errors()
405 static int filemap_check_and_keep_errors(struct address_space *mapping) in filemap_check_and_keep_errors() argument
408 if (test_bit(AS_EIO, &mapping->flags)) in filemap_check_and_keep_errors()
409 return -EIO; in filemap_check_and_keep_errors()
410 if (test_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_and_keep_errors()
411 return -ENOSPC; in filemap_check_and_keep_errors()
416 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
417 * @mapping: address space structure to write
422 * Start writeback against all of a mapping's dirty pages that lie
425 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
430 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, in __filemap_fdatawrite_range() argument
441 if (!mapping_cap_writeback_dirty(mapping) || in __filemap_fdatawrite_range()
442 !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) in __filemap_fdatawrite_range()
445 wbc_attach_fdatawrite_inode(&wbc, mapping->host); in __filemap_fdatawrite_range()
446 ret = do_writepages(mapping, &wbc); in __filemap_fdatawrite_range()
451 static inline int __filemap_fdatawrite(struct address_space *mapping, in __filemap_fdatawrite() argument
454 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); in __filemap_fdatawrite()
457 int filemap_fdatawrite(struct address_space *mapping) in filemap_fdatawrite() argument
459 return __filemap_fdatawrite(mapping, WB_SYNC_ALL); in filemap_fdatawrite()
463 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, in filemap_fdatawrite_range() argument
466 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); in filemap_fdatawrite_range()
471 * filemap_flush - mostly a non-blocking flush
472 * @mapping: target address_space
474 * This is a mostly non-blocking flush. Not suitable for data-integrity
475 * purposes - I/O may not be started against all dirty pages.
477 int filemap_flush(struct address_space *mapping) in filemap_flush() argument
479 return __filemap_fdatawrite(mapping, WB_SYNC_NONE); in filemap_flush()
484 * filemap_range_has_page - check if a page exists in range.
485 * @mapping: address space within which to check
492 bool filemap_range_has_page(struct address_space *mapping, in filemap_range_has_page() argument
502 if (mapping->nrpages == 0) in filemap_range_has_page()
505 if (!find_get_pages_range(mapping, &index, end, 1, &page)) in filemap_range_has_page()
512 static void __filemap_fdatawait_range(struct address_space *mapping, in __filemap_fdatawait_range() argument
527 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, in __filemap_fdatawait_range()
544 * filemap_fdatawait_range - wait for writeback to complete
545 * @mapping: address space structure to wait for
549 * Walk the list of under-writeback pages of the given address space
557 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, in filemap_fdatawait_range() argument
560 __filemap_fdatawait_range(mapping, start_byte, end_byte); in filemap_fdatawait_range()
561 return filemap_check_errors(mapping); in filemap_fdatawait_range()
566 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
567 * @mapping: address space structure to wait for
571 * Walk the list of under-writeback pages of the given address space in the
576 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
579 int filemap_fdatawait_range_keep_errors(struct address_space *mapping, in filemap_fdatawait_range_keep_errors() argument
582 __filemap_fdatawait_range(mapping, start_byte, end_byte); in filemap_fdatawait_range_keep_errors()
583 return filemap_check_and_keep_errors(mapping); in filemap_fdatawait_range_keep_errors()
588 * file_fdatawait_range - wait for writeback to complete
593 * Walk the list of under-writeback pages of the address space that file
595 * status of the address space vs. the file->f_wb_err cursor and return it.
603 struct address_space *mapping = file->f_mapping; in file_fdatawait_range() local
605 __filemap_fdatawait_range(mapping, start_byte, end_byte); in file_fdatawait_range()
611 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
612 * @mapping: address space structure to wait for
614 * Walk the list of under-writeback pages of the given address space
619 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
622 int filemap_fdatawait_keep_errors(struct address_space *mapping) in filemap_fdatawait_keep_errors() argument
624 __filemap_fdatawait_range(mapping, 0, LLONG_MAX); in filemap_fdatawait_keep_errors()
625 return filemap_check_and_keep_errors(mapping); in filemap_fdatawait_keep_errors()
629 static bool mapping_needs_writeback(struct address_space *mapping) in mapping_needs_writeback() argument
631 return (!dax_mapping(mapping) && mapping->nrpages) || in mapping_needs_writeback()
632 (dax_mapping(mapping) && mapping->nrexceptional); in mapping_needs_writeback()
635 int filemap_write_and_wait(struct address_space *mapping) in filemap_write_and_wait() argument
639 if (mapping_needs_writeback(mapping)) { in filemap_write_and_wait()
640 err = filemap_fdatawrite(mapping); in filemap_write_and_wait()
643 * written partially (e.g. -ENOSPC), so we wait for it. in filemap_write_and_wait()
644 * But the -EIO is special case, it may indicate the worst in filemap_write_and_wait()
647 if (err != -EIO) { in filemap_write_and_wait()
648 int err2 = filemap_fdatawait(mapping); in filemap_write_and_wait()
653 filemap_check_errors(mapping); in filemap_write_and_wait()
656 err = filemap_check_errors(mapping); in filemap_write_and_wait()
663 * filemap_write_and_wait_range - write out & wait on a file range
664 * @mapping: the address_space for the pages
668 * Write out and wait upon file offsets lstart->lend, inclusive.
671 * that this function can be used to write to the very end-of-file (end = -1).
673 int filemap_write_and_wait_range(struct address_space *mapping, in filemap_write_and_wait_range() argument
678 if (mapping_needs_writeback(mapping)) { in filemap_write_and_wait_range()
679 err = __filemap_fdatawrite_range(mapping, lstart, lend, in filemap_write_and_wait_range()
682 if (err != -EIO) { in filemap_write_and_wait_range()
683 int err2 = filemap_fdatawait_range(mapping, in filemap_write_and_wait_range()
689 filemap_check_errors(mapping); in filemap_write_and_wait_range()
692 err = filemap_check_errors(mapping); in filemap_write_and_wait_range()
698 void __filemap_set_wb_err(struct address_space *mapping, int err) in __filemap_set_wb_err() argument
700 errseq_t eseq = errseq_set(&mapping->wb_err, err); in __filemap_set_wb_err()
702 trace_filemap_set_wb_err(mapping, eseq); in __filemap_set_wb_err()
707 * file_check_and_advance_wb_err - report wb error (if any) that was previously
715 * Grab the wb_err from the mapping. If it matches what we have in the file,
718 * If it doesn't match, then take the mapping value, set the "seen" flag in
724 * While we handle mapping->wb_err with atomic operations, the f_wb_err
731 errseq_t old = READ_ONCE(file->f_wb_err); in file_check_and_advance_wb_err()
732 struct address_space *mapping = file->f_mapping; in file_check_and_advance_wb_err() local
735 if (errseq_check(&mapping->wb_err, old)) { in file_check_and_advance_wb_err()
737 spin_lock(&file->f_lock); in file_check_and_advance_wb_err()
738 old = file->f_wb_err; in file_check_and_advance_wb_err()
739 err = errseq_check_and_advance(&mapping->wb_err, in file_check_and_advance_wb_err()
740 &file->f_wb_err); in file_check_and_advance_wb_err()
742 spin_unlock(&file->f_lock); in file_check_and_advance_wb_err()
750 clear_bit(AS_EIO, &mapping->flags); in file_check_and_advance_wb_err()
751 clear_bit(AS_ENOSPC, &mapping->flags); in file_check_and_advance_wb_err()
757 * file_write_and_wait_range - write out & wait on a file range
762 * Write out and wait upon file offsets lstart->lend, inclusive.
765 * that this function can be used to write to the very end-of-file (end = -1).
767 * After writing out and waiting on the data, we check and advance the
773 struct address_space *mapping = file->f_mapping; in file_write_and_wait_range() local
775 if (mapping_needs_writeback(mapping)) { in file_write_and_wait_range()
776 err = __filemap_fdatawrite_range(mapping, lstart, lend, in file_write_and_wait_range()
779 if (err != -EIO) in file_write_and_wait_range()
780 __filemap_fdatawait_range(mapping, lstart, lend); in file_write_and_wait_range()
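A minimal sketch of the common fsync pattern built on the helpers above, assuming a hypothetical foo filesystem: write back and wait on the byte range (file_write_and_wait_range() also checks and advances file->f_wb_err), then flush the filesystem's own metadata.

static int foo_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
        struct inode *inode = file_inode(file);
        int err;

        /* write back dirty pages, wait, and report any writeback error */
        err = file_write_and_wait_range(file, start, end);
        if (err)
                return err;

        inode_lock(inode);
        /* ... write out foo's metadata for @inode here ... */
        inode_unlock(inode);
        return 0;
}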
790 * replace_page_cache_page - replace a pagecache page with a new one
810 VM_BUG_ON_PAGE(new->mapping, new); in replace_page_cache_page()
814 struct address_space *mapping = old->mapping; in replace_page_cache_page() local
818 pgoff_t offset = old->index; in replace_page_cache_page()
819 freepage = mapping->a_ops->freepage; in replace_page_cache_page()
822 new->mapping = mapping; in replace_page_cache_page()
823 new->index = offset; in replace_page_cache_page()
825 xa_lock_irqsave(&mapping->i_pages, flags); in replace_page_cache_page()
827 error = page_cache_tree_insert(mapping, new, NULL); in replace_page_cache_page()
837 xa_unlock_irqrestore(&mapping->i_pages, flags); in replace_page_cache_page()
850 struct address_space *mapping, in __add_to_page_cache_locked() argument
862 error = mem_cgroup_try_charge(page, current->mm, in __add_to_page_cache_locked()
876 page->mapping = mapping; in __add_to_page_cache_locked()
877 page->index = offset; in __add_to_page_cache_locked()
879 xa_lock_irq(&mapping->i_pages); in __add_to_page_cache_locked()
880 error = page_cache_tree_insert(mapping, page, shadowp); in __add_to_page_cache_locked()
888 xa_unlock_irq(&mapping->i_pages); in __add_to_page_cache_locked()
894 page->mapping = NULL; in __add_to_page_cache_locked()
895 /* Leave page->index set: truncation relies upon it */ in __add_to_page_cache_locked()
896 xa_unlock_irq(&mapping->i_pages); in __add_to_page_cache_locked()
904 * add_to_page_cache_locked - add a locked page to the pagecache
906 * @mapping: the page's address_space
913 int add_to_page_cache_locked(struct page *page, struct address_space *mapping, in add_to_page_cache_locked() argument
916 return __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_locked()
921 int add_to_page_cache_lru(struct page *page, struct address_space *mapping, in add_to_page_cache_lru() argument
928 ret = __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_lru()
938 * data from the working set, only to cache data that will in add_to_page_cache_lru()
1003 /* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
1022 if (wait_page->page != key->page) in wake_page_function()
1024 key->page_match = 1; in wake_page_function()
1026 if (wait_page->bit_nr != key->bit_nr) in wake_page_function()
1030 if (test_bit(key->bit_nr, &key->page->flags)) in wake_page_function()
1031 return -1; in wake_page_function()
1052 spin_lock_irqsave(&q->lock, flags); in wake_up_page_bit()
1062 spin_unlock_irqrestore(&q->lock, flags); in wake_up_page_bit()
1064 spin_lock_irqsave(&q->lock, flags); in wake_up_page_bit()
1070 * hash, so in that case check for a page match. That prevents a long- in wake_up_page_bit()
1087 spin_unlock_irqrestore(&q->lock, flags); in wake_up_page_bit()
1105 wait->flags = lock ? WQ_FLAG_EXCLUSIVE : 0; in wait_on_page_bit_common()
1106 wait->func = wake_page_function; in wait_on_page_bit_common()
1111 spin_lock_irq(&q->lock); in wait_on_page_bit_common()
1113 if (likely(list_empty(&wait->entry))) { in wait_on_page_bit_common()
1120 spin_unlock_irq(&q->lock); in wait_on_page_bit_common()
1122 if (likely(test_bit(bit_nr, &page->flags))) { in wait_on_page_bit_common()
1127 if (!test_and_set_bit_lock(bit_nr, &page->flags)) in wait_on_page_bit_common()
1130 if (!test_bit(bit_nr, &page->flags)) in wait_on_page_bit_common()
1135 ret = -EINTR; in wait_on_page_bit_common()
1144 * !waitqueue_active would be possible (by open-coding finish_wait), in wait_on_page_bit_common()
1168 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
1179 spin_lock_irqsave(&q->lock, flags); in add_page_wait_queue()
1182 spin_unlock_irqrestore(&q->lock, flags); in add_page_wait_queue()
1210 * unlock_page - unlock a locked page
1216 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
1219 * that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
1229 if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags)) in unlock_page()
1235 * end_page_writeback - end writeback against a page
1276 struct address_space *mapping; in page_endio() local
1279 mapping = page_mapping(page); in page_endio()
1280 if (mapping) in page_endio()
1281 mapping_set_error(mapping, err); in page_endio()
1289 * __lock_page - get a lock on the page, assuming we need to sleep to get it
1310 * 1 - page is locked; mmap_sem is still held.
1311 * 0 - page is not locked.
1330 up_read(&mm->mmap_sem); in __lock_page_or_retry()
1342 up_read(&mm->mmap_sem); in __lock_page_or_retry()
1352 * page_cache_next_hole - find the next hole (not-present entry)
1353 * @mapping: mapping
1357 * Search the set [index, min(index+max_scan-1, MAX_INDEX)] for the
1361 * outside of the set specified (in which case 'return - index >=
1362 * max_scan' will be true). In rare cases of index wrap-around, 0 will
1372 pgoff_t page_cache_next_hole(struct address_space *mapping, in page_cache_next_hole() argument
1380 page = radix_tree_lookup(&mapping->i_pages, index); in page_cache_next_hole()
1393 * page_cache_prev_hole - find the prev hole (not-present entry)
1394 * @mapping: mapping
1398 * Search backwards in the range [max(index-max_scan+1, 0), index] for
1402 * outside of the set specified (in which case 'index - return >=
1403 * max_scan' will be true). In rare cases of wrap-around, ULONG_MAX
1413 pgoff_t page_cache_prev_hole(struct address_space *mapping, in page_cache_prev_hole() argument
1421 page = radix_tree_lookup(&mapping->i_pages, index); in page_cache_prev_hole()
1424 index--; in page_cache_prev_hole()
1434 * find_get_entry - find and get a page cache entry
1435 * @mapping: the address_space to search
1438 * Looks up the page cache slot at @mapping & @offset. If there is a
1446 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset) in find_get_entry() argument
1454 pagep = radix_tree_lookup_slot(&mapping->i_pages, offset); in find_get_entry()
1498 * find_lock_entry - locate, pin and lock a page cache entry
1499 * @mapping: the address_space to search
1502 * Looks up the page cache slot at @mapping & @offset. If there is a
1513 struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset) in find_lock_entry() argument
1518 page = find_get_entry(mapping, offset); in find_lock_entry()
1522 if (unlikely(page_mapping(page) != mapping)) { in find_lock_entry()
1534 * pagecache_get_page - find and get a page reference
1535 * @mapping: the address_space to search
1538 * @gfp_mask: gfp mask to use for the page cache data page allocation
1540 * Looks up the page cache slot at @mapping & @offset.
1546 * - FGP_ACCESSED: the page will be marked accessed
1547 * - FGP_LOCK: Page is return locked
1548 * - FGP_CREAT: If page is not present then a new page is allocated using
1558 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, in pagecache_get_page() argument
1564 page = find_get_entry(mapping, offset); in pagecache_get_page()
1581 if (unlikely(page->mapping != mapping)) { in pagecache_get_page()
1586 VM_BUG_ON_PAGE(page->index != offset, page); in pagecache_get_page()
1595 if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping)) in pagecache_get_page()
1611 err = add_to_page_cache_lru(page, mapping, offset, gfp_mask); in pagecache_get_page()
1615 if (err == -EEXIST) in pagecache_get_page()
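A minimal usage sketch for pagecache_get_page() (the caller and its error handling are hypothetical): look up the page at @offset, allocating and locking it if it is not already present.

        struct page *page;

        page = pagecache_get_page(mapping, offset,
                                  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                  mapping_gfp_mask(mapping));
        if (!page)
                return -ENOMEM;

        /* ... operate on the locked, referenced page ... */

        unlock_page(page);
        put_page(page);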
1625 * find_get_entries - gang pagecache lookup
1626 * @mapping: The address_space to search
1633 * @nr_entries entries in the mapping. The entries are placed at
1637 * The search returns a group of mapping-contiguous page cache entries
1639 * not-present pages.
1647 unsigned find_get_entries(struct address_space *mapping, in find_get_entries() argument
1659 radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) { in find_get_entries()
1704 * find_get_pages_range - gang pagecache lookup
1705 * @mapping: The address_space to search
1712 * pages in the mapping starting at index @start and up to index @end
1716 * The search returns a group of mapping-contiguous pages with ascending
1717 * indexes. There may be holes in the indices due to not-present pages.
1724 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, in find_get_pages_range() argument
1736 radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, *start) { in find_get_pages_range()
1777 *start = pages[ret - 1]->index + 1; in find_get_pages_range()
1785 * breaks the iteration when there is page at index -1 but that is in find_get_pages_range()
1788 if (end == (pgoff_t)-1) in find_get_pages_range()
1789 *start = (pgoff_t)-1; in find_get_pages_range()
1799 * find_get_pages_contig - gang contiguous pagecache lookup
1800 * @mapping: The address_space to search
1810 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, in find_get_pages_contig() argument
1821 radix_tree_for_each_contig(slot, &mapping->i_pages, &iter, index) { in find_get_pages_contig()
1859 * must check mapping and index after taking the ref. in find_get_pages_contig()
1863 if (page->mapping == NULL || page_to_pgoff(page) != iter.index) { in find_get_pages_contig()
1878 * find_get_pages_range_tag - find and return pages in given range matching @tag
1879 * @mapping: the address_space to search
1889 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, in find_get_pages_range_tag() argument
1901 radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, *index, tag) { in find_get_pages_range_tag()
1948 *index = pages[ret - 1]->index + 1; in find_get_pages_range_tag()
1956 * iteration when there is page at index -1 but that is already broken in find_get_pages_range_tag()
1959 if (end == (pgoff_t)-1) in find_get_pages_range_tag()
1960 *index = (pgoff_t)-1; in find_get_pages_range_tag()
1971 * find_get_entries_tag - find and return entries that match @tag
1972 * @mapping: the address_space to search
1982 unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start, in find_get_entries_tag() argument
1994 radix_tree_for_each_tagged(slot, &mapping->i_pages, &iter, start, tag) { in find_get_entries_tag()
2044 * ---R__________________________________________B__________
2058 ra->ra_pages /= 4; in shrink_readahead_size_eio()
2062 * generic_file_buffered_read - generic file read routine
2064 * @iter: data destination
2068 * mapping->a_ops->readpage() function for the actual low-level stuff.
2076 struct file *filp = iocb->ki_filp; in generic_file_buffered_read()
2077 struct address_space *mapping = filp->f_mapping; in generic_file_buffered_read() local
2078 struct inode *inode = mapping->host; in generic_file_buffered_read()
2079 struct file_ra_state *ra = &filp->f_ra; in generic_file_buffered_read()
2080 loff_t *ppos = &iocb->ki_pos; in generic_file_buffered_read()
2088 if (unlikely(*ppos >= inode->i_sb->s_maxbytes)) in generic_file_buffered_read()
2090 iov_iter_truncate(iter, inode->i_sb->s_maxbytes); in generic_file_buffered_read()
2093 prev_index = ra->prev_pos >> PAGE_SHIFT; in generic_file_buffered_read()
2094 prev_offset = ra->prev_pos & (PAGE_SIZE-1); in generic_file_buffered_read()
2095 last_index = (*ppos + iter->count + PAGE_SIZE-1) >> PAGE_SHIFT; in generic_file_buffered_read()
2107 error = -EINTR; in generic_file_buffered_read()
2111 page = find_get_page(mapping, index); in generic_file_buffered_read()
2113 if (iocb->ki_flags & IOCB_NOWAIT) in generic_file_buffered_read()
2115 page_cache_sync_readahead(mapping, in generic_file_buffered_read()
2117 index, last_index - index); in generic_file_buffered_read()
2118 page = find_get_page(mapping, index); in generic_file_buffered_read()
2123 page_cache_async_readahead(mapping, in generic_file_buffered_read()
2125 index, last_index - index); in generic_file_buffered_read()
2128 if (iocb->ki_flags & IOCB_NOWAIT) { in generic_file_buffered_read()
2144 if (inode->i_blkbits == PAGE_SHIFT || in generic_file_buffered_read()
2145 !mapping->a_ops->is_partially_uptodate) in generic_file_buffered_read()
2148 if (unlikely(iter->type & ITER_PIPE)) in generic_file_buffered_read()
2153 if (!page->mapping) in generic_file_buffered_read()
2155 if (!mapping->a_ops->is_partially_uptodate(page, in generic_file_buffered_read()
2156 offset, iter->count)) in generic_file_buffered_read()
2165 * the correct value for "nr", which means the zero-filled in generic_file_buffered_read()
2167 * another truncate extends the file - this is desired though). in generic_file_buffered_read()
2171 end_index = (isize - 1) >> PAGE_SHIFT; in generic_file_buffered_read()
2180 nr = ((isize - 1) & ~PAGE_MASK) + 1; in generic_file_buffered_read()
2186 nr = nr - offset; in generic_file_buffered_read()
2192 if (mapping_writably_mapped(mapping)) in generic_file_buffered_read()
2204 * Ok, we have the page, and it's up-to-date, so in generic_file_buffered_read()
2219 error = -EFAULT; in generic_file_buffered_read()
2232 if (!page->mapping) { in generic_file_buffered_read()
2252 error = mapping->a_ops->readpage(filp, page); in generic_file_buffered_read()
2268 if (page->mapping == NULL) { in generic_file_buffered_read()
2278 error = -EIO; in generic_file_buffered_read()
2296 page = page_cache_alloc(mapping); in generic_file_buffered_read()
2298 error = -ENOMEM; in generic_file_buffered_read()
2301 error = add_to_page_cache_lru(page, mapping, index, in generic_file_buffered_read()
2302 mapping_gfp_constraint(mapping, GFP_KERNEL)); in generic_file_buffered_read()
2305 if (error == -EEXIST) { in generic_file_buffered_read()
2315 error = -EAGAIN; in generic_file_buffered_read()
2317 ra->prev_pos = prev_index; in generic_file_buffered_read()
2318 ra->prev_pos <<= PAGE_SHIFT; in generic_file_buffered_read()
2319 ra->prev_pos |= prev_offset; in generic_file_buffered_read()
2327 * generic_file_read_iter - generic filesystem read routine
2329 * @iter: destination for the data read
2343 if (iocb->ki_flags & IOCB_DIRECT) { in generic_file_read_iter()
2344 struct file *file = iocb->ki_filp; in generic_file_read_iter()
2345 struct address_space *mapping = file->f_mapping; in generic_file_read_iter() local
2346 struct inode *inode = mapping->host; in generic_file_read_iter()
2350 if (iocb->ki_flags & IOCB_NOWAIT) { in generic_file_read_iter()
2351 if (filemap_range_has_page(mapping, iocb->ki_pos, in generic_file_read_iter()
2352 iocb->ki_pos + count - 1)) in generic_file_read_iter()
2353 return -EAGAIN; in generic_file_read_iter()
2355 retval = filemap_write_and_wait_range(mapping, in generic_file_read_iter()
2356 iocb->ki_pos, in generic_file_read_iter()
2357 iocb->ki_pos + count - 1); in generic_file_read_iter()
2364 retval = mapping->a_ops->direct_IO(iocb, iter); in generic_file_read_iter()
2366 iocb->ki_pos += retval; in generic_file_read_iter()
2367 count -= retval; in generic_file_read_iter()
2369 iov_iter_revert(iter, count - iov_iter_count(iter)); in generic_file_read_iter()
2380 if (retval < 0 || !count || iocb->ki_pos >= size || in generic_file_read_iter()
2393 * page_cache_read - adds requested page to the page cache if not already there
2403 struct address_space *mapping = file->f_mapping; in page_cache_read() local
2410 return -ENOMEM; in page_cache_read()
2412 ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask); in page_cache_read()
2414 ret = mapping->a_ops->readpage(file, page); in page_cache_read()
2415 else if (ret == -EEXIST) in page_cache_read()
2436 struct address_space *mapping = file->f_mapping; in do_sync_mmap_readahead() local
2438 /* If we don't want any read-ahead, don't bother */ in do_sync_mmap_readahead()
2439 if (vma->vm_flags & VM_RAND_READ) in do_sync_mmap_readahead()
2441 if (!ra->ra_pages) in do_sync_mmap_readahead()
2444 if (vma->vm_flags & VM_SEQ_READ) { in do_sync_mmap_readahead()
2445 page_cache_sync_readahead(mapping, ra, file, offset, in do_sync_mmap_readahead()
2446 ra->ra_pages); in do_sync_mmap_readahead()
2451 if (ra->mmap_miss < MMAP_LOTSAMISS * 10) in do_sync_mmap_readahead()
2452 ra->mmap_miss++; in do_sync_mmap_readahead()
2456 * stop bothering with read-ahead. It will only hurt. in do_sync_mmap_readahead()
2458 if (ra->mmap_miss > MMAP_LOTSAMISS) in do_sync_mmap_readahead()
2462 * mmap read-around in do_sync_mmap_readahead()
2464 ra->start = max_t(long, 0, offset - ra->ra_pages / 2); in do_sync_mmap_readahead()
2465 ra->size = ra->ra_pages; in do_sync_mmap_readahead()
2466 ra->async_size = ra->ra_pages / 4; in do_sync_mmap_readahead()
2467 ra_submit(ra, mapping, file); in do_sync_mmap_readahead()
2480 struct address_space *mapping = file->f_mapping; in do_async_mmap_readahead() local
2482 /* If we don't want any read-ahead, don't bother */ in do_async_mmap_readahead()
2483 if (vma->vm_flags & VM_RAND_READ) in do_async_mmap_readahead()
2485 if (ra->mmap_miss > 0) in do_async_mmap_readahead()
2486 ra->mmap_miss--; in do_async_mmap_readahead()
2488 page_cache_async_readahead(mapping, ra, file, in do_async_mmap_readahead()
2489 page, offset, ra->ra_pages); in do_async_mmap_readahead()
2493 * filemap_fault - read in file data for page fault handling
2497 * mapped memory region to read in file data during a page fault.
2503 * vma->vm_mm->mmap_sem must be held on entry.
2518 struct file *file = vmf->vma->vm_file; in filemap_fault()
2519 struct address_space *mapping = file->f_mapping; in filemap_fault() local
2520 struct file_ra_state *ra = &file->f_ra; in filemap_fault()
2521 struct inode *inode = mapping->host; in filemap_fault()
2522 pgoff_t offset = vmf->pgoff; in filemap_fault()
2534 page = find_get_page(mapping, offset); in filemap_fault()
2535 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { in filemap_fault()
2540 do_async_mmap_readahead(vmf->vma, ra, file, page, offset); in filemap_fault()
2543 do_sync_mmap_readahead(vmf->vma, ra, file, offset); in filemap_fault()
2545 count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT); in filemap_fault()
2548 page = find_get_page(mapping, offset); in filemap_fault()
2553 if (!lock_page_or_retry(page, vmf->vma->vm_mm, vmf->flags)) { in filemap_fault()
2559 if (unlikely(page->mapping != mapping)) { in filemap_fault()
2564 VM_BUG_ON_PAGE(page->index != offset, page); in filemap_fault()
2568 * that it's up-to-date. If not, it is going to be due to an error. in filemap_fault()
2584 vmf->page = page; in filemap_fault()
2592 error = page_cache_read(file, offset, vmf->gfp_mask); in filemap_fault()
2607 if (error == -ENOMEM) in filemap_fault()
2613 * Umm, take care of errors if the page isn't up-to-date. in filemap_fault()
2614 * Try to re-read it _once_. We do this synchronously, in filemap_fault()
2619 error = mapping->a_ops->readpage(file, page); in filemap_fault()
2623 error = -EIO; in filemap_fault()
2641 struct file *file = vmf->vma->vm_file; in filemap_map_pages()
2642 struct address_space *mapping = file->f_mapping; in filemap_map_pages() local
2648 radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start_pgoff) { in filemap_map_pages()
2686 if (page->mapping != mapping || !PageUptodate(page)) in filemap_map_pages()
2689 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); in filemap_map_pages()
2690 if (page->index >= max_idx) in filemap_map_pages()
2693 if (file->f_ra.mmap_miss > 0) in filemap_map_pages()
2694 file->f_ra.mmap_miss--; in filemap_map_pages()
2696 vmf->address += (iter.index - last_pgoff) << PAGE_SHIFT; in filemap_map_pages()
2697 if (vmf->pte) in filemap_map_pages()
2698 vmf->pte += iter.index - last_pgoff; in filemap_map_pages()
2710 if (pmd_trans_huge(*vmf->pmd)) in filemap_map_pages()
2721 struct page *page = vmf->page; in filemap_page_mkwrite()
2722 struct inode *inode = file_inode(vmf->vma->vm_file); in filemap_page_mkwrite()
2725 sb_start_pagefault(inode->i_sb); in filemap_page_mkwrite()
2726 file_update_time(vmf->vma->vm_file); in filemap_page_mkwrite()
2728 if (page->mapping != inode->i_mapping) { in filemap_page_mkwrite()
2741 sb_end_pagefault(inode->i_sb); in filemap_page_mkwrite()
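A minimal sketch of how these fault handlers are typically wired together; generic_file_vm_ops in this file does exactly this, and a filesystem with its own ->page_mkwrite would substitute that handler (the foo_* name is hypothetical).

static const struct vm_operations_struct foo_file_vm_ops = {
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,
        .page_mkwrite   = filemap_page_mkwrite,
};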
2755 struct address_space *mapping = file->f_mapping; in generic_file_mmap() local
2757 if (!mapping->a_ops->readpage) in generic_file_mmap()
2758 return -ENOEXEC; in generic_file_mmap()
2760 vma->vm_ops = &generic_file_vm_ops; in generic_file_mmap()
2765 * This is for filesystems which do not implement ->writepage.
2769 if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) in generic_file_readonly_mmap()
2770 return -EINVAL; in generic_file_readonly_mmap()
2776 return -ENOSYS; in filemap_page_mkwrite()
2780 return -ENOSYS; in generic_file_mmap()
2784 return -ENOSYS; in generic_file_readonly_mmap()
2798 page = ERR_PTR(-EIO); in wait_on_page_read()
2804 static struct page *do_read_cache_page(struct address_space *mapping, in do_read_cache_page() argument
2807 void *data, in do_read_cache_page() argument
2813 page = find_get_page(mapping, index); in do_read_cache_page()
2817 return ERR_PTR(-ENOMEM); in do_read_cache_page()
2818 err = add_to_page_cache_lru(page, mapping, index, gfp); in do_read_cache_page()
2821 if (err == -EEXIST) in do_read_cache_page()
2828 err = filler(data, page); in do_read_cache_page()
2852 * the page is truncated, the data is still valid if PageUptodate as in do_read_cache_page()
2855 * Case c, the page may be truncated but in itself, the data may still in do_read_cache_page()
2858 * otherwise serialising on page lock to stabilise the mapping gives in do_read_cache_page()
2862 * will be a race with remove_mapping that determines if the mapping in do_read_cache_page()
2863 * is valid on unlock but otherwise the data is valid and there is in do_read_cache_page()
2881 if (!page->mapping) { in do_read_cache_page()
2908 * read_cache_page - read into page cache, fill it if needed
2909 * @mapping: the page's address_space
2912 * @data: first arg to filler(data, page) function, often left as NULL
2917 * If the page does not get brought uptodate, return -EIO.
2919 struct page *read_cache_page(struct address_space *mapping, in read_cache_page() argument
2922 void *data) in read_cache_page() argument
2924 return do_read_cache_page(mapping, index, filler, data, mapping_gfp_mask(mapping)); in read_cache_page()
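A minimal usage sketch (the caller is hypothetical): most filesystems reach read_cache_page() through the read_mapping_page() wrapper, which supplies the mapping's ->readpage as the filler.

        struct page *page;

        page = read_mapping_page(mapping, index, NULL);
        if (IS_ERR(page))
                return PTR_ERR(page);

        /* the page is uptodate here; copy from or map it as needed */

        put_page(page);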
2929 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
2930 * @mapping: the page's address_space
2934 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
2937 * If the page does not get brought uptodate, return -EIO.
2939 struct page *read_cache_page_gfp(struct address_space *mapping, in read_cache_page_gfp() argument
2943 filler_t *filler = (filler_t *)mapping->a_ops->readpage; in read_cache_page_gfp()
2945 return do_read_cache_page(mapping, index, filler, NULL, gfp); in read_cache_page_gfp()
2958 struct file *file = iocb->ki_filp; in generic_write_checks()
2959 struct inode *inode = file->f_mapping->host; in generic_write_checks()
2967 if (iocb->ki_flags & IOCB_APPEND) in generic_write_checks()
2968 iocb->ki_pos = i_size_read(inode); in generic_write_checks()
2970 pos = iocb->ki_pos; in generic_write_checks()
2972 if ((iocb->ki_flags & IOCB_NOWAIT) && !(iocb->ki_flags & IOCB_DIRECT)) in generic_write_checks()
2973 return -EINVAL; in generic_write_checks()
2976 if (iocb->ki_pos >= limit) { in generic_write_checks()
2978 return -EFBIG; in generic_write_checks()
2980 iov_iter_truncate(from, limit - (unsigned long)pos); in generic_write_checks()
2987 !(file->f_flags & O_LARGEFILE))) { in generic_write_checks()
2989 return -EFBIG; in generic_write_checks()
2990 iov_iter_truncate(from, MAX_NON_LFS - (unsigned long)pos); in generic_write_checks()
2996 * If we have written data it becomes a short write. If we have in generic_write_checks()
2997 * exceeded without writing data we send a signal and return EFBIG. in generic_write_checks()
3000 if (unlikely(pos >= inode->i_sb->s_maxbytes)) in generic_write_checks()
3001 return -EFBIG; in generic_write_checks()
3003 iov_iter_truncate(from, inode->i_sb->s_maxbytes - pos); in generic_write_checks()
3008 int pagecache_write_begin(struct file *file, struct address_space *mapping, in pagecache_write_begin() argument
3012 const struct address_space_operations *aops = mapping->a_ops; in pagecache_write_begin()
3014 return aops->write_begin(file, mapping, pos, len, flags, in pagecache_write_begin()
3019 int pagecache_write_end(struct file *file, struct address_space *mapping, in pagecache_write_end() argument
3023 const struct address_space_operations *aops = mapping->a_ops; in pagecache_write_end()
3025 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); in pagecache_write_end()
3032 struct file *file = iocb->ki_filp; in generic_file_direct_write()
3033 struct address_space *mapping = file->f_mapping; in generic_file_direct_write() local
3034 struct inode *inode = mapping->host; in generic_file_direct_write()
3035 loff_t pos = iocb->ki_pos; in generic_file_direct_write()
3041 end = (pos + write_len - 1) >> PAGE_SHIFT; in generic_file_direct_write()
3043 if (iocb->ki_flags & IOCB_NOWAIT) { in generic_file_direct_write()
3045 if (filemap_range_has_page(inode->i_mapping, pos, in generic_file_direct_write()
3047 return -EAGAIN; in generic_file_direct_write()
3049 written = filemap_write_and_wait_range(mapping, pos, in generic_file_direct_write()
3050 pos + write_len - 1); in generic_file_direct_write()
3057 * the new data. We invalidate clean cached page from the region we're in generic_file_direct_write()
3059 * without clobbering -EIOCBQUEUED from ->direct_IO(). in generic_file_direct_write()
3061 written = invalidate_inode_pages2_range(mapping, in generic_file_direct_write()
3068 if (written == -EBUSY) in generic_file_direct_write()
3073 written = mapping->a_ops->direct_IO(iocb, from); in generic_file_direct_write()
3077 * cached by non-direct readahead, or faulted in by get_user_pages() in generic_file_direct_write()
3088 if (mapping->nrpages) in generic_file_direct_write()
3089 invalidate_inode_pages2_range(mapping, in generic_file_direct_write()
3094 write_len -= written; in generic_file_direct_write()
3095 if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { in generic_file_direct_write()
3099 iocb->ki_pos = pos; in generic_file_direct_write()
3101 iov_iter_revert(from, write_len - iov_iter_count(from)); in generic_file_direct_write()
3111 struct page *grab_cache_page_write_begin(struct address_space *mapping, in grab_cache_page_write_begin() argument
3120 page = pagecache_get_page(mapping, index, fgp_flags, in grab_cache_page_write_begin()
3121 mapping_gfp_mask(mapping)); in grab_cache_page_write_begin()
3132 struct address_space *mapping = file->f_mapping; in generic_perform_write() local
3133 const struct address_space_operations *a_ops = mapping->a_ops; in generic_perform_write()
3145 offset = (pos & (PAGE_SIZE - 1)); in generic_perform_write()
3146 bytes = min_t(unsigned long, PAGE_SIZE - offset, in generic_perform_write()
3154 * up-to-date. in generic_perform_write()
3161 status = -EFAULT; in generic_perform_write()
3166 status = -EINTR; in generic_perform_write()
3170 status = a_ops->write_begin(file, mapping, pos, bytes, flags, in generic_perform_write()
3175 if (mapping_writably_mapped(mapping)) in generic_perform_write()
3181 status = a_ops->write_end(file, mapping, pos, bytes, copied, in generic_perform_write()
3192 * If we were unable to copy any data at all, we must in generic_perform_write()
3199 bytes = min_t(unsigned long, PAGE_SIZE - offset, in generic_perform_write()
3206 balance_dirty_pages_ratelimited(mapping); in generic_perform_write()
3214 * __generic_file_write_iter - write data to a file
3216 * @from: iov_iter with data to write
3218 * This function does all the work needed for actually writing data to a
3226 * This function does *not* take care of syncing data in case of O_SYNC write.
3232 struct file *file = iocb->ki_filp; in __generic_file_write_iter()
3233 struct address_space * mapping = file->f_mapping; in __generic_file_write_iter() local
3234 struct inode *inode = mapping->host; in __generic_file_write_iter()
3240 current->backing_dev_info = inode_to_bdi(inode); in __generic_file_write_iter()
3249 if (iocb->ki_flags & IOCB_DIRECT) { in __generic_file_write_iter()
3258 * page-cache pages correctly). in __generic_file_write_iter()
3263 status = generic_perform_write(file, from, pos = iocb->ki_pos); in __generic_file_write_iter()
3267 * direct-written, or the error code if that was zero. Note in __generic_file_write_iter()
3268 * that this differs from normal direct-io semantics, which in __generic_file_write_iter()
3269 * will return -EFOO even if some bytes were written. in __generic_file_write_iter()
3280 endbyte = pos + status - 1; in __generic_file_write_iter()
3281 err = filemap_write_and_wait_range(mapping, pos, endbyte); in __generic_file_write_iter()
3283 iocb->ki_pos = endbyte + 1; in __generic_file_write_iter()
3285 invalidate_mapping_pages(mapping, in __generic_file_write_iter()
3291 * the number of bytes which were direct-written in __generic_file_write_iter()
3295 written = generic_perform_write(file, from, iocb->ki_pos); in __generic_file_write_iter()
3297 iocb->ki_pos += written; in __generic_file_write_iter()
3300 current->backing_dev_info = NULL; in __generic_file_write_iter()
3306 * generic_file_write_iter - write data to a file
3308 * @from: iov_iter with data to write
3316 struct file *file = iocb->ki_filp; in generic_file_write_iter()
3317 struct inode *inode = file->f_mapping->host; in generic_file_write_iter()
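A minimal sketch, assuming a hypothetical foo filesystem, of wiring the generic read/write/mmap helpers from this file into a file_operations table.

const struct file_operations foo_file_operations = {
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .fsync          = generic_file_fsync,
        .splice_read    = generic_file_splice_read,
};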
3333 * try_to_release_page() - release old fs-specific metadata on a page
3338 * The address_space is to try to release any data against the page
3339 * (presumably at page->private). If the release was successful, return '1'.
3351 struct address_space * const mapping = page->mapping; in try_to_release_page() local
3357 if (mapping && mapping->a_ops->releasepage) in try_to_release_page()
3358 return mapping->a_ops->releasepage(page, gfp_mask); in try_to_release_page()