Lines Matching refs:page

961 struct page *page; in set_range_dirty() local
964 page = find_get_page(tree->mapping, index); in set_range_dirty()
965 BUG_ON(!page); in set_range_dirty()
966 __set_page_dirty_nobuffers(page); in set_range_dirty()
967 page_cache_release(page); in set_range_dirty()
981 struct page *page; in set_range_writeback() local
984 page = find_get_page(tree->mapping, index); in set_range_writeback()
985 BUG_ON(!page); in set_range_writeback()
986 set_page_writeback(page); in set_range_writeback()
987 page_cache_release(page); in set_range_writeback()
1122 struct page *locked_page, in __unlock_for_delalloc()
1126 struct page *pages[16]; in __unlock_for_delalloc()
1152 struct page *locked_page, in lock_delalloc_pages()
1160 struct page *pages[16]; in lock_delalloc_pages()
1221 struct page *locked_page, in find_lock_delalloc_range()
1298 u64 start, u64 end, struct page *locked_page, in extent_clear_unlock_delalloc()
1306 struct page *pages[16]; in extent_clear_unlock_delalloc()
1416 struct page *page;
1420 page = grab_cache_page(tree->mapping, index);
1421 if (!page) {
1425 if (IS_ERR(page)) {
1426 err = PTR_ERR(page);
1442 page = find_get_page(tree->mapping, index);
1443 unlock_page(page);
1444 page_cache_release(page);
1457 struct page *page;
1460 page = find_get_page(tree->mapping, index);
1461 unlock_page(page);
1462 page_cache_release(page);
1581 struct page *page) in check_page_uptodate() argument
1583 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; in check_page_uptodate()
1586 SetPageUptodate(page); in check_page_uptodate()
1595 struct page *page) in check_page_locked() argument
1597 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; in check_page_locked()
1600 unlock_page(page); in check_page_locked()
1609 struct page *page) in check_page_writeback() argument
1611 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; in check_page_writeback()
1614 end_page_writeback(page); in check_page_writeback()
1640 struct page *page = bvec->bv_page; in end_bio_extent_writepage() local
1641 tree = &BTRFS_I(page->mapping->host)->io_tree; in end_bio_extent_writepage()
1643 start = ((u64)page->index << PAGE_CACHE_SHIFT) + in end_bio_extent_writepage()
1655 ret = tree->ops->writepage_end_io_hook(page, start, in end_bio_extent_writepage()
1663 ret = tree->ops->writepage_io_failed_hook(bio, page, in end_bio_extent_writepage()
1673 ClearPageUptodate(page); in end_bio_extent_writepage()
1674 SetPageError(page); in end_bio_extent_writepage()
1680 end_page_writeback(page); in end_bio_extent_writepage()
1682 check_page_writeback(tree, page); in end_bio_extent_writepage()
1713 struct page *page = bvec->bv_page; in end_bio_extent_readpage() local
1714 tree = &BTRFS_I(page->mapping->host)->io_tree; in end_bio_extent_readpage()
1716 start = ((u64)page->index << PAGE_CACHE_SHIFT) + in end_bio_extent_readpage()
1729 ret = tree->ops->readpage_end_io_hook(page, start, end, in end_bio_extent_readpage()
1736 ret = tree->ops->readpage_io_failed_hook(bio, page, in end_bio_extent_readpage()
1755 SetPageUptodate(page); in end_bio_extent_readpage()
1757 ClearPageUptodate(page); in end_bio_extent_readpage()
1758 SetPageError(page); in end_bio_extent_readpage()
1760 unlock_page(page); in end_bio_extent_readpage()
1763 check_page_uptodate(tree, page); in end_bio_extent_readpage()
1765 ClearPageUptodate(page); in end_bio_extent_readpage()
1766 SetPageError(page); in end_bio_extent_readpage()
1768 check_page_locked(tree, page); in end_bio_extent_readpage()
1789 struct page *page = bvec->bv_page; in end_bio_extent_preparewrite() local
1790 tree = &BTRFS_I(page->mapping->host)->io_tree; in end_bio_extent_preparewrite()
1792 start = ((u64)page->index << PAGE_CACHE_SHIFT) + in end_bio_extent_preparewrite()
1802 ClearPageUptodate(page); in end_bio_extent_preparewrite()
1803 SetPageError(page); in end_bio_extent_preparewrite()
1839 struct page *page = bvec->bv_page; in submit_one_bio() local
1844 start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset; in submit_one_bio()
1852 tree->ops->submit_bio_hook(page->mapping->host, rw, bio, in submit_one_bio()
1863 struct page *page, sector_t sector, in submit_extent_page() argument
1891 tree->ops->merge_bio_hook(page, offset, page_size, bio, in submit_extent_page()
1893 bio_add_page(bio, page, page_size, offset) < page_size) { in submit_extent_page()
1908 bio_add_page(bio, page, page_size, offset); in submit_extent_page()
1920 void set_page_extent_mapped(struct page *page) in set_page_extent_mapped() argument
1922 if (!PagePrivate(page)) { in set_page_extent_mapped()
1923 SetPagePrivate(page); in set_page_extent_mapped()
1924 page_cache_get(page); in set_page_extent_mapped()
1925 set_page_private(page, EXTENT_PAGE_PRIVATE); in set_page_extent_mapped()
1929 static void set_page_extent_head(struct page *page, unsigned long len) in set_page_extent_head() argument
1931 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2); in set_page_extent_head()
1940 struct page *page, in __extent_read_full_page() argument
1945 struct inode *inode = page->mapping->host; in __extent_read_full_page()
1946 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; in __extent_read_full_page()
1965 set_page_extent_mapped(page); in __extent_read_full_page()
1970 if (page->index == last_byte >> PAGE_CACHE_SHIFT) { in __extent_read_full_page()
1976 userpage = kmap_atomic(page, KM_USER0); in __extent_read_full_page()
1978 flush_dcache_page(page); in __extent_read_full_page()
1986 userpage = kmap_atomic(page, KM_USER0); in __extent_read_full_page()
1988 flush_dcache_page(page); in __extent_read_full_page()
1995 em = get_extent(inode, page, page_offset, cur, in __extent_read_full_page()
1998 SetPageError(page); in __extent_read_full_page()
2029 userpage = kmap_atomic(page, KM_USER0); in __extent_read_full_page()
2031 flush_dcache_page(page); in __extent_read_full_page()
2043 check_page_uptodate(tree, page); in __extent_read_full_page()
2053 SetPageError(page); in __extent_read_full_page()
2062 ret = tree->ops->readpage_io_hook(page, cur, in __extent_read_full_page()
2067 pnr -= page->index; in __extent_read_full_page()
2068 ret = submit_extent_page(READ, tree, page, in __extent_read_full_page()
2078 SetPageError(page); in __extent_read_full_page()
2083 if (!PageError(page)) in __extent_read_full_page()
2084 SetPageUptodate(page); in __extent_read_full_page()
2085 unlock_page(page); in __extent_read_full_page()
2090 int extent_read_full_page(struct extent_io_tree *tree, struct page *page, in extent_read_full_page() argument
2097 ret = __extent_read_full_page(tree, page, get_extent, &bio, 0, in extent_read_full_page()
2110 static int __extent_writepage(struct page *page, struct writeback_control *wbc, in __extent_writepage() argument
2113 struct inode *inode = page->mapping->host; in __extent_writepage()
2116 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; in __extent_writepage()
2141 WARN_ON(!PageLocked(page)); in __extent_writepage()
2143 if (page->index > end_index || in __extent_writepage()
2144 (page->index == end_index && !pg_offset)) { in __extent_writepage()
2145 page->mapping->a_ops->invalidatepage(page, 0); in __extent_writepage()
2146 unlock_page(page); in __extent_writepage()
2150 if (page->index == end_index) { in __extent_writepage()
2153 userpage = kmap_atomic(page, KM_USER0); in __extent_writepage()
2157 flush_dcache_page(page); in __extent_writepage()
2161 set_page_extent_mapped(page); in __extent_writepage()
2169 page, in __extent_writepage()
2177 tree->ops->fill_delalloc(inode, page, delalloc_start, in __extent_writepage()
2196 ret = tree->ops->writepage_start_hook(page, start, in __extent_writepage()
2200 redirty_page_for_writepage(wbc, page); in __extent_writepage()
2201 unlock_page(page); in __extent_writepage()
2217 tree->ops->writepage_end_io_hook(page, start, in __extent_writepage()
2231 tree->ops->writepage_end_io_hook(page, cur, in __extent_writepage()
2236 em = epd->get_extent(inode, page, pg_offset, cur, in __extent_writepage()
2239 SetPageError(page); in __extent_writepage()
2273 tree->ops->writepage_end_io_hook(page, cur, in __extent_writepage()
2299 ret = tree->ops->writepage_io_hook(page, cur, in __extent_writepage()
2305 SetPageError(page); in __extent_writepage()
2310 if (!PageWriteback(page)) { in __extent_writepage()
2313 page->index, (unsigned long long)cur, in __extent_writepage()
2317 ret = submit_extent_page(WRITE, tree, page, sector, in __extent_writepage()
2323 SetPageError(page); in __extent_writepage()
2332 set_page_writeback(page); in __extent_writepage()
2333 end_page_writeback(page); in __extent_writepage()
2337 unlock_page(page); in __extent_writepage()
2343 page->mapping->writeback_index = page->index + nr_written; in __extent_writepage()
2398 struct page *page = pvec.pages[i]; in extent_write_cache_pages() local
2408 tree->ops->write_cache_pages_lock_hook(page); in extent_write_cache_pages()
2410 lock_page(page); in extent_write_cache_pages()
2412 if (unlikely(page->mapping != mapping)) { in extent_write_cache_pages()
2413 unlock_page(page); in extent_write_cache_pages()
2417 if (!wbc->range_cyclic && page->index > end) { in extent_write_cache_pages()
2419 unlock_page(page); in extent_write_cache_pages()
2424 if (PageWriteback(page)) in extent_write_cache_pages()
2426 wait_on_page_writeback(page); in extent_write_cache_pages()
2429 if (PageWriteback(page) || in extent_write_cache_pages()
2430 !clear_page_dirty_for_io(page)) { in extent_write_cache_pages()
2431 unlock_page(page); in extent_write_cache_pages()
2435 ret = (*writepage)(page, wbc, data); in extent_write_cache_pages()
2438 unlock_page(page); in extent_write_cache_pages()
2472 int extent_write_full_page(struct extent_io_tree *tree, struct page *page, in extent_write_full_page() argument
2477 struct address_space *mapping = page->mapping; in extent_write_full_page()
2489 .range_start = page_offset(page) + PAGE_CACHE_SIZE, in extent_write_full_page()
2494 ret = __extent_writepage(page, wbc, &epd); in extent_write_full_page()
2509 struct page *page; in extent_write_locked_range() local
2529 page = find_get_page(mapping, start >> PAGE_CACHE_SHIFT); in extent_write_locked_range()
2530 if (clear_page_dirty_for_io(page)) in extent_write_locked_range()
2531 ret = __extent_writepage(page, &wbc_writepages, &epd); in extent_write_locked_range()
2534 tree->ops->writepage_end_io_hook(page, start, in extent_write_locked_range()
2537 unlock_page(page); in extent_write_locked_range()
2539 page_cache_release(page); in extent_write_locked_range()
2581 struct page *page = list_entry(pages->prev, struct page, lru); in extent_readpages() local
2583 prefetchw(&page->flags); in extent_readpages()
2584 list_del(&page->lru); in extent_readpages()
2589 if (!add_to_page_cache(page, mapping, in extent_readpages()
2590 page->index, GFP_KERNEL)) { in extent_readpages()
2593 page_cache_get(page); in extent_readpages()
2594 if (!pagevec_add(&pvec, page)) in extent_readpages()
2596 __extent_read_full_page(tree, page, get_extent, in extent_readpages()
2599 page_cache_release(page); in extent_readpages()
2615 struct page *page, unsigned long offset) in extent_invalidatepage() argument
2617 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT); in extent_invalidatepage()
2619 size_t blocksize = page->mapping->host->i_sb->s_blocksize; in extent_invalidatepage()
2638 struct inode *inode, struct page *page, in extent_commit_write() argument
2641 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; in extent_commit_write()
2643 set_page_extent_mapped(page); in extent_commit_write()
2644 set_page_dirty(page); in extent_commit_write()
2654 struct inode *inode, struct page *page, in extent_prepare_write() argument
2657 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT; in extent_prepare_write()
2673 set_page_extent_mapped(page); in extent_prepare_write()
2681 em = get_extent(inode, page, page_offset, block_start, in extent_prepare_write()
2691 if (!PageUptodate(page) && isnew && in extent_prepare_write()
2695 kaddr = kmap_atomic(page, KM_USER0); in extent_prepare_write()
2701 flush_dcache_page(page); in extent_prepare_write()
2706 !isnew && !PageUptodate(page) && in extent_prepare_write()
2724 ret = submit_extent_page(READ, tree, page, in extent_prepare_write()
2744 check_page_uptodate(tree, page); in extent_prepare_write()
2756 struct extent_io_tree *tree, struct page *page, in try_release_extent_state() argument
2759 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; in try_release_extent_state()
2781 struct extent_io_tree *tree, struct page *page, in try_release_extent_mapping() argument
2785 u64 start = (u64)page->index << PAGE_CACHE_SHIFT; in try_release_extent_mapping()
2789 page->mapping->host->i_size > 16 * 1024 * 1024) { in try_release_extent_mapping()
2821 return try_release_extent_state(map, tree, page, mask); in try_release_extent_mapping()
2942 static inline struct page *extent_buffer_page(struct extent_buffer *eb, in extent_buffer_page()
2945 struct page *p; in extent_buffer_page()
3012 struct page *page0, in alloc_extent_buffer()
3020 struct page *p; in alloc_extent_buffer()
3130 struct page *page; in clear_extent_buffer_dirty() local
3139 page = extent_buffer_page(eb, i); in clear_extent_buffer_dirty()
3140 if (!set && !PageDirty(page)) in clear_extent_buffer_dirty()
3143 lock_page(page); in clear_extent_buffer_dirty()
3145 set_page_extent_head(page, eb->len); in clear_extent_buffer_dirty()
3147 set_page_private(page, EXTENT_PAGE_PRIVATE); in clear_extent_buffer_dirty()
3157 start = (u64)page->index << PAGE_CACHE_SHIFT; in clear_extent_buffer_dirty()
3161 unlock_page(page); in clear_extent_buffer_dirty()
3165 clear_page_dirty_for_io(page); in clear_extent_buffer_dirty()
3166 spin_lock_irq(&page->mapping->tree_lock); in clear_extent_buffer_dirty()
3167 if (!PageDirty(page)) { in clear_extent_buffer_dirty()
3168 radix_tree_tag_clear(&page->mapping->page_tree, in clear_extent_buffer_dirty()
3169 page_index(page), in clear_extent_buffer_dirty()
3172 spin_unlock_irq(&page->mapping->tree_lock); in clear_extent_buffer_dirty()
3173 unlock_page(page); in clear_extent_buffer_dirty()
3193 struct page *page = extent_buffer_page(eb, i); in set_extent_buffer_dirty() local
3199 lock_page(page); in set_extent_buffer_dirty()
3201 set_page_extent_head(page, eb->len); in set_extent_buffer_dirty()
3202 } else if (PagePrivate(page) && in set_extent_buffer_dirty()
3203 page->private != EXTENT_PAGE_PRIVATE) { in set_extent_buffer_dirty()
3204 set_page_extent_mapped(page); in set_extent_buffer_dirty()
3207 set_extent_dirty(tree, page_offset(page), in set_extent_buffer_dirty()
3208 page_offset(page) + PAGE_CACHE_SIZE - 1, in set_extent_buffer_dirty()
3210 unlock_page(page); in set_extent_buffer_dirty()
3219 struct page *page; in clear_extent_buffer_uptodate() local
3228 page = extent_buffer_page(eb, i); in clear_extent_buffer_uptodate()
3229 if (page) in clear_extent_buffer_uptodate()
3230 ClearPageUptodate(page); in clear_extent_buffer_uptodate()
3239 struct page *page; in set_extent_buffer_uptodate() local
3247 page = extent_buffer_page(eb, i); in set_extent_buffer_uptodate()
3251 check_page_uptodate(tree, page); in set_extent_buffer_uptodate()
3254 SetPageUptodate(page); in set_extent_buffer_uptodate()
3262 struct page *page; in extent_range_uptodate() local
3273 page = find_get_page(tree->mapping, index); in extent_range_uptodate()
3274 uptodate = PageUptodate(page); in extent_range_uptodate()
3275 page_cache_release(page); in extent_range_uptodate()
3291 struct page *page; in extent_buffer_uptodate() local
3304 page = extent_buffer_page(eb, i); in extent_buffer_uptodate()
3305 if (!PageUptodate(page)) { in extent_buffer_uptodate()
3320 struct page *page; in read_extent_buffer_pages() local
3348 page = extent_buffer_page(eb, i); in read_extent_buffer_pages()
3350 if (!trylock_page(page)) in read_extent_buffer_pages()
3353 lock_page(page); in read_extent_buffer_pages()
3356 if (!PageUptodate(page)) in read_extent_buffer_pages()
3366 page = extent_buffer_page(eb, i); in read_extent_buffer_pages()
3368 page_cache_get(page); in read_extent_buffer_pages()
3369 if (!PageUptodate(page)) { in read_extent_buffer_pages()
3372 ClearPageError(page); in read_extent_buffer_pages()
3373 err = __extent_read_full_page(tree, page, in read_extent_buffer_pages()
3379 unlock_page(page); in read_extent_buffer_pages()
3390 page = extent_buffer_page(eb, i); in read_extent_buffer_pages()
3391 wait_on_page_locked(page); in read_extent_buffer_pages()
3392 if (!PageUptodate(page)) in read_extent_buffer_pages()
3403 page = extent_buffer_page(eb, i); in read_extent_buffer_pages()
3405 unlock_page(page); in read_extent_buffer_pages()
3417 struct page *page; in read_extent_buffer() local
3429 page = extent_buffer_page(eb, i); in read_extent_buffer()
3432 kaddr = kmap_atomic(page, KM_USER1); in read_extent_buffer()
3450 struct page *p; in map_private_extent_buffer()
3517 struct page *page; in memcmp_extent_buffer() local
3530 page = extent_buffer_page(eb, i); in memcmp_extent_buffer()
3534 kaddr = kmap_atomic(page, KM_USER0); in memcmp_extent_buffer()
3553 struct page *page; in write_extent_buffer() local
3565 page = extent_buffer_page(eb, i); in write_extent_buffer()
3566 WARN_ON(!PageUptodate(page)); in write_extent_buffer()
3569 kaddr = kmap_atomic(page, KM_USER1); in write_extent_buffer()
3585 struct page *page; in memset_extent_buffer() local
3596 page = extent_buffer_page(eb, i); in memset_extent_buffer()
3597 WARN_ON(!PageUptodate(page)); in memset_extent_buffer()
3600 kaddr = kmap_atomic(page, KM_USER0); in memset_extent_buffer()
3617 struct page *page; in copy_extent_buffer() local
3628 page = extent_buffer_page(dst, i); in copy_extent_buffer()
3629 WARN_ON(!PageUptodate(page)); in copy_extent_buffer()
3633 kaddr = kmap_atomic(page, KM_USER0); in copy_extent_buffer()
3644 static void move_pages(struct page *dst_page, struct page *src_page, in move_pages()
3664 static void copy_pages(struct page *dst_page, struct page *src_page, in copy_pages()
3775 int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page) in try_release_extent_buffer() argument
3777 u64 start = page_offset(page); in try_release_extent_buffer()
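
One pattern recurs throughout these matches: a page's byte range in the file is derived from page->index shifted by PAGE_CACHE_SHIFT (as in the start computations inside check_page_uptodate(), end_bio_extent_writepage(), __extent_writepage() and others). Below is a minimal illustrative sketch of that arithmetic only, written as standalone userspace C; it is not taken from the file above, and the PAGE_CACHE_SHIFT/PAGE_CACHE_SIZE values are local stand-ins assuming the common 4 KiB page-cache granularity.

/*
 * Sketch of the page-index -> file-offset arithmetic seen in the
 * matches above, e.g. (u64)page->index << PAGE_CACHE_SHIFT.
 * Assumption: 4 KiB page-cache granularity; the macros are stand-ins
 * for the old kernel names, not the kernel headers themselves.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_CACHE_SHIFT 12
#define PAGE_CACHE_SIZE  (1ULL << PAGE_CACHE_SHIFT)

/* First byte in the file covered by the page at this index. */
static uint64_t page_start(uint64_t index)
{
	return index << PAGE_CACHE_SHIFT;
}

/* Last byte covered by that page, inclusive. */
static uint64_t page_end(uint64_t index)
{
	return page_start(index) + PAGE_CACHE_SIZE - 1;
}

int main(void)
{
	uint64_t index = 3;

	printf("page %llu covers bytes %llu..%llu\n",
	       (unsigned long long)index,
	       (unsigned long long)page_start(index),
	       (unsigned long long)page_end(index));
	return 0;
}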