/kernel/linux/linux-5.10/fs/btrfs/tests/ |
D | extent-io-tests.c |
      64 struct page *locked_page = NULL; in test_find_delalloc() local
     105 locked_page = page; in test_find_delalloc()
     116 found = find_lock_delalloc_range(inode, locked_page, &start, in test_find_delalloc()
     128 unlock_page(locked_page); in test_find_delalloc()
     129 put_page(locked_page); in test_find_delalloc()
     138 locked_page = find_lock_page(inode->i_mapping, in test_find_delalloc()
     140 if (!locked_page) { in test_find_delalloc()
     147 found = find_lock_delalloc_range(inode, locked_page, &start, in test_find_delalloc()
     165 put_page(locked_page); in test_find_delalloc()
     173 locked_page = find_lock_page(inode->i_mapping, test_start >> in test_find_delalloc()
     [all …]
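
The self-test above exercises find_lock_delalloc_range() against a page it has looked up and locked itself. A minimal sketch of the page-cache handling it relies on; work_on_cached_page() is a hypothetical helper name, not code from the test:

/*
 * Minimal sketch (not the test code itself): look up a page-cache page,
 * keep it locked while it is inspected, then release lock and reference.
 */
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static int work_on_cached_page(struct address_space *mapping, pgoff_t index)
{
	/* find_lock_page() returns the page locked, with a reference held */
	struct page *locked_page = find_lock_page(mapping, index);

	if (!locked_page)
		return -ENOENT;		/* not present in the page cache */

	/* ... inspect the page; it cannot be truncated while it stays locked ... */

	unlock_page(locked_page);	/* drop the page lock ...            */
	put_page(locked_page);		/* ... and the reference from lookup */
	return 0;
}
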
|
/kernel/linux/linux-5.10/fs/ocfs2/ |
D | mmap.c |
      58 struct page *locked_page = NULL; in __ocfs2_page_mkwrite() local
      96 &locked_page, &fsdata, di_bh, page); in __ocfs2_page_mkwrite()
     104 if (!locked_page) { in __ocfs2_page_mkwrite()
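
The __ocfs2_page_mkwrite() hits show a write-preparation call that may or may not hand back a locked page, so the caller checks locked_page before touching it. A hedged sketch of that caller-side pattern; prepare_write() and fault_in_page() are hypothetical stand-ins, not the ocfs2 API:

/*
 * Sketch of the "check the page you were handed" idiom seen above: the
 * preparation helper may return success without handing back a page, so the
 * caller must not assume locked_page is set.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>

static int prepare_write(struct address_space *mapping, pgoff_t index,
			 struct page **ret_page)
{
	struct page *page = find_lock_page(mapping, index);

	if (!page)
		return 0;	/* nothing handed back; the caller must cope */
	*ret_page = page;	/* caller now owns the lock and the reference */
	return 0;
}

static int fault_in_page(struct address_space *mapping, pgoff_t index)
{
	struct page *locked_page = NULL;
	int ret;

	ret = prepare_write(mapping, index, &locked_page);
	if (ret)
		return ret;

	if (!locked_page)
		return -EAGAIN;		/* no page was locked for us; retry */

	set_page_dirty(locked_page);
	unlock_page(locked_page);
	put_page(locked_page);
	return 0;
}
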
|
/kernel/linux/linux-5.10/fs/ufs/ |
D | balloc.c |
     241 sector_t newb, struct page *locked_page) in ufs_change_blocknr() argument
     257 BUG_ON(!locked_page); in ufs_change_blocknr()
     258 BUG_ON(!PageLocked(locked_page)); in ufs_change_blocknr()
     260 cur_index = locked_page->index; in ufs_change_blocknr()
     277 page = locked_page; in ufs_change_blocknr()
     346 struct page *locked_page) in ufs_new_fragments() argument
     426 newcount - oldcount, locked_page != NULL); in ufs_new_fragments()
     450 locked_page != NULL); in ufs_new_fragments()
     471 locked_page != NULL); in ufs_new_fragments()
     475 uspi->s_sbbase + result, locked_page); in ufs_new_fragments()
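
ufs_change_blocknr() trusts its caller to pass a page that is already locked and asserts that before reading the page index; ufs_new_fragments() only forwards whether such a page exists. A small sketch of the precondition pattern; relocate_range() is a hypothetical name:

/*
 * Sketch of the precondition checks in the ufs_change_blocknr() hits: the
 * caller must supply a page it has already locked, so the index read below
 * cannot race with truncation.
 */
#include <linux/bug.h>
#include <linux/pagemap.h>

static void relocate_range(struct page *locked_page)
{
	pgoff_t cur_index;

	BUG_ON(!locked_page);			/* a page must be supplied ... */
	BUG_ON(!PageLocked(locked_page));	/* ... and already be locked   */

	cur_index = locked_page->index;		/* stable while the lock is held */

	/* ... work through neighbouring pages, skipping cur_index, which the
	 *     caller already holds locked ... */
}
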
|
D | inode.c |
     222 int *err, struct page *locked_page) in ufs_extend_tail() argument
     241 locked_page); in ufs_extend_tail()
     257 int *new, struct page *locked_page) in ufs_inode_getfrag() argument
     290 goal, nfrags, err, locked_page); in ufs_inode_getfrag()
     336 int *new, struct page *locked_page) in ufs_inode_getblock() argument
     371 uspi->s_fpb, err, locked_page); in ufs_inode_getblock()
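
In the ufs_inode_getfrag()/ufs_inode_getblock() hits the locked page is simply threaded down the allocation chain; the lowest level only cares whether it exists (the "locked_page != NULL" arguments above). A sketch of that pass-through, with both function names being hypothetical:

/*
 * Sketch of the pass-through pattern above: the mid-level helper forwards
 * the caller's locked page untouched, and the allocator only receives the
 * fact that such a page exists.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>

/* hypothetical low-level allocator */
static u64 alloc_fragments(struct inode *inode, unsigned int count,
			   bool caller_holds_page)
{
	/* ... allocate 'count' fragments; caller_holds_page mirrors the
	 *     "locked_page != NULL" arguments in the hits above ... */
	return 0;	/* placeholder: first block of the allocation */
}

/* hypothetical mid-level helper: forwards locked_page without touching it */
static u64 get_fragment(struct inode *inode, sector_t goal, int *err,
			struct page *locked_page)
{
	*err = 0;
	return alloc_fragments(inode, 1, locked_page != NULL);
}
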
|
/kernel/linux/linux-5.10/fs/ceph/ |
D | addr.c |
    1602 struct page *locked_page = NULL; in ceph_page_mkwrite() local
    1605 locked_page = page; in ceph_page_mkwrite()
    1607 err = ceph_uninline_data(vma->vm_file, locked_page); in ceph_page_mkwrite()
    1608 if (locked_page) in ceph_page_mkwrite()
    1609 unlock_page(locked_page); in ceph_page_mkwrite()
    1695 void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, in ceph_fill_inline_data() argument
    1701 if (locked_page) { in ceph_fill_inline_data()
    1702 page = locked_page; in ceph_fill_inline_data()
    1719 inode, ceph_vinop(inode), len, locked_page); in ceph_fill_inline_data()
    1727 if (page != locked_page) { in ceph_fill_inline_data()
    [all …]
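
ceph_fill_inline_data() reuses the caller's locked page when it covers the data being filled and otherwise grabs its own, then releases only what it grabbed (the "page != locked_page" check). A sketch of that reuse-or-grab idiom; fill_first_page() is a hypothetical name, not the ceph API:

/*
 * Sketch: use the page the caller already locked when it is the one needed,
 * otherwise grab a page here and release only what was grabbed here.
 */
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/string.h>

static void fill_first_page(struct address_space *mapping,
			    struct page *locked_page,
			    const void *data, size_t len)
{
	struct page *page;
	void *kaddr;

	if (locked_page) {
		page = locked_page;		/* caller holds lock + reference */
	} else {
		page = grab_cache_page(mapping, 0);	/* locked, referenced */
		if (!page)
			return;
	}

	kaddr = kmap_atomic(page);
	memcpy(kaddr, data, min_t(size_t, len, PAGE_SIZE));
	kunmap_atomic(kaddr);

	if (page != locked_page) {		/* only undo what was done here */
		unlock_page(page);
		put_page(page);
	}
}
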
|
D | super.h |
     973 extern int ceph_fill_inode(struct inode *inode, struct page *locked_page,
     992 extern int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
    1168 extern int ceph_uninline_data(struct file *filp, struct page *locked_page);
    1180 extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
|
D | inode.c |
     742 int ceph_fill_inode(struct inode *inode, struct page *locked_page, in ceph_fill_inode() argument
    1017 (locked_page || (info_caps & cache_caps))) in ceph_fill_inode()
    1031 ceph_fill_inline_data(inode, locked_page, in ceph_fill_inode()
    2283 int __ceph_do_getattr(struct inode *inode, struct page *locked_page, in __ceph_do_getattr() argument
    2310 req->r_locked_page = locked_page; in __ceph_do_getattr()
    2312 if (locked_page && err == 0) { in __ceph_do_getattr()
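
__ceph_do_getattr() stashes the caller's locked page in the request (req->r_locked_page) so the reply path can copy inline data straight into it. A hedged sketch of that plumbing; struct attr_request, send_request() and do_getattr_cached() are hypothetical names, not the ceph_mds_request API:

/*
 * Sketch: the caller's locked page rides inside the request so the reply
 * handler can fill it, and the caller keeps the lock throughout.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>

struct attr_request {
	struct page *r_locked_page;	/* optional: filled by the reply path */
	/* ... other request fields ... */
};

/* stand-in for the real RPC: pretend the reply filled r_locked_page */
static int send_request(struct inode *inode, struct attr_request *req)
{
	return 0;
}

static int do_getattr_cached(struct inode *inode, struct page *locked_page)
{
	struct attr_request req = { .r_locked_page = locked_page };
	int err;

	err = send_request(inode, &req);

	if (locked_page && err == 0)
		SetPageUptodate(locked_page);	/* in this sketch the reply filled it */

	return err;
}
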
|
/kernel/linux/linux-5.10/fs/btrfs/ |
D | extent_io.h |
     272 struct page *locked_page,
     317 struct page *locked_page, u64 *start,
|
D | inode.c |
      85 struct page *locked_page,
     109 struct page *locked_page, in btrfs_cleanup_ordered_extents() argument
     114 u64 page_start = page_offset(locked_page); in btrfs_cleanup_ordered_extents()
     369 struct page *locked_page; member
     728 if (async_chunk->locked_page && in compress_file_range()
     729 (page_offset(async_chunk->locked_page) >= start && in compress_file_range()
     730 page_offset(async_chunk->locked_page)) <= end) { in compress_file_range()
     731 __set_page_dirty_nobuffers(async_chunk->locked_page); in compress_file_range()
     793 ret = cow_file_range(inode, async_chunk->locked_page, in submit_compressed_extents()
     813 else if (ret && async_chunk->locked_page) in submit_compressed_extents()
     [all …]
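
The async_chunk hits show the deferred compression work item remembering which page its submitter had locked, and compress_file_range() re-dirtying that page when it falls inside the byte range being handed back to ordinary writeback. A sketch of that check; struct async_work and redirty_submitter_page() are hypothetical stand-ins for btrfs's struct async_chunk and its users:

/*
 * Sketch: the work item records the submitter's locked page, and later code
 * checks whether that page lies inside the byte range it is about to hand
 * back before re-dirtying it.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>

struct async_work {
	struct page *locked_page;	/* page the submitter holds locked, may be NULL */
	u64 start;			/* byte range covered by this work item */
	u64 end;
};

static void redirty_submitter_page(struct async_work *work, u64 start, u64 end)
{
	if (work->locked_page &&
	    page_offset(work->locked_page) >= start &&
	    page_offset(work->locked_page) <= end) {
		/* hand the page back to ordinary writeback */
		__set_page_dirty_nobuffers(work->locked_page);
	}
}
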
|
D | extent_io.c |
    1811 struct page *locked_page,
    1816 struct page *locked_page, in __unlock_for_delalloc() argument
    1822 ASSERT(locked_page); in __unlock_for_delalloc()
    1823 if (index == locked_page->index && end_index == index) in __unlock_for_delalloc()
    1826 __process_pages_contig(inode->i_mapping, locked_page, index, end_index, in __unlock_for_delalloc()
    1831 struct page *locked_page, in lock_delalloc_pages() argument
    1840 ASSERT(locked_page); in lock_delalloc_pages()
    1841 if (index == locked_page->index && index == end_index) in lock_delalloc_pages()
    1844 ret = __process_pages_contig(inode->i_mapping, locked_page, index, in lock_delalloc_pages()
    1847 __unlock_for_delalloc(inode, locked_page, delalloc_start, in lock_delalloc_pages()
    [all …]
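
lock_delalloc_pages() and __unlock_for_delalloc() walk a contiguous byte range but skip the page the caller already holds, and return early when that page is the entire range. A sketch of the unlock side of that idiom; unlock_range_except() is a hypothetical helper, not the btrfs __process_pages_contig() machinery:

/*
 * Sketch: unlock every page-cache page covering [start, end], but leave
 * locked_page alone because the caller still needs it locked.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>

static void unlock_range_except(struct address_space *mapping,
				struct page *locked_page, u64 start, u64 end)
{
	pgoff_t index = start >> PAGE_SHIFT;
	pgoff_t end_index = end >> PAGE_SHIFT;

	/* the caller's page is the whole range: nothing else to unlock */
	if (index == locked_page->index && index == end_index)
		return;

	for (; index <= end_index; index++) {
		struct page *page;

		if (index == locked_page->index)	/* leave it to the caller */
			continue;

		page = find_get_page(mapping, index);
		if (!page)
			continue;		/* hole in the page cache */

		unlock_page(page);
		put_page(page);			/* drop the lookup reference */
	}
}
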
|
D | ctree.h | 3067 int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page,
|