/fs/coda/ |
D | file.c |
    158 if (coda_inode->i_mapping == &coda_inode->i_data) in coda_file_mmap()
    159 coda_inode->i_mapping = host_inode->i_mapping; in coda_file_mmap()
    163 else if (coda_inode->i_mapping != host_inode->i_mapping) { in coda_file_mmap()
    253 if (coda_inode->i_mapping == &host_inode->i_data) { in coda_release()
    256 coda_inode->i_mapping = &coda_inode->i_data; in coda_release()
    280 err = filemap_write_and_wait_range(coda_inode->i_mapping, start, end); in coda_fsync()
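The coda hits show the common ->fsync shape: the file's data lives in a page cache mapping, and fsync comes down to filemap_write_and_wait_range() on that mapping. A minimal sketch of that step (the helper name is made up; coda additionally redirects i_mapping to the host inode first):

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    /* Sketch: write back and wait on the byte range [start, end] of an
     * inode's page cache, as a ->fsync implementation would. */
    static int example_fsync_range(struct file *file, loff_t start, loff_t end)
    {
        struct inode *inode = file_inode(file);

        return filemap_write_and_wait_range(inode->i_mapping, start, end);
    }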
|
/fs/nilfs2/ |
D | mdt.c |
    80 bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0); in nilfs_mdt_create_block()
    121 bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0); in nilfs_mdt_submit_block()
    365 page = find_lock_page(inode->i_mapping, index); in nilfs_mdt_forget_block()
    384 invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0) in nilfs_mdt_forget_block()
    456 mapping_set_gfp_mask(inode->i_mapping, gfp_mask); in nilfs_mdt_init()
    460 inode->i_mapping->a_ops = &def_mdt_aops; in nilfs_mdt_init()
    529 ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping); in nilfs_mdt_save_to_shadow_map()
    622 nilfs_clear_dirty_pages(inode->i_mapping, true); in nilfs_mdt_restore_from_shadow_map()
    623 nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data); in nilfs_mdt_restore_from_shadow_map()
|
D | gcinode.c |
    65 bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0); in nilfs_gccache_submit_read_data()
    167 mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); in nilfs_init_gcinode()
    168 inode->i_mapping->a_ops = &empty_aops; in nilfs_init_gcinode()
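Both nilfs2 files configure the mapping at inode-initialization time: clamp the allocation mask so page cache allocations cannot recurse into the filesystem, then install the address_space operations. A hedged sketch of that pattern (example_aops is a stand-in for def_mdt_aops or empty_aops):

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    static const struct address_space_operations example_aops; /* hypothetical */

    /* Sketch: prepare an internal (metadata/GC) inode's mapping so page cache
     * allocations for it never recurse back into the filesystem (GFP_NOFS). */
    static void example_init_internal_inode(struct inode *inode)
    {
        mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
        inode->i_mapping->a_ops = &example_aops;
    }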
|
/fs/f2fs/ |
D | verity.c |
    50 page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT, in pagecache_read()
    86 res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0, in pagecache_write()
    95 res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n, in pagecache_write()
    167 err = filemap_write_and_wait(inode->i_mapping); in f2fs_end_enable_verity()
    230 return read_mapping_page(inode->i_mapping, index, NULL); in f2fs_read_merkle_tree_page()
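pagecache_read() in the verity code pulls file contents through the page cache with read_mapping_page(). A rough sketch of such a helper against the struct-page API shown in the listing (not the exact f2fs code):

    #include <linux/pagemap.h>
    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Sketch: read @count bytes at byte offset @pos into @buf, assuming the
     * request does not cross a page boundary. */
    static int example_pagecache_read(struct inode *inode, void *buf,
                                      size_t count, loff_t pos)
    {
        struct page *page;
        size_t offset = pos & ~PAGE_MASK;
        void *addr;

        if (offset + count > PAGE_SIZE)
            return -EINVAL;

        /* Returns an uptodate, unlocked page with an elevated refcount. */
        page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT, NULL);
        if (IS_ERR(page))
            return PTR_ERR(page);

        addr = kmap_atomic(page);
        memcpy(buf, addr + offset, count);
        kunmap_atomic(addr);

        put_page(page);
        return 0;
    }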
|
/fs/freevxfs/ |
D | vxfs_inode.c |
    185 vip->vfs_inode.i_mapping->a_ops = &vxfs_aops; in vxfs_blkiget()
    217 pp = vxfs_get_page(ilistp->i_mapping, ino * VXFS_ISIZE / PAGE_SIZE); in __vxfs_iget()
    225 vip->vfs_inode.i_mapping->a_ops = &vxfs_aops; in __vxfs_iget()
    305 ip->i_mapping->a_ops = aops; in vxfs_iget()
    309 ip->i_mapping->a_ops = aops; in vxfs_iget()
    314 ip->i_mapping->a_ops = &vxfs_aops; in vxfs_iget()
|
/fs/cachefiles/ |
D | rdwr.c |
    80 struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping; in cachefiles_read_reissue()
    253 bmapping = d_backing_inode(object->backer)->i_mapping; in cachefiles_read_backing_file_one()
    415 ASSERT(inode->i_mapping->a_ops->bmap); in cachefiles_read_or_alloc_page()
    416 ASSERT(inode->i_mapping->a_ops->readpages); in cachefiles_read_or_alloc_page()
    434 block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0); in cachefiles_read_or_alloc_page()
    470 struct address_space *bmapping = d_backing_inode(object->backer)->i_mapping; in cachefiles_read_backing_file()
    714 ASSERT(inode->i_mapping->a_ops->bmap); in cachefiles_read_or_alloc_pages()
    715 ASSERT(inode->i_mapping->a_ops->readpages); in cachefiles_read_or_alloc_pages()
    742 block = inode->i_mapping->a_ops->bmap(inode->i_mapping, in cachefiles_read_or_alloc_pages()
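cachefiles first asserts that the backing filesystem provides ->bmap and ->readpages, then calls ->bmap directly to learn whether a given file block is backed by an on-disk block. A sketch of that probe on the old a_ops->bmap interface shown here (newer kernels route this through the bmap() helper instead):

    #include <linux/fs.h>

    /* Sketch: ask the mapping whether file block @block0 is allocated on disk.
     * A return of 0 conventionally means "hole / not present". */
    static sector_t example_probe_block(struct inode *inode, sector_t block0)
    {
        struct address_space *bmapping = inode->i_mapping;

        if (!bmapping->a_ops->bmap)
            return 0;

        return bmapping->a_ops->bmap(bmapping, block0);
    }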
|
/fs/ext4/ |
D | verity.c |
    50 page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT, in pagecache_read()
    86 res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0, in pagecache_write()
    95 res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n, in pagecache_write()
    211 err = filemap_write_and_wait(inode->i_mapping); in ext4_end_enable_verity()
    350 return read_mapping_page(inode->i_mapping, index, NULL); in ext4_read_merkle_tree_page()
|
/fs/9p/ |
D | vfs_file.c |
    128 filemap_write_and_wait(inode->i_mapping); in v9fs_file_lock()
    319 filemap_write_and_wait(inode->i_mapping); in v9fs_file_lock_dotl()
    358 filemap_write_and_wait(inode->i_mapping); in v9fs_file_flock_dotl()
    427 if (inode->i_mapping && inode->i_mapping->nrpages) in v9fs_file_write_iter()
    428 invalidate_inode_pages2_range(inode->i_mapping, in v9fs_file_write_iter()
    562 if (page->mapping != inode->i_mapping) in v9fs_vm_page_mkwrite()
    625 if (!mapping_cap_writeback_dirty(inode->i_mapping)) in v9fs_mmap_vm_close()
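v9fs_file_write_iter() drops any cached pages covering the range it just wrote past the cache, so later reads refetch fresh data from the server. A sketch of that invalidation step (helper name invented):

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    /* Sketch: after writing [pos, pos + count) around the page cache, make
     * sure the cache keeps no stale copies of that range. */
    static int example_invalidate_after_write(struct inode *inode,
                                              loff_t pos, size_t count)
    {
        struct address_space *mapping = inode->i_mapping;

        if (!count || !mapping || !mapping->nrpages)
            return 0;

        return invalidate_inode_pages2_range(mapping,
                                             pos >> PAGE_SHIFT,
                                             (pos + count - 1) >> PAGE_SHIFT);
    }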
|
/fs/hfsplus/ |
D | super.c |
    34 inode->i_mapping->a_ops = &hfsplus_btree_aops; in hfsplus_system_read_inode()
    38 inode->i_mapping->a_ops = &hfsplus_btree_aops; in hfsplus_system_read_inode()
    42 inode->i_mapping->a_ops = &hfsplus_aops; in hfsplus_system_read_inode()
    49 inode->i_mapping->a_ops = &hfsplus_btree_aops; in hfsplus_system_read_inode()
    196 error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping); in hfsplus_sync_fs()
    197 error2 = filemap_write_and_wait(sbi->ext_tree->inode->i_mapping); in hfsplus_sync_fs()
    202 filemap_write_and_wait(sbi->attr_tree->inode->i_mapping); in hfsplus_sync_fs()
    206 error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping); in hfsplus_sync_fs()
|
D | inode.c |
    315 error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping); in hfsplus_file_fsync()
    319 filemap_write_and_wait(sbi->ext_tree->inode->i_mapping); in hfsplus_file_fsync()
    328 sbi->attr_tree->inode->i_mapping); in hfsplus_file_fsync()
    337 error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping); in hfsplus_file_fsync()
    410 inode->i_mapping->a_ops = &hfsplus_aops; in hfsplus_new_inode()
    416 inode->i_mapping->a_ops = &hfsplus_aops; in hfsplus_new_inode()
    536 inode->i_mapping->a_ops = &hfsplus_aops; in hfsplus_cat_read_inode()
    540 inode->i_mapping->a_ops = &hfsplus_aops; in hfsplus_cat_read_inode()
|
/fs/afs/ |
D | dir_edit.c |
    209 gfp = vnode->vfs_inode.i_mapping->gfp_mask; in afs_edit_dir_add()
    210 page0 = find_or_create_page(vnode->vfs_inode.i_mapping, 0, gfp); in afs_edit_dir_add()
    241 gfp = vnode->vfs_inode.i_mapping->gfp_mask; in afs_edit_dir_add()
    242 page = find_or_create_page(vnode->vfs_inode.i_mapping, in afs_edit_dir_add()
    390 page0 = find_lock_page(vnode->vfs_inode.i_mapping, 0); in afs_edit_dir_remove()
    410 page = find_lock_page(vnode->vfs_inode.i_mapping, index); in afs_edit_dir_remove()
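afs_edit_dir_add() pins directory pages with find_or_create_page(), reusing the mapping's own gfp_mask, and must unlock and release each page when it is done. A minimal sketch of that get/unlock/put discipline (the actual directory editing is omitted):

    #include <linux/pagemap.h>

    /* Sketch: grab (or create) the directory page at @index, locked and
     * pinned, then release it again; an in-place edit would sit between. */
    static int example_touch_dir_page(struct inode *inode, pgoff_t index)
    {
        gfp_t gfp = inode->i_mapping->gfp_mask;
        struct page *page;

        page = find_or_create_page(inode->i_mapping, index, gfp);
        if (!page)
            return -ENOMEM;

        /* ... modify the locked, pinned page here ... */

        unlock_page(page);
        put_page(page);
        return 0;
    }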
|
/fs/iomap/ |
D | seek.c |
    21 const struct address_space_operations *ops = inode->i_mapping->a_ops; in page_seek_hole_data()
    46 if (unlikely(page->mapping != inode->i_mapping)) in page_seek_hole_data()
    90 nr_pages = pagevec_lookup_range(&pvec, inode->i_mapping, &index, in page_cache_seek_hole_data()
|
D | buffered-io.c |
    335 if (!add_to_page_cache_lru(page, inode->i_mapping, page->index, in iomap_next_page()
    602 page = grab_cache_page_write_begin(inode->i_mapping, index, flags); in iomap_write_begin()
    709 ret = block_write_end(NULL, inode->i_mapping, pos, len, copied, in iomap_write_end()
    779 if (mapping_writably_mapped(inode->i_mapping)) in iomap_write_actor()
    812 balance_dirty_pages_ratelimited(inode->i_mapping); in iomap_write_actor()
    841 struct address_space *mapping = inode->i_mapping; in __iomap_read_page()
    894 balance_dirty_pages_ratelimited(inode->i_mapping); in iomap_dirty_actor()
    1043 if ((page->mapping != inode->i_mapping) || in iomap_page_mkwrite()
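iomap_page_mkwrite() (like v9fs_vm_page_mkwrite() above) must recheck page->mapping against inode->i_mapping after locking the page, because a concurrent truncate may have detached it. The recheck looks roughly like this (a sketch, not the exact iomap code):

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* Sketch: the truncate race check a ->page_mkwrite handler performs after
     * lock_page(); VM_FAULT_NOPAGE tells the fault path the page is gone. */
    static vm_fault_t example_page_mkwrite_check(struct vm_fault *vmf)
    {
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);

        lock_page(page);
        if (page->mapping != inode->i_mapping ||
            page_offset(page) > i_size_read(inode)) {
            unlock_page(page);
            return VM_FAULT_NOPAGE;
        }

        /* ... dirty the page / do FS-specific work, then return
         * VM_FAULT_LOCKED with the page still locked ... */
        return VM_FAULT_LOCKED;
    }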
|
/fs/btrfs/tests/ |
D | extent-io-tests.c |
    32 ret = find_get_pages_contig(inode->i_mapping, index, in process_page_range()
    94 page = find_or_create_page(inode->i_mapping, index, GFP_KERNEL); in test_find_delalloc()
    138 locked_page = find_lock_page(inode->i_mapping, in test_find_delalloc()
    173 locked_page = find_lock_page(inode->i_mapping, test_start >> in test_find_delalloc()
    224 page = find_get_page(inode->i_mapping, in test_find_delalloc()
|
/fs/nfs/ |
D | symlink.c |
    58 page = find_get_page(inode->i_mapping, 0); in nfs_get_link()
    66 err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping)); in nfs_get_link()
|
/fs/ramfs/ |
D | file-nommu.c |
    69 gfp_t gfp = mapping_gfp_mask(inode->i_mapping); in ramfs_nommu_expand_for_mapping()
    107 ret = add_to_page_cache_lru(page, inode->i_mapping, loop, in ramfs_nommu_expand_for_mapping()
    227 nr = find_get_pages(inode->i_mapping, &pgoff, lpages, pages); in ramfs_nommu_get_unmapped_area()
|
D | inode.c |
    71 inode->i_mapping->a_ops = &ramfs_aops; in ramfs_get_inode()
    72 mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER); in ramfs_get_inode()
    73 mapping_set_unevictable(inode->i_mapping); in ramfs_get_inode()
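ramfs_get_inode() shows a mapping whose pages are the backing store itself: install the a_ops, allow highmem pages, and mark the mapping unevictable so reclaim never discards it. Approximately (example_ram_aops is a placeholder for ramfs_aops):

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    static const struct address_space_operations example_ram_aops; /* hypothetical */

    /* Sketch: the page cache *is* the storage for this inode, so the whole
     * mapping is marked unevictable and may use highmem pages. */
    static void example_setup_ram_mapping(struct inode *inode)
    {
        inode->i_mapping->a_ops = &example_ram_aops;
        mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
        mapping_set_unevictable(inode->i_mapping);
    }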
|
/fs/ |
D | drop_caches.c |
    30 (inode->i_mapping->nrpages == 0 && !need_resched())) { in drop_pagecache_sb()
    38 invalidate_mapping_pages(inode->i_mapping, 0, -1); in drop_pagecache_sb()
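drop_pagecache_sb() walks a superblock's inodes and discards clean page cache with invalidate_mapping_pages() over the whole range (index 0 to -1, i.e. to the end). The per-inode step amounts to:

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    /* Sketch: drop every clean, unmapped page cached for @inode.  Dirty,
     * locked, and mapped pages are skipped, so no data is lost. */
    static void example_drop_inode_pagecache(struct inode *inode)
    {
        if (inode->i_mapping->nrpages == 0)
            return; /* nothing cached */

        invalidate_mapping_pages(inode->i_mapping, 0, -1);
    }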
|
/fs/jfs/ |
D | jfs_umount.c |
    98 filemap_write_and_wait(sbi->direct_inode->i_mapping); in jfs_umount()
    150 filemap_write_and_wait(sbi->direct_inode->i_mapping); in jfs_umount_rw()
|
/fs/ext2/ |
D | namei.c |
    173 inode->i_mapping->a_ops = &ext2_nobh_aops; in ext2_symlink()
    175 inode->i_mapping->a_ops = &ext2_aops; in ext2_symlink()
    241 inode->i_mapping->a_ops = &ext2_nobh_aops; in ext2_mkdir()
    243 inode->i_mapping->a_ops = &ext2_aops; in ext2_mkdir()
|
D | inode.c |
    850 ext2_write_failed(inode->i_mapping, offset + length); in ext2_iomap_end()
    1304 error = nobh_truncate_page(inode->i_mapping, in ext2_setsize()
    1307 error = block_truncate_page(inode->i_mapping, in ext2_setsize()
    1319 sync_mapping_buffers(inode->i_mapping); in ext2_setsize()
    1396 inode->i_mapping->a_ops = &ext2_dax_aops; in ext2_set_file_ops()
    1398 inode->i_mapping->a_ops = &ext2_nobh_aops; in ext2_set_file_ops()
    1400 inode->i_mapping->a_ops = &ext2_aops; in ext2_set_file_ops()
    1499 inode->i_mapping->a_ops = &ext2_nobh_aops; in ext2_iget()
    1501 inode->i_mapping->a_ops = &ext2_aops; in ext2_iget()
    1512 inode->i_mapping->a_ops = &ext2_nobh_aops; in ext2_iget()
    [all …]
|
/fs/xfs/ |
D | xfs_pnfs.c |
    132 error = filemap_write_and_wait(inode->i_mapping); in xfs_fs_map_blocks()
    135 error = invalidate_inode_pages2(inode->i_mapping); in xfs_fs_map_blocks()
    267 error = invalidate_inode_pages2_range(inode->i_mapping, in xfs_fs_commit_blocks()
|
/fs/ecryptfs/ |
D | mmap.c |
    35 struct page *page = read_mapping_page(inode->i_mapping, index, NULL); in ecryptfs_get_locked_page()
    533 if (lower_inode->i_mapping->a_ops->bmap) in ecryptfs_bmap()
    534 rc = lower_inode->i_mapping->a_ops->bmap(lower_inode->i_mapping, in ecryptfs_bmap()
|
/fs/qnx6/ |
D | inode.c |
    187 struct address_space *mapping = root->i_mapping; in qnx6_checkroot()
    517 inode->i_mapping->a_ops = &qnx6_aops; in qnx6_private_inode()
    550 mapping = sbi->inodes->i_mapping; in qnx6_iget()
    581 inode->i_mapping->a_ops = &qnx6_aops; in qnx6_iget()
    585 inode->i_mapping->a_ops = &qnx6_aops; in qnx6_iget()
    589 inode->i_mapping->a_ops = &qnx6_aops; in qnx6_iget()
|
/fs/hfs/ |
D | btree.c |
    57 tree->inode->i_mapping->a_ops = &hfs_btree_aops; in hfs_btree_open()
    69 tree->inode->i_mapping->a_ops = &hfs_btree_aops; in hfs_btree_open()
    77 mapping = tree->inode->i_mapping; in hfs_btree_open()
    129 tree->inode->i_mapping->a_ops = &hfs_aops; in hfs_btree_open()
|