/fs/btrfs/

  locking.h
    108  static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)  [in btrfs_tree_unlock_rw(), argument]
    110  if (rw == BTRFS_WRITE_LOCK)  [in btrfs_tree_unlock_rw()]
    112  else if (rw == BTRFS_READ_LOCK)  [in btrfs_tree_unlock_rw()]

  zoned.h
    42   int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
    44   int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
    111  int mirror, int rw, u64 *bytenr_ret)  [in btrfs_sb_log_location_bdev(), argument]
    118  int rw, u64 *bytenr_ret)  [in btrfs_sb_log_location(), argument]

  zoned.c
    725  int rw, u64 *bytenr_ret)  [in sb_log_location(), argument]
    739  if (rw == WRITE) {  [in sb_log_location()]
    771  int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,  [in btrfs_sb_log_location_bdev(), argument]
    787  ASSERT(rw == READ || rw == WRITE);  [in btrfs_sb_log_location_bdev()]
    808  return sb_log_location(bdev, zones, rw, bytenr_ret);  [in btrfs_sb_log_location_bdev()]
    811  int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,  [in btrfs_sb_log_location(), argument]
    834  rw, bytenr_ret);  [in btrfs_sb_log_location()]

  relocation.c
    4001  int rw = 0;  [in btrfs_relocate_block_group(), local]
    4046  rw = 1;  [in btrfs_relocate_block_group()]
    4125  if (err && rw)  [in btrfs_relocate_block_group()]

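The locking.h hits trace a common rw-token pattern: whoever took the lock remembers whether it was the read or the write variant, and the unlock helper dispatches on that token. A minimal user-space sketch of the same shape, with pthread rwlocks and made-up DEMO_* constants standing in for the BTRFS_WRITE_LOCK/BTRFS_READ_LOCK values (this illustrates the pattern; it is not the kernel code):

```c
#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins for the BTRFS_*_LOCK tokens seen in the hits. */
#define DEMO_WRITE_LOCK 1
#define DEMO_READ_LOCK  2

static pthread_rwlock_t demo_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Take the lock and hand back the token the caller must return on unlock. */
static int demo_lock_rw(int rw)
{
	if (rw == DEMO_WRITE_LOCK)
		pthread_rwlock_wrlock(&demo_lock);
	else
		pthread_rwlock_rdlock(&demo_lock);
	return rw;
}

/* Same shape as the locking.h hits: the token picks the unlock path. */
static void demo_unlock_rw(int rw)
{
	if (rw == DEMO_WRITE_LOCK) {
		printf("dropping write lock\n");
		pthread_rwlock_unlock(&demo_lock);
	} else if (rw == DEMO_READ_LOCK) {
		printf("dropping read lock\n");
		pthread_rwlock_unlock(&demo_lock);
	}
}

int main(void)
{
	int token = demo_lock_rw(DEMO_READ_LOCK);

	demo_unlock_rw(token);
	return 0;
}
```
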
/fs/f2fs/

  iostat.c
    185  int rw, bool is_sync)  [in __update_iostat_latency(), argument]
    205  if (rw == 0) {  [in __update_iostat_latency()]
    222  void iostat_update_and_unbind_ctx(struct bio *bio, int rw)  [in iostat_update_and_unbind_ctx(), argument]
    227  if (rw == 0)  [in iostat_update_and_unbind_ctx()]
    231  __update_iostat_latency(iostat_ctx, rw, is_sync);  [in iostat_update_and_unbind_ctx()]

  iostat.h
    60   extern void iostat_update_and_unbind_ctx(struct bio *bio, int rw);
    70   static inline void iostat_update_and_unbind_ctx(struct bio *bio, int rw) {}  [in iostat_update_and_unbind_ctx(), argument]

  f2fs.h
    1224  #define is_read_io(rw) ((rw) == READ)  [argument]
    4528  int rw = iov_iter_rw(iter);  [in f2fs_force_buffered_io(), local]
    4546  if (f2fs_lfs_mode(sbi) && (rw == WRITE)) {  [in f2fs_force_buffered_io()]

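The f2fs hits rely on the block layer's long-standing convention that READ is 0 and WRITE is 1, which is why iostat.c can test `rw == 0` for the read direction and f2fs.h can define `is_read_io(rw)` as a plain comparison. A stand-alone sketch of that convention, with the constants defined locally for a user-space build:

```c
#include <stdio.h>

/* Local stand-ins; in the kernel READ/WRITE come from the block layer headers. */
#define READ  0
#define WRITE 1

/* Same shape as f2fs's is_read_io(): true only for the read direction. */
#define is_read_io(rw) ((rw) == READ)

static void account_io(int rw, unsigned long bytes)
{
	if (is_read_io(rw))
		printf("read  %lu bytes\n", bytes);
	else
		printf("write %lu bytes\n", bytes);
}

int main(void)
{
	account_io(READ, 4096);
	account_io(WRITE, 8192);
	return 0;
}
```
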
/fs/crypto/

  crypto.c
    101  int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,  [in fscrypt_crypt_block(), argument]
    134  if (rw == FS_DECRYPT)  [in fscrypt_crypt_block()]
    141  (rw == FS_DECRYPT ? "De" : "En"), lblk_num, res);  [in fscrypt_crypt_block()]

  fscrypt_private.h
    279  int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw,

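The crypto.c hits carry the direction as an fscrypt_direction_t value and branch on FS_DECRYPT both for the transform and for the "De"/"En" prefix in the error message. A toy sketch of that direction-enum pattern; the enum, the XOR transform, and the demo_ names below are illustrative, not fscrypt's own definitions:

```c
#include <stdio.h>

/* Illustrative direction enum; fscrypt's real fscrypt_direction_t lives in
 * fscrypt_private.h and is not reproduced here. */
typedef enum { DEMO_ENCRYPT, DEMO_DECRYPT } demo_direction_t;

/* Toy transform: XOR with a fixed byte is its own inverse, so one routine
 * serves both directions and only the reporting differs. */
static int demo_crypt_block(demo_direction_t rw, unsigned char *buf, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		buf[i] ^= 0x5a;

	/* Direction-aware message, like the "De"/"En" prefix in the line 141 hit. */
	printf("%scryption of %zu bytes done\n",
	       rw == DEMO_DECRYPT ? "De" : "En", len);
	return 0;
}

int main(void)
{
	unsigned char block[8] = "payload";

	demo_crypt_block(DEMO_ENCRYPT, block, sizeof(block));
	demo_crypt_block(DEMO_DECRYPT, block, sizeof(block));
	return 0;
}
```
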
/fs/nfs/blocklayout/

  blocklayout.h
    182  int ext_tree_remove(struct pnfs_block_layout *bl, bool rw, sector_t start,
    187  struct pnfs_block_extent *ret, bool rw);

  extent_tree.c
    346  struct pnfs_block_extent *ret, bool rw)  [in ext_tree_lookup(), argument]
    351  if (!rw)  [in ext_tree_lookup()]
    360  int ext_tree_remove(struct pnfs_block_layout *bl, bool rw,  [in ext_tree_remove(), argument]
    368  if (rw) {  [in ext_tree_remove()]

  blocklayout.c
    141  do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect,  [in do_add_page_to_bio(), argument]
    151  npg, rw, (unsigned long long)isect, offset, *len);  [in do_add_page_to_bio()]
    178  bio_set_op_attrs(bio, rw, 0);  [in do_add_page_to_bio()]
    187  static void bl_mark_devices_unavailable(struct nfs_pgio_header *header, bool rw)  [in bl_mark_devices_unavailable(), argument]
    198  if (!ext_tree_lookup(bl, isect, &be, rw))  [in bl_mark_devices_unavailable()]

/fs/

  aio.c
    200   struct kiocb rw;  [member]
    567   struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);  [in kiocb_set_cancel_fn()]
    627   req->ki_cancel(&req->rw);  [in free_ioctx_users()]
    1427  struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);  [in aio_complete_rw()]
    1484  static ssize_t aio_setup_rw(int rw, const struct iocb *iocb,  [in aio_setup_rw(), argument]
    1492  ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);  [in aio_setup_rw()]
    1497  return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat);  [in aio_setup_rw()]
    1721  struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);  [in aio_poll_cancel()]
    1953  return aio_read(&req->rw, iocb, false, compat);  [in __io_submit_one()]
    1955  return aio_write(&req->rw, iocb, false, compat);  [in __io_submit_one()]
    [all …]

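In the aio.c hits, rw is not a read/write flag at all: it is the name of the struct kiocb member embedded in struct aio_kiocb (line 200), and container_of() recovers the enclosing request from a pointer to that member (lines 567, 1427, 1721). A self-contained sketch of the idiom with stand-in demo_ types; container_of is written out in its classic form here, while the kernel supplies its own stricter version:

```c
#include <stddef.h>
#include <stdio.h>

/* Classic container_of, spelled out for a user-space build. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-ins for struct kiocb / struct aio_kiocb. */
struct demo_kiocb {
	long ki_pos;
};

struct demo_aio_kiocb {
	struct demo_kiocb rw;   /* embedded member, as in the aio.c line 200 hit */
	int id;
};

static void demo_complete(struct demo_kiocb *kiocb)
{
	/* Recover the enclosing request from the embedded member. */
	struct demo_aio_kiocb *req =
		container_of(kiocb, struct demo_aio_kiocb, rw);

	printf("completed request %d at pos %ld\n", req->id, req->rw.ki_pos);
}

int main(void)
{
	struct demo_aio_kiocb req = { .rw = { .ki_pos = 4096 }, .id = 7 };

	demo_complete(&req.rw);   /* callers only ever see the embedded member */
	return 0;
}
```
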
/fs/ocfs2/

  refcounttree.h
    27   int ocfs2_lock_refcount_tree(struct ocfs2_super *osb, u64 ref_blkno, int rw,
    32   int rw);

  refcounttree.c
    416  struct ocfs2_refcount_tree *tree, int rw)  [in __ocfs2_lock_refcount_tree(), argument]
    420  ret = ocfs2_refcount_lock(tree, rw);  [in __ocfs2_lock_refcount_tree()]
    426  if (rw)  [in __ocfs2_lock_refcount_tree()]
    444  u64 ref_blkno, int rw,  [in ocfs2_lock_refcount_tree(), argument]
    462  ret = __ocfs2_lock_refcount_tree(osb, tree, rw);  [in ocfs2_lock_refcount_tree()]
    473  ocfs2_unlock_refcount_tree(osb, tree, rw);  [in ocfs2_lock_refcount_tree()]
    494  ocfs2_unlock_refcount_tree(osb, tree, rw);  [in ocfs2_lock_refcount_tree()]
    517  struct ocfs2_refcount_tree *tree, int rw)  [in ocfs2_unlock_refcount_tree(), argument]
    519  if (rw)  [in ocfs2_unlock_refcount_tree()]
    524  ocfs2_refcount_unlock(tree, rw);  [in ocfs2_unlock_refcount_tree()]

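The ocfs2 hits show the rw value threaded unchanged through a lock/unlock pair: the lock side passes it to ocfs2_refcount_lock() and gates extra work on `if (rw)`, and the exit paths hand the very same value to ocfs2_unlock_refcount_tree(), whose own `if (rw)` mirrors the lock side. A user-space sketch of that keep-the-flag-symmetric pattern (the pthread rwlock and demo_ names are illustrative, not ocfs2's locking):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Take the optional lock only when the caller asked for it (rw != 0). */
static int demo_lock_tree(int rw)
{
	if (rw)
		pthread_rwlock_wrlock(&tree_lock);
	printf("locked (rw=%d)\n", rw);
	return 0;
}

/* The unlock side must see the same rw value, or the lock leaks or underflows. */
static void demo_unlock_tree(int rw)
{
	if (rw)
		pthread_rwlock_unlock(&tree_lock);
	printf("unlocked (rw=%d)\n", rw);
}

int main(void)
{
	int rw = 1;

	if (demo_lock_tree(rw) == 0) {
		/* ... work on the tree ... */
		demo_unlock_tree(rw);   /* same flag on every exit path */
	}
	return 0;
}
```
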
/fs/exfat/

  inode.c
    441  int rw = iov_iter_rw(iter);  [in exfat_direct_IO(), local]
    444  if (rw == WRITE) {  [in exfat_direct_IO()]
    463  if (ret < 0 && (rw & WRITE))  [in exfat_direct_IO()]

/fs/ntfs3/

  file.c
    358  bool rw = vma->vm_flags & VM_WRITE;  [in ntfs_file_mmap(), local]
    371  if (is_compressed(ni) && rw) {  [in ntfs_file_mmap()]
    376  if (rw) {  [in ntfs_file_mmap()]

/fs/cifs/

  misc.c
    995   setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)  [in setup_aio_ctx_iter(), argument]
    1078  iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);  [in setup_aio_ctx_iter()]

  cifsproto.h
    586   int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);

/fs/affs/

  Changes
    225  flush the super block, and rw remounts didn't