/fs/btrfs/ |
D | locking.h | 108 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) in btrfs_tree_unlock_rw() argument
        110 if (rw == BTRFS_WRITE_LOCK) in btrfs_tree_unlock_rw()
        112 else if (rw == BTRFS_READ_LOCK) in btrfs_tree_unlock_rw()
|
D | zoned.h | 42 int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
        44 int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw,
        111 int mirror, int rw, u64 *bytenr_ret) in btrfs_sb_log_location_bdev() argument
        118 int rw, u64 *bytenr_ret) in btrfs_sb_log_location() argument
|
D | zoned.c | 725 int rw, u64 *bytenr_ret) in sb_log_location() argument
        739 if (rw == WRITE) { in sb_log_location()
        771 int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw, in btrfs_sb_log_location_bdev() argument
        787 ASSERT(rw == READ || rw == WRITE); in btrfs_sb_log_location_bdev()
        808 return sb_log_location(bdev, zones, rw, bytenr_ret); in btrfs_sb_log_location_bdev()
        811 int btrfs_sb_log_location(struct btrfs_device *device, int mirror, int rw, in btrfs_sb_log_location() argument
        834 rw, bytenr_ret); in btrfs_sb_log_location()
|
D | relocation.c | 4001 int rw = 0; in btrfs_relocate_block_group() local
        4046 rw = 1; in btrfs_relocate_block_group()
        4125 if (err && rw) in btrfs_relocate_block_group()
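
Across the btrfs hits above, rw is either a lock-type token that btrfs_tree_unlock_rw() dispatches on, a READ/WRITE direction checked by the zoned superblock-log helpers, or a plain "did we flip the block group read-only" flag in relocation.c. The user-space sketch below only illustrates the token idea; all demo_* names are invented and a pthread rwlock stands in for the extent-buffer lock, so this is not the kernel code.

/*
 * Hypothetical sketch of the "remember which lock you took" pattern
 * suggested by btrfs_tree_unlock_rw() above.  DEMO_* tokens stand in
 * for BTRFS_READ_LOCK / BTRFS_WRITE_LOCK.  Build with: cc demo.c -pthread
 */
#include <assert.h>
#include <pthread.h>

enum { DEMO_NO_LOCK = 0, DEMO_READ_LOCK, DEMO_WRITE_LOCK };

static pthread_rwlock_t demo_eb_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Take the lock in the requested mode and return a token recording it. */
static int demo_lock_rw(int want_write)
{
    if (want_write) {
        pthread_rwlock_wrlock(&demo_eb_lock);
        return DEMO_WRITE_LOCK;
    }
    pthread_rwlock_rdlock(&demo_eb_lock);
    return DEMO_READ_LOCK;
}

/* Counterpart of the unlock helper: act according to the token.  pthread
 * has a single unlock call, so the token is only sanity-checked here; in
 * btrfs it selects between the read and write unlock primitives. */
static void demo_unlock_rw(int rw)
{
    assert(rw == DEMO_READ_LOCK || rw == DEMO_WRITE_LOCK);
    pthread_rwlock_unlock(&demo_eb_lock);
}

int main(void)
{
    int rw = demo_lock_rw(0);   /* shared */
    demo_unlock_rw(rw);

    rw = demo_lock_rw(1);       /* exclusive */
    demo_unlock_rw(rw);
    return 0;
}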
|
/fs/nfs/blocklayout/ |
D | blocklayout.h | 182 int ext_tree_remove(struct pnfs_block_layout *bl, bool rw, sector_t start,
        187 struct pnfs_block_extent *ret, bool rw);
|
D | extent_tree.c | 346 struct pnfs_block_extent *ret, bool rw) in ext_tree_lookup() argument
        351 if (!rw) in ext_tree_lookup()
        360 int ext_tree_remove(struct pnfs_block_layout *bl, bool rw, in ext_tree_remove() argument
        368 if (rw) { in ext_tree_remove()
|
D | blocklayout.c | 141 do_add_page_to_bio(struct bio *bio, int npg, int rw, sector_t isect, in do_add_page_to_bio() argument
        151 npg, rw, (unsigned long long)isect, offset, *len); in do_add_page_to_bio()
        178 bio_set_op_attrs(bio, rw, 0); in do_add_page_to_bio()
        187 static void bl_mark_devices_unavailable(struct nfs_pgio_header *header, bool rw) in bl_mark_devices_unavailable() argument
        198 if (!ext_tree_lookup(bl, isect, &be, rw)) in bl_mark_devices_unavailable()
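
In the pNFS block-layout hits, rw is a bool that selects which of two extent sets (read-only vs. read-write) a lookup or removal walks. A minimal sketch of that shape, with invented demo_* types rather than the real pnfs_block_* structures:

/*
 * Hypothetical sketch of the two-set selection suggested by
 * ext_tree_lookup()/ext_tree_remove() above: one structure keeps
 * separate read-only and read-write extents, and a bool picks which
 * set an operation walks.  Types and fields are made up.
 */
#include <stdbool.h>
#include <stddef.h>

struct demo_extent {
    unsigned long long start;   /* first sector covered */
    unsigned long long len;     /* number of sectors */
};

struct demo_layout {
    struct demo_extent ro[16];  /* extents valid for reads only */
    size_t nr_ro;
    struct demo_extent rw[16];  /* extents valid for reads and writes */
    size_t nr_rw;
};

/* Return the extent covering @sector in the set chosen by @rw, or NULL. */
struct demo_extent *demo_ext_lookup(struct demo_layout *bl,
                                    unsigned long long sector, bool rw)
{
    struct demo_extent *set = rw ? bl->rw : bl->ro;
    size_t n = rw ? bl->nr_rw : bl->nr_ro;

    for (size_t i = 0; i < n; i++) {
        if (sector >= set[i].start && sector < set[i].start + set[i].len)
            return &set[i];
    }
    return NULL;
}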
|
/fs/crypto/ |
D | crypto.c | 102 fscrypt_direction_t rw, u64 index, in fscrypt_crypt_data_unit() argument
        134 if (rw == FS_DECRYPT) in fscrypt_crypt_data_unit()
        142 (rw == FS_DECRYPT ? "De" : "En"), index, res); in fscrypt_crypt_data_unit()
|
D | fscrypt_private.h | 321 fscrypt_direction_t rw, u64 index,
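
fscrypt_crypt_data_unit() above takes an fscrypt_direction_t and branches on FS_DECRYPT both for the cipher call and for the "De"/"En" prefix of its message. A hedged, self-contained sketch of that direction-enum dispatch, with a trivial byte rotation standing in for the real skcipher:

/*
 * Hypothetical sketch of the direction dispatch used by
 * fscrypt_crypt_data_unit() above.  DEMO_ENCRYPT/DEMO_DECRYPT stand in
 * for FS_ENCRYPT/FS_DECRYPT; only the branching is the point.
 */
#include <stddef.h>
#include <stdio.h>

typedef enum { DEMO_ENCRYPT, DEMO_DECRYPT } demo_direction_t;

int demo_crypt_unit(demo_direction_t rw, unsigned char key,
                    unsigned char *buf, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        if (rw == DEMO_DECRYPT)
            buf[i] = (unsigned char)(buf[i] - key);   /* undo the rotation */
        else
            buf[i] = (unsigned char)(buf[i] + key);   /* apply the rotation */
    }
    /* Echo of the "%scryption ..." style message in crypto.c. */
    fprintf(stderr, "%scryption of %zu bytes done\n",
            rw == DEMO_DECRYPT ? "De" : "En", len);
    return 0;
}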
|
/fs/ |
D | aio.c | 200 struct kiocb rw; member
        567 struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw); in kiocb_set_cancel_fn()
        634 req->ki_cancel(&req->rw); in free_ioctx_users()
        1434 struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw); in aio_complete_rw()
        1491 static ssize_t aio_setup_rw(int rw, const struct iocb *iocb, in aio_setup_rw() argument
        1499 ssize_t ret = import_single_range(rw, buf, len, *iovec, iter); in aio_setup_rw()
        1504 return __import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter, compat); in aio_setup_rw()
        1728 struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw); in aio_poll_cancel()
        1960 return aio_read(&req->rw, iocb, false, compat); in __io_submit_one()
        1962 return aio_write(&req->rw, iocb, false, compat); in __io_submit_one()
        [all …]
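
In aio.c, rw at the first hit is not a direction at all but an embedded struct kiocb member, and several later hits recover the enclosing aio_kiocb from a pointer to it via container_of(). A user-space sketch of that pattern with invented demo_* types:

/*
 * Hypothetical sketch of the container_of() pattern behind the aio.c
 * hits above: the completion path only gets a pointer to the embedded
 * member and climbs back to the outer request.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_kiocb {
    int ki_flags;
};

struct demo_aio_kiocb {
    struct demo_kiocb rw;       /* embedded member, like aio_kiocb.rw */
    unsigned long ki_userdata;
};

/* Completion callback only sees the inner kiocb... */
static void demo_complete(struct demo_kiocb *kiocb)
{
    /* ...and recovers the outer request, as aio_complete_rw() does. */
    struct demo_aio_kiocb *req =
        container_of(kiocb, struct demo_aio_kiocb, rw);

    printf("completed request with userdata %lu\n", req->ki_userdata);
}

int main(void)
{
    struct demo_aio_kiocb req = { .rw = { .ki_flags = 0 }, .ki_userdata = 42 };

    demo_complete(&req.rw);
    return 0;
}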
|
/fs/ocfs2/ |
D | refcounttree.h | 27 int ocfs2_lock_refcount_tree(struct ocfs2_super *osb, u64 ref_blkno, int rw,
        32 int rw);
|
D | refcounttree.c | 416 struct ocfs2_refcount_tree *tree, int rw) in __ocfs2_lock_refcount_tree() argument
        420 ret = ocfs2_refcount_lock(tree, rw); in __ocfs2_lock_refcount_tree()
        426 if (rw) in __ocfs2_lock_refcount_tree()
        444 u64 ref_blkno, int rw, in ocfs2_lock_refcount_tree() argument
        462 ret = __ocfs2_lock_refcount_tree(osb, tree, rw); in ocfs2_lock_refcount_tree()
        473 ocfs2_unlock_refcount_tree(osb, tree, rw); in ocfs2_lock_refcount_tree()
        494 ocfs2_unlock_refcount_tree(osb, tree, rw); in ocfs2_lock_refcount_tree()
        517 struct ocfs2_refcount_tree *tree, int rw) in ocfs2_unlock_refcount_tree() argument
        519 if (rw) in ocfs2_unlock_refcount_tree()
        524 ocfs2_refcount_unlock(tree, rw); in ocfs2_unlock_refcount_tree()
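
Unlike the btrfs sketch above, the ocfs2 helpers take the same rw flag on both the lock and the unlock side so the pair stays symmetric. A short sketch of that convention, again with a pthread rwlock standing in for the real refcount lock and invented demo_* names:

/*
 * Hypothetical sketch: the caller passes rw to both helpers, as with
 * ocfs2_lock_refcount_tree()/ocfs2_unlock_refcount_tree() above.
 * Build with: cc demo.c -pthread
 */
#include <pthread.h>

struct demo_tree {
    pthread_rwlock_t lock;
};

static void demo_lock_tree(struct demo_tree *tree, int rw)
{
    if (rw)
        pthread_rwlock_wrlock(&tree->lock);   /* exclusive when rw != 0 */
    else
        pthread_rwlock_rdlock(&tree->lock);   /* shared otherwise */
}

static void demo_unlock_tree(struct demo_tree *tree, int rw)
{
    /* rw is accepted only for symmetry with the lock side;
     * pthread needs just one unlock call for both modes. */
    (void)rw;
    pthread_rwlock_unlock(&tree->lock);
}

int main(void)
{
    struct demo_tree t = { .lock = PTHREAD_RWLOCK_INITIALIZER };

    demo_lock_tree(&t, 1);
    demo_unlock_tree(&t, 1);
    demo_lock_tree(&t, 0);
    demo_unlock_tree(&t, 0);
    return 0;
}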
|
/fs/exfat/ |
D | inode.c | 441 int rw = iov_iter_rw(iter); in exfat_direct_IO() local
        444 if (rw == WRITE) { in exfat_direct_IO()
        463 if (ret < 0 && (rw & WRITE)) in exfat_direct_IO()
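
The exfat hits test the direction both as rw == WRITE and as rw & WRITE. With the kernel's READ == 0 / WRITE == 1 values and a direction limited to those two, the tests agree, as this tiny stand-alone check with stand-in macros shows:

/*
 * DEMO_READ/DEMO_WRITE mirror the kernel's READ (0) and WRITE (1).
 * For a direction restricted to those two values, the equality test
 * and the bit test in the exfat hits above give the same answer.
 */
#include <assert.h>

#define DEMO_READ  0
#define DEMO_WRITE 1

int main(void)
{
    for (int rw = DEMO_READ; rw <= DEMO_WRITE; rw++)
        assert((rw == DEMO_WRITE) == ((rw & DEMO_WRITE) != 0));
    return 0;
}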
|
/fs/ntfs3/ |
D | file.c | 358 bool rw = vma->vm_flags & VM_WRITE; in ntfs_file_mmap() local
        371 if (is_compressed(ni) && rw) { in ntfs_file_mmap()
        376 if (rw) { in ntfs_file_mmap()
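
Here rw is a bool derived from vma->vm_flags & VM_WRITE and used to refuse writable mappings of compressed files. A hypothetical user-space sketch of that gate; the flag value and the error code are stand-ins, not the ntfs3 implementation:

/*
 * Hypothetical sketch of the ntfs_file_mmap() check above: derive a
 * bool from a flags bitmask and reject writable mappings of
 * compressed data.  DEMO_VM_WRITE is an invented constant.
 */
#include <errno.h>
#include <stdbool.h>

#define DEMO_VM_WRITE 0x2u

int demo_file_mmap(unsigned int vm_flags, bool compressed)
{
    bool rw = vm_flags & DEMO_VM_WRITE;

    if (compressed && rw)
        return -EOPNOTSUPP;   /* no writable mappings of compressed data */

    if (rw) {
        /* a writable mapping would reserve backing clusters here */
    }
    return 0;
}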
|
/fs/cifs/ |
D | misc.c | 995 setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw) in setup_aio_ctx_iter() argument
        1078 iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len); in setup_aio_ctx_iter()
|
D | cifsproto.h | 586 int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
|
/fs/affs/ |
D | Changes | 225 flush the super block, and rw remounts didn't
|
/fs/f2fs/ |
D | file.c | 809 static bool f2fs_force_buffered_io(struct inode *inode, int rw) in f2fs_force_buffered_io() argument
        827 if (f2fs_sb_has_blkzoned(sbi) && (rw == WRITE)) in f2fs_force_buffered_io()
        829 if (f2fs_lfs_mode(sbi) && rw == WRITE && F2FS_IO_ALIGNED(sbi)) in f2fs_force_buffered_io()
        4367 static void f2fs_trace_rw_file_path(struct kiocb *iocb, size_t count, int rw) in f2fs_trace_rw_file_path() argument
        4378 if (rw == WRITE) in f2fs_trace_rw_file_path()
|
D | f2fs.h | 1211 #define is_read_io(rw) ((rw) == READ) argument
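
f2fs_force_buffered_io() combines the direction with filesystem properties: writes on zoned devices or in LFS mode fall back to buffered I/O, and is_read_io() is just an rw == READ test. A simplified, hypothetical sketch of that decision (it drops the F2FS_IO_ALIGNED() part of the real check, and the feature flags are stand-ins):

/*
 * Hypothetical sketch of the decision in f2fs_force_buffered_io()
 * above: whether a direct I/O request must go through the page cache
 * depends on the direction and on device/filesystem properties.
 */
#include <stdbool.h>

#define DEMO_READ  0
#define DEMO_WRITE 1

#define demo_is_read_io(rw)  ((rw) == DEMO_READ)   /* cf. is_read_io() */

struct demo_sb_info {
    bool blkzoned;   /* backing device is zoned */
    bool lfs_mode;   /* log-structured allocation only */
};

bool demo_force_buffered_io(const struct demo_sb_info *sbi, int rw)
{
    /* Zoned devices want sequential writes, so direct writes are
     * routed through the page cache instead. */
    if (sbi->blkzoned && rw == DEMO_WRITE)
        return true;

    /* Same idea for LFS mode, where in-place overwrites are not done. */
    if (sbi->lfs_mode && rw == DEMO_WRITE)
        return true;

    return false;
}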
|