/fs/btrfs/ |
D | backref.h |
     99  static inline void btrfs_backref_iter_free(struct btrfs_backref_iter *iter)  in btrfs_backref_iter_free() argument
    101  if (!iter)  in btrfs_backref_iter_free()
    103  btrfs_free_path(iter->path);  in btrfs_backref_iter_free()
    104  kfree(iter);  in btrfs_backref_iter_free()
    108  struct btrfs_backref_iter *iter)  in btrfs_backref_get_eb() argument
    110  if (!iter)  in btrfs_backref_get_eb()
    112  return iter->path->nodes[0];  in btrfs_backref_get_eb()
    122  struct btrfs_backref_iter *iter)  in btrfs_backref_has_tree_block_info() argument
    124  if (iter->cur_key.type == BTRFS_EXTENT_ITEM_KEY &&  in btrfs_backref_has_tree_block_info()
    125  iter->cur_ptr - iter->item_ptr == sizeof(struct btrfs_extent_item))  in btrfs_backref_has_tree_block_info()
    [all …]
|
D | backref.c |
    2396  int btrfs_backref_iter_start(struct btrfs_backref_iter *iter, u64 bytenr)  argument
    2398  struct btrfs_fs_info *fs_info = iter->fs_info;
    2399  struct btrfs_path *path = iter->path;
    2407  iter->bytenr = bytenr;
    2429  memcpy(&iter->cur_key, &key, sizeof(key));
    2430  iter->item_ptr = (u32)btrfs_item_ptr_offset(path->nodes[0],
    2432  iter->end_ptr = (u32)(iter->item_ptr +
    2448  iter->cur_ptr = (u32)(iter->item_ptr + sizeof(*ei));
    2451  if (iter->cur_ptr >= iter->end_ptr) {
    2462  btrfs_item_key_to_cpu(path->nodes[0], &iter->cur_key,
    [all …]
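The two entries above are the btrfs backref iterator. A minimal usage sketch of how the pieces fit together, assuming the iterator was obtained from the allocator that pairs with btrfs_backref_iter_free() (the allocation helper is not part of this listing) and that btrfs_backref_iter_next() returns 0 for another reference, a positive value when the references are exhausted, and a negative errno on error:

	/*
	 * Sketch against fs/btrfs/backref.h from this listing.  walk_backrefs()
	 * is a hypothetical caller: start at an extent bytenr, step through the
	 * inline and keyed references, then free the iterator (which also frees
	 * its embedded path, per btrfs_backref_iter_free() above).
	 */
	static int walk_backrefs(struct btrfs_backref_iter *iter, u64 bytenr)
	{
		int ret;

		ret = btrfs_backref_iter_start(iter, bytenr);
		if (ret < 0)
			goto out;

		while (true) {
			/* iter->cur_key and iter->cur_ptr describe the current backref */

			ret = btrfs_backref_iter_next(iter);
			if (ret < 0)
				goto out;
			if (ret > 0) {
				/* no more references for this bytenr */
				ret = 0;
				break;
			}
		}
	out:
		btrfs_backref_iter_free(iter);
		return ret;
	}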
|
/fs/iomap/ |
D | direct-io.c |
     40  struct iov_iter *iter;  member
    214  unsigned int align = iov_iter_alignment(dio->submit.iter);  in iomap_dio_bio_actor()
    254  orig_count = iov_iter_count(dio->submit.iter);  in iomap_dio_bio_actor()
    255  iov_iter_truncate(dio->submit.iter, length);  in iomap_dio_bio_actor()
    257  nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);  in iomap_dio_bio_actor()
    273  iov_iter_revert(dio->submit.iter, copied);  in iomap_dio_bio_actor()
    288  ret = bio_iov_iter_get_pages(bio, dio->submit.iter);  in iomap_dio_bio_actor()
    317  nr_pages = iov_iter_npages(dio->submit.iter, BIO_MAX_PAGES);  in iomap_dio_bio_actor()
    338  iov_iter_reexpand(dio->submit.iter, orig_count - copied);  in iomap_dio_bio_actor()
    347  length = iov_iter_zero(length, dio->submit.iter);  in iomap_dio_hole_actor()
    [all …]
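iomap_dio_bio_actor() above shows the common pattern of temporarily capping an iov_iter to the current mapping and then restoring the remainder. A condensed sketch of that pattern; consume_mapping() and the bio-building step are placeholders, not iomap functions:

	/*
	 * Sketch of the truncate/reexpand dance from iomap_dio_bio_actor():
	 * cap the iterator to the length of the current mapping, consume from
	 * it, then re-expand so the caller still sees the bytes that lie
	 * beyond this mapping.
	 */
	static size_t consume_mapping(struct iov_iter *iter, size_t mapping_len)
	{
		size_t orig_count = iov_iter_count(iter);
		size_t copied;

		iov_iter_truncate(iter, mapping_len);

		/* ... build and submit bios from the capped iterator here ... */

		copied = mapping_len - iov_iter_count(iter);

		/* give back the part of the iterator beyond this mapping */
		iov_iter_reexpand(iter, orig_count - copied);
		return copied;
	}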
|
/fs/nfs/ |
D | direct.c |
    164  ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)  in nfs_direct_IO() argument
    172  VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);  in nfs_direct_IO()
    174  if (iov_iter_rw(iter) == READ)  in nfs_direct_IO()
    175  return nfs_file_direct_read(iocb, iter, true);  in nfs_direct_IO()
    176  return nfs_file_direct_write(iocb, iter, true);  in nfs_direct_IO()
    348  struct iov_iter *iter,  in nfs_direct_read_schedule_iovec() argument
    363  while (iov_iter_count(iter)) {  in nfs_direct_read_schedule_iovec()
    369  result = iov_iter_get_pages_alloc(iter, &pagevec,  in nfs_direct_read_schedule_iovec()
    375  iov_iter_advance(iter, bytes);  in nfs_direct_read_schedule_iovec()
    443  ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,  in nfs_file_direct_read() argument
    [all …]
|
/fs/ |
D | read_write.c |
    408  struct iov_iter iter;  in new_sync_read() local
    413  iov_iter_init(&iter, READ, &iov, 1, len);  in new_sync_read()
    415  ret = call_read_iter(filp, &kiocb, &iter);  in new_sync_read()
    437  struct iov_iter iter;  in __kernel_read() local
    453  iov_iter_kvec(&iter, READ, &iov, 1, iov.iov_len);  in __kernel_read()
    454  ret = file->f_op->read_iter(&kiocb, &iter);  in __kernel_read()
    511  struct iov_iter iter;  in new_sync_write() local
    516  iov_iter_init(&iter, WRITE, &iov, 1, len);  in new_sync_write()
    518  ret = call_write_iter(filp, &kiocb, &iter);  in new_sync_write()
    533  struct iov_iter iter;  in __kernel_write() local
    [all …]
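new_sync_read() and __kernel_read() above show the two standard ways of driving a file's ->read_iter(): a user iovec wrapped with iov_iter_init(), or a kernel buffer wrapped in a struct kvec with iov_iter_kvec(). A minimal sketch of the kvec variant; kernel_buf_read() is a hypothetical helper and omits the permission and f_op checks that __kernel_read() performs:

	/*
	 * Read into a kernel buffer through ->read_iter(): wrap the buffer in
	 * a kvec, build an iov_iter over it, and issue the read through a
	 * synchronous kiocb, mirroring the __kernel_read() lines above.
	 */
	static ssize_t kernel_buf_read(struct file *file, void *buf, size_t len,
				       loff_t *pos)
	{
		struct kvec iov = { .iov_base = buf, .iov_len = len };
		struct kiocb kiocb;
		struct iov_iter iter;
		ssize_t ret;

		init_sync_kiocb(&kiocb, file);
		kiocb.ki_pos = pos ? *pos : 0;
		iov_iter_kvec(&iter, READ, &iov, 1, iov.iov_len);

		ret = file->f_op->read_iter(&kiocb, &iter);
		if (ret > 0 && pos)
			*pos = kiocb.ki_pos;
		return ret;
	}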
|
D | direct-io.c |
     108  struct iov_iter *iter;  member
     174  ret = iov_iter_get_pages(sdio->iter, dio->pages, LONG_MAX, DIO_PAGES,  in dio_refill_pages()
     196  iov_iter_advance(sdio->iter, ret);  in dio_refill_pages()
    1141  struct block_device *bdev, struct iov_iter *iter,  in do_blockdev_direct_IO() argument
    1149  const size_t count = iov_iter_count(iter);  in do_blockdev_direct_IO()
    1156  unsigned long align = offset | iov_iter_alignment(iter);  in do_blockdev_direct_IO()
    1164  if (iov_iter_rw(iter) == READ && !count)  in do_blockdev_direct_IO()
    1178  if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {  in do_blockdev_direct_IO()
    1185  if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {  in do_blockdev_direct_IO()
    1198  if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {  in do_blockdev_direct_IO()
    [all …]
|
D | block_dev.c |
    237  __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,  in __blkdev_direct_IO_simple() argument
    249  if ((pos | iov_iter_alignment(iter)) &  in __blkdev_direct_IO_simple()
    270  ret = bio_iov_iter_get_pages(&bio, iter);  in __blkdev_direct_IO_simple()
    275  if (iov_iter_rw(iter) == READ) {  in __blkdev_direct_IO_simple()
    277  if (iter_is_iovec(iter))  in __blkdev_direct_IO_simple()
    375  __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)  in __blkdev_direct_IO() argument
    384  bool is_read = (iov_iter_rw(iter) == READ), is_sync;  in __blkdev_direct_IO()
    389  if ((pos | iov_iter_alignment(iter)) &  in __blkdev_direct_IO()
    406  dio->should_dirty = is_read && iter_is_iovec(iter);  in __blkdev_direct_IO()
    423  ret = bio_iov_iter_get_pages(bio, iter);  in __blkdev_direct_IO()
    [all …]
|
D | splice.c |
    1206  static long vmsplice_to_user(struct file *file, struct iov_iter *iter,  in vmsplice_to_user() argument
    1211  .total_len = iov_iter_count(iter),  in vmsplice_to_user()
    1213  .u.data = iter  in vmsplice_to_user()
    1234  static long vmsplice_to_pipe(struct file *file, struct iov_iter *iter,  in vmsplice_to_pipe() argument
    1251  ret = iter_to_pipe(iter, pipe, buf_flag);  in vmsplice_to_pipe()
    1294  struct iov_iter iter;  in SYSCALL_DEFINE4() local
    1308  ARRAY_SIZE(iovstack), &iov, &iter);  in SYSCALL_DEFINE4()
    1312  if (!iov_iter_count(&iter))  in SYSCALL_DEFINE4()
    1314  else if (iov_iter_rw(&iter) == WRITE)  in SYSCALL_DEFINE4()
    1315  error = vmsplice_to_pipe(f.file, &iter, flags);  in SYSCALL_DEFINE4()
    [all …]
|
D | seq_file.c |
    155  struct iov_iter iter;  in seq_read() local
    159  iov_iter_init(&iter, READ, &iov, 1, size);  in seq_read()
    162  ret = seq_read_iter(&kiocb, &iter);  in seq_read()
    171  ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter)  in seq_read_iter() argument
    179  if (!iov_iter_count(iter))  in seq_read_iter()
    216  n = copy_to_iter(m->buf + m->from, m->count, iter);  in seq_read_iter()
    270  if (m->count >= iov_iter_count(iter))  in seq_read_iter()
    281  n = copy_to_iter(m->buf, m->count, iter);  in seq_read_iter()
|
/fs/dlm/ |
D | plock.c |
    433  struct plock_op *op = NULL, *iter;  in dev_write() local
    454  list_for_each_entry(iter, &recv_list, list) {  in dev_write()
    455  if (iter->info.fsid == info.fsid &&  in dev_write()
    456  iter->info.number == info.number &&  in dev_write()
    457  iter->info.owner == info.owner &&  in dev_write()
    458  iter->info.pid == info.pid &&  in dev_write()
    459  iter->info.start == info.start &&  in dev_write()
    460  iter->info.end == info.end &&  in dev_write()
    461  iter->info.ex == info.ex &&  in dev_write()
    462  iter->info.wait) {  in dev_write()
    [all …]
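dev_write() above is the usual "find the matching element" idiom: walk the list with list_for_each_entry() into a scratch iterator and remember the hit. A generic sketch of that shape, with a hypothetical struct foo standing in for struct plock_op and only two match fields kept:

	/*
	 * Generic form of the lookup loop above: iterate the list, stop at the
	 * first element whose fields match, and return it (or NULL).
	 */
	struct foo {
		struct list_head list;
		u32 fsid;
		u64 number;
	};

	static struct foo *find_match(struct list_head *head, u32 fsid, u64 number)
	{
		struct foo *match = NULL, *iter;

		list_for_each_entry(iter, head, list) {
			if (iter->fsid == fsid && iter->number == number) {
				match = iter;
				break;
			}
		}
		return match;
	}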
|
D | recover.c |
    735  struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL;  in recover_lvb() local
    753  list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) {  in recover_lvb()
    754  if (!(iter->lkb_exflags & DLM_LKF_VALBLK))  in recover_lvb()
    759  if (iter->lkb_grmode > DLM_LOCK_CR) {  in recover_lvb()
    760  big_lkb = iter;  in recover_lvb()
    764  if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {  in recover_lvb()
    765  high_lkb = iter;  in recover_lvb()
    766  high_seq = iter->lkb_lvbseq;  in recover_lvb()
    770  list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) {  in recover_lvb()
    771  if (!(iter->lkb_exflags & DLM_LKF_VALBLK))  in recover_lvb()
    [all …]
|
/fs/fuse/ |
D | passthrough.c |
     75  struct iov_iter *iter)  in fuse_passthrough_read_iter() argument
     83  if (!iov_iter_count(iter))  in fuse_passthrough_read_iter()
     88  ret = vfs_iter_read(passthrough_filp, iter, &iocb_fuse->ki_pos,  in fuse_passthrough_read_iter()
    103  ret = call_read_iter(passthrough_filp, &aio_req->iocb, iter);  in fuse_passthrough_read_iter()
    116  struct iov_iter *iter)  in fuse_passthrough_write_iter() argument
    126  if (!iov_iter_count(iter))  in fuse_passthrough_write_iter()
    136  ret = vfs_iter_write(passthrough_filp, iter, &iocb_fuse->ki_pos,  in fuse_passthrough_write_iter()
    157  ret = call_write_iter(passthrough_filp, &aio_req->iocb, iter);  in fuse_passthrough_write_iter()
|
/fs/orangefs/ |
D | inode.c |
     23  struct iov_iter iter;  in orangefs_writepage_locked() local
     55  iov_iter_bvec(&iter, WRITE, &bv, 1, wlen);  in orangefs_writepage_locked()
     57  ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, wlen,  in orangefs_writepage_locked()
     94  struct iov_iter iter;  in orangefs_writepages_work() local
    114  iov_iter_bvec(&iter, WRITE, ow->bv, ow->npages, ow->len);  in orangefs_writepages_work()
    123  ret = wait_for_direct_io(ORANGEFS_IO_WRITE, inode, &off, &iter, ow->len,  in orangefs_writepages_work()
    250  struct iov_iter iter;  in orangefs_readpage() local
    279  iov_iter_bvec(&iter, READ, &bv, 1, PAGE_SIZE);  in orangefs_readpage()
    281  ret = wait_for_direct_io(ORANGEFS_IO_READ, inode, &off, &iter,  in orangefs_readpage()
    285  iov_iter_zero(~0U, &iter);  in orangefs_readpage()
    [all …]
|
D | file.c |
     49  loff_t *offset, struct iov_iter *iter, size_t total_size,  in wait_for_direct_io() argument
    139  ret = orangefs_bufmap_copy_from_iovec(iter, buffer_index,  in wait_for_direct_io()
    173  iov_iter_revert(iter, total_size);  in wait_for_direct_io()
    267  ret = orangefs_bufmap_copy_to_iovec(iter, buffer_index,  in wait_for_direct_io()
    346  struct iov_iter *iter)  in orangefs_file_read_iter() argument
    356  ret = generic_file_read_iter(iocb, iter);  in orangefs_file_read_iter()
    363  struct iov_iter *iter)  in orangefs_file_write_iter() argument
    374  ret = generic_file_write_iter(iocb, iter);  in orangefs_file_write_iter()
|
D | orangefs-bufmap.h |
    29  int orangefs_bufmap_copy_from_iovec(struct iov_iter *iter,
    33  int orangefs_bufmap_copy_to_iovec(struct iov_iter *iter,
|
/fs/ocfs2/dlm/ |
D | dlmmaster.c |
     713  struct dlm_node_iter iter;  in dlm_get_lock_resource() local
     936  dlm_node_iter_init(mle->vote_map, &iter);  in dlm_get_lock_resource()
     937  while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {  in dlm_get_lock_resource()
    1164  static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,  in dlm_bitmap_diff_iter_init() argument
    1171  iter->curnode = -1;  in dlm_bitmap_diff_iter_init()
    1172  iter->orig_bm = orig_bm;  in dlm_bitmap_diff_iter_init()
    1173  iter->cur_bm = cur_bm;  in dlm_bitmap_diff_iter_init()
    1176  p1 = *(iter->orig_bm + i);  in dlm_bitmap_diff_iter_init()
    1177  p2 = *(iter->cur_bm + i);  in dlm_bitmap_diff_iter_init()
    1178  iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);  in dlm_bitmap_diff_iter_init()
    [all …]
|
D | dlmcommon.h |
    1101  struct dlm_node_iter *iter)  in dlm_node_iter_init() argument
    1103  memcpy(iter->node_map, map, sizeof(iter->node_map));  in dlm_node_iter_init()
    1104  iter->curnode = -1;  in dlm_node_iter_init()
    1107  static inline int dlm_node_iter_next(struct dlm_node_iter *iter)  in dlm_node_iter_next() argument
    1110  bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);  in dlm_node_iter_next()
    1112  iter->curnode = O2NM_MAX_NODES;  in dlm_node_iter_next()
    1115  iter->curnode = bit;  in dlm_node_iter_next()
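The dlm_node_iter helpers above pair with the loop shown earlier in dlmmaster.c: initialise the iterator from a node bitmap, then pull node numbers until dlm_node_iter_next() returns a negative value. A usage sketch; visit_nodes() is a hypothetical caller:

	/*
	 * Walk every node number set in node_map, following the
	 * dlm_get_lock_resource() loop from this listing.
	 */
	static void visit_nodes(unsigned long *node_map)
	{
		struct dlm_node_iter iter;
		int nodenum;

		dlm_node_iter_init(node_map, &iter);
		while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
			/* nodenum is the next set bit, i.e. the next node to visit */
		}
	}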
|
/fs/pstore/ |
D | blk.c |
    283  struct iov_iter iter;  in psblk_generic_blk_read() local
    297  iov_iter_kvec(&iter, READ, &iov, 1, bytes);  in psblk_generic_blk_read()
    299  return generic_file_read_iter(&kiocb, &iter);  in psblk_generic_blk_read()
    306  struct iov_iter iter;  in psblk_generic_blk_write() local
    326  iov_iter_kvec(&iter, WRITE, &iov, 1, bytes);  in psblk_generic_blk_write()
    329  ret = generic_write_checks(&kiocb, &iter);  in psblk_generic_blk_write()
    331  ret = generic_perform_write(&file, &iter, pos);  in psblk_generic_blk_write()
|
/fs/cifs/ |
D | misc.c |
    836  setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)  in setup_aio_ctx_iter() argument
    843  size_t count = iov_iter_count(iter);  in setup_aio_ctx_iter()
    846  unsigned int max_pages = iov_iter_npages(iter, INT_MAX);  in setup_aio_ctx_iter()
    850  if (iov_iter_is_kvec(iter)) {  in setup_aio_ctx_iter()
    851  memcpy(&ctx->iter, iter, sizeof(*iter));  in setup_aio_ctx_iter()
    853  iov_iter_advance(iter, count);  in setup_aio_ctx_iter()
    880  rc = iov_iter_get_pages(iter, pages, count, max_pages, &start);  in setup_aio_ctx_iter()
    892  iov_iter_advance(iter, rc);  in setup_aio_ctx_iter()
    919  iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);  in setup_aio_ctx_iter()
|
/fs/kernfs/ |
D | file.c |
    184  static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)  in kernfs_file_read_iter() argument
    187  ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);  in kernfs_file_read_iter()
    223  if (copy_to_iter(buf, len, iter) != len) {  in kernfs_file_read_iter()
    238  static ssize_t kernfs_fop_read_iter(struct kiocb *iocb, struct iov_iter *iter)  in kernfs_fop_read_iter() argument
    241  return seq_read_iter(iocb, iter);  in kernfs_fop_read_iter()
    242  return kernfs_file_read_iter(iocb, iter);  in kernfs_fop_read_iter()
    255  static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)  in kernfs_fop_write_iter() argument
    258  ssize_t len = iov_iter_count(iter);  in kernfs_fop_write_iter()
    277  if (copy_from_iter(buf, len, iter) != len) {  in kernfs_fop_write_iter()
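kernfs_fop_write_iter() above illustrates the basic shape of a ->write_iter() handler that buffers the payload in kernel memory: size the copy from iov_iter_count(), pull the data in with copy_from_iter(), and treat a short copy as -EFAULT. A stripped-down sketch; sample_write_iter() is hypothetical and skips the size capping and locking the kernfs code performs:

	/*
	 * Minimal ->write_iter() skeleton in the style of the kernfs lines
	 * above.  Real handlers must bound len before allocating.
	 */
	static ssize_t sample_write_iter(struct kiocb *iocb, struct iov_iter *iter)
	{
		ssize_t len = iov_iter_count(iter);
		char *buf;

		buf = kmalloc(len + 1, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		if (copy_from_iter(buf, len, iter) != len) {
			kfree(buf);
			return -EFAULT;
		}
		buf[len] = '\0';

		/* ... hand buf/len to the underlying attribute here ... */

		kfree(buf);
		return len;
	}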
|
/fs/9p/ |
D | vfs_addr.c |
    235  v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)  in v9fs_direct_IO() argument
    241  if (iov_iter_rw(iter) == WRITE) {  in v9fs_direct_IO()
    242  n = p9_client_write(file->private_data, pos, iter, &err);  in v9fs_direct_IO()
    250  n = p9_client_read(file->private_data, pos, iter, &err);  in v9fs_direct_IO()
|
/fs/btrfs/tests/ |
D | btrfs-tests.c |
    146  struct radix_tree_iter iter;  in btrfs_free_dummy_fs_info() local
    160  radix_tree_for_each_slot(slot, &fs_info->buffer_radix, &iter, 0) {  in btrfs_free_dummy_fs_info()
    169  slot = radix_tree_iter_retry(&iter);  in btrfs_free_dummy_fs_info()
    172  slot = radix_tree_iter_resume(slot, &iter);  in btrfs_free_dummy_fs_info()
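btrfs_free_dummy_fs_info() above uses the retry/resume protocol for walking a radix tree while dropping the lock between entries. A generic sketch of that protocol; drop_cached_items() is a hypothetical helper, whereas the btrfs code walks fs_info->buffer_radix under buffer_lock:

	/*
	 * radix_tree_iter_retry() restarts the current index after losing a
	 * race; radix_tree_iter_resume() lets the walk survive dropping the
	 * lock for per-item work that may sleep.
	 */
	static void drop_cached_items(struct radix_tree_root *root, spinlock_t *lock)
	{
		struct radix_tree_iter iter;
		void __rcu **slot;

		spin_lock(lock);
		radix_tree_for_each_slot(slot, root, &iter, 0) {
			void *item = radix_tree_deref_slot_protected(slot, lock);

			if (!item)
				continue;
			if (radix_tree_deref_retry(item)) {
				slot = radix_tree_iter_retry(&iter);
				continue;
			}
			slot = radix_tree_iter_resume(slot, &iter);
			spin_unlock(lock);
			/* per-item teardown that may sleep goes here */
			spin_lock(lock);
		}
		spin_unlock(lock);
	}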
|
/fs/ceph/ |
D | file.c |
     83  static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,  in __iter_get_bvecs() argument
     89  if (maxsize > iov_iter_count(iter))  in __iter_get_bvecs()
     90  maxsize = iov_iter_count(iter);  in __iter_get_bvecs()
     98  bytes = iov_iter_get_pages(iter, pages, maxsize - size,  in __iter_get_bvecs()
    103  iov_iter_advance(iter, bytes);  in __iter_get_bvecs()
    130  static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,  in iter_get_bvecs_alloc() argument
    134  size_t orig_count = iov_iter_count(iter);  in iter_get_bvecs_alloc()
    138  iov_iter_truncate(iter, maxsize);  in iter_get_bvecs_alloc()
    139  npages = iov_iter_npages(iter, INT_MAX);  in iter_get_bvecs_alloc()
    140  iov_iter_reexpand(iter, orig_count);  in iter_get_bvecs_alloc()
    [all …]
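__iter_get_bvecs() above pins user pages in batches: grab a batch with iov_iter_get_pages(), advance past what was pinned, repeat until the requested size is covered. A condensed sketch of that loop; pin_iter_pages() and ITER_SKETCH_PAGES are hypothetical names, and the real ceph code also converts each batch into bio_vecs and keeps the page references:

	#define ITER_SKETCH_PAGES 16

	/*
	 * Pin up to maxsize bytes of the iterator's pages, a batch at a time.
	 * The caller must eventually release the page references taken here.
	 */
	static ssize_t pin_iter_pages(struct iov_iter *iter, size_t maxsize)
	{
		struct page *pages[ITER_SKETCH_PAGES];
		size_t total = 0;

		if (maxsize > iov_iter_count(iter))
			maxsize = iov_iter_count(iter);

		while (total < maxsize) {
			size_t start;
			ssize_t bytes;

			bytes = iov_iter_get_pages(iter, pages, maxsize - total,
						   ITER_SKETCH_PAGES, &start);
			if (bytes < 0)
				return total ? total : bytes;

			iov_iter_advance(iter, bytes);
			total += bytes;
			/* record pages[], start and bytes (e.g. as bio_vecs) here */
		}
		return total;
	}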
|
/fs/overlayfs/ |
D | file.c |
    283  static ssize_t ovl_read_iter(struct kiocb *iocb, struct iov_iter *iter)  in ovl_read_iter() argument
    290  if (!iov_iter_count(iter))  in ovl_read_iter()
    305  ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,  in ovl_read_iter()
    321  ret = vfs_iocb_iter_read(real.file, &aio_req->iocb, iter);  in ovl_read_iter()
    336  static ssize_t ovl_write_iter(struct kiocb *iocb, struct iov_iter *iter)  in ovl_write_iter() argument
    345  if (!iov_iter_count(iter))  in ovl_write_iter()
    371  ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,  in ovl_write_iter()
    394  ret = vfs_iocb_iter_write(real.file, &aio_req->iocb, iter);  in ovl_write_iter()
|
/fs/jfs/ |
D | inode.c |
    335  static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)  in jfs_direct_IO() argument
    340  size_t count = iov_iter_count(iter);  in jfs_direct_IO()
    343  ret = blockdev_direct_IO(iocb, inode, iter, jfs_get_block);  in jfs_direct_IO()
    349  if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {  in jfs_direct_IO()
|