/io_uring/ |
D | sync.c |
      19  loff_t len;  member
      33  sync->len = READ_ONCE(sqe->len);  in io_sfr_prep()
      47  ret = sync_file_range(req->file, sync->off, sync->len, sync->flags);  in io_sync_file_range()
      64  sync->len = READ_ONCE(sqe->len);  in io_fsync_prep()
      71  loff_t end = sync->off + sync->len;  in io_fsync()
      92  sync->len = READ_ONCE(sqe->addr);  in io_fallocate_prep()
      93  sync->mode = READ_ONCE(sqe->len);  in io_fallocate_prep()
     105  ret = vfs_fallocate(req->file, sync->mode, sync->off, sync->len);  in io_fallocate()
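An easily missed detail in the io_fallocate_prep() hits above: for IORING_OP_FALLOCATE the byte count is read from sqe->addr while sqe->len carries the fallocate mode. A minimal userspace sketch of filling the raw SQE fields to match; it assumes liburing only for the ring/SQE plumbing, and queue_fallocate() is a hypothetical helper, not a liburing API (liburing also ships its own prep helper for this opcode).

#include <liburing.h>
#include <string.h>

/* Hypothetical helper: queue an IORING_OP_FALLOCATE request by filling the
 * raw SQE the way io_fallocate_prep() reads it: length in sqe->addr,
 * fallocate mode in sqe->len, file offset in sqe->off. */
static int queue_fallocate(struct io_uring *ring, int fd,
                           int mode, __u64 offset, __u64 nbytes)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_FALLOCATE;
	sqe->fd     = fd;
	sqe->off    = offset;   /* sync->off  = READ_ONCE(sqe->off)  */
	sqe->addr   = nbytes;   /* sync->len  = READ_ONCE(sqe->addr) */
	sqe->len    = mode;     /* sync->mode = READ_ONCE(sqe->len)  */
	return 0;
}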
|
D | advise.c |
      20  u32 len;  member
      27  u32 len;  member
      40  ma->len = READ_ONCE(sqe->len);  in io_madvise_prep()
      57  ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);  in io_madvise()
      73  fa->len = READ_ONCE(sqe->len);  in io_fadvise_prep()
      94  ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);  in io_fadvise()
|
D | splice.c |
      21  u64 len;  member
      32  sp->len = READ_ONCE(sqe->len);  in __io_splice_prep()
      67  if (sp->len)  in io_tee()
      68  ret = do_tee(in, out, sp->len, flags);  in io_tee()
      73  if (ret != sp->len)  in io_tee()
     112  if (sp->len)  in io_splice()
     113  ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);  in io_splice()
     118  if (ret != sp->len)  in io_splice()
|
D | kbuf.c |
      28  __u32 len;  member
     113  static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len,  in io_provided_buffer_select() argument
     121  if (*len == 0 || *len > kbuf->len)  in io_provided_buffer_select()
     122  *len = kbuf->len;  in io_provided_buffer_select()
     131  static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len,  in io_ring_buffer_select() argument
     151  if (*len == 0 || *len > buf->len)  in io_ring_buffer_select()
     152  *len = buf->len;  in io_ring_buffer_select()
     174  void __user *io_buffer_select(struct io_kiocb *req, size_t *len,  in io_buffer_select() argument
     186  ret = io_ring_buffer_select(req, len, bl, issue_flags);  in io_buffer_select()
     188  ret = io_provided_buffer_select(req, len, bl);  in io_buffer_select()
     [all …]
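The io_provided_buffer_select()/io_ring_buffer_select() hits above show that a caller's *len of 0, or one larger than the selected buffer, is clamped to the length the buffer was registered with. A sketch of the userspace side that feeds those values in, assuming liburing; BGID, BUF_SZ, NR_BUFS and submit_provide_and_recv() are illustrative values and helpers, not part of any API.

#include <liburing.h>

#define BGID    1
#define BUF_SZ  4096
#define NR_BUFS 8

static char bufs[NR_BUFS][BUF_SZ];

static void submit_provide_and_recv(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe;

	/* Hand NR_BUFS buffers of BUF_SZ bytes each to the kernel under group BGID;
	 * BUF_SZ is the kbuf->len that io_provided_buffer_select() clamps against. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_provide_buffers(sqe, bufs, BUF_SZ, NR_BUFS, BGID, 0);

	/* Recv with no buffer of its own: the kernel picks one from group BGID and,
	 * because the requested length is 0, caps the transfer at BUF_SZ. */
	sqe = io_uring_get_sqe(ring);
	io_uring_prep_recv(sqe, sockfd, NULL, 0, 0);
	sqe->flags |= IOSQE_BUFFER_SELECT;
	sqe->buf_group = BGID;

	io_uring_submit(ring);
}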
|
D | net.c |
      60  unsigned len;  member
      81  shutdown->how = READ_ONCE(sqe->len);  in io_shutdown_prep()
     265  sr->len = READ_ONCE(sqe->len);  in io_sendmsg_prep()
     382  ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);  in io_send()
     400  sr->len -= ret;  in io_send()
     449  sr->len = iomsg->fast_iov[0].iov_len = 0;  in __io_recvmsg_copy_hdr()
     457  sr->len = iomsg->fast_iov[0].iov_len;  in __io_recvmsg_copy_hdr()
     501  sr->len = 0;  in __io_compat_recvmsg_copy_hdr()
     511  sr->len = clen;  in __io_compat_recvmsg_copy_hdr()
     569  sr->len = READ_ONCE(sqe->len);  in io_recvmsg_prep()
     [all …]
|
D | openclose.c |
      70  u64 mode = READ_ONCE(sqe->len);  in io_openat_prep()
      81  size_t len;  in io_openat2_prep() local
      85  len = READ_ONCE(sqe->len);  in io_openat2_prep()
      86  if (len < OPEN_HOW_SIZE_VER0)  in io_openat2_prep()
      89  ret = copy_struct_from_user(&open->how, sizeof(open->how), how, len);  in io_openat2_prep()
     201  if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)  in io_close_prep()
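The io_openat2_prep() hits above use sqe->len as the size of the userspace struct open_how: it is validated against OPEN_HOW_SIZE_VER0 and passed to copy_struct_from_user(), so older and newer struct layouts both keep working. A small userspace sketch, assuming liburing, whose io_uring_prep_openat2() stores sizeof(struct open_how) in sqe->len; queue_openat2() is a hypothetical helper.

#include <liburing.h>
#include <linux/openat2.h>
#include <fcntl.h>

/* Hypothetical helper: queue an IORING_OP_OPENAT2 request. The open_how
 * struct is static so it stays valid until the SQE is actually consumed. */
static void queue_openat2(struct io_uring *ring, int dirfd, const char *path)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	static struct open_how how = {
		.flags   = O_RDONLY,
		.resolve = RESOLVE_BENEATH,   /* refuse to escape dirfd */
	};

	/* sqe->len becomes sizeof(how), the size io_openat2_prep() checks. */
	io_uring_prep_openat2(sqe, dirfd, path, &how);
}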
|
D | msg_ring.c |
      20  u32 len;  member
      75  if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))  in io_msg_ring_data()
      79  if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0, true))  in io_msg_ring_data()
     166  msg->len = READ_ONCE(sqe->len);  in io_msg_ring_prep()
|
D | notif.h |
      29  static inline int io_notif_account_mem(struct io_kiocb *notif, unsigned len)  in io_notif_account_mem() argument
      33  unsigned nr_pages = (len >> PAGE_SHIFT) + 2;  in io_notif_account_mem()
|
D | fs.c |
      63  ren->new_dfd = READ_ONCE(sqe->len);  in io_renameat_prep()
     109  if (sqe->off || sqe->len || sqe->buf_index || sqe->splice_fd_in)  in io_unlinkat_prep()
     165  mkd->mode = READ_ONCE(sqe->len);  in io_mkdirat_prep()
     203  if (sqe->len || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)  in io_symlinkat_prep()
     252  lnk->new_dfd = READ_ONCE(sqe->len);  in io_linkat_prep()
|
D | filetable.h |
      69  unsigned off, unsigned len)  in io_file_table_set_alloc_range() argument
      72  ctx->file_alloc_end = off + len;  in io_file_table_set_alloc_range()
|
D | rw.c |
      27  u32 len;  member
      50  rw->len = clen;  in io_iov_compat_buffer_select_prep()
      61  if (rw->len != 1)  in io_iov_buffer_select_prep()
      72  rw->len = iov.iov_len;  in io_iov_buffer_select_prep()
     110  rw->len = READ_ONCE(sqe->len);  in io_prep_rw()
     369  ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);  in __io_import_iovec()
     376  sqe_len = rw->len;  in __io_import_iovec()
     385  rw->len = sqe_len;  in __io_import_iovec()
     451  iovec.iov_len = rw->len;  in loop_rw_iter()
     472  rw->len -= nr;  in loop_rw_iter()
     [all …]
|
D | kbuf.h |
      31  __u32 len;  member
      36  void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
|
D | uring_cmd.c |
     155  int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,  in io_uring_cmd_import_fixed() argument
     160  return io_import_fixed(rw, iter, req->imu, ubuf, len);  in io_uring_cmd_import_fixed()
|
D | filetable.c |
     187  if (check_add_overflow(range.off, range.len, &end))  in io_register_file_alloc_range()
     192  io_file_table_set_alloc_range(ctx, range.off, range.len);  in io_register_file_alloc_range()
|
D | epoll.c | 32 epoll->op = READ_ONCE(sqe->len); in io_epoll_ctl_prep()
|
D | statx.c | 34 sx->mask = READ_ONCE(sqe->len); in io_statx_prep()
|
D | xattr.c |
      58  ix->ctx.size = READ_ONCE(sqe->len);  in __io_getxattr_prep()
     165  ix->ctx.size = READ_ONCE(sqe->len);  in __io_setxattr_prep()
|
D | fdinfo.c |
     167  unsigned int len = buf->ubuf_end - buf->ubuf;  in __io_uring_show_fdinfo() local
     169  seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);  in __io_uring_show_fdinfo()
|
D | rsrc.c |
     666  up->nr_args = READ_ONCE(sqe->len);  in io_files_update_prep()
    1126  struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)  in io_pin_pages() argument
    1133  end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;  in io_pin_pages()
    1315  u64 buf_addr, size_t len)  in io_import_fixed() argument
    1322  if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))  in io_import_fixed()
    1333  iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);  in io_import_fixed()
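The io_pin_pages() hit at 1133 above rounds the end of the user range up to a page boundary; together with the start page this gives the number of pages to pin. A standalone sketch of that arithmetic, assuming a 4 KiB page size purely for illustration.

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* How many pages does the user range [ubuf, ubuf + len) touch, i.e.
 * how many would io_pin_pages() pin? */
static unsigned long pages_spanned(unsigned long ubuf, unsigned long len)
{
	unsigned long start = ubuf >> PAGE_SHIFT;
	unsigned long end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	return end - start;
}

int main(void)
{
	/* 32 bytes at 0x1ff0 straddle a 4 KiB boundary, so two pages are pinned. */
	printf("%lu\n", pages_spanned(0x1ff0, 0x20));
	return 0;
}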
|
D | io_uring.c |
     749  unsigned int free, queued, len;  in __io_get_cqe() local
     763  len = min(free, ctx->cq_entries - off);  in __io_get_cqe()
     764  if (!len)  in __io_get_cqe()
     769  len <<= 1;  in __io_get_cqe()
     773  ctx->cqe_sentinel = ctx->cqe_cached + len;  in __io_get_cqe()
    3119  unsigned long addr, unsigned long len,  in io_uring_mmu_get_unmapped_area() argument
    3132  ptr = io_uring_validate_mmap_request(filp, pgoff, len);  in io_uring_mmu_get_unmapped_area()
    3158  return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);  in io_uring_mmu_get_unmapped_area()
    3174  unsigned long addr, unsigned long len,  in io_uring_nommu_get_unmapped_area() argument
    3179  ptr = io_uring_validate_mmap_request(file, pgoff, len);  in io_uring_nommu_get_unmapped_area()
    [all …]
|
D | rsrc.h | 69 u64 buf_addr, size_t len);
|
D | timeout.c |
     397  if (sqe->buf_index || sqe->len || sqe->splice_fd_in)  in io_timeout_remove_prep()
     469  if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)  in __io_timeout_prep()
|
D | cancel.c | 114 if (sqe->off || sqe->len || sqe->splice_fd_in) in io_async_cancel_prep()
|
D | io_uring.h | 43 struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
|
D | poll.c |
     926  flags = READ_ONCE(sqe->len);  in io_poll_remove_prep()
     956  flags = READ_ONCE(sqe->len);  in io_poll_add_prep()
|