
Searched refs:len (Results 1 – 25 of 25) sorted by relevance

/io_uring/
sync.c
19 loff_t len; member
33 sync->len = READ_ONCE(sqe->len); in io_sfr_prep()
48 ret = sync_file_range(req->file, sync->off, sync->len, sync->flags); in io_sync_file_range()
65 sync->len = READ_ONCE(sqe->len); in io_fsync_prep()
73 loff_t end = sync->off + sync->len; in io_fsync()
93 sync->len = READ_ONCE(sqe->addr); in io_fallocate_prep()
94 sync->mode = READ_ONCE(sqe->len); in io_fallocate_prep()
107 ret = vfs_fallocate(req->file, sync->mode, sync->off, sync->len); in io_fallocate()
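A pattern repeated across these prep handlers: each SQE field is fetched exactly once with READ_ONCE(), because the submission ring is mapped into userspace and could be rewritten concurrently, and the handler works only from its private snapshot afterwards. A minimal userspace sketch of that discipline (struct layouts and names here are illustrative, not the kernel's):

    #include <stdint.h>

    /* approximate the kernel's READ_ONCE() with a volatile load */
    #define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

    struct sqe      { uint64_t off; uint32_t len; uint32_t sync_flags; };
    struct sync_req { uint64_t off; uint64_t len; uint32_t flags; };

    static void sfr_prep(struct sync_req *sync, const struct sqe *sqe)
    {
            /* snapshot each shared field once; use only the copy later */
            sync->off   = READ_ONCE(sqe->off);
            sync->len   = READ_ONCE(sqe->len);
            sync->flags = READ_ONCE(sqe->sync_flags);
    }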
advise.c
20 u32 len; member
27 u32 len; member
40 ma->len = READ_ONCE(sqe->len); in io_madvise_prep()
57 ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice); in io_madvise()
85 fa->len = READ_ONCE(sqe->len); in io_fadvise_prep()
99 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice); in io_fadvise()
splice.c
21 u64 len; member
32 sp->len = READ_ONCE(sqe->len); in __io_splice_prep()
67 if (sp->len) in io_tee()
68 ret = do_tee(in, out, sp->len, flags); in io_tee()
73 if (ret != sp->len) in io_tee()
111 if (sp->len) in io_splice()
112 ret = do_splice(in, poff_in, out, poff_out, sp->len, flags); in io_splice()
117 if (ret != sp->len) in io_splice()
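Both io_tee() and io_splice() treat a transfer that moved fewer bytes than sp->len as a failure (lines 73 and 117 above) while still reporting the byte count as the result. A hedged sketch of that completion logic, with a hypothetical transfer callback standing in for do_tee()/do_splice():

    #include <stdbool.h>
    #include <stdint.h>

    typedef long (*transfer_fn)(int in, int out, uint64_t len, unsigned flags);

    /* returns the CQE-style result; *failed mirrors ret != sp->len */
    static long complete_transfer(transfer_fn fn, int in, int out,
                                  uint64_t len, unsigned flags, bool *failed)
    {
            long ret = 0;

            if (len)
                    ret = fn(in, out, len, flags);
            *failed = (ret != (long)len);   /* short or errored transfer */
            return ret;
    }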
kbuf.c
26 __u32 len; member
126 static void __user *io_provided_buffer_select(struct io_kiocb *req, size_t *len, in io_provided_buffer_select() argument
134 if (*len == 0 || *len > kbuf->len) in io_provided_buffer_select()
135 *len = kbuf->len; in io_provided_buffer_select()
144 static void __user *io_ring_buffer_select(struct io_kiocb *req, size_t *len, in io_ring_buffer_select() argument
165 if (*len == 0 || *len > buf->len) in io_ring_buffer_select()
166 *len = buf->len; in io_ring_buffer_select()
189 void __user *io_buffer_select(struct io_kiocb *req, size_t *len, in io_buffer_select() argument
201 ret = io_ring_buffer_select(req, len, bl, issue_flags); in io_buffer_select()
203 ret = io_provided_buffer_select(req, len, bl); in io_buffer_select()
[all …]
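The clamp at lines 134-135 and 165-166 is the heart of buffer selection: a requested length of zero means "take the whole provided buffer", and anything larger than the buffer is trimmed to fit, with the caller's *len updated so the eventual completion reflects the usable size. A standalone sketch (types simplified):

    #include <stddef.h>
    #include <stdint.h>

    static void clamp_selected_len(size_t *len, uint32_t buf_len)
    {
            /* 0 means "whole buffer"; otherwise never exceed the buffer */
            if (*len == 0 || *len > buf_len)
                    *len = buf_len;
    }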
net.c
60 unsigned len; member
102 shutdown->how = READ_ONCE(sqe->len); in io_shutdown_prep()
225 sr->len = 0; in io_compat_msg_copy_hdr()
235 sr->len = clen; in io_compat_msg_copy_hdr()
263 sr->len = iomsg->fast_iov[0].iov_len = 0; in io_msg_copy_hdr()
272 sr->len = iomsg->fast_iov[0].iov_len; in io_msg_copy_hdr()
385 sr->len = READ_ONCE(sqe->len); in io_sendmsg_prep()
501 ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &msg.msg_iter); in io_send()
519 sr->len -= ret; in io_send()
623 sr->len = READ_ONCE(sqe->len); in io_recvmsg_prep()
[all …]
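Line 519 (sr->len -= ret) is the partial-send bookkeeping: when the socket accepts fewer bytes than asked, the remaining length (and, in the kernel, the iterator position) is advanced so a retry resumes where the last attempt stopped. A userspace analogue with send(2), error handling trimmed:

    #include <stddef.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    static ssize_t send_all(int fd, const char *buf, size_t len)
    {
            while (len) {
                    ssize_t ret = send(fd, buf, len, 0);
                    if (ret < 0)
                            return ret;     /* caller inspects errno */
                    buf += ret;             /* advance past sent bytes */
                    len -= ret;             /* mirrors sr->len -= ret */
            }
            return 0;
    }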
openclose.c
83 u64 mode = READ_ONCE(sqe->len); in io_openat_prep()
94 size_t len; in io_openat2_prep() local
98 len = READ_ONCE(sqe->len); in io_openat2_prep()
99 if (len < OPEN_HOW_SIZE_VER0) in io_openat2_prep()
102 ret = copy_struct_from_user(&open->how, sizeof(open->how), how, len); in io_openat2_prep()
206 if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index) in io_close_prep()
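io_openat2_prep() shows the extensible-struct convention: userspace passes sizeof(its struct open_how) in sqe->len, sizes older than the v0 layout are rejected, and copy_struct_from_user() accepts a larger struct only if every trailing byte the kernel does not understand is zero. A simplified userspace sketch of that copy (the real helper lives in the kernel; the function name here is hypothetical):

    #include <errno.h>
    #include <stddef.h>
    #include <string.h>

    static int copy_versioned_struct(void *dst, size_t ksize,
                                     const void *src, size_t usize)
    {
            size_t n = usize < ksize ? usize : ksize;

            /* trailing bytes the kernel doesn't know about must be zero */
            for (size_t i = n; i < usize; i++)
                    if (((const char *)src)[i] != 0)
                            return -E2BIG;
            memset(dst, 0, ksize);   /* fields userspace didn't send read as 0 */
            memcpy(dst, src, n);
            return 0;
    }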
msg_ring.c
26 u32 len; member
115 if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags)) in io_msg_tw_complete()
150 if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags)) in io_msg_ring_data()
154 if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags)) in io_msg_ring_data()
230 if (msg->len) in io_msg_send_fd()
258 msg->len = READ_ONCE(sqe->len); in io_msg_ring_prep()
rw.c
27 u32 len; member
50 rw->len = clen; in io_iov_compat_buffer_select_prep()
61 if (rw->len != 1) in io_iov_buffer_select_prep()
72 rw->len = iov.iov_len; in io_iov_buffer_select_prep()
111 rw->len = READ_ONCE(sqe->len); in io_prep_rw()
382 ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len); in __io_import_iovec()
389 sqe_len = rw->len; in __io_import_iovec()
398 rw->len = sqe_len; in __io_import_iovec()
458 size_t len; in loop_rw_iter() local
463 len = iov_iter_count(iter); in loop_rw_iter()
[all …]
notif.h
40 static inline int io_notif_account_mem(struct io_kiocb *notif, unsigned len) in io_notif_account_mem() argument
44 unsigned nr_pages = (len >> PAGE_SHIFT) + 2; in io_notif_account_mem()
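The estimate at line 44 over-counts deliberately: a buffer of len bytes with no alignment guarantee can touch one extra page at each end, so (len >> PAGE_SHIFT) + 2 is a cheap upper bound for the memory accounting. Sketch, with the exact count shown for contrast (4 KiB pages assumed):

    #define PAGE_SHIFT 12   /* 4 KiB pages assumed */

    static unsigned long pages_upper_bound(unsigned long len)
    {
            return (len >> PAGE_SHIFT) + 2;   /* as at notif.h line 44 */
    }

    /* exact page span of [ubuf, ubuf + len), len > 0 */
    static unsigned long pages_exact(unsigned long ubuf, unsigned long len)
    {
            unsigned long first = ubuf >> PAGE_SHIFT;
            unsigned long last  = (ubuf + len - 1) >> PAGE_SHIFT;
            return last - first + 1;
    }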
fs.c
63 ren->new_dfd = READ_ONCE(sqe->len); in io_renameat_prep()
109 if (sqe->off || sqe->len || sqe->buf_index || sqe->splice_fd_in) in io_unlinkat_prep()
165 mkd->mode = READ_ONCE(sqe->len); in io_mkdirat_prep()
203 if (sqe->len || sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in) in io_symlinkat_prep()
252 lnk->new_dfd = READ_ONCE(sqe->len); in io_linkat_prep()
filetable.h
75 unsigned off, unsigned len) in io_file_table_set_alloc_range() argument
78 ctx->file_alloc_end = off + len; in io_file_table_set_alloc_range()
kbuf.h
39 __u32 len; member
44 void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
filetable.c
168 if (check_add_overflow(range.off, range.len, &end)) in io_register_file_alloc_range()
173 io_file_table_set_alloc_range(ctx, range.off, range.len); in io_register_file_alloc_range()
uring_cmd.c
159 int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, in io_uring_cmd_import_fixed() argument
164 return io_import_fixed(rw, iter, req->imu, ubuf, len); in io_uring_cmd_import_fixed()
epoll.c
32 epoll->op = READ_ONCE(sqe->len); in io_epoll_ctl_prep()
statx.c
34 sx->mask = READ_ONCE(sqe->len); in io_statx_prep()
xattr.c
58 ix->ctx.size = READ_ONCE(sqe->len); in __io_getxattr_prep()
162 ix->ctx.size = READ_ONCE(sqe->len); in __io_setxattr_prep()
fdinfo.c
166 unsigned int len = buf->ubuf_end - buf->ubuf; in io_uring_show_fdinfo() local
168 seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len); in io_uring_show_fdinfo()
rsrc.c
565 up->nr_args = READ_ONCE(sqe->len); in io_files_update_prep()
876 struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages) in io_pin_pages() argument
882 end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT; in io_pin_pages()
1065 u64 buf_addr, size_t len) in io_import_fixed() argument
1072 if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end))) in io_import_fixed()
1083 iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len); in io_import_fixed()
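io_import_fixed() guards the arithmetic before trusting it: buf_addr + len is computed with check_add_overflow() at line 1072, so a wrapping sum cannot sneak past the range check. A hedged sketch of that validation using the GCC/Clang builtin the kernel macro wraps (the imu_* names are illustrative stand-ins for the registered buffer's bounds):

    #include <stdbool.h>
    #include <stdint.h>

    /* assumes the registered region [imu_addr, imu_addr + imu_len)
     * itself does not wrap */
    static bool fixed_range_ok(uint64_t buf_addr, uint64_t len,
                               uint64_t imu_addr, uint64_t imu_len)
    {
            uint64_t buf_end;

            if (__builtin_add_overflow(buf_addr, len, &buf_end))
                    return false;   /* kernel: check_add_overflow() */
            return buf_addr >= imu_addr && buf_end <= imu_addr + imu_len;
    }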
rsrc.h
67 u64 buf_addr, size_t len);
io_uring.c
833 unsigned int free, queued, len; in io_cqe_cache_refill() local
847 len = min(free, ctx->cq_entries - off); in io_cqe_cache_refill()
848 if (!len) in io_cqe_cache_refill()
853 len <<= 1; in io_cqe_cache_refill()
857 ctx->cqe_sentinel = ctx->cqe_cached + len; in io_cqe_cache_refill()
3516 unsigned long addr, unsigned long len, in io_uring_mmu_get_unmapped_area() argument
3529 ptr = io_uring_validate_mmap_request(filp, pgoff, len); in io_uring_mmu_get_unmapped_area()
3555 return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags); in io_uring_mmu_get_unmapped_area()
3571 unsigned long addr, unsigned long len, in io_uring_nommu_get_unmapped_area() argument
3576 ptr = io_uring_validate_mmap_request(file, pgoff, len); in io_uring_nommu_get_unmapped_area()
[all …]
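In io_cqe_cache_refill() (lines 833-857 above) the batch length is capped twice: by how many CQ entries are free and by the distance to the end of the ring, since one refill claims a contiguous, non-wrapping run of slots; the len <<= 1 at line 853 accounts for 32-byte CQEs occupying two base-sized slots. A simplified sketch of just that length math (parameter names assumed):

    #include <stdint.h>

    static uint32_t refill_len(uint32_t free, uint32_t cq_entries,
                               uint32_t off, int cqe32)
    {
            /* contiguous run only: stop at the ring's end, wrap next batch */
            uint32_t len = free < cq_entries - off ? free : cq_entries - off;

            if (cqe32)
                    len <<= 1;   /* a 32-byte CQE spans two base slots */
            return len;
    }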
timeout.c
429 if (sqe->buf_index || sqe->len || sqe->splice_fd_in) in io_timeout_remove_prep()
501 if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in) in __io_timeout_prep()
cancel.c
150 cancel->opcode = READ_ONCE(sqe->len); in io_async_cancel_prep()
io_uring.h
57 struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);
poll.c
913 flags = READ_ONCE(sqe->len); in io_poll_remove_prep()
943 flags = READ_ONCE(sqe->len); in io_poll_add_prep()