
Searched refs:req (results 1–25 of 43, sorted by relevance)


/io_uring/
poll.c
34  struct io_kiocb *req;
68  static bool io_poll_get_ownership_slowpath(struct io_kiocb *req)
77  v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs);
80  return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
89  static inline bool io_poll_get_ownership(struct io_kiocb *req)
91  if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS))
92  return io_poll_get_ownership_slowpath(req);
93  return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK);
96  static void io_poll_mark_cancelled(struct io_kiocb *req)
98  atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs);
[all …]
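
The poll_refs scheme above hands ownership of a request to whichever caller takes the masked refcount from zero to one; racing callers piggyback on the owner. A minimal userspace sketch of the same idea in C11 atomics; REF_MASK and CANCEL_FLAG are local stand-ins for the kernel's IO_POLL_* constants, and the IO_POLL_RETRY_FLAG slowpath is omitted:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define REF_MASK    0x0000ffffu  /* low bits count references */
    #define CANCEL_FLAG 0x80000000u  /* high bit marks cancellation */

    struct fake_req { atomic_uint poll_refs; };

    /* fetch_add returns the old value: an old masked count of zero
     * means our increment won ownership of the request. */
    static bool get_ownership(struct fake_req *req)
    {
        return !(atomic_fetch_add(&req->poll_refs, 1) & REF_MASK);
    }

    static void mark_cancelled(struct fake_req *req)
    {
        atomic_fetch_or(&req->poll_refs, CANCEL_FLAG);
    }
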
kbuf.h
36  void __user *io_buffer_select(struct io_kiocb *req, size_t *len,
40  int io_remove_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
41  int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags);
43  int io_provide_buffers_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
44  int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags);
49  unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags);
51  void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
53  static inline void io_kbuf_recycle_ring(struct io_kiocb *req)
62  if (req->buf_list) {
63  if (req->flags & REQ_F_PARTIAL_IO) {
[all …]
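
io_buffer_select() and io_kbuf_recycle_ring() are the kernel half of provided buffers. From the application side the flow looks roughly like this liburing sketch; the buffer group id, sizes, and fd are illustrative:

    #include <liburing.h>

    #define BGID 0  /* hypothetical buffer group id */

    static char bufs[8][4096];

    /* Register a group of buffers, then issue a read that lets the
     * kernel pick one of them via io_buffer_select(). */
    int read_with_selected_buffer(struct io_uring *ring, int fd)
    {
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int ret;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_provide_buffers(sqe, bufs, 4096, 8, BGID, 0);
        io_uring_submit(ring);
        io_uring_wait_cqe(ring, &cqe);
        io_uring_cqe_seen(ring, cqe);

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_read(sqe, fd, NULL, 4096, 0); /* no buffer: kernel selects */
        sqe->flags |= IOSQE_BUFFER_SELECT;
        sqe->buf_group = BGID;
        io_uring_submit(ring);
        io_uring_wait_cqe(ring, &cqe);
        /* the chosen buffer id is in cqe->flags >> IORING_CQE_BUFFER_SHIFT */
        ret = cqe->res;
        io_uring_cqe_seen(ring, cqe);
        return ret;
    }
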
rw.c
31  static inline bool io_file_supports_nowait(struct io_kiocb *req)
33  return req->flags & REQ_F_SUPPORT_NOWAIT;
55  static int io_iov_buffer_select_prep(struct io_kiocb *req)
59  struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
65  if (req->ctx->compat)
76  int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
78  struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
84  req->buf_index = READ_ONCE(sqe->buf_index);
86  if (req->opcode == IORING_OP_READ_FIXED ||
87  req->opcode == IORING_OP_WRITE_FIXED) {
[all …]
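
io_prep_rw() picks up sqe->buf_index for IORING_OP_READ_FIXED and IORING_OP_WRITE_FIXED. A hedged liburing sketch of driving that path with a pre-registered buffer; the buffer size and index are illustrative:

    #include <liburing.h>
    #include <sys/uio.h>

    static char fixed[4096];

    /* Register one buffer, then read into it with IORING_OP_READ_FIXED. */
    int read_fixed_example(struct io_uring *ring, int fd)
    {
        struct iovec iov = { .iov_base = fixed, .iov_len = sizeof(fixed) };
        struct io_uring_sqe *sqe;

        io_uring_register_buffers(ring, &iov, 1);
        sqe = io_uring_get_sqe(ring);
        /* the last argument becomes the buf_index io_prep_rw() reads */
        io_uring_prep_read_fixed(sqe, fd, fixed, sizeof(fixed), 0, 0);
        return io_uring_submit(ring);
    }
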
uring_cmd.c
15  static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
17  struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
26  struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
29  req->io_task_work.func = io_uring_cmd_work;
30  io_req_task_work_add(req);
34  static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
37  req->extra1 = extra1;
38  req->extra2 = extra2;
39  req->flags |= REQ_F_CQE32_INIT;
49  struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);
[all …]
timeout.c
37  static inline bool io_is_timeout_noseq(struct io_kiocb *req)
39  struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
44  static inline void io_put_req(struct io_kiocb *req)
46  if (req_ref_put_and_test(req)) {
47  io_queue_next(req);
48  io_free_req(req);
52  static bool io_kill_timeout(struct io_kiocb *req, int status)
53  __must_hold(&req->ctx->completion_lock)
54  __must_hold(&req->ctx->timeout_lock)
56  struct io_timeout_data *io = req->async_data;
[all …]
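
The timeouts that io_kill_timeout() tears down are armed from userspace with IORING_OP_TIMEOUT. A minimal liburing sketch; one second and a count of 0 (fire purely on time) are illustrative:

    #include <liburing.h>

    /* Arm a 1-second timeout; a normal expiry completes with -ETIME
     * in cqe->res. */
    int arm_timeout(struct io_uring *ring)
    {
        struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_timeout(sqe, &ts, 0, 0);
        return io_uring_submit(ring);
    }
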
refs.h
11  #define req_ref_zero_or_close_to_overflow(req) \
12  ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
14  static inline bool req_ref_inc_not_zero(struct io_kiocb *req)
16  WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT));
17  return atomic_inc_not_zero(&req->refs);
20  static inline bool req_ref_put_and_test(struct io_kiocb *req)
22  if (likely(!(req->flags & REQ_F_REFCOUNT)))
25  WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
26  return atomic_dec_and_test(&req->refs);
29  static inline void req_ref_get(struct io_kiocb *req)
[all …]
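
The "+ 127u <= 127u" test reads oddly but is a single-compare window check: reinterpreted as unsigned, any refcount in [-127, 0] wraps into [0, 127] after the addition, so one comparison covers both "hit zero" and "just overflowed negative". A standalone demo of the arithmetic:

    #include <assert.h>

    /* Same predicate as req_ref_zero_or_close_to_overflow(), minus the atomics. */
    static int zero_or_close_to_overflow(unsigned int refs)
    {
        return refs + 127u <= 127u;
    }

    int main(void)
    {
        assert(zero_or_close_to_overflow(0));               /* refcount hit zero */
        assert(zero_or_close_to_overflow((unsigned)-1));    /* -1: underflowed */
        assert(zero_or_close_to_overflow((unsigned)-127));  /* edge of the window */
        assert(!zero_or_close_to_overflow(1));              /* healthy refcount */
        assert(!zero_or_close_to_overflow((unsigned)-128)); /* outside the window */
        return 0;
    }
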
net.h
31  int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
32  int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
34  int io_sendmsg_prep_async(struct io_kiocb *req);
35  void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
36  int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
37  int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
39  int io_send(struct io_kiocb *req, unsigned int issue_flags);
40  int io_send_prep_async(struct io_kiocb *req);
42  int io_recvmsg_prep_async(struct io_kiocb *req);
43  int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
[all …]
net.c
73  int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
75  struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
85  int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
87  struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
94  sock = sock_from_file(req->file);
99  io_req_set_res(req, ret, 0);
110  static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
112  struct io_async_msghdr *hdr = req->async_data;
114  if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
118  if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
[all …]
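
io_shutdown() is the ring-side implementation of shutdown(2). A hedged liburing counterpart; shutting down only the write side is illustrative:

    #include <liburing.h>
    #include <sys/socket.h>

    /* Queue a shutdown(SHUT_WR) through the ring instead of the syscall. */
    int shutdown_write_side(struct io_uring *ring, int sockfd)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
        return io_uring_submit(ring);
    }
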
io_uring.h
30  bool io_req_cqe_overflow(struct io_kiocb *req);
34  void io_req_complete_failed(struct io_kiocb *req, s32 res);
35  void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
36  void io_req_complete_post(struct io_kiocb *req);
45  struct file *io_file_get_normal(struct io_kiocb *req, int fd);
46  struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
49  static inline bool io_req_ffs_set(struct io_kiocb *req)
51  return req->flags & REQ_F_FIXED_FILE;
54  void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
56  bool io_alloc_async_data(struct io_kiocb *req);
[all …]
fs.c
50  int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
52  struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
57  if (unlikely(req->flags & REQ_F_FIXED_FILE))
76  req->flags |= REQ_F_NEED_CLEANUP;
80  int io_renameat(struct io_kiocb *req, unsigned int issue_flags)
82  struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
91  req->flags &= ~REQ_F_NEED_CLEANUP;
92  io_req_set_res(req, ret, 0);
96  void io_renameat_cleanup(struct io_kiocb *req)
98  struct io_rename *ren = io_kiocb_to_cmd(req, struct io_rename);
[all …]
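
The matching submission-side sketch for io_renameat(), assuming recent liburing; the paths are illustrative:

    #include <liburing.h>
    #include <fcntl.h>

    /* Rename a file asynchronously, relative to the current directory. */
    int rename_async(struct io_uring *ring, const char *from, const char *to)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_renameat(sqe, AT_FDCWD, from, AT_FDCWD, to, 0);
        return io_uring_submit(ring);
    }
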
xattr.c
25  void io_xattr_cleanup(struct io_kiocb *req)
27  struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
36  static void io_xattr_finish(struct io_kiocb *req, int ret)
38  req->flags &= ~REQ_F_NEED_CLEANUP;
40  io_xattr_cleanup(req);
41  io_req_set_res(req, ret, 0);
44  static int __io_getxattr_prep(struct io_kiocb *req,
47  struct io_xattr *ix = io_kiocb_to_cmd(req, struct io_xattr);
51  if (unlikely(req->flags & REQ_F_FIXED_FILE))
77  req->flags |= REQ_F_NEED_CLEANUP;
[all …]
fs.h
3   int io_renameat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
4   int io_renameat(struct io_kiocb *req, unsigned int issue_flags);
5   void io_renameat_cleanup(struct io_kiocb *req);
7   int io_unlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
8   int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags);
9   void io_unlinkat_cleanup(struct io_kiocb *req);
11  int io_mkdirat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
12  int io_mkdirat(struct io_kiocb *req, unsigned int issue_flags);
13  void io_mkdirat_cleanup(struct io_kiocb *req);
15  int io_symlinkat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
[all …]
io_uring.c
136  struct io_kiocb *req;
148  static void io_dismantle_req(struct io_kiocb *req);
149  static void io_clean_op(struct io_kiocb *req);
150  static void io_queue_sqe(struct io_kiocb *req);
187  struct io_kiocb *req;
189  io_for_each_link(req, head) {
190  if (req->flags & REQ_F_INFLIGHT)
223  static inline void req_fail_link_node(struct io_kiocb *req, int res)
225  req_set_fail(req);
226  io_req_set_res(req, res, 0);
[all …]
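
io_for_each_link() and req_fail_link_node() implement failure propagation along SQE chains; from userspace a chain is built with IOSQE_IO_LINK. A sketch, with fd and message as placeholders:

    #include <liburing.h>
    #include <string.h>

    /* Link a write and an fsync: the fsync runs only if the write
     * succeeds; otherwise the rest of the chain completes -ECANCELED. */
    int write_then_fsync(struct io_uring *ring, int fd, const char *msg)
    {
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_write(sqe, fd, msg, strlen(msg), 0);
        sqe->flags |= IOSQE_IO_LINK;

        sqe = io_uring_get_sqe(ring);
        io_uring_prep_fsync(sqe, fd, 0);
        return io_uring_submit(ring);
    }
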
msg_ring.c
49  void io_msg_ring_cleanup(struct io_kiocb *req)
51  struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
60  static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
62  struct io_ring_ctx *target_ctx = req->file->private_data;
63  struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
86  static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
88  struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
89  struct io_ring_ctx *ctx = req->ctx;
106  static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
108  struct io_ring_ctx *target_ctx = req->file->private_data;
[all …]
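
io_msg_ring_data() posts a (res, user_data) pair into another ring's completion queue. The submitting side, assuming liburing 2.2+ for io_uring_prep_msg_ring(); the values and target fd are illustrative:

    #include <liburing.h>

    /* Post a CQE into another ring: the target sees cqe->res == 0x42
     * and cqe->user_data == 0xdeadbeef. target_ring_fd is the other
     * ring's file descriptor. */
    int poke_other_ring(struct io_uring *ring, int target_ring_fd)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_msg_ring(sqe, target_ring_fd, 0x42, 0xdeadbeef, 0);
        return io_uring_submit(ring);
    }
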
sync.c
25  int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
27  struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
38  int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags)
40  struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
47  ret = sync_file_range(req->file, sync->off, sync->len, sync->flags);
48  io_req_set_res(req, ret, 0);
52  int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
54  struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
68  int io_fsync(struct io_kiocb *req, unsigned int issue_flags)
70  struct io_sync *sync = io_kiocb_to_cmd(req, struct io_sync);
[all …]
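
io_fsync() is the ring-side fsync(2). A minimal liburing sketch issuing a datasync-flavored fsync:

    #include <liburing.h>

    /* Flush file data (not necessarily metadata) through the ring. */
    int datasync_async(struct io_uring *ring, int fd)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
        return io_uring_submit(ring);
    }
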
openclose.c
34  static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
36  struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
42  if (unlikely(req->flags & REQ_F_FIXED_FILE))
63  req->flags |= REQ_F_NEED_CLEANUP;
67  int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
69  struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
74  return __io_openat_prep(req, sqe);
77  int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
79  struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
93  return __io_openat_prep(req, sqe);
[all …]
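
__io_openat_prep() rejects fixed files and arms cleanup; the userspace mirror is io_uring_prep_openat(). A sketch, with path and flags illustrative:

    #include <liburing.h>
    #include <fcntl.h>

    /* Open a file through the ring; on success the new fd comes back
     * in cqe->res. */
    int open_async(struct io_uring *ring, const char *path)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_openat(sqe, AT_FDCWD, path, O_RDONLY, 0);
        return io_uring_submit(ring);
    }
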
splice.c
26  static int __io_splice_prep(struct io_kiocb *req,
29  struct io_splice *sp = io_kiocb_to_cmd(req, struct io_splice);
40  int io_tee_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
44  return __io_splice_prep(req, sqe);
47  int io_tee(struct io_kiocb *req, unsigned int issue_flags)
49  struct io_splice *sp = io_kiocb_to_cmd(req, struct io_splice);
59  in = io_file_get_fixed(req, sp->splice_fd_in, issue_flags);
61  in = io_file_get_normal(req, sp->splice_fd_in);
74  req_set_fail(req);
75  io_req_set_res(req, ret, 0);
[all …]
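
io_tee() is the async counterpart of tee(2), duplicating pipe contents without consuming them. A hedged liburing sketch; the byte count is illustrative and both fds must be pipes:

    #include <liburing.h>

    /* Duplicate up to 4096 bytes from one pipe into another. */
    int tee_pipes(struct io_uring *ring, int pipe_rd, int pipe_wr)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_tee(sqe, pipe_rd, pipe_wr, 4096, 0);
        return io_uring_submit(ring);
    }
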
timeout.h
4   struct io_kiocb *req;
11  struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
14  static inline struct io_kiocb *io_disarm_linked_timeout(struct io_kiocb *req)
16  struct io_kiocb *link = req->link;
19  return __io_disarm_linked_timeout(req, link);
29  void io_queue_linked_timeout(struct io_kiocb *req);
30  void io_disarm_next(struct io_kiocb *req);
32  int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
33  int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
34  int io_timeout(struct io_kiocb *req, unsigned int issue_flags);
[all …]
kbuf.c
53  void io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
55  struct io_ring_ctx *ctx = req->ctx;
65  if (req->flags & REQ_F_PARTIAL_IO)
70  buf = req->kbuf;
73  req->flags &= ~REQ_F_BUFFER_SELECTED;
74  req->buf_index = buf->bgid;
80  unsigned int __io_put_kbuf(struct io_kiocb *req, unsigned issue_flags)
96  if (req->flags & REQ_F_BUFFER_RING) {
98  cflags = __io_put_kbuf_list(req, NULL);
100  struct io_ring_ctx *ctx = req->ctx;
[all …]
xattr.h
3   void io_xattr_cleanup(struct io_kiocb *req);
5   int io_fsetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
6   int io_fsetxattr(struct io_kiocb *req, unsigned int issue_flags);
8   int io_setxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
9   int io_setxattr(struct io_kiocb *req, unsigned int issue_flags);
11  int io_fgetxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
12  int io_fgetxattr(struct io_kiocb *req, unsigned int issue_flags);
14  int io_getxattr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
15  int io_getxattr(struct io_kiocb *req, unsigned int issue_flags);
advise.c
31  int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
34  struct io_madvise *ma = io_kiocb_to_cmd(req, struct io_madvise);
48  int io_madvise(struct io_kiocb *req, unsigned int issue_flags)
51  struct io_madvise *ma = io_kiocb_to_cmd(req, struct io_madvise);
58  io_req_set_res(req, ret, 0);
65  int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
67  struct io_fadvise *fa = io_kiocb_to_cmd(req, struct io_fadvise);
78  int io_fadvise(struct io_kiocb *req, unsigned int issue_flags)
80  struct io_fadvise *fa = io_kiocb_to_cmd(req, struct io_fadvise);
94  ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
[all …]
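
A hedged liburing counterpart for io_fadvise(); as with posix_fadvise(2), a length of 0 means "to end of file":

    #include <liburing.h>
    #include <fcntl.h>

    /* Hint that the whole file will be read sequentially. */
    int fadvise_sequential(struct io_uring *ring, int fd)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_fadvise(sqe, fd, 0, 0, POSIX_FADV_SEQUENTIAL);
        return io_uring_submit(ring);
    }
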
openclose.h
6   int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
7   int io_openat(struct io_kiocb *req, unsigned int issue_flags);
8   void io_open_cleanup(struct io_kiocb *req);
10  int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
11  int io_openat2(struct io_kiocb *req, unsigned int issue_flags);
13  int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
14  int io_close(struct io_kiocb *req, unsigned int issue_flags);
cancel.c
32  struct io_kiocb *req = container_of(work, struct io_kiocb, work);
35  if (req->ctx != cd->ctx)
40  if (req->file != cd->file)
43  if (req->cqe.user_data != cd->data)
47  if (cd->seq == req->work.cancel_seq)
49  req->work.cancel_seq = cd->seq;
108  int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
110  struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
112  if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
165  int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
[all …]
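
io_cancel_cb() matches candidates by ctx, file, user_data, and sequence; from userspace a cancellation is keyed the same way. A sketch assuming liburing 2.2+ for io_uring_prep_cancel64():

    #include <liburing.h>

    /* Try to cancel a previously submitted request identified by the
     * user_data it was submitted with. */
    int cancel_by_user_data(struct io_uring *ring, __u64 target)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_cancel64(sqe, target, 0);
        return io_uring_submit(ring);
    }
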
rw.h
18  int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe);
19  int io_read(struct io_kiocb *req, unsigned int issue_flags);
20  int io_readv_prep_async(struct io_kiocb *req);
21  int io_write(struct io_kiocb *req, unsigned int issue_flags);
22  int io_writev_prep_async(struct io_kiocb *req);
23  void io_readv_writev_cleanup(struct io_kiocb *req);
24  void io_rw_fail(struct io_kiocb *req);
statx.c
23  int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
25  struct io_statx *sx = io_kiocb_to_cmd(req, struct io_statx);
30  if (req->flags & REQ_F_FIXED_FILE)
50  req->flags |= REQ_F_NEED_CLEANUP;
54  int io_statx(struct io_kiocb *req, unsigned int issue_flags)
56  struct io_statx *sx = io_kiocb_to_cmd(req, struct io_statx);
63  io_req_set_res(req, ret, 0);
67  void io_statx_cleanup(struct io_kiocb *req)
69  struct io_statx *sx = io_kiocb_to_cmd(req, struct io_statx);
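
And the submission side for io_statx(), assuming recent liburing. The statx struct is caller-owned and must stay valid until the CQE arrives:

    #define _GNU_SOURCE
    #include <liburing.h>
    #include <fcntl.h>
    #include <sys/stat.h>

    /* Fetch file size asynchronously; the result lands in stx on completion. */
    int statx_size_async(struct io_uring *ring, const char *path, struct statx *stx)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        io_uring_prep_statx(sqe, AT_FDCWD, path, 0, STATX_SIZE, stx);
        return io_uring_submit(ring);
    }
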
