Lines Matching full:req

All hits below are from the io_uring read/write path (io_uring/rw.c in current kernels). Each entry gives the kernel source line number, the matching line, and the enclosing function, with a trailing note on whether req is a function argument or a local variable.
31 static inline bool io_file_supports_nowait(struct io_kiocb *req) in io_file_supports_nowait() argument
33 return req->flags & REQ_F_SUPPORT_NOWAIT; in io_file_supports_nowait()
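
The REQ_F_SUPPORT_NOWAIT check above gates whether io_uring may attempt a nonblocking issue of the request. As a userspace analogue (not this kernel path), preadv2(2) with RWF_NOWAIT asks for the same semantics and fails with EAGAIN rather than blocking. A minimal probe, assuming glibc's preadv2 wrapper; the file path is an illustration choice, not from the listing:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            char buf[4096];
            struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
            int fd = open(argc > 1 ? argv[1] : "/etc/hostname", O_RDONLY);

            if (fd < 0)
                    return 1;
            /* RWF_NOWAIT: fail with EAGAIN instead of blocking on I/O */
            ssize_t n = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
            if (n < 0 && errno == EAGAIN)
                    printf("would block (data not cached, or no nowait support)\n");
            else if (n < 0)
                    perror("preadv2");
            else
                    printf("read %zd bytes without blocking\n", n);
            close(fd);
            return 0;
    }
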
55 static int io_iov_buffer_select_prep(struct io_kiocb *req) in io_iov_buffer_select_prep() argument
59 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_iov_buffer_select_prep()
65 if (req->ctx->compat) in io_iov_buffer_select_prep()
76 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_rw() argument
78 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_prep_rw()
84 req->buf_index = READ_ONCE(sqe->buf_index); in io_prep_rw()
86 if (req->opcode == IORING_OP_READ_FIXED || in io_prep_rw()
87 req->opcode == IORING_OP_WRITE_FIXED) { in io_prep_rw()
88 struct io_ring_ctx *ctx = req->ctx; in io_prep_rw()
91 if (unlikely(req->buf_index >= ctx->nr_user_bufs)) in io_prep_rw()
93 index = array_index_nospec(req->buf_index, ctx->nr_user_bufs); in io_prep_rw()
94 req->imu = ctx->user_bufs[index]; in io_prep_rw()
95 io_req_set_rsrc_node(req, ctx, 0); in io_prep_rw()
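
The hits at lines 91 and 93 are the standard Spectre-v1 hardening pattern: bounds-check the user-supplied buf_index, then pass it through array_index_nospec() so a CPU speculating past the check cannot use an out-of-range index. A rough userspace sketch of the idea; index_nospec is a hypothetical stand-in, and the real kernel helper uses arch-specific sequences rather than this portable mask:

    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the kernel's array_index_nospec():
     * clamp idx to 0 when out of range via a data dependency (a mask)
     * instead of a branch the CPU might speculate past. Note a compiler
     * may still emit a branch for the comparison; the kernel avoids
     * that with hand-written assembly. */
    static size_t index_nospec(size_t idx, size_t size)
    {
            size_t mask = (size_t)0 - (size_t)(idx < size); /* all-ones or 0 */
            return idx & mask;
    }

    int main(void)
    {
            int bufs[4] = { 10, 20, 30, 40 };
            size_t untrusted = 2;

            if (untrusted >= 4)                     /* architectural bounds check */
                    return 1;
            untrusted = index_nospec(untrusted, 4); /* speculation clamp */
            printf("%d\n", bufs[untrusted]);
            return 0;
    }
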
117 if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) { in io_prep_rw()
118 ret = io_iov_buffer_select_prep(req); in io_prep_rw()
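
The IORING_OP_READ_FIXED / IORING_OP_WRITE_FIXED branch validated above resolves buf_index against buffers the application registered in advance. Seen from userspace with liburing (an illustration, not part of this file; error checks trimmed), that path looks roughly like:

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <liburing.h>

    int main(void)
    {
            struct io_uring ring;
            char *buf = malloc(4096);
            struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            int fd = open("/etc/hostname", O_RDONLY);

            io_uring_queue_init(8, &ring, 0);
            /* register the buffer table; buf_index 0 refers to iov */
            io_uring_register_buffers(&ring, &iov, 1);

            sqe = io_uring_get_sqe(&ring);
            /* the last argument is the buf_index io_prep_rw() validates */
            io_uring_prep_read_fixed(sqe, fd, buf, 4096, 0, 0);
            io_uring_submit(&ring);

            io_uring_wait_cqe(&ring, &cqe);
            printf("res=%d\n", cqe->res);
            io_uring_cqe_seen(&ring, cqe);
            io_uring_queue_exit(&ring);
            return 0;
    }
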
126 void io_readv_writev_cleanup(struct io_kiocb *req) in io_readv_writev_cleanup() argument
128 struct io_async_rw *io = req->async_data; in io_readv_writev_cleanup()
154 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req) in io_kiocb_update_pos() argument
156 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_kiocb_update_pos()
161 if (!(req->file->f_mode & FMODE_STREAM)) { in io_kiocb_update_pos()
162 req->flags |= REQ_F_CUR_POS; in io_kiocb_update_pos()
163 rw->kiocb.ki_pos = req->file->f_pos; in io_kiocb_update_pos()
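
REQ_F_CUR_POS is the kernel half of a documented uAPI rule: on seekable files, an SQE offset of -1 means "use and advance the file position", which kiocb_done() writes back at line 343 below. A small liburing sketch of that behavior (liburing and the file path are illustration choices):

    #include <fcntl.h>
    #include <stdio.h>
    #include <liburing.h>

    /* Read twice with offset -1: the second read continues where the
     * first left off, because the kernel uses and updates f_pos (the
     * REQ_F_CUR_POS path above). */
    int main(void)
    {
            struct io_uring ring;
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            char a[8], b[8];
            int fd = open("/etc/hostname", O_RDONLY);

            io_uring_queue_init(4, &ring, 0);
            for (int i = 0; i < 2; i++) {
                    sqe = io_uring_get_sqe(&ring);
                    io_uring_prep_read(sqe, fd, i ? b : a, 8, (__u64)-1);
                    io_uring_submit(&ring);
                    io_uring_wait_cqe(&ring, &cqe);
                    printf("read %d: res=%d\n", i, cqe->res);
                    io_uring_cqe_seen(&ring, cqe);
            }
            io_uring_queue_exit(&ring);
            return 0;
    }
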
171 static void io_req_task_queue_reissue(struct io_kiocb *req) in io_req_task_queue_reissue() argument
173 req->io_task_work.func = io_queue_iowq; in io_req_task_queue_reissue()
174 io_req_task_work_add(req); in io_req_task_queue_reissue()
178 static bool io_resubmit_prep(struct io_kiocb *req) in io_resubmit_prep() argument
180 struct io_async_rw *io = req->async_data; in io_resubmit_prep()
182 if (!req_has_async_data(req)) in io_resubmit_prep()
183 return !io_req_prep_async(req); in io_resubmit_prep()
188 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue() argument
190 umode_t mode = file_inode(req->file)->i_mode; in io_rw_should_reissue()
191 struct io_ring_ctx *ctx = req->ctx; in io_rw_should_reissue()
195 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && in io_rw_should_reissue()
209 if (!same_thread_group(req->task, current) || !in_task()) in io_rw_should_reissue()
214 static bool io_resubmit_prep(struct io_kiocb *req) in io_resubmit_prep() argument
218 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue() argument
224 static void io_req_end_write(struct io_kiocb *req) in io_req_end_write() argument
226 if (req->flags & REQ_F_ISREG) { in io_req_end_write()
227 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_req_end_write()
237 static void io_req_io_end(struct io_kiocb *req) in io_req_io_end() argument
239 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_req_io_end()
242 io_req_end_write(req); in io_req_io_end()
243 fsnotify_modify(req->file); in io_req_io_end()
245 fsnotify_access(req->file); in io_req_io_end()
249 static bool __io_complete_rw_common(struct io_kiocb *req, long res) in __io_complete_rw_common() argument
251 if (unlikely(res != req->cqe.res)) { in __io_complete_rw_common()
253 io_rw_should_reissue(req)) { in __io_complete_rw_common()
258 io_req_io_end(req); in __io_complete_rw_common()
259 req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO; in __io_complete_rw_common()
262 req_set_fail(req); in __io_complete_rw_common()
263 req->cqe.res = res; in __io_complete_rw_common()
268 static inline int io_fixup_rw_res(struct io_kiocb *req, long res) in io_fixup_rw_res() argument
270 struct io_async_rw *io = req->async_data; in io_fixup_rw_res()
273 if (req_has_async_data(req) && io->bytes_done > 0) { in io_fixup_rw_res()
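
io_fixup_rw_res() folds bytes moved by earlier partial attempts (io->bytes_done) into the value reported in the CQE, so a retried request still reports the running total. The listing elides most of the body between lines 268 and 273; the sketch below mirrors the upstream logic as I understand it, with stand-in types and a hypothetical fixup_rw_res name, so treat it as illustrative:

    #include <stdio.h>

    /* Stand-in for struct io_async_rw: only the field we need. */
    struct async_rw_state { long bytes_done; };

    /* On error after partial progress, report what already completed;
     * on success, add the earlier bytes to this attempt's count. */
    static long fixup_rw_res(const struct async_rw_state *io, long res)
    {
            if (io && io->bytes_done > 0) {
                    if (res < 0)
                            res = io->bytes_done;
                    else
                            res += io->bytes_done;
            }
            return res;
    }

    int main(void)
    {
            struct async_rw_state io = { .bytes_done = 4096 };

            printf("%ld\n", fixup_rw_res(&io, 512)); /* 4608 */
            printf("%ld\n", fixup_rw_res(&io, -11)); /* 4096, not -EAGAIN */
            return 0;
    }
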
282 void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts) in io_req_rw_complete() argument
284 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_req_rw_complete()
290 io_req_set_res(req, io_fixup_rw_res(req, res), 0); in io_req_rw_complete()
293 io_req_io_end(req); in io_req_rw_complete()
295 if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) { in io_req_rw_complete()
298 req->cqe.flags |= io_put_kbuf(req, issue_flags); in io_req_rw_complete()
300 io_req_task_complete(req, ts); in io_req_rw_complete()
306 struct io_kiocb *req = cmd_to_io_kiocb(rw); in io_complete_rw() local
309 if (__io_complete_rw_common(req, res)) in io_complete_rw()
311 io_req_set_res(req, io_fixup_rw_res(req, res), 0); in io_complete_rw()
313 req->io_task_work.func = io_req_rw_complete; in io_complete_rw()
314 __io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE); in io_complete_rw()
320 struct io_kiocb *req = cmd_to_io_kiocb(rw); in io_complete_rw_iopoll() local
323 io_req_end_write(req); in io_complete_rw_iopoll()
324 if (unlikely(res != req->cqe.res)) { in io_complete_rw_iopoll()
325 if (res == -EAGAIN && io_rw_should_reissue(req)) { in io_complete_rw_iopoll()
326 req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO; in io_complete_rw_iopoll()
329 req->cqe.res = res; in io_complete_rw_iopoll()
333 smp_store_release(&req->iopoll_completed, 1); in io_complete_rw_iopoll()
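
This smp_store_release() at line 333 pairs with the smp_load_acquire() at line 1054 in io_do_iopoll(): once the poll loop observes iopoll_completed == 1, every write made before the release (such as req->cqe.res) is guaranteed visible. The same handshake in portable C11 atomics, as a userspace sketch (compile with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static long result;             /* plain data, like req->cqe.res */
    static atomic_int completed;    /* like req->iopoll_completed */

    static void *completer(void *arg)
    {
            (void)arg;
            result = 4096;          /* fill in the result... */
            /* ...then publish: release orders the store above before us */
            atomic_store_explicit(&completed, 1, memory_order_release);
            return NULL;
    }

    int main(void)
    {
            pthread_t t;

            pthread_create(&t, NULL, completer, NULL);
            /* acquire pairs with the release: once we see 1, result is valid */
            while (!atomic_load_explicit(&completed, memory_order_acquire))
                    ;               /* busy-poll, like io_do_iopoll() */
            printf("res=%ld\n", result);
            pthread_join(&t, NULL);
            return 0;
    }
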
336 static int kiocb_done(struct io_kiocb *req, ssize_t ret, in kiocb_done() argument
339 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in kiocb_done()
340 unsigned final_ret = io_fixup_rw_res(req, ret); in kiocb_done()
342 if (ret >= 0 && req->flags & REQ_F_CUR_POS) in kiocb_done()
343 req->file->f_pos = rw->kiocb.ki_pos; in kiocb_done()
345 if (!__io_complete_rw_common(req, ret)) { in kiocb_done()
350 io_req_io_end(req); in kiocb_done()
351 io_req_set_res(req, final_ret, in kiocb_done()
352 io_put_kbuf(req, issue_flags)); in kiocb_done()
359 if (req->flags & REQ_F_REISSUE) { in kiocb_done()
360 req->flags &= ~REQ_F_REISSUE; in kiocb_done()
361 if (io_resubmit_prep(req)) in kiocb_done()
362 io_req_task_queue_reissue(req); in kiocb_done()
364 io_req_task_queue_fail(req, final_ret); in kiocb_done()
369 static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req, in __io_import_iovec() argument
373 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in __io_import_iovec()
375 u8 opcode = req->opcode; in __io_import_iovec()
382 ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len); in __io_import_iovec()
392 (req->flags & REQ_F_BUFFER_SELECT)) { in __io_import_iovec()
393 if (io_do_buffer_select(req)) { in __io_import_iovec()
394 buf = io_buffer_select(req, &sqe_len, issue_flags); in __io_import_iovec()
409 req->ctx->compat); in __io_import_iovec()
415 static inline int io_import_iovec(int rw, struct io_kiocb *req, in io_import_iovec() argument
419 *iovec = __io_import_iovec(rw, req, s, issue_flags); in io_import_iovec()
498 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, in io_req_map_rw() argument
501 struct io_async_rw *io = req->async_data; in io_req_map_rw()
521 req->flags |= REQ_F_NEED_CLEANUP; in io_req_map_rw()
525 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, in io_setup_async_rw() argument
528 if (!force && !io_cold_defs[req->opcode].prep_async) in io_setup_async_rw()
530 if (!req_has_async_data(req)) { in io_setup_async_rw()
533 if (io_alloc_async_data(req)) { in io_setup_async_rw()
538 io_req_map_rw(req, iovec, s->fast_iov, &s->iter); in io_setup_async_rw()
539 iorw = req->async_data; in io_setup_async_rw()
546 static inline int io_rw_prep_async(struct io_kiocb *req, int rw) in io_rw_prep_async() argument
548 struct io_async_rw *iorw = req->async_data; in io_rw_prep_async()
556 ret = io_import_iovec(rw, req, &iov, &iorw->s, 0); in io_rw_prep_async()
562 req->flags |= REQ_F_NEED_CLEANUP; in io_rw_prep_async()
568 int io_readv_prep_async(struct io_kiocb *req) in io_readv_prep_async() argument
570 return io_rw_prep_async(req, ITER_DEST); in io_readv_prep_async()
573 int io_writev_prep_async(struct io_kiocb *req) in io_writev_prep_async() argument
575 return io_rw_prep_async(req, ITER_SOURCE); in io_writev_prep_async()
592 struct io_kiocb *req = wait->private; in io_async_buf_func() local
593 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_async_buf_func()
603 io_req_task_queue(req); in io_async_buf_func()
619 static bool io_rw_should_retry(struct io_kiocb *req) in io_rw_should_retry() argument
621 struct io_async_rw *io = req->async_data; in io_rw_should_retry()
623 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_rw_should_retry()
627 if (req->flags & REQ_F_NOWAIT) in io_rw_should_retry()
638 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC)) in io_rw_should_retry()
642 wait->wait.private = req; in io_rw_should_retry()
663 static bool need_complete_io(struct io_kiocb *req) in need_complete_io() argument
665 return req->flags & REQ_F_ISREG || in need_complete_io()
666 S_ISBLK(file_inode(req->file)->i_mode); in need_complete_io()
669 static int io_rw_init_file(struct io_kiocb *req, fmode_t mode) in io_rw_init_file() argument
671 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_rw_init_file()
673 struct io_ring_ctx *ctx = req->ctx; in io_rw_init_file()
674 struct file *file = req->file; in io_rw_init_file()
680 if (!(req->flags & REQ_F_FIXED_FILE)) in io_rw_init_file()
681 req->flags |= io_file_get_flags(file); in io_rw_init_file()
695 ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req))) in io_rw_init_file()
696 req->flags |= REQ_F_NOWAIT; in io_rw_init_file()
705 req->iopoll_completed = 0; in io_rw_init_file()
715 int io_read(struct io_kiocb *req, unsigned int issue_flags) in io_read() argument
717 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_read()
726 if (!req_has_async_data(req)) { in io_read()
727 ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags); in io_read()
731 io = req->async_data; in io_read()
738 if (io_do_buffer_select(req)) { in io_read()
739 ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags); in io_read()
752 ret = io_rw_init_file(req, FMODE_READ); in io_read()
757 req->cqe.res = iov_iter_count(&s->iter); in io_read()
761 if (unlikely(!io_file_supports_nowait(req))) { in io_read()
762 ret = io_setup_async_rw(req, iovec, s, true); in io_read()
771 ppos = io_kiocb_update_pos(req); in io_read()
773 ret = rw_verify_area(READ, req->file, ppos, req->cqe.res); in io_read()
781 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { in io_read()
782 req->flags &= ~REQ_F_REISSUE; in io_read()
784 if (req->opcode == IORING_OP_READ && file_can_poll(req->file)) in io_read()
787 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_read()
790 if (req->flags & REQ_F_NOWAIT) in io_read()
797 } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock || in io_read()
798 (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) { in io_read()
810 ret2 = io_setup_async_rw(req, iovec, s, true); in io_read()
817 io = req->async_data; in io_read()
837 if (!io_rw_should_retry(req)) { in io_read()
842 req->cqe.res = iov_iter_count(&s->iter); in io_read()
860 return kiocb_done(req, ret, issue_flags); in io_read()
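
The io_do_buffer_select()/io_buffer_select() calls in the io_read() path serve reads submitted with IOSQE_BUFFER_SELECT, where the kernel picks a buffer from a pre-provided group at issue time. A hedged liburing sketch of that flow; the group id 7 and the file path are arbitrary illustration choices:

    #include <fcntl.h>
    #include <stdio.h>
    #include <liburing.h>

    #define NBUF 4
    #define BLEN 4096

    int main(void)
    {
            static char pool[NBUF][BLEN];
            struct io_uring ring;
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            int fd = open("/etc/hostname", O_RDONLY);

            io_uring_queue_init(8, &ring, 0);

            /* hand the kernel a group of buffers it may pick from */
            sqe = io_uring_get_sqe(&ring);
            io_uring_prep_provide_buffers(sqe, pool, BLEN, NBUF, 7 /*bgid*/, 0);
            io_uring_submit(&ring);
            io_uring_wait_cqe(&ring, &cqe);
            io_uring_cqe_seen(&ring, cqe);

            /* BUFFER_SELECT read: the kernel chooses the buffer at issue
             * time -- the io_do_buffer_select() path in io_read() */
            sqe = io_uring_get_sqe(&ring);
            io_uring_prep_read(sqe, fd, NULL, BLEN, 0);
            sqe->flags |= IOSQE_BUFFER_SELECT;
            sqe->buf_group = 7;
            io_uring_submit(&ring);
            io_uring_wait_cqe(&ring, &cqe);
            if (cqe->res >= 0 && (cqe->flags & IORING_CQE_F_BUFFER))
                    printf("res=%d in buffer id %u\n", cqe->res,
                           cqe->flags >> IORING_CQE_BUFFER_SHIFT);
            io_uring_cqe_seen(&ring, cqe);
            io_uring_queue_exit(&ring);
            return 0;
    }
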
863 int io_write(struct io_kiocb *req, unsigned int issue_flags) in io_write() argument
865 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_write()
873 if (!req_has_async_data(req)) { in io_write()
874 ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags); in io_write()
878 struct io_async_rw *io = req->async_data; in io_write()
884 ret = io_rw_init_file(req, FMODE_WRITE); in io_write()
889 req->cqe.res = iov_iter_count(&s->iter); in io_write()
893 if (unlikely(!io_file_supports_nowait(req))) in io_write()
899 (req->flags & REQ_F_ISREG)) in io_write()
908 ppos = io_kiocb_update_pos(req); in io_write()
910 ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res); in io_write()
916 if (req->flags & REQ_F_ISREG) in io_write()
920 if (likely(req->file->f_op->write_iter)) in io_write()
921 ret2 = call_write_iter(req->file, kiocb, &s->iter); in io_write()
922 else if (req->file->f_op->write) in io_write()
927 if (req->flags & REQ_F_REISSUE) { in io_write()
928 req->flags &= ~REQ_F_REISSUE; in io_write()
939 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) in io_write()
943 if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL)) in io_write()
946 if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) { in io_write()
949 trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2, in io_write()
950 req->cqe.res, ret2); in io_write()
958 ret = io_setup_async_rw(req, iovec, s, true); in io_write()
960 io = req->async_data; in io_write()
965 io_req_end_write(req); in io_write()
969 ret = kiocb_done(req, ret2, issue_flags); in io_write()
973 ret = io_setup_async_rw(req, iovec, s, false); in io_write()
976 io_req_end_write(req); in io_write()
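
For regular files and block devices, need_complete_io() lets the kernel itself finish a short write (the trace_io_uring_short_write branch above); on sockets and pipes a CQE can still carry a short count, so applications resubmit the remainder. A sketch of that loop; write_all is a hypothetical helper, not a liburing API:

    #include <unistd.h>
    #include <liburing.h>

    /* Write all of buf[0..len) to a stream fd, resubmitting after short
     * writes. Offset 0 because stream fds (sockets, pipes) ignore it. */
    static int write_all(struct io_uring *ring, int fd,
                         const char *buf, unsigned len)
    {
            unsigned done = 0;

            while (done < len) {
                    struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
                    struct io_uring_cqe *cqe;
                    int ret;

                    io_uring_prep_write(sqe, fd, buf + done, len - done, 0);
                    io_uring_submit(ring);
                    if (io_uring_wait_cqe(ring, &cqe) < 0)
                            return -1;
                    ret = cqe->res;
                    io_uring_cqe_seen(ring, cqe);
                    if (ret < 0)
                            return ret;     /* hard error */
                    done += ret;            /* short write: resubmit rest */
            }
            return 0;
    }

    int main(void)
    {
            struct io_uring ring;
            char tmp[8];
            int fds[2];

            if (pipe(fds) || io_uring_queue_init(4, &ring, 0))
                    return 1;
            write_all(&ring, fds[1], "hello\n", 6);
            (void)read(fds[0], tmp, sizeof(tmp)); /* drain the pipe */
            io_uring_queue_exit(&ring);
            return 0;
    }
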
987 void io_rw_fail(struct io_kiocb *req) in io_rw_fail() argument
991 res = io_fixup_rw_res(req, req->cqe.res); in io_rw_fail()
992 io_req_set_res(req, res, req->cqe.flags); in io_rw_fail()
1010 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); in io_do_iopoll() local
1011 struct file *file = req->file; in io_do_iopoll()
1019 if (READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
1022 if (req->opcode == IORING_OP_URING_CMD) { in io_do_iopoll()
1025 ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd); in io_do_iopoll()
1029 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_do_iopoll()
1038 /* iopoll may have completed current req */ in io_do_iopoll()
1040 READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
1051 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); in io_do_iopoll() local
1054 if (!smp_load_acquire(&req->iopoll_completed)) in io_do_iopoll()
1057 req->cqe.flags = io_put_kbuf(req, 0); in io_do_iopoll()
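
io_do_iopoll() only runs on rings created with IORING_SETUP_IOPOLL, where completions are harvested by polling the device rather than waiting for interrupts, and regular files or block devices must then be opened with O_DIRECT. A minimal setup sketch; /dev/nvme0n1 is a placeholder for a device whose driver supports polled I/O:

    #include <fcntl.h>
    #include <stdio.h>
    #include <liburing.h>

    int main(void)
    {
            struct io_uring ring;
            struct io_uring_sqe *sqe;
            struct io_uring_cqe *cqe;
            static char buf[4096] __attribute__((aligned(4096)));
            /* IOPOLL requires O_DIRECT on regular files / block devices */
            int fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);

            if (fd < 0 ||
                io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL) < 0)
                    return 1;

            sqe = io_uring_get_sqe(&ring);
            io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
            io_uring_submit(&ring);

            /* wait_cqe on an IOPOLL ring spins in io_do_iopoll() for us */
            if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                    printf("res=%d\n", cqe->res);
                    io_uring_cqe_seen(&ring, cqe);
            }
            io_uring_queue_exit(&ring);
            return 0;
    }
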