Lines matching full:req in io_uring/rw.c (each entry shows the source line number, the matched line, and the enclosing function)
31 static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask) in io_file_supports_nowait() argument
34 if (req->flags & REQ_F_SUPPORT_NOWAIT) in io_file_supports_nowait()
37 if (io_file_can_poll(req)) { in io_file_supports_nowait()
40 return vfs_poll(req->file, &pt) & mask; in io_file_supports_nowait()
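The io_file_supports_nowait() matches above show the fallback path: if the file has not already been flagged REQ_F_SUPPORT_NOWAIT but is pollable, vfs_poll() is asked whether the requested mask (EPOLLIN for reads, EPOLLOUT for writes) is already satisfied, so the request can proceed without blocking. A rough userspace analogue of that readiness probe is poll(2) with a zero timeout; the helper name below is illustrative, not kernel API:

#include <poll.h>
#include <stdbool.h>

/* Return true if fd is ready for the given events right now, without
 * blocking -- e.g. fd_ready_now(fd, POLLIN) before a non-blocking read. */
static bool fd_ready_now(int fd, short events)
{
        struct pollfd pfd = { .fd = fd, .events = events };

        return poll(&pfd, 1, 0) > 0 && (pfd.revents & events);
}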
65 static int io_iov_buffer_select_prep(struct io_kiocb *req) in io_iov_buffer_select_prep() argument
69 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_iov_buffer_select_prep()
75 if (req->ctx->compat) in io_iov_buffer_select_prep()
86 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_rw() argument
88 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_prep_rw()
94 req->buf_index = READ_ONCE(sqe->buf_index); in io_prep_rw()
96 if (req->opcode == IORING_OP_READ_FIXED || in io_prep_rw()
97 req->opcode == IORING_OP_WRITE_FIXED) { in io_prep_rw()
98 struct io_ring_ctx *ctx = req->ctx; in io_prep_rw()
101 if (unlikely(req->buf_index >= ctx->nr_user_bufs)) in io_prep_rw()
103 index = array_index_nospec(req->buf_index, ctx->nr_user_bufs); in io_prep_rw()
104 req->imu = ctx->user_bufs[index]; in io_prep_rw()
105 io_req_set_rsrc_node(req, ctx, 0); in io_prep_rw()
127 if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) { in io_prep_rw()
128 ret = io_iov_buffer_select_prep(req); in io_prep_rw()
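The io_prep_rw() matches above include the fixed-buffer lookup for IORING_OP_READ_FIXED/WRITE_FIXED: buf_index is bounds-checked against ctx->nr_user_bufs and then clamped with array_index_nospec() so a mispredicted branch cannot speculatively index user_bufs[] out of bounds. A rough userspace sketch of that branch-free clamp, modelled on the kernel's generic array_index_mask_nospec() (it relies on arithmetic right shift of a negative value, as the kernel version does; names are illustrative):

#include <stddef.h>
#include <stdint.h>

/* Return index if index < nr, else 0, without a branch the CPU could
 * speculate past. Assumes nr >= 1 and both values fit in intptr_t. */
static size_t index_nospec(size_t index, size_t nr)
{
        size_t mask = (size_t)((intptr_t)~(index | (nr - 1 - index)) >>
                               (sizeof(intptr_t) * 8 - 1));

        return index & mask;
}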
136 void io_readv_writev_cleanup(struct io_kiocb *req) in io_readv_writev_cleanup() argument
138 struct io_async_rw *io = req->async_data; in io_readv_writev_cleanup()
164 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req) in io_kiocb_update_pos() argument
166 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_kiocb_update_pos()
171 if (!(req->file->f_mode & FMODE_STREAM)) { in io_kiocb_update_pos()
172 req->flags |= REQ_F_CUR_POS; in io_kiocb_update_pos()
173 rw->kiocb.ki_pos = req->file->f_pos; in io_kiocb_update_pos()
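io_kiocb_update_pos() above is where a request submitted without an explicit offset picks up the file's current position and flags REQ_F_CUR_POS so the position is written back on completion; stream files such as pipes and sockets skip this. From userspace, the same behaviour is requested by submitting the read or write with an offset of -1, as documented for io_uring_prep_read(3). A minimal sketch, assuming liburing and a kernel recent enough to support current-position I/O:

#include <fcntl.h>
#include <liburing.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        char buf[64];
        int fd = open("/etc/hostname", O_RDONLY);

        if (fd < 0 || io_uring_queue_init(4, &ring, 0) < 0)
                return 1;

        sqe = io_uring_get_sqe(&ring);
        /* offset -1: use and advance the file position, like read(2) */
        io_uring_prep_read(sqe, fd, buf, sizeof(buf), -1);
        io_uring_submit(&ring);

        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                printf("read %d bytes at the current file position\n", cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }

        io_uring_queue_exit(&ring);
        close(fd);
        return 0;
}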
182 static bool io_resubmit_prep(struct io_kiocb *req) in io_resubmit_prep() argument
184 struct io_async_rw *io = req->async_data; in io_resubmit_prep()
186 if (!req_has_async_data(req)) in io_resubmit_prep()
187 return !io_req_prep_async(req); in io_resubmit_prep()
192 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue() argument
194 umode_t mode = file_inode(req->file)->i_mode; in io_rw_should_reissue()
195 struct io_ring_ctx *ctx = req->ctx; in io_rw_should_reissue()
199 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && in io_rw_should_reissue()
213 if (!same_thread_group(req->task, current) || !in_task()) in io_rw_should_reissue()
218 static bool io_resubmit_prep(struct io_kiocb *req) in io_resubmit_prep() argument
222 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue() argument
228 static void io_req_end_write(struct io_kiocb *req) in io_req_end_write() argument
230 if (req->flags & REQ_F_ISREG) { in io_req_end_write()
231 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_req_end_write()
241 static void io_req_io_end(struct io_kiocb *req) in io_req_io_end() argument
243 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_req_io_end()
246 io_req_end_write(req); in io_req_io_end()
247 fsnotify_modify(req->file); in io_req_io_end()
249 fsnotify_access(req->file); in io_req_io_end()
253 static bool __io_complete_rw_common(struct io_kiocb *req, long res) in __io_complete_rw_common() argument
255 if (unlikely(res != req->cqe.res)) { in __io_complete_rw_common()
257 io_rw_should_reissue(req)) { in __io_complete_rw_common()
262 io_req_io_end(req); in __io_complete_rw_common()
263 req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO; in __io_complete_rw_common()
266 req_set_fail(req); in __io_complete_rw_common()
267 req->cqe.res = res; in __io_complete_rw_common()
272 static inline int io_fixup_rw_res(struct io_kiocb *req, long res) in io_fixup_rw_res() argument
274 struct io_async_rw *io = req->async_data; in io_fixup_rw_res()
277 if (req_has_async_data(req) && io->bytes_done > 0) { in io_fixup_rw_res()
286 void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts) in io_req_rw_complete() argument
288 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_req_rw_complete()
294 io_req_set_res(req, io_fixup_rw_res(req, res), 0); in io_req_rw_complete()
297 io_req_io_end(req); in io_req_rw_complete()
299 if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) { in io_req_rw_complete()
302 req->cqe.flags |= io_put_kbuf(req, issue_flags); in io_req_rw_complete()
304 io_req_task_complete(req, ts); in io_req_rw_complete()
310 struct io_kiocb *req = cmd_to_io_kiocb(rw); in io_complete_rw() local
313 if (__io_complete_rw_common(req, res)) in io_complete_rw()
315 io_req_set_res(req, io_fixup_rw_res(req, res), 0); in io_complete_rw()
317 req->io_task_work.func = io_req_rw_complete; in io_complete_rw()
318 __io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE); in io_complete_rw()
324 struct io_kiocb *req = cmd_to_io_kiocb(rw); in io_complete_rw_iopoll() local
327 io_req_end_write(req); in io_complete_rw_iopoll()
328 if (unlikely(res != req->cqe.res)) { in io_complete_rw_iopoll()
329 if (res == -EAGAIN && io_rw_should_reissue(req)) { in io_complete_rw_iopoll()
330 req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO; in io_complete_rw_iopoll()
333 req->cqe.res = res; in io_complete_rw_iopoll()
337 smp_store_release(&req->iopoll_completed, 1); in io_complete_rw_iopoll()
340 static int kiocb_done(struct io_kiocb *req, ssize_t ret, in kiocb_done() argument
343 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in kiocb_done()
344 unsigned final_ret = io_fixup_rw_res(req, ret); in kiocb_done()
346 if (ret >= 0 && req->flags & REQ_F_CUR_POS) in kiocb_done()
347 req->file->f_pos = rw->kiocb.ki_pos; in kiocb_done()
349 if (!__io_complete_rw_common(req, ret)) { in kiocb_done()
354 io_req_io_end(req); in kiocb_done()
355 io_req_set_res(req, final_ret, in kiocb_done()
356 io_put_kbuf(req, issue_flags)); in kiocb_done()
363 if (req->flags & REQ_F_REISSUE) { in kiocb_done()
364 req->flags &= ~REQ_F_REISSUE; in kiocb_done()
365 if (io_resubmit_prep(req)) in kiocb_done()
368 io_req_task_queue_fail(req, final_ret); in kiocb_done()
373 static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req, in __io_import_iovec() argument
377 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in __io_import_iovec()
379 u8 opcode = req->opcode; in __io_import_iovec()
386 ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len); in __io_import_iovec()
396 (req->flags & REQ_F_BUFFER_SELECT)) { in __io_import_iovec()
397 if (io_do_buffer_select(req)) { in __io_import_iovec()
398 buf = io_buffer_select(req, &sqe_len, issue_flags); in __io_import_iovec()
413 req->ctx->compat); in __io_import_iovec()
419 static inline int io_import_iovec(int rw, struct io_kiocb *req, in io_import_iovec() argument
423 *iovec = __io_import_iovec(rw, req, s, issue_flags); in io_import_iovec()
502 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, in io_req_map_rw() argument
505 struct io_async_rw *io = req->async_data; in io_req_map_rw()
525 req->flags |= REQ_F_NEED_CLEANUP; in io_req_map_rw()
529 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, in io_setup_async_rw() argument
532 if (!force && !io_cold_defs[req->opcode].prep_async) in io_setup_async_rw()
534 if (!req_has_async_data(req)) { in io_setup_async_rw()
537 if (io_alloc_async_data(req)) { in io_setup_async_rw()
542 io_req_map_rw(req, iovec, s->fast_iov, &s->iter); in io_setup_async_rw()
543 iorw = req->async_data; in io_setup_async_rw()
550 static inline int io_rw_prep_async(struct io_kiocb *req, int rw) in io_rw_prep_async() argument
552 struct io_async_rw *iorw = req->async_data; in io_rw_prep_async()
560 ret = io_import_iovec(rw, req, &iov, &iorw->s, 0); in io_rw_prep_async()
566 req->flags |= REQ_F_NEED_CLEANUP; in io_rw_prep_async()
572 int io_readv_prep_async(struct io_kiocb *req) in io_readv_prep_async() argument
574 return io_rw_prep_async(req, ITER_DEST); in io_readv_prep_async()
577 int io_writev_prep_async(struct io_kiocb *req) in io_writev_prep_async() argument
579 return io_rw_prep_async(req, ITER_SOURCE); in io_writev_prep_async()
596 struct io_kiocb *req = wait->private; in io_async_buf_func() local
597 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_async_buf_func()
607 io_req_task_queue(req); in io_async_buf_func()
623 static bool io_rw_should_retry(struct io_kiocb *req) in io_rw_should_retry() argument
625 struct io_async_rw *io = req->async_data; in io_rw_should_retry()
627 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_rw_should_retry()
631 if (req->flags & REQ_F_NOWAIT) in io_rw_should_retry()
642 if (io_file_can_poll(req) || !(req->file->f_mode & FMODE_BUF_RASYNC)) in io_rw_should_retry()
646 wait->wait.private = req; in io_rw_should_retry()
667 static bool need_complete_io(struct io_kiocb *req) in need_complete_io() argument
669 return req->flags & REQ_F_ISREG || in need_complete_io()
670 S_ISBLK(file_inode(req->file)->i_mode); in need_complete_io()
673 static int io_rw_init_file(struct io_kiocb *req, fmode_t mode) in io_rw_init_file() argument
675 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_rw_init_file()
677 struct io_ring_ctx *ctx = req->ctx; in io_rw_init_file()
678 struct file *file = req->file; in io_rw_init_file()
684 if (!(req->flags & REQ_F_FIXED_FILE)) in io_rw_init_file()
685 req->flags |= io_file_get_flags(file); in io_rw_init_file()
699 ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT)))) in io_rw_init_file()
700 req->flags |= REQ_F_NOWAIT; in io_rw_init_file()
709 req->iopoll_completed = 0; in io_rw_init_file()
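io_rw_init_file() above is where O_NONBLOCK on the file and per-file nowait support are folded into REQ_F_NOWAIT, meaning the request should fail with -EAGAIN rather than block or be retried. The closest userspace equivalent of non-blocking regular-file I/O is RWF_NOWAIT on preadv2(2); a small sketch, assuming a glibc and kernel new enough to expose that flag:

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        int fd = open("/etc/hostname", O_RDONLY);
        ssize_t n;

        if (fd < 0)
                return 1;

        /* RWF_NOWAIT: return -1/EAGAIN rather than block, e.g. when the
         * data is not yet in the page cache for buffered file I/O. */
        n = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
        if (n < 0 && errno == EAGAIN)
                printf("would have blocked\n");
        else if (n >= 0)
                printf("read %zd bytes without blocking\n", n);

        close(fd);
        return 0;
}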
719 static int __io_read(struct io_kiocb *req, unsigned int issue_flags) in __io_read() argument
721 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in __io_read()
730 if (!req_has_async_data(req)) { in __io_read()
731 ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags); in __io_read()
735 io = req->async_data; in __io_read()
742 if (io_do_buffer_select(req)) { in __io_read()
743 ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags); in __io_read()
756 ret = io_rw_init_file(req, FMODE_READ); in __io_read()
761 req->cqe.res = iov_iter_count(&s->iter); in __io_read()
765 if (unlikely(!io_file_supports_nowait(req, EPOLLIN))) { in __io_read()
766 ret = io_setup_async_rw(req, iovec, s, true); in __io_read()
775 ppos = io_kiocb_update_pos(req); in __io_read()
777 ret = rw_verify_area(READ, req->file, ppos, req->cqe.res); in __io_read()
793 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { in __io_read()
794 req->flags &= ~REQ_F_REISSUE; in __io_read()
796 if (req->opcode == IORING_OP_READ && io_file_can_poll(req)) in __io_read()
799 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_read()
802 if (req->flags & REQ_F_NOWAIT) in __io_read()
806 req->flags |= REQ_F_PARTIAL_IO; in __io_read()
807 io_kbuf_recycle(req, issue_flags); in __io_read()
811 } else if (ret == req->cqe.res || ret <= 0 || !force_nonblock || in __io_read()
812 (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) { in __io_read()
824 ret2 = io_setup_async_rw(req, iovec, s, true); in __io_read()
831 req->flags |= REQ_F_PARTIAL_IO; in __io_read()
832 io_kbuf_recycle(req, issue_flags); in __io_read()
834 io = req->async_data; in __io_read()
854 if (!io_rw_should_retry(req)) { in __io_read()
859 req->cqe.res = iov_iter_count(&s->iter); in __io_read()
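The __io_read() matches above include the short-read handling: for regular files and block devices (need_complete_io()), partial progress is saved in the request's async data (io->bytes_done) and the request is re-issued until the full amount has been transferred, EOF is hit, or a real error occurs. The userspace equivalent is the familiar full-read loop:

#include <errno.h>
#include <unistd.h>

/* Keep reading until count bytes have arrived, EOF, or a hard error;
 * returns bytes read, or -1 with errno set on error. */
static ssize_t read_full(int fd, void *buf, size_t count)
{
        size_t done = 0;

        while (done < count) {
                ssize_t n = read(fd, (char *)buf + done, count - done);

                if (n < 0) {
                        if (errno == EINTR)
                                continue;
                        return -1;
                }
                if (n == 0)
                        break;          /* EOF */
                done += n;
        }
        return (ssize_t)done;
}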
880 int io_read(struct io_kiocb *req, unsigned int issue_flags) in io_read() argument
884 ret = __io_read(req, issue_flags); in io_read()
886 return kiocb_done(req, ret, issue_flags); in io_read()
891 static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb) in io_kiocb_start_write() argument
896 if (!(req->flags & REQ_F_ISREG)) in io_kiocb_start_write()
910 int io_write(struct io_kiocb *req, unsigned int issue_flags) in io_write() argument
912 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_write()
920 if (!req_has_async_data(req)) { in io_write()
921 ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags); in io_write()
925 struct io_async_rw *io = req->async_data; in io_write()
931 ret = io_rw_init_file(req, FMODE_WRITE); in io_write()
936 req->cqe.res = iov_iter_count(&s->iter); in io_write()
940 if (unlikely(!io_file_supports_nowait(req, EPOLLOUT))) in io_write()
946 (req->flags & REQ_F_ISREG)) in io_write()
955 ppos = io_kiocb_update_pos(req); in io_write()
957 ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res); in io_write()
963 if (unlikely(!io_kiocb_start_write(req, kiocb))) in io_write()
967 if (likely(req->file->f_op->write_iter)) in io_write()
968 ret2 = call_write_iter(req->file, kiocb, &s->iter); in io_write()
969 else if (req->file->f_op->write) in io_write()
975 req->flags |= REQ_F_PARTIAL_IO; in io_write()
976 io_kbuf_recycle(req, issue_flags); in io_write()
979 if (req->flags & REQ_F_REISSUE) { in io_write()
980 req->flags &= ~REQ_F_REISSUE; in io_write()
991 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) in io_write()
995 if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL)) in io_write()
998 if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) { in io_write()
1001 trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2, in io_write()
1002 req->cqe.res, ret2); in io_write()
1010 ret = io_setup_async_rw(req, iovec, s, true); in io_write()
1012 io = req->async_data; in io_write()
1017 io_req_end_write(req); in io_write()
1021 ret = kiocb_done(req, ret2, issue_flags); in io_write()
1025 ret = io_setup_async_rw(req, iovec, s, false); in io_write()
1028 io_req_end_write(req); in io_write()
1039 void io_rw_fail(struct io_kiocb *req) in io_rw_fail() argument
1043 res = io_fixup_rw_res(req, req->cqe.res); in io_rw_fail()
1044 io_req_set_res(req, res, req->cqe.flags); in io_rw_fail()
1062 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); in io_do_iopoll() local
1063 struct file *file = req->file; in io_do_iopoll()
1071 if (READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
1074 if (req->opcode == IORING_OP_URING_CMD) { in io_do_iopoll()
1077 ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd); in io_do_iopoll()
1081 struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw); in io_do_iopoll()
1090 /* iopoll may have completed current req */ in io_do_iopoll()
1092 READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
1103 struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list); in io_do_iopoll() local
1106 if (!smp_load_acquire(&req->iopoll_completed)) in io_do_iopoll()
1109 req->cqe.flags = io_put_kbuf(req, 0); in io_do_iopoll()
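The smp_store_release() in io_complete_rw_iopoll() and the smp_load_acquire() in io_do_iopoll() above form a release/acquire pair: the completion side publishes req->cqe.res before setting iopoll_completed, and the polling side only reads the result after it has observed the flag. A minimal userspace C11 sketch of that publication pattern (illustrative names, single producer and consumer assumed):

#include <stdatomic.h>
#include <stdbool.h>

struct fake_req {
        int res;                /* result, written by the completing side */
        atomic_bool completed;  /* publication flag                       */
};

/* Completion side: make res visible before the flag (release store). */
static void complete_req(struct fake_req *req, int res)
{
        req->res = res;
        atomic_store_explicit(&req->completed, true, memory_order_release);
}

/* Reaping side: only trust req->res once the flag is seen (acquire load). */
static bool reap_req(struct fake_req *req, int *res)
{
        if (!atomic_load_explicit(&req->completed, memory_order_acquire))
                return false;
        *res = req->res;
        return true;
}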