Lines Matching full:req
509 struct io_kiocb *req; member
809 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
904 struct io_kiocb *req; member
909 /* needs req->file assigned */
1078 static bool io_disarm_next(struct io_kiocb *req);
1085 static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags);
1087 static void io_put_req(struct io_kiocb *req);
1088 static void io_put_req_deferred(struct io_kiocb *req);
1089 static void io_dismantle_req(struct io_kiocb *req);
1090 static void io_queue_linked_timeout(struct io_kiocb *req);
1094 static void io_clean_op(struct io_kiocb *req);
1096 struct io_kiocb *req, int fd, bool fixed,
1098 static void __io_queue_sqe(struct io_kiocb *req);
1101 static void io_req_task_queue(struct io_kiocb *req);
1103 static int io_req_prep_async(struct io_kiocb *req);
1105 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
1107 static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
1143 #define req_ref_zero_or_close_to_overflow(req) \ argument
1144 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
1146 static inline bool req_ref_inc_not_zero(struct io_kiocb *req) in req_ref_inc_not_zero() argument
1148 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); in req_ref_inc_not_zero()
1149 return atomic_inc_not_zero(&req->refs); in req_ref_inc_not_zero()
1152 static inline bool req_ref_put_and_test(struct io_kiocb *req) in req_ref_put_and_test() argument
1154 if (likely(!(req->flags & REQ_F_REFCOUNT))) in req_ref_put_and_test()
1157 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req)); in req_ref_put_and_test()
1158 return atomic_dec_and_test(&req->refs); in req_ref_put_and_test()
1161 static inline void req_ref_get(struct io_kiocb *req) in req_ref_get() argument
1163 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); in req_ref_get()
1164 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req)); in req_ref_get()
1165 atomic_inc(&req->refs); in req_ref_get()
1168 static inline void __io_req_set_refcount(struct io_kiocb *req, int nr) in __io_req_set_refcount() argument
1170 if (!(req->flags & REQ_F_REFCOUNT)) { in __io_req_set_refcount()
1171 req->flags |= REQ_F_REFCOUNT; in __io_req_set_refcount()
1172 atomic_set(&req->refs, nr); in __io_req_set_refcount()
1176 static inline void io_req_set_refcount(struct io_kiocb *req) in io_req_set_refcount() argument
1178 __io_req_set_refcount(req, 1); in io_req_set_refcount()
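
The req_ref_* hits above (source lines 1143-1178) show io_uring's opt-in request refcounting: a request only pays for atomic reference counts once REQ_F_REFCOUNT is set, req_ref_put_and_test() treats an untracked request as immediately droppable, and the guard macro exploits unsigned wraparound, since refs + 127u <= 127u holds exactly when refs is zero or among the 127 largest unsigned int values (i.e. about to overflow). The sketch below reproduces that pattern with plain C11 atomics; the struct, flag bit and helper names are illustrative stand-ins, not the kernel's definitions.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REQ_F_REFCOUNT (1u << 0)        /* stand-in flag bit */

struct fake_req {
    unsigned int flags;
    atomic_uint refs;
};

/* Same trick as req_ref_zero_or_close_to_overflow(): unsigned overflow
 * folds "refs == 0" and "refs about to wrap" into one comparison. */
static bool ref_zero_or_close_to_overflow(struct fake_req *req)
{
    return (unsigned int)atomic_load(&req->refs) + 127u <= 127u;
}

/* Mirrors req_ref_put_and_test(): a request that never opted in to
 * refcounting is treated as holding a single implicit reference. */
static bool ref_put_and_test(struct fake_req *req)
{
    if (!(req->flags & REQ_F_REFCOUNT))
        return true;                    /* last (implicit) reference */
    return atomic_fetch_sub(&req->refs, 1) == 1;
}

/* Mirrors __io_req_set_refcount(): enable tracking on first use only. */
static void ref_set(struct fake_req *req, unsigned int nr)
{
    if (!(req->flags & REQ_F_REFCOUNT)) {
        req->flags |= REQ_F_REFCOUNT;
        atomic_store(&req->refs, nr);
    }
}

int main(void)
{
    struct fake_req req = { .flags = 0 };

    printf("untracked put frees: %d\n", ref_put_and_test(&req));
    ref_set(&req, 2);                   /* e.g. request + linked timeout */
    printf("first put frees:     %d\n", ref_put_and_test(&req));
    printf("second put frees:    %d\n", ref_put_and_test(&req));
    printf("zero/overflow check: %d\n", ref_zero_or_close_to_overflow(&req));
    return 0;
}
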
1181 static inline void io_req_set_rsrc_node(struct io_kiocb *req) in io_req_set_rsrc_node() argument
1183 struct io_ring_ctx *ctx = req->ctx; in io_req_set_rsrc_node()
1185 if (!req->fixed_rsrc_refs) { in io_req_set_rsrc_node()
1186 req->fixed_rsrc_refs = &ctx->rsrc_node->refs; in io_req_set_rsrc_node()
1187 percpu_ref_get(req->fixed_rsrc_refs); in io_req_set_rsrc_node()
1205 __must_hold(&req->ctx->timeout_lock) in io_match_task()
1207 struct io_kiocb *req; in io_match_task() local
1214 io_for_each_link(req, head) { in io_match_task()
1215 if (req->flags & REQ_F_INFLIGHT) in io_match_task()
1223 struct io_kiocb *req; in io_match_linked() local
1225 io_for_each_link(req, head) { in io_match_linked()
1226 if (req->flags & REQ_F_INFLIGHT) in io_match_linked()
1259 static inline void req_set_fail(struct io_kiocb *req) in req_set_fail() argument
1261 req->flags |= REQ_F_FAIL; in req_set_fail()
1264 static inline void req_fail_link_node(struct io_kiocb *req, int res) in req_fail_link_node() argument
1266 req_set_fail(req); in req_fail_link_node()
1267 req->result = res; in req_fail_link_node()
1277 static inline bool io_is_timeout_noseq(struct io_kiocb *req) in io_is_timeout_noseq() argument
1279 return !req->timeout.off; in io_is_timeout_noseq()
1287 struct io_kiocb *req, *tmp; in io_fallback_req_func() local
1291 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node) in io_fallback_req_func()
1292 req->io_task_work.func(req, &locked); in io_fallback_req_func()
1377 static bool req_need_defer(struct io_kiocb *req, u32 seq) in req_need_defer() argument
1379 if (unlikely(req->flags & REQ_F_IO_DRAIN)) { in req_need_defer()
1380 struct io_ring_ctx *ctx = req->ctx; in req_need_defer()
1397 static inline bool io_req_ffs_set(struct io_kiocb *req) in io_req_ffs_set() argument
1399 return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE); in io_req_ffs_set()
1402 static void io_req_track_inflight(struct io_kiocb *req) in io_req_track_inflight() argument
1404 if (!(req->flags & REQ_F_INFLIGHT)) { in io_req_track_inflight()
1405 req->flags |= REQ_F_INFLIGHT; in io_req_track_inflight()
1406 atomic_inc(&req->task->io_uring->inflight_tracked); in io_req_track_inflight()
1410 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req) in __io_prep_linked_timeout() argument
1412 if (WARN_ON_ONCE(!req->link)) in __io_prep_linked_timeout()
1415 req->flags &= ~REQ_F_ARM_LTIMEOUT; in __io_prep_linked_timeout()
1416 req->flags |= REQ_F_LINK_TIMEOUT; in __io_prep_linked_timeout()
1419 io_req_set_refcount(req); in __io_prep_linked_timeout()
1420 __io_req_set_refcount(req->link, 2); in __io_prep_linked_timeout()
1421 return req->link; in __io_prep_linked_timeout()
1424 static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) in io_prep_linked_timeout() argument
1426 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT))) in io_prep_linked_timeout()
1428 return __io_prep_linked_timeout(req); in io_prep_linked_timeout()
1431 static void io_prep_async_work(struct io_kiocb *req) in io_prep_async_work() argument
1433 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_prep_async_work()
1434 struct io_ring_ctx *ctx = req->ctx; in io_prep_async_work()
1436 if (!(req->flags & REQ_F_CREDS)) { in io_prep_async_work()
1437 req->flags |= REQ_F_CREDS; in io_prep_async_work()
1438 req->creds = get_current_cred(); in io_prep_async_work()
1441 req->work.list.next = NULL; in io_prep_async_work()
1442 req->work.flags = 0; in io_prep_async_work()
1443 if (req->flags & REQ_F_FORCE_ASYNC) in io_prep_async_work()
1444 req->work.flags |= IO_WQ_WORK_CONCURRENT; in io_prep_async_work()
1446 if (req->flags & REQ_F_ISREG) { in io_prep_async_work()
1448 io_wq_hash_work(&req->work, file_inode(req->file)); in io_prep_async_work()
1449 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) { in io_prep_async_work()
1451 req->work.flags |= IO_WQ_WORK_UNBOUND; in io_prep_async_work()
1455 static void io_prep_async_link(struct io_kiocb *req) in io_prep_async_link() argument
1459 if (req->flags & REQ_F_LINK_TIMEOUT) { in io_prep_async_link()
1460 struct io_ring_ctx *ctx = req->ctx; in io_prep_async_link()
1463 io_for_each_link(cur, req) in io_prep_async_link()
1467 io_for_each_link(cur, req) in io_prep_async_link()
1472 static void io_queue_async_work(struct io_kiocb *req, bool *locked) in io_queue_async_work() argument
1474 struct io_ring_ctx *ctx = req->ctx; in io_queue_async_work()
1475 struct io_kiocb *link = io_prep_linked_timeout(req); in io_queue_async_work()
1476 struct io_uring_task *tctx = req->task->io_uring; in io_queue_async_work()
1485 io_prep_async_link(req); in io_queue_async_work()
1494 if (WARN_ON_ONCE(!same_thread_group(req->task, current))) in io_queue_async_work()
1495 req->work.flags |= IO_WQ_WORK_CANCEL; in io_queue_async_work()
1497 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, in io_queue_async_work()
1498 &req->work, req->flags); in io_queue_async_work()
1499 io_wq_enqueue(tctx->io_wq, &req->work); in io_queue_async_work()
1504 static void io_kill_timeout(struct io_kiocb *req, int status) in io_kill_timeout() argument
1505 __must_hold(&req->ctx->completion_lock) in io_kill_timeout()
1506 __must_hold(&req->ctx->timeout_lock) in io_kill_timeout()
1508 struct io_timeout_data *io = req->async_data; in io_kill_timeout()
1512 req_set_fail(req); in io_kill_timeout()
1513 atomic_set(&req->ctx->cq_timeouts, in io_kill_timeout()
1514 atomic_read(&req->ctx->cq_timeouts) + 1); in io_kill_timeout()
1515 list_del_init(&req->timeout.list); in io_kill_timeout()
1516 io_fill_cqe_req(req, status, 0); in io_kill_timeout()
1517 io_put_req_deferred(req); in io_kill_timeout()
1527 if (req_need_defer(de->req, de->seq)) in io_queue_deferred()
1530 io_req_task_queue(de->req); in io_queue_deferred()
1539 struct io_kiocb *req, *tmp; in io_flush_timeouts() local
1542 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { in io_flush_timeouts()
1545 if (io_is_timeout_noseq(req)) in io_flush_timeouts()
1555 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush; in io_flush_timeouts()
1560 io_kill_timeout(req, 0); in io_flush_timeouts()
1817 static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags) in io_fill_cqe_req() argument
1819 __io_fill_cqe(req->ctx, req->user_data, res, cflags); in io_fill_cqe_req()
1829 static void io_req_complete_post(struct io_kiocb *req, s32 res, in io_req_complete_post() argument
1832 struct io_ring_ctx *ctx = req->ctx; in io_req_complete_post()
1835 __io_fill_cqe(ctx, req->user_data, res, cflags); in io_req_complete_post()
1840 if (req_ref_put_and_test(req)) { in io_req_complete_post()
1841 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { in io_req_complete_post()
1842 if (req->flags & IO_DISARM_MASK) in io_req_complete_post()
1843 io_disarm_next(req); in io_req_complete_post()
1844 if (req->link) { in io_req_complete_post()
1845 io_req_task_queue(req->link); in io_req_complete_post()
1846 req->link = NULL; in io_req_complete_post()
1849 io_dismantle_req(req); in io_req_complete_post()
1850 io_put_task(req->task, 1); in io_req_complete_post()
1851 list_add(&req->inflight_entry, &ctx->locked_free_list); in io_req_complete_post()
1855 req = NULL; in io_req_complete_post()
1860 if (req) { in io_req_complete_post()
1866 static inline bool io_req_needs_clean(struct io_kiocb *req) in io_req_needs_clean() argument
1868 return req->flags & IO_REQ_CLEAN_FLAGS; in io_req_needs_clean()
1871 static inline void io_req_complete_state(struct io_kiocb *req, s32 res, in io_req_complete_state() argument
1874 if (io_req_needs_clean(req)) in io_req_complete_state()
1875 io_clean_op(req); in io_req_complete_state()
1876 req->result = res; in io_req_complete_state()
1877 req->compl.cflags = cflags; in io_req_complete_state()
1878 req->flags |= REQ_F_COMPLETE_INLINE; in io_req_complete_state()
1881 static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags, in __io_req_complete() argument
1885 io_req_complete_state(req, res, cflags); in __io_req_complete()
1887 io_req_complete_post(req, res, cflags); in __io_req_complete()
1890 static inline void io_req_complete(struct io_kiocb *req, s32 res) in io_req_complete() argument
1892 __io_req_complete(req, 0, res, 0); in io_req_complete()
1895 static void io_req_complete_failed(struct io_kiocb *req, s32 res) in io_req_complete_failed() argument
1897 req_set_fail(req); in io_req_complete_failed()
1898 io_req_complete_post(req, res, 0); in io_req_complete_failed()
1901 static void io_req_complete_fail_submit(struct io_kiocb *req) in io_req_complete_fail_submit() argument
1907 req->flags &= ~REQ_F_HARDLINK; in io_req_complete_fail_submit()
1908 req->flags |= REQ_F_LINK; in io_req_complete_fail_submit()
1909 io_req_complete_failed(req, req->result); in io_req_complete_fail_submit()
1916 static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx) in io_preinit_req() argument
1918 req->ctx = ctx; in io_preinit_req()
1919 req->link = NULL; in io_preinit_req()
1920 req->async_data = NULL; in io_preinit_req()
1922 req->result = 0; in io_preinit_req()
1950 struct io_kiocb *req = list_first_entry(&state->free_list, in io_flush_cached_reqs() local
1953 list_del(&req->inflight_entry); in io_flush_cached_reqs()
1954 state->reqs[nr++] = req; in io_flush_cached_reqs()
2009 static void io_dismantle_req(struct io_kiocb *req) in io_dismantle_req() argument
2011 unsigned int flags = req->flags; in io_dismantle_req()
2013 if (io_req_needs_clean(req)) in io_dismantle_req()
2014 io_clean_op(req); in io_dismantle_req()
2016 io_put_file(req->file); in io_dismantle_req()
2017 if (req->fixed_rsrc_refs) in io_dismantle_req()
2018 percpu_ref_put(req->fixed_rsrc_refs); in io_dismantle_req()
2019 if (req->async_data) { in io_dismantle_req()
2020 kfree(req->async_data); in io_dismantle_req()
2021 req->async_data = NULL; in io_dismantle_req()
2025 static void __io_free_req(struct io_kiocb *req) in __io_free_req() argument
2027 struct io_ring_ctx *ctx = req->ctx; in __io_free_req()
2029 io_dismantle_req(req); in __io_free_req()
2030 io_put_task(req->task, 1); in __io_free_req()
2033 list_add(&req->inflight_entry, &ctx->locked_free_list); in __io_free_req()
2040 static inline void io_remove_next_linked(struct io_kiocb *req) in io_remove_next_linked() argument
2042 struct io_kiocb *nxt = req->link; in io_remove_next_linked()
2044 req->link = nxt->link; in io_remove_next_linked()
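
The io_match_task()/io_match_linked() hits earlier (source lines 1205-1226) and io_remove_next_linked() above show that linked requests form a singly linked chain through req->link: the chain is walked with io_for_each_link() and an element is dropped by repointing a single pointer. A minimal userspace sketch of that chain follows; the for_each_link macro body, field names and flag value are assumptions modelled on the usage above, not copied from the kernel.

#include <stdio.h>

#define REQ_F_INFLIGHT (1u << 0)        /* stand-in flag bit */

/* Simplified request chained through ->link. */
struct fake_req {
    unsigned int flags;
    struct fake_req *link;
};

/* Assumed shape of io_for_each_link(): visit head and every linked request. */
#define for_each_link(pos, head) \
    for ((pos) = (head); (pos); (pos) = (pos)->link)

/* Same one-pointer splice as io_remove_next_linked(): detach req's successor. */
static void remove_next_linked(struct fake_req *req)
{
    struct fake_req *nxt = req->link;

    req->link = nxt->link;
    nxt->link = NULL;
}

/* In the spirit of io_match_linked(): is any request in the chain inflight? */
static int chain_has_inflight(struct fake_req *head)
{
    struct fake_req *req;

    for_each_link(req, head) {
        if (req->flags & REQ_F_INFLIGHT)
            return 1;
    }
    return 0;
}

int main(void)
{
    struct fake_req c = { REQ_F_INFLIGHT, NULL };
    struct fake_req b = { 0, &c };
    struct fake_req a = { 0, &b };

    printf("inflight before splice: %d\n", chain_has_inflight(&a));
    remove_next_linked(&b);             /* unlink c from a -> b -> c */
    printf("inflight after splice:  %d\n", chain_has_inflight(&a));
    return 0;
}
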
2048 static bool io_kill_linked_timeout(struct io_kiocb *req) in io_kill_linked_timeout() argument
2049 __must_hold(&req->ctx->completion_lock) in io_kill_linked_timeout()
2050 __must_hold(&req->ctx->timeout_lock) in io_kill_linked_timeout()
2052 struct io_kiocb *link = req->link; in io_kill_linked_timeout()
2057 io_remove_next_linked(req); in io_kill_linked_timeout()
2069 static void io_fail_links(struct io_kiocb *req) in io_fail_links() argument
2070 __must_hold(&req->ctx->completion_lock) in io_fail_links()
2072 struct io_kiocb *nxt, *link = req->link; in io_fail_links()
2074 req->link = NULL; in io_fail_links()
2084 trace_io_uring_fail_link(req, link); in io_fail_links()
2091 static bool io_disarm_next(struct io_kiocb *req) in io_disarm_next() argument
2092 __must_hold(&req->ctx->completion_lock) in io_disarm_next()
2096 if (req->flags & REQ_F_ARM_LTIMEOUT) { in io_disarm_next()
2097 struct io_kiocb *link = req->link; in io_disarm_next()
2099 req->flags &= ~REQ_F_ARM_LTIMEOUT; in io_disarm_next()
2101 io_remove_next_linked(req); in io_disarm_next()
2106 } else if (req->flags & REQ_F_LINK_TIMEOUT) { in io_disarm_next()
2107 struct io_ring_ctx *ctx = req->ctx; in io_disarm_next()
2110 posted = io_kill_linked_timeout(req); in io_disarm_next()
2113 if (unlikely((req->flags & REQ_F_FAIL) && in io_disarm_next()
2114 !(req->flags & REQ_F_HARDLINK))) { in io_disarm_next()
2115 posted |= (req->link != NULL); in io_disarm_next()
2116 io_fail_links(req); in io_disarm_next()
2121 static struct io_kiocb *__io_req_find_next(struct io_kiocb *req) in __io_req_find_next() argument
2131 if (req->flags & IO_DISARM_MASK) { in __io_req_find_next()
2132 struct io_ring_ctx *ctx = req->ctx; in __io_req_find_next()
2136 posted = io_disarm_next(req); in __io_req_find_next()
2138 io_commit_cqring(req->ctx); in __io_req_find_next()
2143 nxt = req->link; in __io_req_find_next()
2144 req->link = NULL; in __io_req_find_next()
2148 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req) in io_req_find_next() argument
2150 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK)))) in io_req_find_next()
2152 return __io_req_find_next(req); in io_req_find_next()
2192 struct io_kiocb *req = container_of(node, struct io_kiocb, in tctx_task_work() local
2195 if (req->ctx != ctx) { in tctx_task_work()
2197 ctx = req->ctx; in tctx_task_work()
2202 req->io_task_work.func(req, &locked); in tctx_task_work()
2216 static void io_req_task_work_add(struct io_kiocb *req) in io_req_task_work_add() argument
2218 struct task_struct *tsk = req->task; in io_req_task_work_add()
2228 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list); in io_req_task_work_add()
2244 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL; in io_req_task_work_add()
2257 req = container_of(node, struct io_kiocb, io_task_work.node); in io_req_task_work_add()
2259 if (llist_add(&req->io_task_work.fallback_node, in io_req_task_work_add()
2260 &req->ctx->fallback_llist)) in io_req_task_work_add()
2261 schedule_delayed_work(&req->ctx->fallback_work, 1); in io_req_task_work_add()
2265 static void io_req_task_cancel(struct io_kiocb *req, bool *locked) in io_req_task_cancel() argument
2267 struct io_ring_ctx *ctx = req->ctx; in io_req_task_cancel()
2271 io_req_complete_failed(req, req->result); in io_req_task_cancel()
2274 static void io_req_task_submit(struct io_kiocb *req, bool *locked) in io_req_task_submit() argument
2276 struct io_ring_ctx *ctx = req->ctx; in io_req_task_submit()
2279 /* req->task == current here, checking PF_EXITING is safe */ in io_req_task_submit()
2280 if (likely(!(req->task->flags & PF_EXITING))) in io_req_task_submit()
2281 __io_queue_sqe(req); in io_req_task_submit()
2283 io_req_complete_failed(req, -EFAULT); in io_req_task_submit()
2286 static void io_req_task_queue_fail(struct io_kiocb *req, int ret) in io_req_task_queue_fail() argument
2288 req->result = ret; in io_req_task_queue_fail()
2289 req->io_task_work.func = io_req_task_cancel; in io_req_task_queue_fail()
2290 io_req_task_work_add(req); in io_req_task_queue_fail()
2293 static void io_req_task_queue(struct io_kiocb *req) in io_req_task_queue() argument
2295 req->io_task_work.func = io_req_task_submit; in io_req_task_queue()
2296 io_req_task_work_add(req); in io_req_task_queue()
2299 static void io_req_task_queue_reissue(struct io_kiocb *req) in io_req_task_queue_reissue() argument
2301 req->io_task_work.func = io_queue_async_work; in io_req_task_queue_reissue()
2302 io_req_task_work_add(req); in io_req_task_queue_reissue()
2305 static inline void io_queue_next(struct io_kiocb *req) in io_queue_next() argument
2307 struct io_kiocb *nxt = io_req_find_next(req); in io_queue_next()
2313 static void io_free_req(struct io_kiocb *req) in io_free_req() argument
2315 io_queue_next(req); in io_free_req()
2316 __io_free_req(req); in io_free_req()
2319 static void io_free_req_work(struct io_kiocb *req, bool *locked) in io_free_req_work() argument
2321 io_free_req(req); in io_free_req_work()
2346 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, in io_req_free_batch() argument
2349 io_queue_next(req); in io_req_free_batch()
2350 io_dismantle_req(req); in io_req_free_batch()
2352 if (req->task != rb->task) { in io_req_free_batch()
2355 rb->task = req->task; in io_req_free_batch()
2362 state->reqs[state->free_reqs++] = req; in io_req_free_batch()
2364 list_add(&req->inflight_entry, &state->free_list); in io_req_free_batch()
2376 struct io_kiocb *req = state->compl_reqs[i]; in io_submit_flush_completions() local
2378 __io_fill_cqe(ctx, req->user_data, req->result, in io_submit_flush_completions()
2379 req->compl.cflags); in io_submit_flush_completions()
2387 struct io_kiocb *req = state->compl_reqs[i]; in io_submit_flush_completions() local
2389 if (req_ref_put_and_test(req)) in io_submit_flush_completions()
2390 io_req_free_batch(&rb, req, &ctx->submit_state); in io_submit_flush_completions()
2401 static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) in io_put_req_find_next() argument
2405 if (req_ref_put_and_test(req)) { in io_put_req_find_next()
2406 nxt = io_req_find_next(req); in io_put_req_find_next()
2407 __io_free_req(req); in io_put_req_find_next()
2412 static inline void io_put_req(struct io_kiocb *req) in io_put_req() argument
2414 if (req_ref_put_and_test(req)) in io_put_req()
2415 io_free_req(req); in io_put_req()
2418 static inline void io_put_req_deferred(struct io_kiocb *req) in io_put_req_deferred() argument
2420 if (req_ref_put_and_test(req)) { in io_put_req_deferred()
2421 req->io_task_work.func = io_free_req_work; in io_put_req_deferred()
2422 io_req_task_work_add(req); in io_put_req_deferred()
2441 static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf) in io_put_kbuf() argument
2447 req->flags &= ~REQ_F_BUFFER_SELECTED; in io_put_kbuf()
2452 static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req) in io_put_rw_kbuf() argument
2456 if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) in io_put_rw_kbuf()
2458 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_put_rw_kbuf()
2459 return io_put_kbuf(req, kbuf); in io_put_rw_kbuf()
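
io_put_rw_kbuf() above recovers a struct io_buffer pointer that was earlier packed into the request's u64 rw.addr field (the matching store, req->rw.addr = (u64)(unsigned long)kbuf, appears further down in the io_rw_buffer_select() hits at source line 3215), with REQ_F_BUFFER_SELECTED marking which interpretation of the field is live. A minimal sketch of that round trip, using stand-in types and a stand-in flag value:

#include <stdint.h>
#include <stdio.h>

#define REQ_F_BUFFER_SELECTED (1u << 0) /* stand-in flag bit */

struct fake_buf { int bid; };

/* Simplified request: the u64 addr field holds a user address until a
 * buffer is selected, after which it holds a kernel pointer instead. */
struct fake_req {
    unsigned int flags;
    uint64_t addr;
};

/* Pack the buffer pointer into the u64 field, as io_rw_buffer_select() does. */
static void select_buffer(struct fake_req *req, struct fake_buf *kbuf)
{
    req->addr = (uint64_t)(unsigned long)kbuf;
    req->flags |= REQ_F_BUFFER_SELECTED;
}

/* Recover and clear it again, as io_put_rw_kbuf()/io_put_kbuf() do. */
static struct fake_buf *put_buffer(struct fake_req *req)
{
    struct fake_buf *kbuf;

    if (!(req->flags & REQ_F_BUFFER_SELECTED))
        return NULL;
    kbuf = (struct fake_buf *)(unsigned long)req->addr;
    req->flags &= ~REQ_F_BUFFER_SELECTED;
    return kbuf;
}

int main(void)
{
    struct fake_buf buf = { .bid = 7 };
    struct fake_req req = { 0, 0 };

    select_buffer(&req, &buf);
    printf("recovered buffer id: %d\n", put_buffer(&req)->bid);
    return 0;
}
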
2489 struct io_kiocb *req; in io_iopoll_complete() local
2499 req = list_first_entry(done, struct io_kiocb, inflight_entry); in io_iopoll_complete()
2500 list_del(&req->inflight_entry); in io_iopoll_complete()
2501 cflags = io_put_rw_kbuf(req); in io_iopoll_complete()
2506 WRITE_ONCE(cqe->user_data, req->user_data); in io_iopoll_complete()
2507 WRITE_ONCE(cqe->res, req->result); in io_iopoll_complete()
2511 io_cqring_event_overflow(ctx, req->user_data, in io_iopoll_complete()
2512 req->result, cflags); in io_iopoll_complete()
2516 if (req_ref_put_and_test(req)) in io_iopoll_complete()
2517 io_req_free_batch(&rb, req, &ctx->submit_state); in io_iopoll_complete()
2528 struct io_kiocb *req, *tmp; in io_do_iopoll() local
2538 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) { in io_do_iopoll()
2539 struct kiocb *kiocb = &req->rw.kiocb; in io_do_iopoll()
2547 if (READ_ONCE(req->iopoll_completed)) { in io_do_iopoll()
2548 list_move_tail(&req->inflight_entry, &done); in io_do_iopoll()
2560 /* iopoll may have completed current req */ in io_do_iopoll()
2561 if (READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
2562 list_move_tail(&req->inflight_entry, &done); in io_do_iopoll()
2653 static void kiocb_end_write(struct io_kiocb *req) in kiocb_end_write() argument
2659 if (req->flags & REQ_F_ISREG) { in kiocb_end_write()
2660 struct super_block *sb = file_inode(req->file)->i_sb; in kiocb_end_write()
2668 static bool io_resubmit_prep(struct io_kiocb *req) in io_resubmit_prep() argument
2670 struct io_async_rw *rw = req->async_data; in io_resubmit_prep()
2673 return !io_req_prep_async(req); in io_resubmit_prep()
2678 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue() argument
2680 umode_t mode = file_inode(req->file)->i_mode; in io_rw_should_reissue()
2681 struct io_ring_ctx *ctx = req->ctx; in io_rw_should_reissue()
2685 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && in io_rw_should_reissue()
2699 if (!same_thread_group(req->task, current) || !in_task()) in io_rw_should_reissue()
2704 static bool io_resubmit_prep(struct io_kiocb *req) in io_resubmit_prep() argument
2708 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue() argument
2718 static void io_req_io_end(struct io_kiocb *req) in io_req_io_end() argument
2720 struct io_rw *rw = &req->rw; in io_req_io_end()
2723 kiocb_end_write(req); in io_req_io_end()
2724 fsnotify_modify(req->file); in io_req_io_end()
2726 fsnotify_access(req->file); in io_req_io_end()
2730 static bool __io_complete_rw_common(struct io_kiocb *req, long res) in __io_complete_rw_common() argument
2732 if (res != req->result) { in __io_complete_rw_common()
2734 io_rw_should_reissue(req)) { in __io_complete_rw_common()
2739 io_req_io_end(req); in __io_complete_rw_common()
2740 req->flags |= REQ_F_REISSUE; in __io_complete_rw_common()
2743 req_set_fail(req); in __io_complete_rw_common()
2744 req->result = res; in __io_complete_rw_common()
2749 static inline int io_fixup_rw_res(struct io_kiocb *req, long res) in io_fixup_rw_res() argument
2751 struct io_async_rw *io = req->async_data; in io_fixup_rw_res()
2763 static void io_req_task_complete(struct io_kiocb *req, bool *locked) in io_req_task_complete() argument
2765 unsigned int cflags = io_put_rw_kbuf(req); in io_req_task_complete()
2766 int res = req->result; in io_req_task_complete()
2769 struct io_ring_ctx *ctx = req->ctx; in io_req_task_complete()
2772 io_req_complete_state(req, res, cflags); in io_req_task_complete()
2773 state->compl_reqs[state->compl_nr++] = req; in io_req_task_complete()
2777 io_req_complete_post(req, res, cflags); in io_req_task_complete()
2781 static void io_req_rw_complete(struct io_kiocb *req, bool *locked) in io_req_rw_complete() argument
2783 io_req_io_end(req); in io_req_rw_complete()
2784 io_req_task_complete(req, locked); in io_req_rw_complete()
2789 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in io_complete_rw() local
2791 if (__io_complete_rw_common(req, res)) in io_complete_rw()
2793 req->result = io_fixup_rw_res(req, res); in io_complete_rw()
2794 req->io_task_work.func = io_req_rw_complete; in io_complete_rw()
2795 io_req_task_work_add(req); in io_complete_rw()
2800 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in io_complete_rw_iopoll() local
2803 kiocb_end_write(req); in io_complete_rw_iopoll()
2804 if (unlikely(res != req->result)) { in io_complete_rw_iopoll()
2805 if (res == -EAGAIN && io_rw_should_reissue(req)) { in io_complete_rw_iopoll()
2806 req->flags |= REQ_F_REISSUE; in io_complete_rw_iopoll()
2811 WRITE_ONCE(req->result, res); in io_complete_rw_iopoll()
2814 WRITE_ONCE(req->iopoll_completed, 1); in io_complete_rw_iopoll()
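
Both read/write completion callbacks above (io_complete_rw() and io_complete_rw_iopoll()) recover the owning request from the embedded kiocb via container_of(kiocb, struct io_kiocb, rw.kiocb). The sketch below shows the same embedded-member idiom with made-up types and a simplified container_of; the kernel's macro additionally type-checks the member pointer.

#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(): step back from a
 * member pointer to the structure that embeds it. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_kiocb { int ki_flags; };

/* Mirrors the io_kiocb/rw.kiocb arrangement in spirit only. */
struct fake_req {
    int user_data;
    struct fake_kiocb kiocb;
};

/* The completion callback is handed only the embedded kiocb, yet can
 * climb back to the request that contains it. */
static void complete_rw(struct fake_kiocb *kiocb, long res)
{
    struct fake_req *req = container_of(kiocb, struct fake_req, kiocb);

    printf("req %d completed with %ld\n", req->user_data, res);
}

int main(void)
{
    struct fake_req req = { .user_data = 42, .kiocb = { .ki_flags = 0 } };

    complete_rw(&req.kiocb, 4096);      /* caller only ever sees &req.kiocb */
    return 0;
}
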
2823 static void io_iopoll_req_issued(struct io_kiocb *req) in io_iopoll_req_issued() argument
2825 struct io_ring_ctx *ctx = req->ctx; in io_iopoll_req_issued()
2846 if (list_req->file != req->file) { in io_iopoll_req_issued()
2850 queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie); in io_iopoll_req_issued()
2860 if (READ_ONCE(req->iopoll_completed)) in io_iopoll_req_issued()
2861 list_add(&req->inflight_entry, &ctx->iopoll_list); in io_iopoll_req_issued()
2863 list_add_tail(&req->inflight_entry, &ctx->iopoll_list); in io_iopoll_req_issued()
2923 static bool io_file_supports_nowait(struct io_kiocb *req, int rw) in io_file_supports_nowait() argument
2925 if (rw == READ && (req->flags & REQ_F_NOWAIT_READ)) in io_file_supports_nowait()
2927 else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE)) in io_file_supports_nowait()
2930 return __io_file_supports_nowait(req->file, rw); in io_file_supports_nowait()
2933 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_prep_rw() argument
2936 struct io_ring_ctx *ctx = req->ctx; in io_prep_rw()
2937 struct kiocb *kiocb = &req->rw.kiocb; in io_prep_rw()
2938 struct file *file = req->file; in io_prep_rw()
2942 if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode)) in io_prep_rw()
2943 req->flags |= REQ_F_ISREG; in io_prep_rw()
2958 ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw))) in io_prep_rw()
2959 req->flags |= REQ_F_NOWAIT; in io_prep_rw()
2978 req->iopoll_completed = 0; in io_prep_rw()
2986 req->buf_index = READ_ONCE(sqe->buf_index); in io_prep_rw()
2987 req->imu = NULL; in io_prep_rw()
2989 if (req->opcode == IORING_OP_READ_FIXED || in io_prep_rw()
2990 req->opcode == IORING_OP_WRITE_FIXED) { in io_prep_rw()
2991 struct io_ring_ctx *ctx = req->ctx; in io_prep_rw()
2994 if (unlikely(req->buf_index >= ctx->nr_user_bufs)) in io_prep_rw()
2996 index = array_index_nospec(req->buf_index, ctx->nr_user_bufs); in io_prep_rw()
2997 req->imu = ctx->user_bufs[index]; in io_prep_rw()
2998 io_req_set_rsrc_node(req); in io_prep_rw()
3001 req->rw.addr = READ_ONCE(sqe->addr); in io_prep_rw()
3002 req->rw.len = READ_ONCE(sqe->len); in io_prep_rw()
3027 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req) in io_kiocb_update_pos() argument
3029 struct kiocb *kiocb = &req->rw.kiocb; in io_kiocb_update_pos()
3034 if (!(req->file->f_mode & FMODE_STREAM)) { in io_kiocb_update_pos()
3035 req->flags |= REQ_F_CUR_POS; in io_kiocb_update_pos()
3036 kiocb->ki_pos = req->file->f_pos; in io_kiocb_update_pos()
3047 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in kiocb_done() local
3049 if (req->flags & REQ_F_CUR_POS) in kiocb_done()
3050 req->file->f_pos = kiocb->ki_pos; in kiocb_done()
3052 if (!__io_complete_rw_common(req, ret)) { in kiocb_done()
3057 io_req_io_end(req); in kiocb_done()
3058 __io_req_complete(req, issue_flags, in kiocb_done()
3059 io_fixup_rw_res(req, ret), in kiocb_done()
3060 io_put_rw_kbuf(req)); in kiocb_done()
3066 if (req->flags & REQ_F_REISSUE) { in kiocb_done()
3067 req->flags &= ~REQ_F_REISSUE; in kiocb_done()
3068 if (io_resubmit_prep(req)) { in kiocb_done()
3069 io_req_task_queue_reissue(req); in kiocb_done()
3071 unsigned int cflags = io_put_rw_kbuf(req); in kiocb_done()
3072 struct io_ring_ctx *ctx = req->ctx; in kiocb_done()
3074 ret = io_fixup_rw_res(req, ret); in kiocb_done()
3075 req_set_fail(req); in kiocb_done()
3078 __io_req_complete(req, issue_flags, ret, cflags); in kiocb_done()
3081 __io_req_complete(req, issue_flags, ret, cflags); in kiocb_done()
3087 static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter, in __io_import_fixed() argument
3090 size_t len = req->rw.len; in __io_import_fixed()
3091 u64 buf_end, buf_addr = req->rw.addr; in __io_import_fixed()
3145 static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter) in io_import_fixed() argument
3147 if (WARN_ON_ONCE(!req->imu)) in io_import_fixed()
3149 return __io_import_fixed(req, rw, iter, req->imu); in io_import_fixed()
3170 static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, in io_buffer_select() argument
3176 if (req->flags & REQ_F_BUFFER_SELECTED) in io_buffer_select()
3179 io_ring_submit_lock(req->ctx, needs_lock); in io_buffer_select()
3181 lockdep_assert_held(&req->ctx->uring_lock); in io_buffer_select()
3183 head = xa_load(&req->ctx->io_buffers, bgid); in io_buffer_select()
3191 xa_erase(&req->ctx->io_buffers, bgid); in io_buffer_select()
3199 io_ring_submit_unlock(req->ctx, needs_lock); in io_buffer_select()
3204 static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len, in io_rw_buffer_select() argument
3210 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_rw_buffer_select()
3211 bgid = req->buf_index; in io_rw_buffer_select()
3212 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock); in io_rw_buffer_select()
3215 req->rw.addr = (u64) (unsigned long) kbuf; in io_rw_buffer_select()
3216 req->flags |= REQ_F_BUFFER_SELECTED; in io_rw_buffer_select()
3221 static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov, in io_compat_import() argument
3229 uiov = u64_to_user_ptr(req->rw.addr); in io_compat_import()
3238 buf = io_rw_buffer_select(req, &len, needs_lock); in io_compat_import()
3247 static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, in __io_iov_buffer_select() argument
3250 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr); in __io_iov_buffer_select()
3260 buf = io_rw_buffer_select(req, &len, needs_lock); in __io_iov_buffer_select()
3268 static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, in io_iov_buffer_select() argument
3271 if (req->flags & REQ_F_BUFFER_SELECTED) { in io_iov_buffer_select()
3274 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_iov_buffer_select()
3279 if (req->rw.len != 1) in io_iov_buffer_select()
3283 if (req->ctx->compat) in io_iov_buffer_select()
3284 return io_compat_import(req, iov, needs_lock); in io_iov_buffer_select()
3287 return __io_iov_buffer_select(req, iov, needs_lock); in io_iov_buffer_select()
3290 static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, in io_import_iovec() argument
3293 void __user *buf = u64_to_user_ptr(req->rw.addr); in io_import_iovec()
3294 size_t sqe_len = req->rw.len; in io_import_iovec()
3295 u8 opcode = req->opcode; in io_import_iovec()
3300 return io_import_fixed(req, rw, iter); in io_import_iovec()
3304 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT)) in io_import_iovec()
3308 if (req->flags & REQ_F_BUFFER_SELECT) { in io_import_iovec()
3309 buf = io_rw_buffer_select(req, &sqe_len, needs_lock); in io_import_iovec()
3312 req->rw.len = sqe_len; in io_import_iovec()
3320 if (req->flags & REQ_F_BUFFER_SELECT) { in io_import_iovec()
3321 ret = io_iov_buffer_select(req, *iovec, needs_lock); in io_import_iovec()
3329 req->ctx->compat); in io_import_iovec()
3341 static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter) in loop_rw_iter() argument
3343 struct kiocb *kiocb = &req->rw.kiocb; in loop_rw_iter()
3344 struct file *file = req->file; in loop_rw_iter()
3367 iovec.iov_base = u64_to_user_ptr(req->rw.addr); in loop_rw_iter()
3368 iovec.iov_len = req->rw.len; in loop_rw_iter()
3388 req->rw.addr += nr; in loop_rw_iter()
3389 req->rw.len -= nr; in loop_rw_iter()
3390 if (!req->rw.len) in loop_rw_iter()
3400 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, in io_req_map_rw() argument
3403 struct io_async_rw *rw = req->async_data; in io_req_map_rw()
3423 req->flags |= REQ_F_NEED_CLEANUP; in io_req_map_rw()
3427 static inline int io_alloc_async_data(struct io_kiocb *req) in io_alloc_async_data() argument
3429 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size); in io_alloc_async_data()
3430 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL); in io_alloc_async_data()
3431 return req->async_data == NULL; in io_alloc_async_data()
3434 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, in io_setup_async_rw() argument
3438 if (!force && !io_op_defs[req->opcode].needs_async_setup) in io_setup_async_rw()
3440 if (!req->async_data) { in io_setup_async_rw()
3443 if (io_alloc_async_data(req)) { in io_setup_async_rw()
3448 io_req_map_rw(req, iovec, fast_iov, iter); in io_setup_async_rw()
3449 iorw = req->async_data; in io_setup_async_rw()
3456 static inline int io_rw_prep_async(struct io_kiocb *req, int rw) in io_rw_prep_async() argument
3458 struct io_async_rw *iorw = req->async_data; in io_rw_prep_async()
3462 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false); in io_rw_prep_async()
3469 req->flags |= REQ_F_NEED_CLEANUP; in io_rw_prep_async()
3474 static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_read_prep() argument
3476 if (unlikely(!(req->file->f_mode & FMODE_READ))) in io_read_prep()
3478 return io_prep_rw(req, sqe, READ); in io_read_prep()
3495 struct io_kiocb *req = wait->private; in io_async_buf_func() local
3503 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ; in io_async_buf_func()
3505 io_req_task_queue(req); in io_async_buf_func()
3521 static bool io_rw_should_retry(struct io_kiocb *req) in io_rw_should_retry() argument
3523 struct io_async_rw *rw = req->async_data; in io_rw_should_retry()
3525 struct kiocb *kiocb = &req->rw.kiocb; in io_rw_should_retry()
3528 if (req->flags & REQ_F_NOWAIT) in io_rw_should_retry()
3539 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC)) in io_rw_should_retry()
3543 wait->wait.private = req; in io_rw_should_retry()
3552 static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) in io_iter_do_read() argument
3554 if (req->file->f_op->read_iter) in io_iter_do_read()
3555 return call_read_iter(req->file, &req->rw.kiocb, iter); in io_iter_do_read()
3556 else if (req->file->f_op->read) in io_iter_do_read()
3557 return loop_rw_iter(READ, req, iter); in io_iter_do_read()
3562 static bool need_read_all(struct io_kiocb *req) in need_read_all() argument
3564 return req->flags & REQ_F_ISREG || in need_read_all()
3565 S_ISBLK(file_inode(req->file)->i_mode); in need_read_all()
3568 static int io_read(struct io_kiocb *req, unsigned int issue_flags) in io_read() argument
3571 struct kiocb *kiocb = &req->rw.kiocb; in io_read()
3573 struct io_async_rw *rw = req->async_data; in io_read()
3590 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); in io_read()
3596 req->result = iov_iter_count(iter); in io_read()
3605 if (force_nonblock && !io_file_supports_nowait(req, READ)) { in io_read()
3606 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true); in io_read()
3610 ppos = io_kiocb_update_pos(req); in io_read()
3612 ret = rw_verify_area(READ, req->file, ppos, req->result); in io_read()
3618 ret = io_iter_do_read(req, iter); in io_read()
3620 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { in io_read()
3621 req->flags &= ~REQ_F_REISSUE; in io_read()
3623 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_read()
3626 if (req->flags & REQ_F_NOWAIT) in io_read()
3631 } else if (ret <= 0 || ret == req->result || !force_nonblock || in io_read()
3632 (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) { in io_read()
3644 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true); in io_read()
3649 rw = req->async_data; in io_read()
3672 if (!io_rw_should_retry(req)) { in io_read()
3677 req->result = iov_iter_count(iter); in io_read()
3684 ret = io_iter_do_read(req, iter); in io_read()
3700 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_write_prep() argument
3702 if (unlikely(!(req->file->f_mode & FMODE_WRITE))) in io_write_prep()
3704 return io_prep_rw(req, sqe, WRITE); in io_write_prep()
3707 static int io_write(struct io_kiocb *req, unsigned int issue_flags) in io_write() argument
3710 struct kiocb *kiocb = &req->rw.kiocb; in io_write()
3712 struct io_async_rw *rw = req->async_data; in io_write()
3724 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); in io_write()
3730 req->result = iov_iter_count(iter); in io_write()
3739 if (force_nonblock && !io_file_supports_nowait(req, WRITE)) in io_write()
3744 (req->flags & REQ_F_ISREG)) in io_write()
3747 ppos = io_kiocb_update_pos(req); in io_write()
3749 ret = rw_verify_area(WRITE, req->file, ppos, req->result); in io_write()
3760 if (req->flags & REQ_F_ISREG) { in io_write()
3761 sb_start_write(file_inode(req->file)->i_sb); in io_write()
3762 __sb_writers_release(file_inode(req->file)->i_sb, in io_write()
3767 if (req->file->f_op->write_iter) in io_write()
3768 ret2 = call_write_iter(req->file, kiocb, iter); in io_write()
3769 else if (req->file->f_op->write) in io_write()
3770 ret2 = loop_rw_iter(WRITE, req, iter); in io_write()
3774 if (req->flags & REQ_F_REISSUE) { in io_write()
3775 req->flags &= ~REQ_F_REISSUE; in io_write()
3786 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) in io_write()
3790 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) in io_write()
3797 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); in io_write()
3800 kiocb_end_write(req); in io_write()
3812 static int io_renameat_prep(struct io_kiocb *req, in io_renameat_prep() argument
3815 struct io_rename *ren = &req->rename; in io_renameat_prep()
3818 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_renameat_prep()
3822 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_renameat_prep()
3841 req->flags |= REQ_F_NEED_CLEANUP; in io_renameat_prep()
3845 static int io_renameat(struct io_kiocb *req, unsigned int issue_flags) in io_renameat() argument
3847 struct io_rename *ren = &req->rename; in io_renameat()
3856 req->flags &= ~REQ_F_NEED_CLEANUP; in io_renameat()
3858 req_set_fail(req); in io_renameat()
3859 io_req_complete(req, ret); in io_renameat()
3863 static int io_unlinkat_prep(struct io_kiocb *req, in io_unlinkat_prep() argument
3866 struct io_unlink *un = &req->unlink; in io_unlinkat_prep()
3869 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_unlinkat_prep()
3874 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_unlinkat_prep()
3888 req->flags |= REQ_F_NEED_CLEANUP; in io_unlinkat_prep()
3892 static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags) in io_unlinkat() argument
3894 struct io_unlink *un = &req->unlink; in io_unlinkat()
3905 req->flags &= ~REQ_F_NEED_CLEANUP; in io_unlinkat()
3907 req_set_fail(req); in io_unlinkat()
3908 io_req_complete(req, ret); in io_unlinkat()
3912 static int io_shutdown_prep(struct io_kiocb *req, in io_shutdown_prep() argument
3916 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_shutdown_prep()
3922 req->shutdown.how = READ_ONCE(sqe->len); in io_shutdown_prep()
3929 static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags) in io_shutdown() argument
3938 sock = sock_from_file(req->file, &ret); in io_shutdown()
3942 ret = __sys_shutdown_sock(sock, req->shutdown.how); in io_shutdown()
3944 req_set_fail(req); in io_shutdown()
3945 io_req_complete(req, ret); in io_shutdown()
3952 static int __io_splice_prep(struct io_kiocb *req, in __io_splice_prep() argument
3955 struct io_splice *sp = &req->splice; in __io_splice_prep()
3958 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_splice_prep()
3969 static int io_tee_prep(struct io_kiocb *req, in io_tee_prep() argument
3974 return __io_splice_prep(req, sqe); in io_tee_prep()
3977 static int io_tee(struct io_kiocb *req, unsigned int issue_flags) in io_tee() argument
3979 struct io_splice *sp = &req->splice; in io_tee()
3988 in = io_file_get(req->ctx, req, sp->splice_fd_in, in io_tee()
4002 req_set_fail(req); in io_tee()
4003 io_req_complete(req, ret); in io_tee()
4007 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_splice_prep() argument
4009 struct io_splice *sp = &req->splice; in io_splice_prep()
4013 return __io_splice_prep(req, sqe); in io_splice_prep()
4016 static int io_splice(struct io_kiocb *req, unsigned int issue_flags) in io_splice() argument
4018 struct io_splice *sp = &req->splice; in io_splice()
4028 in = io_file_get(req->ctx, req, sp->splice_fd_in, in io_splice()
4045 req_set_fail(req); in io_splice()
4046 io_req_complete(req, ret); in io_splice()
4053 static int io_nop(struct io_kiocb *req, unsigned int issue_flags) in io_nop() argument
4055 struct io_ring_ctx *ctx = req->ctx; in io_nop()
4060 __io_req_complete(req, issue_flags, 0, 0); in io_nop()
4064 static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fsync_prep() argument
4066 struct io_ring_ctx *ctx = req->ctx; in io_fsync_prep()
4074 req->sync.flags = READ_ONCE(sqe->fsync_flags); in io_fsync_prep()
4075 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC)) in io_fsync_prep()
4078 req->sync.off = READ_ONCE(sqe->off); in io_fsync_prep()
4079 req->sync.len = READ_ONCE(sqe->len); in io_fsync_prep()
4083 static int io_fsync(struct io_kiocb *req, unsigned int issue_flags) in io_fsync() argument
4085 loff_t end = req->sync.off + req->sync.len; in io_fsync()
4092 ret = vfs_fsync_range(req->file, req->sync.off, in io_fsync()
4094 req->sync.flags & IORING_FSYNC_DATASYNC); in io_fsync()
4096 req_set_fail(req); in io_fsync()
4097 io_req_complete(req, ret); in io_fsync()
4101 static int io_fallocate_prep(struct io_kiocb *req, in io_fallocate_prep() argument
4107 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_fallocate_prep()
4110 req->sync.off = READ_ONCE(sqe->off); in io_fallocate_prep()
4111 req->sync.len = READ_ONCE(sqe->addr); in io_fallocate_prep()
4112 req->sync.mode = READ_ONCE(sqe->len); in io_fallocate_prep()
4116 static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags) in io_fallocate() argument
4123 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off, in io_fallocate()
4124 req->sync.len); in io_fallocate()
4126 req_set_fail(req); in io_fallocate()
4128 fsnotify_modify(req->file); in io_fallocate()
4129 io_req_complete(req, ret); in io_fallocate()
4133 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in __io_openat_prep() argument
4138 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_openat_prep()
4142 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in __io_openat_prep()
4146 if (!(req->open.how.flags & O_PATH) && force_o_largefile()) in __io_openat_prep()
4147 req->open.how.flags |= O_LARGEFILE; in __io_openat_prep()
4149 req->open.dfd = READ_ONCE(sqe->fd); in __io_openat_prep()
4151 req->open.filename = getname(fname); in __io_openat_prep()
4152 if (IS_ERR(req->open.filename)) { in __io_openat_prep()
4153 ret = PTR_ERR(req->open.filename); in __io_openat_prep()
4154 req->open.filename = NULL; in __io_openat_prep()
4158 req->open.file_slot = READ_ONCE(sqe->file_index); in __io_openat_prep()
4159 if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC)) in __io_openat_prep()
4162 req->open.nofile = rlimit(RLIMIT_NOFILE); in __io_openat_prep()
4163 req->flags |= REQ_F_NEED_CLEANUP; in __io_openat_prep()
4167 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat_prep() argument
4172 req->open.how = build_open_how(flags, mode); in io_openat_prep()
4173 return __io_openat_prep(req, sqe); in io_openat_prep()
4176 static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat2_prep() argument
4187 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how, in io_openat2_prep()
4192 return __io_openat_prep(req, sqe); in io_openat2_prep()
4195 static int io_openat2(struct io_kiocb *req, unsigned int issue_flags) in io_openat2() argument
4200 bool fixed = !!req->open.file_slot; in io_openat2()
4203 ret = build_open_flags(&req->open.how, &op); in io_openat2()
4207 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED; in io_openat2()
4213 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE)) in io_openat2()
4220 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile); in io_openat2()
4225 file = do_filp_open(req->open.dfd, req->open.filename, &op); in io_openat2()
4250 ret = io_install_fixed_file(req, file, issue_flags, in io_openat2()
4251 req->open.file_slot - 1); in io_openat2()
4253 putname(req->open.filename); in io_openat2()
4254 req->flags &= ~REQ_F_NEED_CLEANUP; in io_openat2()
4256 req_set_fail(req); in io_openat2()
4257 __io_req_complete(req, issue_flags, ret, 0); in io_openat2()
4261 static int io_openat(struct io_kiocb *req, unsigned int issue_flags) in io_openat() argument
4263 return io_openat2(req, issue_flags); in io_openat()
4266 static int io_remove_buffers_prep(struct io_kiocb *req, in io_remove_buffers_prep() argument
4269 struct io_provide_buf *p = &req->pbuf; in io_remove_buffers_prep()
4313 static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) in io_remove_buffers() argument
4315 struct io_provide_buf *p = &req->pbuf; in io_remove_buffers()
4316 struct io_ring_ctx *ctx = req->ctx; in io_remove_buffers()
4330 req_set_fail(req); in io_remove_buffers()
4333 __io_req_complete(req, issue_flags, ret, 0); in io_remove_buffers()
4338 static int io_provide_buffers_prep(struct io_kiocb *req, in io_provide_buffers_prep() argument
4342 struct io_provide_buf *p = &req->pbuf; in io_provide_buffers_prep()
4401 static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) in io_provide_buffers() argument
4403 struct io_provide_buf *p = &req->pbuf; in io_provide_buffers()
4404 struct io_ring_ctx *ctx = req->ctx; in io_provide_buffers()
4423 req_set_fail(req); in io_provide_buffers()
4425 __io_req_complete(req, issue_flags, ret, 0); in io_provide_buffers()
4430 static int io_epoll_ctl_prep(struct io_kiocb *req, in io_epoll_ctl_prep() argument
4436 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_epoll_ctl_prep()
4439 req->epoll.epfd = READ_ONCE(sqe->fd); in io_epoll_ctl_prep()
4440 req->epoll.op = READ_ONCE(sqe->len); in io_epoll_ctl_prep()
4441 req->epoll.fd = READ_ONCE(sqe->off); in io_epoll_ctl_prep()
4443 if (ep_op_has_event(req->epoll.op)) { in io_epoll_ctl_prep()
4447 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev))) in io_epoll_ctl_prep()
4457 static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags) in io_epoll_ctl() argument
4460 struct io_epoll *ie = &req->epoll; in io_epoll_ctl()
4469 req_set_fail(req); in io_epoll_ctl()
4470 __io_req_complete(req, issue_flags, ret, 0); in io_epoll_ctl()
4477 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_madvise_prep() argument
4482 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_madvise_prep()
4485 req->madvise.addr = READ_ONCE(sqe->addr); in io_madvise_prep()
4486 req->madvise.len = READ_ONCE(sqe->len); in io_madvise_prep()
4487 req->madvise.advice = READ_ONCE(sqe->fadvise_advice); in io_madvise_prep()
4494 static int io_madvise(struct io_kiocb *req, unsigned int issue_flags) in io_madvise() argument
4497 struct io_madvise *ma = &req->madvise; in io_madvise()
4505 req_set_fail(req); in io_madvise()
4506 io_req_complete(req, ret); in io_madvise()
4513 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fadvise_prep() argument
4517 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_fadvise_prep()
4520 req->fadvise.offset = READ_ONCE(sqe->off); in io_fadvise_prep()
4521 req->fadvise.len = READ_ONCE(sqe->len); in io_fadvise_prep()
4522 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice); in io_fadvise_prep()
4526 static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags) in io_fadvise() argument
4528 struct io_fadvise *fa = &req->fadvise; in io_fadvise()
4542 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice); in io_fadvise()
4544 req_set_fail(req); in io_fadvise()
4545 __io_req_complete(req, issue_flags, ret, 0); in io_fadvise()
4549 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_statx_prep() argument
4551 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_statx_prep()
4555 if (req->flags & REQ_F_FIXED_FILE) in io_statx_prep()
4558 req->statx.dfd = READ_ONCE(sqe->fd); in io_statx_prep()
4559 req->statx.mask = READ_ONCE(sqe->len); in io_statx_prep()
4560 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_statx_prep()
4561 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in io_statx_prep()
4562 req->statx.flags = READ_ONCE(sqe->statx_flags); in io_statx_prep()
4567 static int io_statx(struct io_kiocb *req, unsigned int issue_flags) in io_statx() argument
4569 struct io_statx *ctx = &req->statx; in io_statx()
4579 req_set_fail(req); in io_statx()
4580 io_req_complete(req, ret); in io_statx()
4584 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_close_prep() argument
4586 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_close_prep()
4591 if (req->flags & REQ_F_FIXED_FILE) in io_close_prep()
4594 req->close.fd = READ_ONCE(sqe->fd); in io_close_prep()
4595 req->close.file_slot = READ_ONCE(sqe->file_index); in io_close_prep()
4596 if (req->close.file_slot && req->close.fd) in io_close_prep()
4602 static int io_close(struct io_kiocb *req, unsigned int issue_flags) in io_close() argument
4605 struct io_close *close = &req->close; in io_close()
4610 if (req->close.file_slot) { in io_close()
4611 ret = io_close_fixed(req, issue_flags); in io_close()
4646 req_set_fail(req); in io_close()
4649 __io_req_complete(req, issue_flags, ret, 0); in io_close()
4653 static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sfr_prep() argument
4655 struct io_ring_ctx *ctx = req->ctx; in io_sfr_prep()
4663 req->sync.off = READ_ONCE(sqe->off); in io_sfr_prep()
4664 req->sync.len = READ_ONCE(sqe->len); in io_sfr_prep()
4665 req->sync.flags = READ_ONCE(sqe->sync_range_flags); in io_sfr_prep()
4669 static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags) in io_sync_file_range() argument
4677 ret = sync_file_range(req->file, req->sync.off, req->sync.len, in io_sync_file_range()
4678 req->sync.flags); in io_sync_file_range()
4680 req_set_fail(req); in io_sync_file_range()
4681 io_req_complete(req, ret); in io_sync_file_range()
4693 static int io_setup_async_msg(struct io_kiocb *req, in io_setup_async_msg() argument
4696 struct io_async_msghdr *async_msg = req->async_data; in io_setup_async_msg()
4700 if (io_alloc_async_data(req)) { in io_setup_async_msg()
4704 async_msg = req->async_data; in io_setup_async_msg()
4705 req->flags |= REQ_F_NEED_CLEANUP; in io_setup_async_msg()
4718 static int io_sendmsg_copy_hdr(struct io_kiocb *req, in io_sendmsg_copy_hdr() argument
4723 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg, in io_sendmsg_copy_hdr()
4724 req->sr_msg.msg_flags, &iomsg->free_iov); in io_sendmsg_copy_hdr()
4727 static int io_sendmsg_prep_async(struct io_kiocb *req) in io_sendmsg_prep_async() argument
4731 ret = io_sendmsg_copy_hdr(req, req->async_data); in io_sendmsg_prep_async()
4733 req->flags |= REQ_F_NEED_CLEANUP; in io_sendmsg_prep_async()
4737 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sendmsg_prep() argument
4739 struct io_sr_msg *sr = &req->sr_msg; in io_sendmsg_prep()
4741 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_sendmsg_prep()
4752 req->flags |= REQ_F_NOWAIT; in io_sendmsg_prep()
4755 if (req->ctx->compat) in io_sendmsg_prep()
4762 static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) in io_sendmsg() argument
4765 struct io_sr_msg *sr = &req->sr_msg; in io_sendmsg()
4771 sock = sock_from_file(req->file, &ret); in io_sendmsg()
4775 kmsg = req->async_data; in io_sendmsg()
4777 ret = io_sendmsg_copy_hdr(req, &iomsg); in io_sendmsg()
4783 flags = req->sr_msg.msg_flags; in io_sendmsg()
4793 return io_setup_async_msg(req, kmsg); in io_sendmsg()
4798 req->flags |= REQ_F_PARTIAL_IO; in io_sendmsg()
4799 return io_setup_async_msg(req, kmsg); in io_sendmsg()
4801 req_set_fail(req); in io_sendmsg()
4806 req->flags &= ~REQ_F_NEED_CLEANUP; in io_sendmsg()
4811 __io_req_complete(req, issue_flags, ret, 0); in io_sendmsg()
4815 static int io_send(struct io_kiocb *req, unsigned int issue_flags) in io_send() argument
4817 struct io_sr_msg *sr = &req->sr_msg; in io_send()
4825 sock = sock_from_file(req->file, &ret); in io_send()
4838 flags = req->sr_msg.msg_flags; in io_send()
4855 req->flags |= REQ_F_PARTIAL_IO; in io_send()
4858 req_set_fail(req); in io_send()
4864 __io_req_complete(req, issue_flags, ret, 0); in io_send()
4868 static int __io_recvmsg_copy_hdr(struct io_kiocb *req, in __io_recvmsg_copy_hdr() argument
4871 struct io_sr_msg *sr = &req->sr_msg; in __io_recvmsg_copy_hdr()
4881 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_recvmsg_copy_hdr()
4901 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, in __io_compat_recvmsg_copy_hdr() argument
4904 struct io_sr_msg *sr = &req->sr_msg; in __io_compat_recvmsg_copy_hdr()
4916 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_compat_recvmsg_copy_hdr()
4942 static int io_recvmsg_copy_hdr(struct io_kiocb *req, in io_recvmsg_copy_hdr() argument
4948 if (req->ctx->compat) in io_recvmsg_copy_hdr()
4949 return __io_compat_recvmsg_copy_hdr(req, iomsg); in io_recvmsg_copy_hdr()
4952 return __io_recvmsg_copy_hdr(req, iomsg); in io_recvmsg_copy_hdr()
4955 static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req, in io_recv_buffer_select() argument
4958 struct io_sr_msg *sr = &req->sr_msg; in io_recv_buffer_select()
4961 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock); in io_recv_buffer_select()
4966 req->flags |= REQ_F_BUFFER_SELECTED; in io_recv_buffer_select()
4970 static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req) in io_put_recv_kbuf() argument
4972 return io_put_kbuf(req, req->sr_msg.kbuf); in io_put_recv_kbuf()
4975 static int io_recvmsg_prep_async(struct io_kiocb *req) in io_recvmsg_prep_async() argument
4979 ret = io_recvmsg_copy_hdr(req, req->async_data); in io_recvmsg_prep_async()
4981 req->flags |= REQ_F_NEED_CLEANUP; in io_recvmsg_prep_async()
4985 static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_recvmsg_prep() argument
4987 struct io_sr_msg *sr = &req->sr_msg; in io_recvmsg_prep()
4989 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_recvmsg_prep()
5001 req->flags |= REQ_F_NOWAIT; in io_recvmsg_prep()
5004 if (req->ctx->compat) in io_recvmsg_prep()
5011 static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) in io_recvmsg() argument
5014 struct io_sr_msg *sr = &req->sr_msg; in io_recvmsg()
5022 sock = sock_from_file(req->file, &ret); in io_recvmsg()
5026 kmsg = req->async_data; in io_recvmsg()
5028 ret = io_recvmsg_copy_hdr(req, &iomsg); in io_recvmsg()
5034 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recvmsg()
5035 kbuf = io_recv_buffer_select(req, !force_nonblock); in io_recvmsg()
5039 kmsg->fast_iov[0].iov_len = req->sr_msg.len; in io_recvmsg()
5041 1, req->sr_msg.len); in io_recvmsg()
5044 flags = req->sr_msg.msg_flags; in io_recvmsg()
5050 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg, in io_recvmsg()
5054 return io_setup_async_msg(req, kmsg); in io_recvmsg()
5059 req->flags |= REQ_F_PARTIAL_IO; in io_recvmsg()
5060 return io_setup_async_msg(req, kmsg); in io_recvmsg()
5062 req_set_fail(req); in io_recvmsg()
5064 req_set_fail(req); in io_recvmsg()
5067 if (req->flags & REQ_F_BUFFER_SELECTED) in io_recvmsg()
5068 cflags = io_put_recv_kbuf(req); in io_recvmsg()
5072 req->flags &= ~REQ_F_NEED_CLEANUP; in io_recvmsg()
5077 __io_req_complete(req, issue_flags, ret, cflags); in io_recvmsg()
5081 static int io_recv(struct io_kiocb *req, unsigned int issue_flags) in io_recv() argument
5084 struct io_sr_msg *sr = &req->sr_msg; in io_recv()
5094 sock = sock_from_file(req->file, &ret); in io_recv()
5098 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recv()
5099 kbuf = io_recv_buffer_select(req, !force_nonblock); in io_recv()
5116 flags = req->sr_msg.msg_flags; in io_recv()
5132 req->flags |= REQ_F_PARTIAL_IO; in io_recv()
5135 req_set_fail(req); in io_recv()
5138 req_set_fail(req); in io_recv()
5140 if (req->flags & REQ_F_BUFFER_SELECTED) in io_recv()
5141 cflags = io_put_recv_kbuf(req); in io_recv()
5146 __io_req_complete(req, issue_flags, ret, cflags); in io_recv()
5150 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_accept_prep() argument
5152 struct io_accept *accept = &req->accept; in io_accept_prep()
5154 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_accept_prep()
5174 static int io_accept(struct io_kiocb *req, unsigned int issue_flags) in io_accept() argument
5176 struct io_accept *accept = &req->accept; in io_accept()
5188 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len, in io_accept()
5196 req->flags |= REQ_F_PARTIAL_IO; in io_accept()
5201 req_set_fail(req); in io_accept()
5206 ret = io_install_fixed_file(req, file, issue_flags, in io_accept()
5209 __io_req_complete(req, issue_flags, ret, 0); in io_accept()
5213 static int io_connect_prep_async(struct io_kiocb *req) in io_connect_prep_async() argument
5215 struct io_async_connect *io = req->async_data; in io_connect_prep_async()
5216 struct io_connect *conn = &req->connect; in io_connect_prep_async()
5221 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_connect_prep() argument
5223 struct io_connect *conn = &req->connect; in io_connect_prep()
5225 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_connect_prep()
5236 static int io_connect(struct io_kiocb *req, unsigned int issue_flags) in io_connect() argument
5243 if (req->async_data) { in io_connect()
5244 io = req->async_data; in io_connect()
5246 ret = move_addr_to_kernel(req->connect.addr, in io_connect()
5247 req->connect.addr_len, in io_connect()
5256 ret = __sys_connect_file(req->file, &io->address, in io_connect()
5257 req->connect.addr_len, file_flags); in io_connect()
5259 if (req->async_data) in io_connect()
5261 if (io_alloc_async_data(req)) { in io_connect()
5265 memcpy(req->async_data, &__io, sizeof(__io)); in io_connect()
5272 req_set_fail(req); in io_connect()
5273 __io_req_complete(req, issue_flags, ret, 0); in io_connect()
5278 static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
5285 static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
5292 static int io_##op##_prep_async(struct io_kiocb *req) \
5307 struct io_kiocb *req; member
5322 static bool io_poll_get_ownership_slowpath(struct io_kiocb *req) in io_poll_get_ownership_slowpath() argument
5331 v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs); in io_poll_get_ownership_slowpath()
5334 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); in io_poll_get_ownership_slowpath()
5343 static inline bool io_poll_get_ownership(struct io_kiocb *req) in io_poll_get_ownership() argument
5345 if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS)) in io_poll_get_ownership()
5346 return io_poll_get_ownership_slowpath(req); in io_poll_get_ownership()
5347 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); in io_poll_get_ownership()
5350 static void io_poll_mark_cancelled(struct io_kiocb *req) in io_poll_mark_cancelled() argument
5352 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs); in io_poll_mark_cancelled()
5355 static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req) in io_poll_get_double() argument
5358 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_double()
5359 return req->async_data; in io_poll_get_double()
5360 return req->apoll->double_poll; in io_poll_get_double()
5363 static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req) in io_poll_get_single() argument
5365 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_single()
5366 return &req->poll; in io_poll_get_single()
5367 return &req->apoll->poll; in io_poll_get_single()
5370 static void io_poll_req_insert(struct io_kiocb *req) in io_poll_req_insert() argument
5372 struct io_ring_ctx *ctx = req->ctx; in io_poll_req_insert()
5375 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)]; in io_poll_req_insert()
5376 hlist_add_head(&req->hash_node, list); in io_poll_req_insert()
5402 static void io_poll_remove_entries(struct io_kiocb *req) in io_poll_remove_entries() argument
5404 struct io_poll_iocb *poll = io_poll_get_single(req); in io_poll_remove_entries()
5405 struct io_poll_iocb *poll_double = io_poll_get_double(req); in io_poll_remove_entries()
5435 * the request, then the mask is stored in req->result.
5437 static int io_poll_check_events(struct io_kiocb *req) in io_poll_check_events() argument
5439 struct io_ring_ctx *ctx = req->ctx; in io_poll_check_events()
5440 struct io_poll_iocb *poll = io_poll_get_single(req); in io_poll_check_events()
5443 /* req->task == current here, checking PF_EXITING is safe */ in io_poll_check_events()
5444 if (unlikely(req->task->flags & PF_EXITING)) in io_poll_check_events()
5445 io_poll_mark_cancelled(req); in io_poll_check_events()
5448 v = atomic_read(&req->poll_refs); in io_poll_check_events()
5461 req->result = 0; in io_poll_check_events()
5463 req->result = 0; in io_poll_check_events()
5469 atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs); in io_poll_check_events()
5473 if (!req->result) { in io_poll_check_events()
5476 req->result = vfs_poll(req->file, &pt) & poll->events; in io_poll_check_events()
5480 if (req->result && !(poll->events & EPOLLONESHOT)) { in io_poll_check_events()
5481 __poll_t mask = mangle_poll(req->result & poll->events); in io_poll_check_events()
5485 filled = io_fill_cqe_aux(ctx, req->user_data, mask, in io_poll_check_events()
5492 } else if (req->result) { in io_poll_check_events()
5497 req->result = 0; in io_poll_check_events()
5503 } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) & in io_poll_check_events()
5509 static void io_poll_task_func(struct io_kiocb *req, bool *locked) in io_poll_task_func() argument
5511 struct io_ring_ctx *ctx = req->ctx; in io_poll_task_func()
5514 ret = io_poll_check_events(req); in io_poll_task_func()
5519 req->result = mangle_poll(req->result & req->poll.events); in io_poll_task_func()
5521 req->result = ret; in io_poll_task_func()
5522 req_set_fail(req); in io_poll_task_func()
5525 io_poll_remove_entries(req); in io_poll_task_func()
5527 hash_del(&req->hash_node); in io_poll_task_func()
5529 io_req_complete_post(req, req->result, 0); in io_poll_task_func()
5532 static void io_apoll_task_func(struct io_kiocb *req, bool *locked) in io_apoll_task_func() argument
5534 struct io_ring_ctx *ctx = req->ctx; in io_apoll_task_func()
5537 ret = io_poll_check_events(req); in io_apoll_task_func()
5541 io_poll_remove_entries(req); in io_apoll_task_func()
5543 hash_del(&req->hash_node); in io_apoll_task_func()
5547 io_req_task_submit(req, locked); in io_apoll_task_func()
5549 io_req_complete_failed(req, ret); in io_apoll_task_func()
5552 static void __io_poll_execute(struct io_kiocb *req, int mask) in __io_poll_execute() argument
5554 req->result = mask; in __io_poll_execute()
5555 if (req->opcode == IORING_OP_POLL_ADD) in __io_poll_execute()
5556 req->io_task_work.func = io_poll_task_func; in __io_poll_execute()
5558 req->io_task_work.func = io_apoll_task_func; in __io_poll_execute()
5560 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask); in __io_poll_execute()
5561 io_req_task_work_add(req); in __io_poll_execute()
5564 static inline void io_poll_execute(struct io_kiocb *req, int res) in io_poll_execute() argument
5566 if (io_poll_get_ownership(req)) in io_poll_execute()
5567 __io_poll_execute(req, res); in io_poll_execute()
5570 static void io_poll_cancel_req(struct io_kiocb *req) in io_poll_cancel_req() argument
5572 io_poll_mark_cancelled(req); in io_poll_cancel_req()
5574 io_poll_execute(req, 0); in io_poll_cancel_req()
5580 struct io_kiocb *req = wait->private; in io_poll_wake() local
5586 io_poll_mark_cancelled(req); in io_poll_wake()
5588 io_poll_execute(req, 0); in io_poll_wake()
5601 * as req->head is NULL'ed out, the request can be in io_poll_wake()
5613 if (io_poll_get_ownership(req)) { in io_poll_wake()
5622 __io_poll_execute(req, mask); in io_poll_wake()
5631 struct io_kiocb *req = pt->req; in __io_queue_proc() local
5663 poll->wait.private = req; in __io_queue_proc()
5676 __io_queue_proc(&pt->req->poll, pt, head, in io_poll_queue_proc()
5677 (struct io_poll_iocb **) &pt->req->async_data); in io_poll_queue_proc()
5680 static int __io_arm_poll_handler(struct io_kiocb *req, in __io_arm_poll_handler() argument
5684 struct io_ring_ctx *ctx = req->ctx; in __io_arm_poll_handler()
5686 INIT_HLIST_NODE(&req->hash_node); in __io_arm_poll_handler()
5688 poll->file = req->file; in __io_arm_poll_handler()
5689 poll->wait.private = req; in __io_arm_poll_handler()
5692 ipt->req = req; in __io_arm_poll_handler()
5700 atomic_set(&req->poll_refs, 1); in __io_arm_poll_handler()
5701 mask = vfs_poll(req->file, &ipt->pt) & poll->events; in __io_arm_poll_handler()
5704 io_poll_remove_entries(req); in __io_arm_poll_handler()
5705 /* no one else has access to the req, forget about the ref */ in __io_arm_poll_handler()
5709 io_poll_remove_entries(req); in __io_arm_poll_handler()
5716 io_poll_req_insert(req); in __io_arm_poll_handler()
5725 __io_poll_execute(req, mask); in __io_arm_poll_handler()
5733 if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1) in __io_arm_poll_handler()
5734 __io_poll_execute(req, 0); in __io_arm_poll_handler()
5742 struct async_poll *apoll = pt->req->apoll; in io_async_queue_proc()
5761 static int io_arm_poll_handler(struct io_kiocb *req) in io_arm_poll_handler() argument
5763 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_arm_poll_handler()
5764 struct io_ring_ctx *ctx = req->ctx; in io_arm_poll_handler()
5770 if (!req->file || !file_can_poll(req->file)) in io_arm_poll_handler()
5779 if ((req->opcode == IORING_OP_RECVMSG) && in io_arm_poll_handler()
5780 (req->sr_msg.msg_flags & MSG_ERRQUEUE)) in io_arm_poll_handler()
5786 if (req->flags & REQ_F_POLLED) { in io_arm_poll_handler()
5787 apoll = req->apoll; in io_arm_poll_handler()
5800 req->apoll = apoll; in io_arm_poll_handler()
5801 req->flags |= REQ_F_POLLED; in io_arm_poll_handler()
5804 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask); in io_arm_poll_handler()
5808 trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data, in io_arm_poll_handler()
5820 struct io_kiocb *req; in io_poll_remove_all() local
5829 hlist_for_each_entry_safe(req, tmp, list, hash_node) { in io_poll_remove_all()
5830 if (io_match_task_safe(req, tsk, cancel_all)) { in io_poll_remove_all()
5831 hlist_del_init(&req->hash_node); in io_poll_remove_all()
5832 io_poll_cancel_req(req); in io_poll_remove_all()
5846 struct io_kiocb *req; in io_poll_find() local
5849 hlist_for_each_entry(req, list, hash_node) { in io_poll_find()
5850 if (sqe_addr != req->user_data) in io_poll_find()
5852 if (poll_only && req->opcode != IORING_OP_POLL_ADD) in io_poll_find()
5854 return req; in io_poll_find()
5859 static bool io_poll_disarm(struct io_kiocb *req) in io_poll_disarm() argument
5862 if (!io_poll_get_ownership(req)) in io_poll_disarm()
5864 io_poll_remove_entries(req); in io_poll_disarm()
5865 hash_del(&req->hash_node); in io_poll_disarm()
5873 struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only); in io_poll_cancel() local
5875 if (!req) in io_poll_cancel()
5877 io_poll_cancel_req(req); in io_poll_cancel()
5895 static int io_poll_update_prep(struct io_kiocb *req, in io_poll_update_prep() argument
5898 struct io_poll_update *upd = &req->poll_update; in io_poll_update_prep()
5901 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_poll_update_prep()
5928 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_add_prep() argument
5930 struct io_poll_iocb *poll = &req->poll; in io_poll_add_prep()
5933 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_poll_add_prep()
5941 io_req_set_refcount(req); in io_poll_add_prep()
5946 static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags) in io_poll_add() argument
5948 struct io_poll_iocb *poll = &req->poll; in io_poll_add()
5954 ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events); in io_poll_add()
5956 req_set_fail(req); in io_poll_add()
5959 __io_req_complete(req, issue_flags, ret, 0); in io_poll_add()
5963 static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags) in io_poll_update() argument
5965 struct io_ring_ctx *ctx = req->ctx; in io_poll_update()
5972 preq = io_poll_find(ctx, req->poll_update.old_user_data, true); in io_poll_update()
5980 if (req->poll_update.update_events || req->poll_update.update_user_data) { in io_poll_update()
5982 if (req->poll_update.update_events) { in io_poll_update()
5984 preq->poll.events |= req->poll_update.events & 0xffff; in io_poll_update()
5987 if (req->poll_update.update_user_data) in io_poll_update()
5988 preq->user_data = req->poll_update.new_user_data; in io_poll_update()
5999 req_set_fail(req); in io_poll_update()
6001 io_req_complete(req, ret); in io_poll_update()
6006 static void io_req_task_timeout(struct io_kiocb *req, bool *locked) in io_req_task_timeout() argument
6008 req_set_fail(req); in io_req_task_timeout()
6009 io_req_complete_post(req, -ETIME, 0); in io_req_task_timeout()
6016 struct io_kiocb *req = data->req; in io_timeout_fn() local
6017 struct io_ring_ctx *ctx = req->ctx; in io_timeout_fn()
6021 list_del_init(&req->timeout.list); in io_timeout_fn()
6022 atomic_set(&req->ctx->cq_timeouts, in io_timeout_fn()
6023 atomic_read(&req->ctx->cq_timeouts) + 1); in io_timeout_fn()
6026 req->io_task_work.func = io_req_task_timeout; in io_timeout_fn()
6027 io_req_task_work_add(req); in io_timeout_fn()
6036 struct io_kiocb *req; in io_timeout_extract() local
6039 list_for_each_entry(req, &ctx->timeout_list, timeout.list) { in io_timeout_extract()
6040 found = user_data == req->user_data; in io_timeout_extract()
6047 io = req->async_data; in io_timeout_extract()
6050 list_del_init(&req->timeout.list); in io_timeout_extract()
6051 return req; in io_timeout_extract()
6058 struct io_kiocb *req = io_timeout_extract(ctx, user_data); in io_timeout_cancel() local
6060 if (IS_ERR(req)) in io_timeout_cancel()
6061 return PTR_ERR(req); in io_timeout_cancel()
6063 req_set_fail(req); in io_timeout_cancel()
6064 io_fill_cqe_req(req, -ECANCELED, 0); in io_timeout_cancel()
6065 io_put_req_deferred(req); in io_timeout_cancel()
6090 struct io_kiocb *req; in io_linked_timeout_update() local
6093 list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) { in io_linked_timeout_update()
6094 found = user_data == req->user_data; in io_linked_timeout_update()
6101 io = req->async_data; in io_linked_timeout_update()
6114 struct io_kiocb *req = io_timeout_extract(ctx, user_data); in io_timeout_update() local
6117 if (IS_ERR(req)) in io_timeout_update()
6118 return PTR_ERR(req); in io_timeout_update()
6120 req->timeout.off = 0; /* noseq */ in io_timeout_update()
6121 data = req->async_data; in io_timeout_update()
6122 list_add_tail(&req->timeout.list, &ctx->timeout_list); in io_timeout_update()
6129 static int io_timeout_remove_prep(struct io_kiocb *req, in io_timeout_remove_prep() argument
6132 struct io_timeout_rem *tr = &req->timeout_rem; in io_timeout_remove_prep()
6134 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_timeout_remove_prep()
6136 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_timeout_remove_prep()
6170 static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags) in io_timeout_remove() argument
6172 struct io_timeout_rem *tr = &req->timeout_rem; in io_timeout_remove()
6173 struct io_ring_ctx *ctx = req->ctx; in io_timeout_remove()
6176 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) { in io_timeout_remove()
6194 req_set_fail(req); in io_timeout_remove()
6195 io_req_complete_post(req, ret, 0); in io_timeout_remove()
6199 static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_timeout_prep() argument
6206 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_timeout_prep()
6220 INIT_LIST_HEAD(&req->timeout.list); in io_timeout_prep()
6221 req->timeout.off = off; in io_timeout_prep()
6222 if (unlikely(off && !req->ctx->off_timeout_used)) in io_timeout_prep()
6223 req->ctx->off_timeout_used = true; in io_timeout_prep()
6225 if (!req->async_data && io_alloc_async_data(req)) in io_timeout_prep()
6228 data = req->async_data; in io_timeout_prep()
6229 data->req = req; in io_timeout_prep()
6235 INIT_LIST_HEAD(&req->timeout.list); in io_timeout_prep()
6240 struct io_submit_link *link = &req->ctx->submit_state.link; in io_timeout_prep()
6246 req->timeout.head = link->last; in io_timeout_prep()
6252 static int io_timeout(struct io_kiocb *req, unsigned int issue_flags) in io_timeout() argument
6254 struct io_ring_ctx *ctx = req->ctx; in io_timeout()
6255 struct io_timeout_data *data = req->async_data; in io_timeout()
6257 u32 tail, off = req->timeout.off; in io_timeout()
6266 if (io_is_timeout_noseq(req)) { in io_timeout()
6272 req->timeout.target_seq = tail + off; in io_timeout()
6295 list_add(&req->timeout.list, entry); in io_timeout()
6309 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_cb() local
6312 return req->ctx == cd->ctx && req->user_data == cd->user_data; in io_cancel_cb()
6341 static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr) in io_try_cancel_userdata() argument
6343 struct io_ring_ctx *ctx = req->ctx; in io_try_cancel_userdata()
6346 WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current); in io_try_cancel_userdata()
6348 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx); in io_try_cancel_userdata()
6364 static int io_async_cancel_prep(struct io_kiocb *req, in io_async_cancel_prep() argument
6367 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_async_cancel_prep()
6369 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_async_cancel_prep()
6375 req->cancel.addr = READ_ONCE(sqe->addr); in io_async_cancel_prep()
6379 static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) in io_async_cancel() argument
6381 struct io_ring_ctx *ctx = req->ctx; in io_async_cancel()
6382 u64 sqe_addr = req->cancel.addr; in io_async_cancel()
6386 ret = io_try_cancel_userdata(req, sqe_addr); in io_async_cancel()
6396 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx); in io_async_cancel()
6403 req_set_fail(req); in io_async_cancel()
6404 io_req_complete_post(req, ret, 0); in io_async_cancel()
6408 static int io_rsrc_update_prep(struct io_kiocb *req, in io_rsrc_update_prep() argument
6411 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_rsrc_update_prep()
6416 req->rsrc_update.offset = READ_ONCE(sqe->off); in io_rsrc_update_prep()
6417 req->rsrc_update.nr_args = READ_ONCE(sqe->len); in io_rsrc_update_prep()
6418 if (!req->rsrc_update.nr_args) in io_rsrc_update_prep()
6420 req->rsrc_update.arg = READ_ONCE(sqe->addr); in io_rsrc_update_prep()
6424 static int io_files_update(struct io_kiocb *req, unsigned int issue_flags) in io_files_update() argument
6426 struct io_ring_ctx *ctx = req->ctx; in io_files_update()
6430 up.offset = req->rsrc_update.offset; in io_files_update()
6431 up.data = req->rsrc_update.arg; in io_files_update()
6439 &up, req->rsrc_update.nr_args); in io_files_update()
6443 req_set_fail(req); in io_files_update()
6444 __io_req_complete(req, issue_flags, ret, 0); in io_files_update()
6448 static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_req_prep() argument
6450 switch (req->opcode) { in io_req_prep()
6456 return io_read_prep(req, sqe); in io_req_prep()
6460 return io_write_prep(req, sqe); in io_req_prep()
6462 return io_poll_add_prep(req, sqe); in io_req_prep()
6464 return io_poll_update_prep(req, sqe); in io_req_prep()
6466 return io_fsync_prep(req, sqe); in io_req_prep()
6468 return io_sfr_prep(req, sqe); in io_req_prep()
6471 return io_sendmsg_prep(req, sqe); in io_req_prep()
6474 return io_recvmsg_prep(req, sqe); in io_req_prep()
6476 return io_connect_prep(req, sqe); in io_req_prep()
6478 return io_timeout_prep(req, sqe, false); in io_req_prep()
6480 return io_timeout_remove_prep(req, sqe); in io_req_prep()
6482 return io_async_cancel_prep(req, sqe); in io_req_prep()
6484 return io_timeout_prep(req, sqe, true); in io_req_prep()
6486 return io_accept_prep(req, sqe); in io_req_prep()
6488 return io_fallocate_prep(req, sqe); in io_req_prep()
6490 return io_openat_prep(req, sqe); in io_req_prep()
6492 return io_close_prep(req, sqe); in io_req_prep()
6494 return io_rsrc_update_prep(req, sqe); in io_req_prep()
6496 return io_statx_prep(req, sqe); in io_req_prep()
6498 return io_fadvise_prep(req, sqe); in io_req_prep()
6500 return io_madvise_prep(req, sqe); in io_req_prep()
6502 return io_openat2_prep(req, sqe); in io_req_prep()
6504 return io_epoll_ctl_prep(req, sqe); in io_req_prep()
6506 return io_splice_prep(req, sqe); in io_req_prep()
6508 return io_provide_buffers_prep(req, sqe); in io_req_prep()
6510 return io_remove_buffers_prep(req, sqe); in io_req_prep()
6512 return io_tee_prep(req, sqe); in io_req_prep()
6514 return io_shutdown_prep(req, sqe); in io_req_prep()
6516 return io_renameat_prep(req, sqe); in io_req_prep()
6518 return io_unlinkat_prep(req, sqe); in io_req_prep()
6522 req->opcode); in io_req_prep()
6526 static int io_req_prep_async(struct io_kiocb *req) in io_req_prep_async() argument
6528 if (!io_op_defs[req->opcode].needs_async_setup) in io_req_prep_async()
6530 if (WARN_ON_ONCE(req->async_data)) in io_req_prep_async()
6532 if (io_alloc_async_data(req)) in io_req_prep_async()
6535 switch (req->opcode) { in io_req_prep_async()
6537 return io_rw_prep_async(req, READ); in io_req_prep_async()
6539 return io_rw_prep_async(req, WRITE); in io_req_prep_async()
6541 return io_sendmsg_prep_async(req); in io_req_prep_async()
6543 return io_recvmsg_prep_async(req); in io_req_prep_async()
6545 return io_connect_prep_async(req); in io_req_prep_async()
6548 req->opcode); in io_req_prep_async()
6552 static u32 io_get_sequence(struct io_kiocb *req) in io_get_sequence() argument
6554 u32 seq = req->ctx->cached_sq_head; in io_get_sequence()
6556 /* need original cached_sq_head, but it was increased for each req */ in io_get_sequence()
6557 io_for_each_link(req, req) in io_get_sequence()
6562 static bool io_drain_req(struct io_kiocb *req) in io_drain_req() argument
6565 struct io_ring_ctx *ctx = req->ctx; in io_drain_req()
6570 if (req->flags & REQ_F_FAIL) { in io_drain_req()
6571 io_req_complete_fail_submit(req); in io_drain_req()
6582 req->flags |= REQ_F_IO_DRAIN; in io_drain_req()
6586 io_for_each_link(pos, req->link) { in io_drain_req()
6589 req->flags |= REQ_F_IO_DRAIN; in io_drain_req()
6594 /* Still need defer if there is pending req in defer list. */ in io_drain_req()
6597 !(req->flags & REQ_F_IO_DRAIN))) { in io_drain_req()
6604 seq = io_get_sequence(req); in io_drain_req()
6606 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) in io_drain_req()
6609 ret = io_req_prep_async(req); in io_drain_req()
6612 io_prep_async_link(req); in io_drain_req()
6617 io_req_complete_failed(req, ret); in io_drain_req()
6622 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) { in io_drain_req()
6625 io_queue_async_work(req, NULL); in io_drain_req()
6629 trace_io_uring_defer(ctx, req, req->user_data); in io_drain_req()
6630 de->req = req; in io_drain_req()
6637 static void io_clean_op(struct io_kiocb *req) in io_clean_op() argument
6639 if (req->flags & REQ_F_BUFFER_SELECTED) { in io_clean_op()
6640 switch (req->opcode) { in io_clean_op()
6644 kfree((void *)(unsigned long)req->rw.addr); in io_clean_op()
6648 kfree(req->sr_msg.kbuf); in io_clean_op()
6653 if (req->flags & REQ_F_NEED_CLEANUP) { in io_clean_op()
6654 switch (req->opcode) { in io_clean_op()
6661 struct io_async_rw *io = req->async_data; in io_clean_op()
6668 struct io_async_msghdr *io = req->async_data; in io_clean_op()
6675 if (req->open.filename) in io_clean_op()
6676 putname(req->open.filename); in io_clean_op()
6679 putname(req->rename.oldpath); in io_clean_op()
6680 putname(req->rename.newpath); in io_clean_op()
6683 putname(req->unlink.filename); in io_clean_op()
6687 if ((req->flags & REQ_F_POLLED) && req->apoll) { in io_clean_op()
6688 kfree(req->apoll->double_poll); in io_clean_op()
6689 kfree(req->apoll); in io_clean_op()
6690 req->apoll = NULL; in io_clean_op()
6692 if (req->flags & REQ_F_INFLIGHT) { in io_clean_op()
6693 struct io_uring_task *tctx = req->task->io_uring; in io_clean_op()
6697 if (req->flags & REQ_F_CREDS) in io_clean_op()
6698 put_cred(req->creds); in io_clean_op()
6700 req->flags &= ~IO_REQ_CLEAN_FLAGS; in io_clean_op()
6703 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) in io_issue_sqe() argument
6705 struct io_ring_ctx *ctx = req->ctx; in io_issue_sqe()
6709 if ((req->flags & REQ_F_CREDS) && req->creds != current_cred()) in io_issue_sqe()
6710 creds = override_creds(req->creds); in io_issue_sqe()
6712 switch (req->opcode) { in io_issue_sqe()
6714 ret = io_nop(req, issue_flags); in io_issue_sqe()
6719 ret = io_read(req, issue_flags); in io_issue_sqe()
6724 ret = io_write(req, issue_flags); in io_issue_sqe()
6727 ret = io_fsync(req, issue_flags); in io_issue_sqe()
6730 ret = io_poll_add(req, issue_flags); in io_issue_sqe()
6733 ret = io_poll_update(req, issue_flags); in io_issue_sqe()
6736 ret = io_sync_file_range(req, issue_flags); in io_issue_sqe()
6739 ret = io_sendmsg(req, issue_flags); in io_issue_sqe()
6742 ret = io_send(req, issue_flags); in io_issue_sqe()
6745 ret = io_recvmsg(req, issue_flags); in io_issue_sqe()
6748 ret = io_recv(req, issue_flags); in io_issue_sqe()
6751 ret = io_timeout(req, issue_flags); in io_issue_sqe()
6754 ret = io_timeout_remove(req, issue_flags); in io_issue_sqe()
6757 ret = io_accept(req, issue_flags); in io_issue_sqe()
6760 ret = io_connect(req, issue_flags); in io_issue_sqe()
6763 ret = io_async_cancel(req, issue_flags); in io_issue_sqe()
6766 ret = io_fallocate(req, issue_flags); in io_issue_sqe()
6769 ret = io_openat(req, issue_flags); in io_issue_sqe()
6772 ret = io_close(req, issue_flags); in io_issue_sqe()
6775 ret = io_files_update(req, issue_flags); in io_issue_sqe()
6778 ret = io_statx(req, issue_flags); in io_issue_sqe()
6781 ret = io_fadvise(req, issue_flags); in io_issue_sqe()
6784 ret = io_madvise(req, issue_flags); in io_issue_sqe()
6787 ret = io_openat2(req, issue_flags); in io_issue_sqe()
6790 ret = io_epoll_ctl(req, issue_flags); in io_issue_sqe()
6793 ret = io_splice(req, issue_flags); in io_issue_sqe()
6796 ret = io_provide_buffers(req, issue_flags); in io_issue_sqe()
6799 ret = io_remove_buffers(req, issue_flags); in io_issue_sqe()
6802 ret = io_tee(req, issue_flags); in io_issue_sqe()
6805 ret = io_shutdown(req, issue_flags); in io_issue_sqe()
6808 ret = io_renameat(req, issue_flags); in io_issue_sqe()
6811 ret = io_unlinkat(req, issue_flags); in io_issue_sqe()
6823 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) in io_issue_sqe()
6824 io_iopoll_req_issued(req); in io_issue_sqe()
6831 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_wq_free_work() local
6833 req = io_put_req_find_next(req); in io_wq_free_work()
6834 return req ? &req->work : NULL; in io_wq_free_work()
6839 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_wq_submit_work() local
6844 if (!(req->flags & REQ_F_REFCOUNT)) in io_wq_submit_work()
6845 __io_req_set_refcount(req, 2); in io_wq_submit_work()
6847 req_ref_get(req); in io_wq_submit_work()
6849 timeout = io_prep_linked_timeout(req); in io_wq_submit_work()
6858 ret = io_issue_sqe(req, 0); in io_wq_submit_work()
6864 if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_wq_submit_work()
6872 io_req_task_queue_fail(req, ret); in io_wq_submit_work()
6903 struct io_kiocb *req, int fd, in io_file_get_fixed() argument
6918 req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT); in io_file_get_fixed()
6919 io_req_set_rsrc_node(req); in io_file_get_fixed()
6926 struct io_kiocb *req, int fd) in io_file_get_normal() argument
6934 io_req_track_inflight(req); in io_file_get_normal()
6939 struct io_kiocb *req, int fd, bool fixed, in io_file_get() argument
6943 return io_file_get_fixed(ctx, req, fd, issue_flags); in io_file_get()
6945 return io_file_get_normal(ctx, req, fd); in io_file_get()
6948 static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked) in io_req_task_link_timeout() argument
6950 struct io_kiocb *prev = req->timeout.prev; in io_req_task_link_timeout()
6954 if (!(req->task->flags & PF_EXITING)) in io_req_task_link_timeout()
6955 ret = io_try_cancel_userdata(req, prev->user_data); in io_req_task_link_timeout()
6956 io_req_complete_post(req, ret ?: -ETIME, 0); in io_req_task_link_timeout()
6959 io_req_complete_post(req, -ETIME, 0); in io_req_task_link_timeout()
6967 struct io_kiocb *prev, *req = data->req; in io_link_timeout_fn() local
6968 struct io_ring_ctx *ctx = req->ctx; in io_link_timeout_fn()
6972 prev = req->timeout.head; in io_link_timeout_fn()
6973 req->timeout.head = NULL; in io_link_timeout_fn()
6984 list_del(&req->timeout.list); in io_link_timeout_fn()
6985 req->timeout.prev = prev; in io_link_timeout_fn()
6988 req->io_task_work.func = io_req_task_link_timeout; in io_link_timeout_fn()
6989 io_req_task_work_add(req); in io_link_timeout_fn()
6993 static void io_queue_linked_timeout(struct io_kiocb *req) in io_queue_linked_timeout() argument
6995 struct io_ring_ctx *ctx = req->ctx; in io_queue_linked_timeout()
7002 if (req->timeout.head) { in io_queue_linked_timeout()
7003 struct io_timeout_data *data = req->async_data; in io_queue_linked_timeout()
7008 list_add_tail(&req->timeout.list, &ctx->ltimeout_list); in io_queue_linked_timeout()
7012 io_put_req(req); in io_queue_linked_timeout()
7015 static void __io_queue_sqe(struct io_kiocb *req) in __io_queue_sqe() argument
7016 __must_hold(&req->ctx->uring_lock) in __io_queue_sqe()
7022 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); in __io_queue_sqe()
7029 if (req->flags & REQ_F_COMPLETE_INLINE) { in __io_queue_sqe()
7030 struct io_ring_ctx *ctx = req->ctx; in __io_queue_sqe()
7033 state->compl_reqs[state->compl_nr++] = req; in __io_queue_sqe()
7039 linked_timeout = io_prep_linked_timeout(req); in __io_queue_sqe()
7042 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { in __io_queue_sqe()
7043 linked_timeout = io_prep_linked_timeout(req); in __io_queue_sqe()
7045 switch (io_arm_poll_handler(req)) { in __io_queue_sqe()
7055 io_queue_async_work(req, NULL); in __io_queue_sqe()
7062 io_req_complete_failed(req, ret); in __io_queue_sqe()
7066 static inline void io_queue_sqe(struct io_kiocb *req) in io_queue_sqe() argument
7067 __must_hold(&req->ctx->uring_lock) in io_queue_sqe()
7069 if (unlikely(req->ctx->drain_active) && io_drain_req(req)) in io_queue_sqe()
7072 if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) { in io_queue_sqe()
7073 __io_queue_sqe(req); in io_queue_sqe()
7074 } else if (req->flags & REQ_F_FAIL) { in io_queue_sqe()
7075 io_req_complete_fail_submit(req); in io_queue_sqe()
7077 int ret = io_req_prep_async(req); in io_queue_sqe()
7080 io_req_complete_failed(req, ret); in io_queue_sqe()
7082 io_queue_async_work(req, NULL); in io_queue_sqe()
7092 struct io_kiocb *req, in io_check_restriction() argument
7098 if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) in io_check_restriction()
7112 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, in io_init_req() argument
7120 /* req is partially pre-initialised, see io_preinit_req() */ in io_init_req()
7121 req->opcode = READ_ONCE(sqe->opcode); in io_init_req()
7123 req->flags = sqe_flags = READ_ONCE(sqe->flags); in io_init_req()
7124 req->user_data = READ_ONCE(sqe->user_data); in io_init_req()
7125 req->file = NULL; in io_init_req()
7126 req->fixed_rsrc_refs = NULL; in io_init_req()
7127 req->task = current; in io_init_req()
7132 if (unlikely(req->opcode >= IORING_OP_LAST)) in io_init_req()
7134 if (!io_check_restriction(ctx, req, sqe_flags)) in io_init_req()
7138 !io_op_defs[req->opcode].buffer_select) in io_init_req()
7145 req->creds = xa_load(&ctx->personalities, personality); in io_init_req()
7146 if (!req->creds) in io_init_req()
7148 get_cred(req->creds); in io_init_req()
7149 req->flags |= REQ_F_CREDS; in io_init_req()
7158 io_op_defs[req->opcode].plug) { in io_init_req()
7163 if (io_op_defs[req->opcode].needs_file) { in io_init_req()
7164 req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd), in io_init_req()
7167 if (unlikely(!req->file)) in io_init_req()
7175 static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, in io_submit_sqe() argument
7182 ret = io_init_req(ctx, req, sqe); in io_submit_sqe()
7188 * we can judge a link req is failed or cancelled by if in io_submit_sqe()
7190 * it may be set REQ_F_FAIL because of other req's failure in io_submit_sqe()
7191 * so let's leverage req->result to distinguish if a head in io_submit_sqe()
7192 * is set REQ_F_FAIL because of its failure or other req's in io_submit_sqe()
7198 } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { in io_submit_sqe()
7200 * the current req is a normal req, we should return in io_submit_sqe()
7203 io_req_complete_failed(req, ret); in io_submit_sqe()
7206 req_fail_link_node(req, ret); in io_submit_sqe()
7208 ret = io_req_prep(req, sqe); in io_submit_sqe()
7214 trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data, in io_submit_sqe()
7215 req->flags, true, in io_submit_sqe()
7228 if (!(req->flags & REQ_F_FAIL)) { in io_submit_sqe()
7229 ret = io_req_prep_async(req); in io_submit_sqe()
7231 req_fail_link_node(req, ret); in io_submit_sqe()
7236 trace_io_uring_link(ctx, req, head); in io_submit_sqe()
7237 link->last->link = req; in io_submit_sqe()
7238 link->last = req; in io_submit_sqe()
7241 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { in io_submit_sqe()
7246 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { in io_submit_sqe()
7247 link->head = req; in io_submit_sqe()
7248 link->last = req; in io_submit_sqe()
7250 io_queue_sqe(req); in io_submit_sqe()
7341 struct io_kiocb *req; in io_submit_sqes() local
7343 req = io_alloc_req(ctx); in io_submit_sqes()
7344 if (unlikely(!req)) { in io_submit_sqes()
7351 list_add(&req->inflight_entry, &ctx->submit_state.free_list); in io_submit_sqes()
7356 if (io_submit_sqe(ctx, req, sqe)) in io_submit_sqes()
8404 static int io_install_fixed_file(struct io_kiocb *req, struct file *file, in io_install_fixed_file() argument
8407 struct io_ring_ctx *ctx = req->ctx; in io_install_fixed_file()
8454 static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags) in io_close_fixed() argument
8456 unsigned int offset = req->close.file_slot - 1; in io_close_fixed()
8457 struct io_ring_ctx *ctx = req->ctx; in io_close_fixed()
9266 struct io_kiocb *req, *nxt; in io_req_cache_free() local
9268 list_for_each_entry_safe(req, nxt, list, inflight_entry) { in io_req_cache_free()
9269 list_del(&req->inflight_entry); in io_req_cache_free()
9270 kmem_cache_free(req_cachep, req); in io_req_cache_free()
9424 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_ctx_cb() local
9426 return req->ctx == data; in io_cancel_ctx_cb()
9501 struct io_kiocb *req, *tmp; in io_kill_timeouts() local
9506 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { in io_kill_timeouts()
9507 if (io_match_task(req, tsk, cancel_all)) { in io_kill_timeouts()
9508 io_kill_timeout(req, -ECANCELED); in io_kill_timeouts()
9570 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_task_cb() local
9573 return io_match_task_safe(req, cancel->task, cancel->all); in io_cancel_task_cb()
9584 if (io_match_task_safe(de->req, task, cancel_all)) { in io_cancel_defer_files()
9596 io_req_complete_failed(de->req, -ECANCELED); in io_cancel_defer_files()
10150 struct io_kiocb *req; in __io_uring_show_fdinfo() local
10152 hlist_for_each_entry(req, list, hash_node) in __io_uring_show_fdinfo()
10153 seq_printf(m, " op=%d, task_works=%d\n", req->opcode, in __io_uring_show_fdinfo()
10154 req->task->task_works != NULL); in __io_uring_show_fdinfo()