Lines Matching full:req
508 struct io_kiocb *req; member
808 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, bool *locked);
903 struct io_kiocb *req; member
908 /* needs req->file assigned */
1077 static bool io_disarm_next(struct io_kiocb *req);
1084 static void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags);
1086 static void io_put_req(struct io_kiocb *req);
1087 static void io_put_req_deferred(struct io_kiocb *req);
1088 static void io_dismantle_req(struct io_kiocb *req);
1089 static void io_queue_linked_timeout(struct io_kiocb *req);
1093 static void io_clean_op(struct io_kiocb *req);
1095 struct io_kiocb *req, int fd, bool fixed,
1097 static void __io_queue_sqe(struct io_kiocb *req);
1100 static void io_req_task_queue(struct io_kiocb *req);
1102 static int io_req_prep_async(struct io_kiocb *req);
1104 static int io_install_fixed_file(struct io_kiocb *req, struct file *file,
1106 static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags);
1142 #define req_ref_zero_or_close_to_overflow(req) \ argument
1143 ((unsigned int) atomic_read(&(req->refs)) + 127u <= 127u)
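A note on the overflow guard in the macro above: casting the refcount to unsigned int and adding 127 maps both a count of zero and any count within 127 of wrapping (a huge or underflowed value) into the range [0, 127], so a single unsigned comparison flags both conditions. A minimal user-space sketch of the same arithmetic, using C11 atomics rather than the kernel's atomic_t (illustrative only; compiled and run it prints 0 1 1):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Same arithmetic as the macro: with unsigned wraparound, adding 127
 * lands 0 and the top 127 values of the range inside [0, 127]. */
static bool ref_zero_or_close_to_overflow(atomic_uint *refs)
{
        return atomic_load(refs) + 127u <= 127u;
}

int main(void)
{
        atomic_uint healthy = 3, zero = 0, wrapped = (unsigned int)-2;

        printf("%d %d %d\n",
               ref_zero_or_close_to_overflow(&healthy),   /* 0 */
               ref_zero_or_close_to_overflow(&zero),      /* 1 */
               ref_zero_or_close_to_overflow(&wrapped));  /* 1 */
        return 0;
}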
1145 static inline bool req_ref_inc_not_zero(struct io_kiocb *req) in req_ref_inc_not_zero() argument
1147 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); in req_ref_inc_not_zero()
1148 return atomic_inc_not_zero(&req->refs); in req_ref_inc_not_zero()
1151 static inline bool req_ref_put_and_test(struct io_kiocb *req) in req_ref_put_and_test() argument
1153 if (likely(!(req->flags & REQ_F_REFCOUNT))) in req_ref_put_and_test()
1156 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req)); in req_ref_put_and_test()
1157 return atomic_dec_and_test(&req->refs); in req_ref_put_and_test()
1160 static inline void req_ref_get(struct io_kiocb *req) in req_ref_get() argument
1162 WARN_ON_ONCE(!(req->flags & REQ_F_REFCOUNT)); in req_ref_get()
1163 WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req)); in req_ref_get()
1164 atomic_inc(&req->refs); in req_ref_get()
1167 static inline void __io_req_set_refcount(struct io_kiocb *req, int nr) in __io_req_set_refcount() argument
1169 if (!(req->flags & REQ_F_REFCOUNT)) { in __io_req_set_refcount()
1170 req->flags |= REQ_F_REFCOUNT; in __io_req_set_refcount()
1171 atomic_set(&req->refs, nr); in __io_req_set_refcount()
1175 static inline void io_req_set_refcount(struct io_kiocb *req) in io_req_set_refcount() argument
1177 __io_req_set_refcount(req, 1); in io_req_set_refcount()
1180 static inline void io_req_set_rsrc_node(struct io_kiocb *req) in io_req_set_rsrc_node() argument
1182 struct io_ring_ctx *ctx = req->ctx; in io_req_set_rsrc_node()
1184 if (!req->fixed_rsrc_refs) { in io_req_set_rsrc_node()
1185 req->fixed_rsrc_refs = &ctx->rsrc_node->refs; in io_req_set_rsrc_node()
1186 percpu_ref_get(req->fixed_rsrc_refs); in io_req_set_rsrc_node()
1204 __must_hold(&req->ctx->timeout_lock) in io_match_task()
1206 struct io_kiocb *req; in io_match_task() local
1213 io_for_each_link(req, head) { in io_match_task()
1214 if (req->flags & REQ_F_INFLIGHT) in io_match_task()
1222 struct io_kiocb *req; in io_match_linked() local
1224 io_for_each_link(req, head) { in io_match_linked()
1225 if (req->flags & REQ_F_INFLIGHT) in io_match_linked()
1258 static inline void req_set_fail(struct io_kiocb *req) in req_set_fail() argument
1260 req->flags |= REQ_F_FAIL; in req_set_fail()
1263 static inline void req_fail_link_node(struct io_kiocb *req, int res) in req_fail_link_node() argument
1265 req_set_fail(req); in req_fail_link_node()
1266 req->result = res; in req_fail_link_node()
1276 static inline bool io_is_timeout_noseq(struct io_kiocb *req) in io_is_timeout_noseq() argument
1278 return !req->timeout.off; in io_is_timeout_noseq()
1286 struct io_kiocb *req, *tmp; in io_fallback_req_func() local
1290 llist_for_each_entry_safe(req, tmp, node, io_task_work.fallback_node) in io_fallback_req_func()
1291 req->io_task_work.func(req, &locked); in io_fallback_req_func()
1376 static bool req_need_defer(struct io_kiocb *req, u32 seq) in req_need_defer() argument
1378 if (unlikely(req->flags & REQ_F_IO_DRAIN)) { in req_need_defer()
1379 struct io_ring_ctx *ctx = req->ctx; in req_need_defer()
1396 static inline bool io_req_ffs_set(struct io_kiocb *req) in io_req_ffs_set() argument
1398 return IS_ENABLED(CONFIG_64BIT) && (req->flags & REQ_F_FIXED_FILE); in io_req_ffs_set()
1401 static void io_req_track_inflight(struct io_kiocb *req) in io_req_track_inflight() argument
1403 if (!(req->flags & REQ_F_INFLIGHT)) { in io_req_track_inflight()
1404 req->flags |= REQ_F_INFLIGHT; in io_req_track_inflight()
1405 atomic_inc(&req->task->io_uring->inflight_tracked); in io_req_track_inflight()
1409 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req) in __io_prep_linked_timeout() argument
1411 if (WARN_ON_ONCE(!req->link)) in __io_prep_linked_timeout()
1414 req->flags &= ~REQ_F_ARM_LTIMEOUT; in __io_prep_linked_timeout()
1415 req->flags |= REQ_F_LINK_TIMEOUT; in __io_prep_linked_timeout()
1418 io_req_set_refcount(req); in __io_prep_linked_timeout()
1419 __io_req_set_refcount(req->link, 2); in __io_prep_linked_timeout()
1420 return req->link; in __io_prep_linked_timeout()
1423 static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) in io_prep_linked_timeout() argument
1425 if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT))) in io_prep_linked_timeout()
1427 return __io_prep_linked_timeout(req); in io_prep_linked_timeout()
1430 static void io_prep_async_work(struct io_kiocb *req) in io_prep_async_work() argument
1432 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_prep_async_work()
1433 struct io_ring_ctx *ctx = req->ctx; in io_prep_async_work()
1435 if (!(req->flags & REQ_F_CREDS)) { in io_prep_async_work()
1436 req->flags |= REQ_F_CREDS; in io_prep_async_work()
1437 req->creds = get_current_cred(); in io_prep_async_work()
1440 req->work.list.next = NULL; in io_prep_async_work()
1441 req->work.flags = 0; in io_prep_async_work()
1442 if (req->flags & REQ_F_FORCE_ASYNC) in io_prep_async_work()
1443 req->work.flags |= IO_WQ_WORK_CONCURRENT; in io_prep_async_work()
1445 if (req->flags & REQ_F_ISREG) { in io_prep_async_work()
1447 io_wq_hash_work(&req->work, file_inode(req->file)); in io_prep_async_work()
1448 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) { in io_prep_async_work()
1450 req->work.flags |= IO_WQ_WORK_UNBOUND; in io_prep_async_work()
1454 static void io_prep_async_link(struct io_kiocb *req) in io_prep_async_link() argument
1458 if (req->flags & REQ_F_LINK_TIMEOUT) { in io_prep_async_link()
1459 struct io_ring_ctx *ctx = req->ctx; in io_prep_async_link()
1462 io_for_each_link(cur, req) in io_prep_async_link()
1466 io_for_each_link(cur, req) in io_prep_async_link()
1471 static void io_queue_async_work(struct io_kiocb *req, bool *locked) in io_queue_async_work() argument
1473 struct io_ring_ctx *ctx = req->ctx; in io_queue_async_work()
1474 struct io_kiocb *link = io_prep_linked_timeout(req); in io_queue_async_work()
1475 struct io_uring_task *tctx = req->task->io_uring; in io_queue_async_work()
1484 io_prep_async_link(req); in io_queue_async_work()
1493 if (WARN_ON_ONCE(!same_thread_group(req->task, current))) in io_queue_async_work()
1494 req->work.flags |= IO_WQ_WORK_CANCEL; in io_queue_async_work()
1496 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, in io_queue_async_work()
1497 &req->work, req->flags); in io_queue_async_work()
1498 io_wq_enqueue(tctx->io_wq, &req->work); in io_queue_async_work()
1503 static void io_kill_timeout(struct io_kiocb *req, int status) in io_kill_timeout() argument
1504 __must_hold(&req->ctx->completion_lock) in io_kill_timeout()
1505 __must_hold(&req->ctx->timeout_lock) in io_kill_timeout()
1507 struct io_timeout_data *io = req->async_data; in io_kill_timeout()
1511 req_set_fail(req); in io_kill_timeout()
1512 atomic_set(&req->ctx->cq_timeouts, in io_kill_timeout()
1513 atomic_read(&req->ctx->cq_timeouts) + 1); in io_kill_timeout()
1514 list_del_init(&req->timeout.list); in io_kill_timeout()
1515 io_fill_cqe_req(req, status, 0); in io_kill_timeout()
1516 io_put_req_deferred(req); in io_kill_timeout()
1526 if (req_need_defer(de->req, de->seq)) in io_queue_deferred()
1529 io_req_task_queue(de->req); in io_queue_deferred()
1538 struct io_kiocb *req, *tmp; in io_flush_timeouts() local
1541 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { in io_flush_timeouts()
1544 if (io_is_timeout_noseq(req)) in io_flush_timeouts()
1554 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush; in io_flush_timeouts()
1559 io_kill_timeout(req, 0); in io_flush_timeouts()
1816 static noinline void io_fill_cqe_req(struct io_kiocb *req, s32 res, u32 cflags) in io_fill_cqe_req() argument
1818 __io_fill_cqe(req->ctx, req->user_data, res, cflags); in io_fill_cqe_req()
1828 static void io_req_complete_post(struct io_kiocb *req, s32 res, in io_req_complete_post() argument
1831 struct io_ring_ctx *ctx = req->ctx; in io_req_complete_post()
1834 __io_fill_cqe(ctx, req->user_data, res, cflags); in io_req_complete_post()
1839 if (req_ref_put_and_test(req)) { in io_req_complete_post()
1840 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { in io_req_complete_post()
1841 if (req->flags & IO_DISARM_MASK) in io_req_complete_post()
1842 io_disarm_next(req); in io_req_complete_post()
1843 if (req->link) { in io_req_complete_post()
1844 io_req_task_queue(req->link); in io_req_complete_post()
1845 req->link = NULL; in io_req_complete_post()
1848 io_dismantle_req(req); in io_req_complete_post()
1849 io_put_task(req->task, 1); in io_req_complete_post()
1850 list_add(&req->inflight_entry, &ctx->locked_free_list); in io_req_complete_post()
1854 req = NULL; in io_req_complete_post()
1859 if (req) { in io_req_complete_post()
1865 static inline bool io_req_needs_clean(struct io_kiocb *req) in io_req_needs_clean() argument
1867 return req->flags & IO_REQ_CLEAN_FLAGS; in io_req_needs_clean()
1870 static inline void io_req_complete_state(struct io_kiocb *req, s32 res, in io_req_complete_state() argument
1873 if (io_req_needs_clean(req)) in io_req_complete_state()
1874 io_clean_op(req); in io_req_complete_state()
1875 req->result = res; in io_req_complete_state()
1876 req->compl.cflags = cflags; in io_req_complete_state()
1877 req->flags |= REQ_F_COMPLETE_INLINE; in io_req_complete_state()
1880 static inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags, in __io_req_complete() argument
1884 io_req_complete_state(req, res, cflags); in __io_req_complete()
1886 io_req_complete_post(req, res, cflags); in __io_req_complete()
1889 static inline void io_req_complete(struct io_kiocb *req, s32 res) in io_req_complete() argument
1891 __io_req_complete(req, 0, res, 0); in io_req_complete()
1894 static void io_req_complete_failed(struct io_kiocb *req, s32 res) in io_req_complete_failed() argument
1896 req_set_fail(req); in io_req_complete_failed()
1897 io_req_complete_post(req, res, 0); in io_req_complete_failed()
1900 static void io_req_complete_fail_submit(struct io_kiocb *req) in io_req_complete_fail_submit() argument
1906 req->flags &= ~REQ_F_HARDLINK; in io_req_complete_fail_submit()
1907 req->flags |= REQ_F_LINK; in io_req_complete_fail_submit()
1908 io_req_complete_failed(req, req->result); in io_req_complete_fail_submit()
1915 static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx) in io_preinit_req() argument
1917 req->ctx = ctx; in io_preinit_req()
1918 req->link = NULL; in io_preinit_req()
1919 req->async_data = NULL; in io_preinit_req()
1921 req->result = 0; in io_preinit_req()
1949 struct io_kiocb *req = list_first_entry(&state->free_list, in io_flush_cached_reqs() local
1952 list_del(&req->inflight_entry); in io_flush_cached_reqs()
1953 state->reqs[nr++] = req; in io_flush_cached_reqs()
2008 static void io_dismantle_req(struct io_kiocb *req) in io_dismantle_req() argument
2010 unsigned int flags = req->flags; in io_dismantle_req()
2012 if (io_req_needs_clean(req)) in io_dismantle_req()
2013 io_clean_op(req); in io_dismantle_req()
2015 io_put_file(req->file); in io_dismantle_req()
2016 if (req->fixed_rsrc_refs) in io_dismantle_req()
2017 percpu_ref_put(req->fixed_rsrc_refs); in io_dismantle_req()
2018 if (req->async_data) { in io_dismantle_req()
2019 kfree(req->async_data); in io_dismantle_req()
2020 req->async_data = NULL; in io_dismantle_req()
2024 static void __io_free_req(struct io_kiocb *req) in __io_free_req() argument
2026 struct io_ring_ctx *ctx = req->ctx; in __io_free_req()
2028 io_dismantle_req(req); in __io_free_req()
2029 io_put_task(req->task, 1); in __io_free_req()
2032 list_add(&req->inflight_entry, &ctx->locked_free_list); in __io_free_req()
2039 static inline void io_remove_next_linked(struct io_kiocb *req) in io_remove_next_linked() argument
2041 struct io_kiocb *nxt = req->link; in io_remove_next_linked()
2043 req->link = nxt->link; in io_remove_next_linked()
2047 static bool io_kill_linked_timeout(struct io_kiocb *req) in io_kill_linked_timeout() argument
2048 __must_hold(&req->ctx->completion_lock) in io_kill_linked_timeout()
2049 __must_hold(&req->ctx->timeout_lock) in io_kill_linked_timeout()
2051 struct io_kiocb *link = req->link; in io_kill_linked_timeout()
2056 io_remove_next_linked(req); in io_kill_linked_timeout()
2068 static void io_fail_links(struct io_kiocb *req) in io_fail_links() argument
2069 __must_hold(&req->ctx->completion_lock) in io_fail_links()
2071 struct io_kiocb *nxt, *link = req->link; in io_fail_links()
2073 req->link = NULL; in io_fail_links()
2083 trace_io_uring_fail_link(req, link); in io_fail_links()
2090 static bool io_disarm_next(struct io_kiocb *req) in io_disarm_next() argument
2091 __must_hold(&req->ctx->completion_lock) in io_disarm_next()
2095 if (req->flags & REQ_F_ARM_LTIMEOUT) { in io_disarm_next()
2096 struct io_kiocb *link = req->link; in io_disarm_next()
2098 req->flags &= ~REQ_F_ARM_LTIMEOUT; in io_disarm_next()
2100 io_remove_next_linked(req); in io_disarm_next()
2105 } else if (req->flags & REQ_F_LINK_TIMEOUT) { in io_disarm_next()
2106 struct io_ring_ctx *ctx = req->ctx; in io_disarm_next()
2109 posted = io_kill_linked_timeout(req); in io_disarm_next()
2112 if (unlikely((req->flags & REQ_F_FAIL) && in io_disarm_next()
2113 !(req->flags & REQ_F_HARDLINK))) { in io_disarm_next()
2114 posted |= (req->link != NULL); in io_disarm_next()
2115 io_fail_links(req); in io_disarm_next()
2120 static struct io_kiocb *__io_req_find_next(struct io_kiocb *req) in __io_req_find_next() argument
2130 if (req->flags & IO_DISARM_MASK) { in __io_req_find_next()
2131 struct io_ring_ctx *ctx = req->ctx; in __io_req_find_next()
2135 posted = io_disarm_next(req); in __io_req_find_next()
2137 io_commit_cqring(req->ctx); in __io_req_find_next()
2142 nxt = req->link; in __io_req_find_next()
2143 req->link = NULL; in __io_req_find_next()
2147 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req) in io_req_find_next() argument
2149 if (likely(!(req->flags & (REQ_F_LINK|REQ_F_HARDLINK)))) in io_req_find_next()
2151 return __io_req_find_next(req); in io_req_find_next()
2191 struct io_kiocb *req = container_of(node, struct io_kiocb, in tctx_task_work() local
2194 if (req->ctx != ctx) { in tctx_task_work()
2196 ctx = req->ctx; in tctx_task_work()
2201 req->io_task_work.func(req, &locked); in tctx_task_work()
2215 static void io_req_task_work_add(struct io_kiocb *req) in io_req_task_work_add() argument
2217 struct task_struct *tsk = req->task; in io_req_task_work_add()
2227 wq_list_add_tail(&req->io_task_work.node, &tctx->task_list); in io_req_task_work_add()
2243 notify = (req->ctx->flags & IORING_SETUP_SQPOLL) ? TWA_NONE : TWA_SIGNAL; in io_req_task_work_add()
2256 req = container_of(node, struct io_kiocb, io_task_work.node); in io_req_task_work_add()
2258 if (llist_add(&req->io_task_work.fallback_node, in io_req_task_work_add()
2259 &req->ctx->fallback_llist)) in io_req_task_work_add()
2260 schedule_delayed_work(&req->ctx->fallback_work, 1); in io_req_task_work_add()
2264 static void io_req_task_cancel(struct io_kiocb *req, bool *locked) in io_req_task_cancel() argument
2266 struct io_ring_ctx *ctx = req->ctx; in io_req_task_cancel()
2270 io_req_complete_failed(req, req->result); in io_req_task_cancel()
2273 static void io_req_task_submit(struct io_kiocb *req, bool *locked) in io_req_task_submit() argument
2275 struct io_ring_ctx *ctx = req->ctx; in io_req_task_submit()
2278 /* req->task == current here, checking PF_EXITING is safe */ in io_req_task_submit()
2279 if (likely(!(req->task->flags & PF_EXITING))) in io_req_task_submit()
2280 __io_queue_sqe(req); in io_req_task_submit()
2282 io_req_complete_failed(req, -EFAULT); in io_req_task_submit()
2285 static void io_req_task_queue_fail(struct io_kiocb *req, int ret) in io_req_task_queue_fail() argument
2287 req->result = ret; in io_req_task_queue_fail()
2288 req->io_task_work.func = io_req_task_cancel; in io_req_task_queue_fail()
2289 io_req_task_work_add(req); in io_req_task_queue_fail()
2292 static void io_req_task_queue(struct io_kiocb *req) in io_req_task_queue() argument
2294 req->io_task_work.func = io_req_task_submit; in io_req_task_queue()
2295 io_req_task_work_add(req); in io_req_task_queue()
2298 static void io_req_task_queue_reissue(struct io_kiocb *req) in io_req_task_queue_reissue() argument
2300 req->io_task_work.func = io_queue_async_work; in io_req_task_queue_reissue()
2301 io_req_task_work_add(req); in io_req_task_queue_reissue()
2304 static inline void io_queue_next(struct io_kiocb *req) in io_queue_next() argument
2306 struct io_kiocb *nxt = io_req_find_next(req); in io_queue_next()
2312 static void io_free_req(struct io_kiocb *req) in io_free_req() argument
2314 io_queue_next(req); in io_free_req()
2315 __io_free_req(req); in io_free_req()
2318 static void io_free_req_work(struct io_kiocb *req, bool *locked) in io_free_req_work() argument
2320 io_free_req(req); in io_free_req_work()
2345 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req, in io_req_free_batch() argument
2348 io_queue_next(req); in io_req_free_batch()
2349 io_dismantle_req(req); in io_req_free_batch()
2351 if (req->task != rb->task) { in io_req_free_batch()
2354 rb->task = req->task; in io_req_free_batch()
2361 state->reqs[state->free_reqs++] = req; in io_req_free_batch()
2363 list_add(&req->inflight_entry, &state->free_list); in io_req_free_batch()
2375 struct io_kiocb *req = state->compl_reqs[i]; in io_submit_flush_completions() local
2377 __io_fill_cqe(ctx, req->user_data, req->result, in io_submit_flush_completions()
2378 req->compl.cflags); in io_submit_flush_completions()
2386 struct io_kiocb *req = state->compl_reqs[i]; in io_submit_flush_completions() local
2388 if (req_ref_put_and_test(req)) in io_submit_flush_completions()
2389 io_req_free_batch(&rb, req, &ctx->submit_state); in io_submit_flush_completions()
2400 static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) in io_put_req_find_next() argument
2404 if (req_ref_put_and_test(req)) { in io_put_req_find_next()
2405 nxt = io_req_find_next(req); in io_put_req_find_next()
2406 __io_free_req(req); in io_put_req_find_next()
2411 static inline void io_put_req(struct io_kiocb *req) in io_put_req() argument
2413 if (req_ref_put_and_test(req)) in io_put_req()
2414 io_free_req(req); in io_put_req()
2417 static inline void io_put_req_deferred(struct io_kiocb *req) in io_put_req_deferred() argument
2419 if (req_ref_put_and_test(req)) { in io_put_req_deferred()
2420 req->io_task_work.func = io_free_req_work; in io_put_req_deferred()
2421 io_req_task_work_add(req); in io_put_req_deferred()
2440 static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf) in io_put_kbuf() argument
2446 req->flags &= ~REQ_F_BUFFER_SELECTED; in io_put_kbuf()
2451 static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req) in io_put_rw_kbuf() argument
2455 if (likely(!(req->flags & REQ_F_BUFFER_SELECTED))) in io_put_rw_kbuf()
2457 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_put_rw_kbuf()
2458 return io_put_kbuf(req, kbuf); in io_put_rw_kbuf()
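io_put_rw_kbuf() above recovers a selected buffer that an earlier step stashed as an integer in req->rw.addr. The cast-through-unsigned-long round trip is a recurring pattern in this file; a tiny stand-alone sketch with made-up stand-in types (not the kernel's structures):

#include <stdint.h>
#include <stdio.h>

struct buf { int id; };             /* stand-in for struct io_buffer   */
struct fake_req { uint64_t addr; }; /* stand-in for req->rw.addr (u64) */

int main(void)
{
        struct buf b = { .id = 42 };
        struct fake_req req;
        struct buf *kbuf;

        /* stash: pointer -> unsigned long -> u64 */
        req.addr = (uint64_t)(unsigned long)&b;

        /* recover: u64 -> unsigned long -> pointer, as io_put_rw_kbuf() does */
        kbuf = (struct buf *)(unsigned long)req.addr;
        printf("%d\n", kbuf->id);  /* 42 */
        return 0;
}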
2479 struct io_kiocb *req; in io_iopoll_complete() local
2489 req = list_first_entry(done, struct io_kiocb, inflight_entry); in io_iopoll_complete()
2490 list_del(&req->inflight_entry); in io_iopoll_complete()
2491 cflags = io_put_rw_kbuf(req); in io_iopoll_complete()
2496 WRITE_ONCE(cqe->user_data, req->user_data); in io_iopoll_complete()
2497 WRITE_ONCE(cqe->res, req->result); in io_iopoll_complete()
2501 io_cqring_event_overflow(ctx, req->user_data, in io_iopoll_complete()
2502 req->result, cflags); in io_iopoll_complete()
2506 if (req_ref_put_and_test(req)) in io_iopoll_complete()
2507 io_req_free_batch(&rb, req, &ctx->submit_state); in io_iopoll_complete()
2518 struct io_kiocb *req, *tmp; in io_do_iopoll() local
2528 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) { in io_do_iopoll()
2529 struct kiocb *kiocb = &req->rw.kiocb; in io_do_iopoll()
2537 if (READ_ONCE(req->iopoll_completed)) { in io_do_iopoll()
2538 list_move_tail(&req->inflight_entry, &done); in io_do_iopoll()
2550 /* iopoll may have completed current req */ in io_do_iopoll()
2551 if (READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
2552 list_move_tail(&req->inflight_entry, &done); in io_do_iopoll()
2643 static void kiocb_end_write(struct io_kiocb *req) in kiocb_end_write() argument
2649 if (req->flags & REQ_F_ISREG) { in kiocb_end_write()
2650 struct super_block *sb = file_inode(req->file)->i_sb; in kiocb_end_write()
2658 static bool io_resubmit_prep(struct io_kiocb *req) in io_resubmit_prep() argument
2660 struct io_async_rw *rw = req->async_data; in io_resubmit_prep()
2663 return !io_req_prep_async(req); in io_resubmit_prep()
2668 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue() argument
2670 umode_t mode = file_inode(req->file)->i_mode; in io_rw_should_reissue()
2671 struct io_ring_ctx *ctx = req->ctx; in io_rw_should_reissue()
2675 if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() && in io_rw_should_reissue()
2689 if (!same_thread_group(req->task, current) || !in_task()) in io_rw_should_reissue()
2694 static bool io_resubmit_prep(struct io_kiocb *req) in io_resubmit_prep() argument
2698 static bool io_rw_should_reissue(struct io_kiocb *req) in io_rw_should_reissue() argument
2708 static void io_req_io_end(struct io_kiocb *req) in io_req_io_end() argument
2710 struct io_rw *rw = &req->rw; in io_req_io_end()
2713 kiocb_end_write(req); in io_req_io_end()
2714 fsnotify_modify(req->file); in io_req_io_end()
2716 fsnotify_access(req->file); in io_req_io_end()
2720 static bool __io_complete_rw_common(struct io_kiocb *req, long res) in __io_complete_rw_common() argument
2722 if (res != req->result) { in __io_complete_rw_common()
2724 io_rw_should_reissue(req)) { in __io_complete_rw_common()
2729 io_req_io_end(req); in __io_complete_rw_common()
2730 req->flags |= REQ_F_REISSUE; in __io_complete_rw_common()
2733 req_set_fail(req); in __io_complete_rw_common()
2734 req->result = res; in __io_complete_rw_common()
2739 static inline int io_fixup_rw_res(struct io_kiocb *req, long res) in io_fixup_rw_res() argument
2741 struct io_async_rw *io = req->async_data; in io_fixup_rw_res()
2753 static void io_req_task_complete(struct io_kiocb *req, bool *locked) in io_req_task_complete() argument
2755 unsigned int cflags = io_put_rw_kbuf(req); in io_req_task_complete()
2756 int res = req->result; in io_req_task_complete()
2759 struct io_ring_ctx *ctx = req->ctx; in io_req_task_complete()
2762 io_req_complete_state(req, res, cflags); in io_req_task_complete()
2763 state->compl_reqs[state->compl_nr++] = req; in io_req_task_complete()
2767 io_req_complete_post(req, res, cflags); in io_req_task_complete()
2771 static void io_req_rw_complete(struct io_kiocb *req, bool *locked) in io_req_rw_complete() argument
2773 io_req_io_end(req); in io_req_rw_complete()
2774 io_req_task_complete(req, locked); in io_req_rw_complete()
2779 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in io_complete_rw() local
2781 if (__io_complete_rw_common(req, res)) in io_complete_rw()
2783 req->result = io_fixup_rw_res(req, res); in io_complete_rw()
2784 req->io_task_work.func = io_req_rw_complete; in io_complete_rw()
2785 io_req_task_work_add(req); in io_complete_rw()
2790 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in io_complete_rw_iopoll() local
2793 kiocb_end_write(req); in io_complete_rw_iopoll()
2794 if (unlikely(res != req->result)) { in io_complete_rw_iopoll()
2795 if (res == -EAGAIN && io_rw_should_reissue(req)) { in io_complete_rw_iopoll()
2796 req->flags |= REQ_F_REISSUE; in io_complete_rw_iopoll()
2801 WRITE_ONCE(req->result, res); in io_complete_rw_iopoll()
2804 WRITE_ONCE(req->iopoll_completed, 1); in io_complete_rw_iopoll()
2813 static void io_iopoll_req_issued(struct io_kiocb *req) in io_iopoll_req_issued() argument
2815 struct io_ring_ctx *ctx = req->ctx; in io_iopoll_req_issued()
2836 if (list_req->file != req->file) { in io_iopoll_req_issued()
2840 queue_num1 = blk_qc_t_to_queue_num(req->rw.kiocb.ki_cookie); in io_iopoll_req_issued()
2850 if (READ_ONCE(req->iopoll_completed)) in io_iopoll_req_issued()
2851 list_add(&req->inflight_entry, &ctx->iopoll_list); in io_iopoll_req_issued()
2853 list_add_tail(&req->inflight_entry, &ctx->iopoll_list); in io_iopoll_req_issued()
2913 static bool io_file_supports_nowait(struct io_kiocb *req, int rw) in io_file_supports_nowait() argument
2915 if (rw == READ && (req->flags & REQ_F_NOWAIT_READ)) in io_file_supports_nowait()
2917 else if (rw == WRITE && (req->flags & REQ_F_NOWAIT_WRITE)) in io_file_supports_nowait()
2920 return __io_file_supports_nowait(req->file, rw); in io_file_supports_nowait()
2923 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_prep_rw() argument
2926 struct io_ring_ctx *ctx = req->ctx; in io_prep_rw()
2927 struct kiocb *kiocb = &req->rw.kiocb; in io_prep_rw()
2928 struct file *file = req->file; in io_prep_rw()
2932 if (!io_req_ffs_set(req) && S_ISREG(file_inode(file)->i_mode)) in io_prep_rw()
2933 req->flags |= REQ_F_ISREG; in io_prep_rw()
2948 ((file->f_flags & O_NONBLOCK) && !io_file_supports_nowait(req, rw))) in io_prep_rw()
2949 req->flags |= REQ_F_NOWAIT; in io_prep_rw()
2968 req->iopoll_completed = 0; in io_prep_rw()
2976 req->buf_index = READ_ONCE(sqe->buf_index); in io_prep_rw()
2977 req->imu = NULL; in io_prep_rw()
2979 if (req->opcode == IORING_OP_READ_FIXED || in io_prep_rw()
2980 req->opcode == IORING_OP_WRITE_FIXED) { in io_prep_rw()
2981 struct io_ring_ctx *ctx = req->ctx; in io_prep_rw()
2984 if (unlikely(req->buf_index >= ctx->nr_user_bufs)) in io_prep_rw()
2986 index = array_index_nospec(req->buf_index, ctx->nr_user_bufs); in io_prep_rw()
2987 req->imu = ctx->user_bufs[index]; in io_prep_rw()
2988 io_req_set_rsrc_node(req); in io_prep_rw()
2991 req->rw.addr = READ_ONCE(sqe->addr); in io_prep_rw()
2992 req->rw.len = READ_ONCE(sqe->len); in io_prep_rw()
3017 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req) in io_kiocb_update_pos() argument
3019 struct kiocb *kiocb = &req->rw.kiocb; in io_kiocb_update_pos()
3024 if (!(req->file->f_mode & FMODE_STREAM)) { in io_kiocb_update_pos()
3025 req->flags |= REQ_F_CUR_POS; in io_kiocb_update_pos()
3026 kiocb->ki_pos = req->file->f_pos; in io_kiocb_update_pos()
3037 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in kiocb_done() local
3039 if (req->flags & REQ_F_CUR_POS) in kiocb_done()
3040 req->file->f_pos = kiocb->ki_pos; in kiocb_done()
3042 if (!__io_complete_rw_common(req, ret)) { in kiocb_done()
3047 io_req_io_end(req); in kiocb_done()
3048 __io_req_complete(req, issue_flags, in kiocb_done()
3049 io_fixup_rw_res(req, ret), in kiocb_done()
3050 io_put_rw_kbuf(req)); in kiocb_done()
3056 if (req->flags & REQ_F_REISSUE) { in kiocb_done()
3057 req->flags &= ~REQ_F_REISSUE; in kiocb_done()
3058 if (io_resubmit_prep(req)) { in kiocb_done()
3059 io_req_task_queue_reissue(req); in kiocb_done()
3061 unsigned int cflags = io_put_rw_kbuf(req); in kiocb_done()
3062 struct io_ring_ctx *ctx = req->ctx; in kiocb_done()
3064 ret = io_fixup_rw_res(req, ret); in kiocb_done()
3065 req_set_fail(req); in kiocb_done()
3068 __io_req_complete(req, issue_flags, ret, cflags); in kiocb_done()
3071 __io_req_complete(req, issue_flags, ret, cflags); in kiocb_done()
3077 static int __io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter, in __io_import_fixed() argument
3080 size_t len = req->rw.len; in __io_import_fixed()
3081 u64 buf_end, buf_addr = req->rw.addr; in __io_import_fixed()
3135 static int io_import_fixed(struct io_kiocb *req, int rw, struct iov_iter *iter) in io_import_fixed() argument
3137 if (WARN_ON_ONCE(!req->imu)) in io_import_fixed()
3139 return __io_import_fixed(req, rw, iter, req->imu); in io_import_fixed()
3160 static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, in io_buffer_select() argument
3166 if (req->flags & REQ_F_BUFFER_SELECTED) in io_buffer_select()
3169 io_ring_submit_lock(req->ctx, needs_lock); in io_buffer_select()
3171 lockdep_assert_held(&req->ctx->uring_lock); in io_buffer_select()
3173 head = xa_load(&req->ctx->io_buffers, bgid); in io_buffer_select()
3181 xa_erase(&req->ctx->io_buffers, bgid); in io_buffer_select()
3189 io_ring_submit_unlock(req->ctx, needs_lock); in io_buffer_select()
3194 static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len, in io_rw_buffer_select() argument
3200 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_rw_buffer_select()
3201 bgid = req->buf_index; in io_rw_buffer_select()
3202 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock); in io_rw_buffer_select()
3205 req->rw.addr = (u64) (unsigned long) kbuf; in io_rw_buffer_select()
3206 req->flags |= REQ_F_BUFFER_SELECTED; in io_rw_buffer_select()
3211 static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov, in io_compat_import() argument
3219 uiov = u64_to_user_ptr(req->rw.addr); in io_compat_import()
3228 buf = io_rw_buffer_select(req, &len, needs_lock); in io_compat_import()
3237 static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, in __io_iov_buffer_select() argument
3240 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr); in __io_iov_buffer_select()
3250 buf = io_rw_buffer_select(req, &len, needs_lock); in __io_iov_buffer_select()
3258 static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, in io_iov_buffer_select() argument
3261 if (req->flags & REQ_F_BUFFER_SELECTED) { in io_iov_buffer_select()
3264 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_iov_buffer_select()
3269 if (req->rw.len != 1) in io_iov_buffer_select()
3273 if (req->ctx->compat) in io_iov_buffer_select()
3274 return io_compat_import(req, iov, needs_lock); in io_iov_buffer_select()
3277 return __io_iov_buffer_select(req, iov, needs_lock); in io_iov_buffer_select()
3280 static int io_import_iovec(int rw, struct io_kiocb *req, struct iovec **iovec, in io_import_iovec() argument
3283 void __user *buf = u64_to_user_ptr(req->rw.addr); in io_import_iovec()
3284 size_t sqe_len = req->rw.len; in io_import_iovec()
3285 u8 opcode = req->opcode; in io_import_iovec()
3290 return io_import_fixed(req, rw, iter); in io_import_iovec()
3294 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT)) in io_import_iovec()
3298 if (req->flags & REQ_F_BUFFER_SELECT) { in io_import_iovec()
3299 buf = io_rw_buffer_select(req, &sqe_len, needs_lock); in io_import_iovec()
3302 req->rw.len = sqe_len; in io_import_iovec()
3310 if (req->flags & REQ_F_BUFFER_SELECT) { in io_import_iovec()
3311 ret = io_iov_buffer_select(req, *iovec, needs_lock); in io_import_iovec()
3319 req->ctx->compat); in io_import_iovec()
3331 static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter) in loop_rw_iter() argument
3333 struct kiocb *kiocb = &req->rw.kiocb; in loop_rw_iter()
3334 struct file *file = req->file; in loop_rw_iter()
3357 iovec.iov_base = u64_to_user_ptr(req->rw.addr); in loop_rw_iter()
3358 iovec.iov_len = req->rw.len; in loop_rw_iter()
3378 req->rw.addr += nr; in loop_rw_iter()
3379 req->rw.len -= nr; in loop_rw_iter()
3380 if (!req->rw.len) in loop_rw_iter()
3390 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, in io_req_map_rw() argument
3393 struct io_async_rw *rw = req->async_data; in io_req_map_rw()
3413 req->flags |= REQ_F_NEED_CLEANUP; in io_req_map_rw()
3417 static inline int io_alloc_async_data(struct io_kiocb *req) in io_alloc_async_data() argument
3419 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size); in io_alloc_async_data()
3420 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL); in io_alloc_async_data()
3421 return req->async_data == NULL; in io_alloc_async_data()
3424 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, in io_setup_async_rw() argument
3428 if (!force && !io_op_defs[req->opcode].needs_async_setup) in io_setup_async_rw()
3430 if (!req->async_data) { in io_setup_async_rw()
3433 if (io_alloc_async_data(req)) { in io_setup_async_rw()
3438 io_req_map_rw(req, iovec, fast_iov, iter); in io_setup_async_rw()
3439 iorw = req->async_data; in io_setup_async_rw()
3446 static inline int io_rw_prep_async(struct io_kiocb *req, int rw) in io_rw_prep_async() argument
3448 struct io_async_rw *iorw = req->async_data; in io_rw_prep_async()
3452 ret = io_import_iovec(rw, req, &iov, &iorw->iter, false); in io_rw_prep_async()
3459 req->flags |= REQ_F_NEED_CLEANUP; in io_rw_prep_async()
3464 static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_read_prep() argument
3466 if (unlikely(!(req->file->f_mode & FMODE_READ))) in io_read_prep()
3468 return io_prep_rw(req, sqe, READ); in io_read_prep()
3485 struct io_kiocb *req = wait->private; in io_async_buf_func() local
3493 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ; in io_async_buf_func()
3495 io_req_task_queue(req); in io_async_buf_func()
3511 static bool io_rw_should_retry(struct io_kiocb *req) in io_rw_should_retry() argument
3513 struct io_async_rw *rw = req->async_data; in io_rw_should_retry()
3515 struct kiocb *kiocb = &req->rw.kiocb; in io_rw_should_retry()
3518 if (req->flags & REQ_F_NOWAIT) in io_rw_should_retry()
3529 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC)) in io_rw_should_retry()
3533 wait->wait.private = req; in io_rw_should_retry()
3542 static inline int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) in io_iter_do_read() argument
3544 if (req->file->f_op->read_iter) in io_iter_do_read()
3545 return call_read_iter(req->file, &req->rw.kiocb, iter); in io_iter_do_read()
3546 else if (req->file->f_op->read) in io_iter_do_read()
3547 return loop_rw_iter(READ, req, iter); in io_iter_do_read()
3552 static bool need_read_all(struct io_kiocb *req) in need_read_all() argument
3554 return req->flags & REQ_F_ISREG || in need_read_all()
3555 S_ISBLK(file_inode(req->file)->i_mode); in need_read_all()
3558 static int io_read(struct io_kiocb *req, unsigned int issue_flags) in io_read() argument
3561 struct kiocb *kiocb = &req->rw.kiocb; in io_read()
3563 struct io_async_rw *rw = req->async_data; in io_read()
3580 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); in io_read()
3586 req->result = iov_iter_count(iter); in io_read()
3595 if (force_nonblock && !io_file_supports_nowait(req, READ)) { in io_read()
3596 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, true); in io_read()
3600 ppos = io_kiocb_update_pos(req); in io_read()
3602 ret = rw_verify_area(READ, req->file, ppos, req->result); in io_read()
3608 ret = io_iter_do_read(req, iter); in io_read()
3610 if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) { in io_read()
3611 req->flags &= ~REQ_F_REISSUE; in io_read()
3613 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_read()
3616 if (req->flags & REQ_F_NOWAIT) in io_read()
3621 } else if (ret <= 0 || ret == req->result || !force_nonblock || in io_read()
3622 (req->flags & REQ_F_NOWAIT) || !need_read_all(req)) { in io_read()
3634 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true); in io_read()
3639 rw = req->async_data; in io_read()
3662 if (!io_rw_should_retry(req)) { in io_read()
3667 req->result = iov_iter_count(iter); in io_read()
3674 ret = io_iter_do_read(req, iter); in io_read()
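For context on what the io_read() fragments above serve: from user space, a single IORING_OP_READ submission exercises this path. A hedged sketch using liburing (assuming liburing is installed; the file name and queue depth are arbitrary):

/* Build with: cc read_demo.c -luring (file name is arbitrary). */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        char buf[4096];
        int fd;

        fd = open("/etc/hostname", O_RDONLY);
        if (fd < 0 || io_uring_queue_init(8, &ring, 0) < 0)
                return 1;

        /* one read SQE: fd, buffer, length, file offset 0 */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_read(sqe, fd, buf, sizeof(buf), 0);
        io_uring_submit(&ring);

        /* wait for its CQE; cqe->res is the byte count or -errno */
        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
                printf("read returned %d\n", cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }

        io_uring_queue_exit(&ring);
        close(fd);
        return 0;
}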
3690 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_write_prep() argument
3692 if (unlikely(!(req->file->f_mode & FMODE_WRITE))) in io_write_prep()
3694 return io_prep_rw(req, sqe, WRITE); in io_write_prep()
3697 static int io_write(struct io_kiocb *req, unsigned int issue_flags) in io_write() argument
3700 struct kiocb *kiocb = &req->rw.kiocb; in io_write()
3702 struct io_async_rw *rw = req->async_data; in io_write()
3714 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); in io_write()
3720 req->result = iov_iter_count(iter); in io_write()
3729 if (force_nonblock && !io_file_supports_nowait(req, WRITE)) in io_write()
3734 (req->flags & REQ_F_ISREG)) in io_write()
3737 ppos = io_kiocb_update_pos(req); in io_write()
3739 ret = rw_verify_area(WRITE, req->file, ppos, req->result); in io_write()
3750 if (req->flags & REQ_F_ISREG) { in io_write()
3751 sb_start_write(file_inode(req->file)->i_sb); in io_write()
3752 __sb_writers_release(file_inode(req->file)->i_sb, in io_write()
3757 if (req->file->f_op->write_iter) in io_write()
3758 ret2 = call_write_iter(req->file, kiocb, iter); in io_write()
3759 else if (req->file->f_op->write) in io_write()
3760 ret2 = loop_rw_iter(WRITE, req, iter); in io_write()
3764 if (req->flags & REQ_F_REISSUE) { in io_write()
3765 req->flags &= ~REQ_F_REISSUE; in io_write()
3776 if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT)) in io_write()
3780 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) in io_write()
3787 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); in io_write()
3790 kiocb_end_write(req); in io_write()
3802 static int io_renameat_prep(struct io_kiocb *req, in io_renameat_prep() argument
3805 struct io_rename *ren = &req->rename; in io_renameat_prep()
3808 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_renameat_prep()
3812 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_renameat_prep()
3831 req->flags |= REQ_F_NEED_CLEANUP; in io_renameat_prep()
3835 static int io_renameat(struct io_kiocb *req, unsigned int issue_flags) in io_renameat() argument
3837 struct io_rename *ren = &req->rename; in io_renameat()
3846 req->flags &= ~REQ_F_NEED_CLEANUP; in io_renameat()
3848 req_set_fail(req); in io_renameat()
3849 io_req_complete(req, ret); in io_renameat()
3853 static int io_unlinkat_prep(struct io_kiocb *req, in io_unlinkat_prep() argument
3856 struct io_unlink *un = &req->unlink; in io_unlinkat_prep()
3859 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_unlinkat_prep()
3864 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in io_unlinkat_prep()
3878 req->flags |= REQ_F_NEED_CLEANUP; in io_unlinkat_prep()
3882 static int io_unlinkat(struct io_kiocb *req, unsigned int issue_flags) in io_unlinkat() argument
3884 struct io_unlink *un = &req->unlink; in io_unlinkat()
3895 req->flags &= ~REQ_F_NEED_CLEANUP; in io_unlinkat()
3897 req_set_fail(req); in io_unlinkat()
3898 io_req_complete(req, ret); in io_unlinkat()
3902 static int io_shutdown_prep(struct io_kiocb *req, in io_shutdown_prep() argument
3906 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_shutdown_prep()
3912 req->shutdown.how = READ_ONCE(sqe->len); in io_shutdown_prep()
3919 static int io_shutdown(struct io_kiocb *req, unsigned int issue_flags) in io_shutdown() argument
3928 sock = sock_from_file(req->file, &ret); in io_shutdown()
3932 ret = __sys_shutdown_sock(sock, req->shutdown.how); in io_shutdown()
3934 req_set_fail(req); in io_shutdown()
3935 io_req_complete(req, ret); in io_shutdown()
3942 static int __io_splice_prep(struct io_kiocb *req, in __io_splice_prep() argument
3945 struct io_splice *sp = &req->splice; in __io_splice_prep()
3948 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_splice_prep()
3959 static int io_tee_prep(struct io_kiocb *req, in io_tee_prep() argument
3964 return __io_splice_prep(req, sqe); in io_tee_prep()
3967 static int io_tee(struct io_kiocb *req, unsigned int issue_flags) in io_tee() argument
3969 struct io_splice *sp = &req->splice; in io_tee()
3978 in = io_file_get(req->ctx, req, sp->splice_fd_in, in io_tee()
3992 req_set_fail(req); in io_tee()
3993 io_req_complete(req, ret); in io_tee()
3997 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_splice_prep() argument
3999 struct io_splice *sp = &req->splice; in io_splice_prep()
4003 return __io_splice_prep(req, sqe); in io_splice_prep()
4006 static int io_splice(struct io_kiocb *req, unsigned int issue_flags) in io_splice() argument
4008 struct io_splice *sp = &req->splice; in io_splice()
4018 in = io_file_get(req->ctx, req, sp->splice_fd_in, in io_splice()
4035 req_set_fail(req); in io_splice()
4036 io_req_complete(req, ret); in io_splice()
4043 static int io_nop(struct io_kiocb *req, unsigned int issue_flags) in io_nop() argument
4045 struct io_ring_ctx *ctx = req->ctx; in io_nop()
4050 __io_req_complete(req, issue_flags, 0, 0); in io_nop()
4054 static int io_fsync_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fsync_prep() argument
4056 struct io_ring_ctx *ctx = req->ctx; in io_fsync_prep()
4064 req->sync.flags = READ_ONCE(sqe->fsync_flags); in io_fsync_prep()
4065 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC)) in io_fsync_prep()
4068 req->sync.off = READ_ONCE(sqe->off); in io_fsync_prep()
4069 req->sync.len = READ_ONCE(sqe->len); in io_fsync_prep()
4073 static int io_fsync(struct io_kiocb *req, unsigned int issue_flags) in io_fsync() argument
4075 loff_t end = req->sync.off + req->sync.len; in io_fsync()
4082 ret = vfs_fsync_range(req->file, req->sync.off, in io_fsync()
4084 req->sync.flags & IORING_FSYNC_DATASYNC); in io_fsync()
4086 req_set_fail(req); in io_fsync()
4087 io_req_complete(req, ret); in io_fsync()
4091 static int io_fallocate_prep(struct io_kiocb *req, in io_fallocate_prep() argument
4097 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_fallocate_prep()
4100 req->sync.off = READ_ONCE(sqe->off); in io_fallocate_prep()
4101 req->sync.len = READ_ONCE(sqe->addr); in io_fallocate_prep()
4102 req->sync.mode = READ_ONCE(sqe->len); in io_fallocate_prep()
4106 static int io_fallocate(struct io_kiocb *req, unsigned int issue_flags) in io_fallocate() argument
4113 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off, in io_fallocate()
4114 req->sync.len); in io_fallocate()
4116 req_set_fail(req); in io_fallocate()
4118 fsnotify_modify(req->file); in io_fallocate()
4119 io_req_complete(req, ret); in io_fallocate()
4123 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in __io_openat_prep() argument
4128 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_openat_prep()
4132 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in __io_openat_prep()
4136 if (!(req->open.how.flags & O_PATH) && force_o_largefile()) in __io_openat_prep()
4137 req->open.how.flags |= O_LARGEFILE; in __io_openat_prep()
4139 req->open.dfd = READ_ONCE(sqe->fd); in __io_openat_prep()
4141 req->open.filename = getname(fname); in __io_openat_prep()
4142 if (IS_ERR(req->open.filename)) { in __io_openat_prep()
4143 ret = PTR_ERR(req->open.filename); in __io_openat_prep()
4144 req->open.filename = NULL; in __io_openat_prep()
4148 req->open.file_slot = READ_ONCE(sqe->file_index); in __io_openat_prep()
4149 if (req->open.file_slot && (req->open.how.flags & O_CLOEXEC)) in __io_openat_prep()
4152 req->open.nofile = rlimit(RLIMIT_NOFILE); in __io_openat_prep()
4153 req->flags |= REQ_F_NEED_CLEANUP; in __io_openat_prep()
4157 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat_prep() argument
4162 req->open.how = build_open_how(flags, mode); in io_openat_prep()
4163 return __io_openat_prep(req, sqe); in io_openat_prep()
4166 static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat2_prep() argument
4177 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how, in io_openat2_prep()
4182 return __io_openat_prep(req, sqe); in io_openat2_prep()
4185 static int io_openat2(struct io_kiocb *req, unsigned int issue_flags) in io_openat2() argument
4190 bool fixed = !!req->open.file_slot; in io_openat2()
4193 ret = build_open_flags(&req->open.how, &op); in io_openat2()
4197 resolve_nonblock = req->open.how.resolve & RESOLVE_CACHED; in io_openat2()
4203 if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE)) in io_openat2()
4210 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile); in io_openat2()
4215 file = do_filp_open(req->open.dfd, req->open.filename, &op); in io_openat2()
4240 ret = io_install_fixed_file(req, file, issue_flags, in io_openat2()
4241 req->open.file_slot - 1); in io_openat2()
4243 putname(req->open.filename); in io_openat2()
4244 req->flags &= ~REQ_F_NEED_CLEANUP; in io_openat2()
4246 req_set_fail(req); in io_openat2()
4247 __io_req_complete(req, issue_flags, ret, 0); in io_openat2()
4251 static int io_openat(struct io_kiocb *req, unsigned int issue_flags) in io_openat() argument
4253 return io_openat2(req, issue_flags); in io_openat()
4256 static int io_remove_buffers_prep(struct io_kiocb *req, in io_remove_buffers_prep() argument
4259 struct io_provide_buf *p = &req->pbuf; in io_remove_buffers_prep()
4303 static int io_remove_buffers(struct io_kiocb *req, unsigned int issue_flags) in io_remove_buffers() argument
4305 struct io_provide_buf *p = &req->pbuf; in io_remove_buffers()
4306 struct io_ring_ctx *ctx = req->ctx; in io_remove_buffers()
4320 req_set_fail(req); in io_remove_buffers()
4323 __io_req_complete(req, issue_flags, ret, 0); in io_remove_buffers()
4328 static int io_provide_buffers_prep(struct io_kiocb *req, in io_provide_buffers_prep() argument
4332 struct io_provide_buf *p = &req->pbuf; in io_provide_buffers_prep()
4391 static int io_provide_buffers(struct io_kiocb *req, unsigned int issue_flags) in io_provide_buffers() argument
4393 struct io_provide_buf *p = &req->pbuf; in io_provide_buffers()
4394 struct io_ring_ctx *ctx = req->ctx; in io_provide_buffers()
4413 req_set_fail(req); in io_provide_buffers()
4415 __io_req_complete(req, issue_flags, ret, 0); in io_provide_buffers()
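The provide/select-buffer helpers above have a user-space counterpart: publish a group of buffers with IORING_OP_PROVIDE_BUFFERS, then mark an SQE with IOSQE_BUFFER_SELECT so the kernel picks one of them. A hedged liburing fragment (group id, sizes, and the helper name do_buffer_select_recv are arbitrary illustrations; it expects an already initialized ring and a connected socket, and omits error handling):

#include <liburing.h>
#include <stdio.h>

#define BGID   7                       /* arbitrary buffer group id */
#define NBUFS  4
#define BUFSZ  1024

static char pool[NBUFS][BUFSZ];

/* Receive into a kernel-selected buffer from group BGID. */
static int do_buffer_select_recv(struct io_uring *ring, int sockfd)
{
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int res, bid = -1;

        /* hand NBUFS buffers of BUFSZ bytes to the kernel, ids starting at 0 */
        sqe = io_uring_get_sqe(ring);
        io_uring_prep_provide_buffers(sqe, pool, BUFSZ, NBUFS, BGID, 0);
        io_uring_submit(ring);
        io_uring_wait_cqe(ring, &cqe);
        io_uring_cqe_seen(ring, cqe);

        /* recv with no buffer of our own: the kernel selects one from BGID */
        sqe = io_uring_get_sqe(ring);
        io_uring_prep_recv(sqe, sockfd, NULL, BUFSZ, 0);
        sqe->flags |= IOSQE_BUFFER_SELECT;
        sqe->buf_group = BGID;
        io_uring_submit(ring);
        io_uring_wait_cqe(ring, &cqe);

        res = cqe->res;
        if (cqe->flags & IORING_CQE_F_BUFFER)   /* which buffer was picked */
                bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
        io_uring_cqe_seen(ring, cqe);

        printf("recv: %d bytes into buffer id %d\n", res, bid);
        return res;
}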
4420 static int io_epoll_ctl_prep(struct io_kiocb *req, in io_epoll_ctl_prep() argument
4426 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_epoll_ctl_prep()
4429 req->epoll.epfd = READ_ONCE(sqe->fd); in io_epoll_ctl_prep()
4430 req->epoll.op = READ_ONCE(sqe->len); in io_epoll_ctl_prep()
4431 req->epoll.fd = READ_ONCE(sqe->off); in io_epoll_ctl_prep()
4433 if (ep_op_has_event(req->epoll.op)) { in io_epoll_ctl_prep()
4437 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev))) in io_epoll_ctl_prep()
4447 static int io_epoll_ctl(struct io_kiocb *req, unsigned int issue_flags) in io_epoll_ctl() argument
4450 struct io_epoll *ie = &req->epoll; in io_epoll_ctl()
4459 req_set_fail(req); in io_epoll_ctl()
4460 __io_req_complete(req, issue_flags, ret, 0); in io_epoll_ctl()
4467 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_madvise_prep() argument
4472 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_madvise_prep()
4475 req->madvise.addr = READ_ONCE(sqe->addr); in io_madvise_prep()
4476 req->madvise.len = READ_ONCE(sqe->len); in io_madvise_prep()
4477 req->madvise.advice = READ_ONCE(sqe->fadvise_advice); in io_madvise_prep()
4484 static int io_madvise(struct io_kiocb *req, unsigned int issue_flags) in io_madvise() argument
4487 struct io_madvise *ma = &req->madvise; in io_madvise()
4495 req_set_fail(req); in io_madvise()
4496 io_req_complete(req, ret); in io_madvise()
4503 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fadvise_prep() argument
4507 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_fadvise_prep()
4510 req->fadvise.offset = READ_ONCE(sqe->off); in io_fadvise_prep()
4511 req->fadvise.len = READ_ONCE(sqe->len); in io_fadvise_prep()
4512 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice); in io_fadvise_prep()
4516 static int io_fadvise(struct io_kiocb *req, unsigned int issue_flags) in io_fadvise() argument
4518 struct io_fadvise *fa = &req->fadvise; in io_fadvise()
4532 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice); in io_fadvise()
4534 req_set_fail(req); in io_fadvise()
4535 __io_req_complete(req, issue_flags, ret, 0); in io_fadvise()
4539 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_statx_prep() argument
4541 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_statx_prep()
4545 if (req->flags & REQ_F_FIXED_FILE) in io_statx_prep()
4548 req->statx.dfd = READ_ONCE(sqe->fd); in io_statx_prep()
4549 req->statx.mask = READ_ONCE(sqe->len); in io_statx_prep()
4550 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_statx_prep()
4551 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in io_statx_prep()
4552 req->statx.flags = READ_ONCE(sqe->statx_flags); in io_statx_prep()
4557 static int io_statx(struct io_kiocb *req, unsigned int issue_flags) in io_statx() argument
4559 struct io_statx *ctx = &req->statx; in io_statx()
4569 req_set_fail(req); in io_statx()
4570 io_req_complete(req, ret); in io_statx()
4574 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_close_prep() argument
4576 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_close_prep()
4581 if (req->flags & REQ_F_FIXED_FILE) in io_close_prep()
4584 req->close.fd = READ_ONCE(sqe->fd); in io_close_prep()
4585 req->close.file_slot = READ_ONCE(sqe->file_index); in io_close_prep()
4586 if (req->close.file_slot && req->close.fd) in io_close_prep()
4592 static int io_close(struct io_kiocb *req, unsigned int issue_flags) in io_close() argument
4595 struct io_close *close = &req->close; in io_close()
4600 if (req->close.file_slot) { in io_close()
4601 ret = io_close_fixed(req, issue_flags); in io_close()
4636 req_set_fail(req); in io_close()
4639 __io_req_complete(req, issue_flags, ret, 0); in io_close()
4643 static int io_sfr_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sfr_prep() argument
4645 struct io_ring_ctx *ctx = req->ctx; in io_sfr_prep()
4653 req->sync.off = READ_ONCE(sqe->off); in io_sfr_prep()
4654 req->sync.len = READ_ONCE(sqe->len); in io_sfr_prep()
4655 req->sync.flags = READ_ONCE(sqe->sync_range_flags); in io_sfr_prep()
4659 static int io_sync_file_range(struct io_kiocb *req, unsigned int issue_flags) in io_sync_file_range() argument
4667 ret = sync_file_range(req->file, req->sync.off, req->sync.len, in io_sync_file_range()
4668 req->sync.flags); in io_sync_file_range()
4670 req_set_fail(req); in io_sync_file_range()
4671 io_req_complete(req, ret); in io_sync_file_range()
4683 static int io_setup_async_msg(struct io_kiocb *req, in io_setup_async_msg() argument
4686 struct io_async_msghdr *async_msg = req->async_data; in io_setup_async_msg()
4690 if (io_alloc_async_data(req)) { in io_setup_async_msg()
4694 async_msg = req->async_data; in io_setup_async_msg()
4695 req->flags |= REQ_F_NEED_CLEANUP; in io_setup_async_msg()
4708 static int io_sendmsg_copy_hdr(struct io_kiocb *req, in io_sendmsg_copy_hdr() argument
4713 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg, in io_sendmsg_copy_hdr()
4714 req->sr_msg.msg_flags, &iomsg->free_iov); in io_sendmsg_copy_hdr()
4717 static int io_sendmsg_prep_async(struct io_kiocb *req) in io_sendmsg_prep_async() argument
4721 ret = io_sendmsg_copy_hdr(req, req->async_data); in io_sendmsg_prep_async()
4723 req->flags |= REQ_F_NEED_CLEANUP; in io_sendmsg_prep_async()
4727 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sendmsg_prep() argument
4729 struct io_sr_msg *sr = &req->sr_msg; in io_sendmsg_prep()
4731 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_sendmsg_prep()
4742 req->flags |= REQ_F_NOWAIT; in io_sendmsg_prep()
4745 if (req->ctx->compat) in io_sendmsg_prep()
4752 static int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags) in io_sendmsg() argument
4755 struct io_sr_msg *sr = &req->sr_msg; in io_sendmsg()
4761 sock = sock_from_file(req->file, &ret); in io_sendmsg()
4765 kmsg = req->async_data; in io_sendmsg()
4767 ret = io_sendmsg_copy_hdr(req, &iomsg); in io_sendmsg()
4773 flags = req->sr_msg.msg_flags; in io_sendmsg()
4783 return io_setup_async_msg(req, kmsg); in io_sendmsg()
4788 req->flags |= REQ_F_PARTIAL_IO; in io_sendmsg()
4789 return io_setup_async_msg(req, kmsg); in io_sendmsg()
4791 req_set_fail(req); in io_sendmsg()
4796 req->flags &= ~REQ_F_NEED_CLEANUP; in io_sendmsg()
4801 __io_req_complete(req, issue_flags, ret, 0); in io_sendmsg()
4805 static int io_send(struct io_kiocb *req, unsigned int issue_flags) in io_send() argument
4807 struct io_sr_msg *sr = &req->sr_msg; in io_send()
4815 sock = sock_from_file(req->file, &ret); in io_send()
4828 flags = req->sr_msg.msg_flags; in io_send()
4845 req->flags |= REQ_F_PARTIAL_IO; in io_send()
4848 req_set_fail(req); in io_send()
4854 __io_req_complete(req, issue_flags, ret, 0); in io_send()
4858 static int __io_recvmsg_copy_hdr(struct io_kiocb *req, in __io_recvmsg_copy_hdr() argument
4861 struct io_sr_msg *sr = &req->sr_msg; in __io_recvmsg_copy_hdr()
4871 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_recvmsg_copy_hdr()
4891 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, in __io_compat_recvmsg_copy_hdr() argument
4894 struct io_sr_msg *sr = &req->sr_msg; in __io_compat_recvmsg_copy_hdr()
4906 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_compat_recvmsg_copy_hdr()
4932 static int io_recvmsg_copy_hdr(struct io_kiocb *req, in io_recvmsg_copy_hdr() argument
4938 if (req->ctx->compat) in io_recvmsg_copy_hdr()
4939 return __io_compat_recvmsg_copy_hdr(req, iomsg); in io_recvmsg_copy_hdr()
4942 return __io_recvmsg_copy_hdr(req, iomsg); in io_recvmsg_copy_hdr()
4945 static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req, in io_recv_buffer_select() argument
4948 struct io_sr_msg *sr = &req->sr_msg; in io_recv_buffer_select()
4951 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock); in io_recv_buffer_select()
4956 req->flags |= REQ_F_BUFFER_SELECTED; in io_recv_buffer_select()
4960 static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req) in io_put_recv_kbuf() argument
4962 return io_put_kbuf(req, req->sr_msg.kbuf); in io_put_recv_kbuf()
4965 static int io_recvmsg_prep_async(struct io_kiocb *req) in io_recvmsg_prep_async() argument
4969 ret = io_recvmsg_copy_hdr(req, req->async_data); in io_recvmsg_prep_async()
4971 req->flags |= REQ_F_NEED_CLEANUP; in io_recvmsg_prep_async()
4975 static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_recvmsg_prep() argument
4977 struct io_sr_msg *sr = &req->sr_msg; in io_recvmsg_prep()
4979 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_recvmsg_prep()
4991 req->flags |= REQ_F_NOWAIT; in io_recvmsg_prep()
4994 if (req->ctx->compat) in io_recvmsg_prep()
5001 static int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags) in io_recvmsg() argument
5004 struct io_sr_msg *sr = &req->sr_msg; in io_recvmsg()
5012 sock = sock_from_file(req->file, &ret); in io_recvmsg()
5016 kmsg = req->async_data; in io_recvmsg()
5018 ret = io_recvmsg_copy_hdr(req, &iomsg); in io_recvmsg()
5024 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recvmsg()
5025 kbuf = io_recv_buffer_select(req, !force_nonblock); in io_recvmsg()
5029 kmsg->fast_iov[0].iov_len = req->sr_msg.len; in io_recvmsg()
5031 1, req->sr_msg.len); in io_recvmsg()
5034 flags = req->sr_msg.msg_flags; in io_recvmsg()
5040 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg, in io_recvmsg()
5044 return io_setup_async_msg(req, kmsg); in io_recvmsg()
5049 req->flags |= REQ_F_PARTIAL_IO; in io_recvmsg()
5050 return io_setup_async_msg(req, kmsg); in io_recvmsg()
5052 req_set_fail(req); in io_recvmsg()
5054 req_set_fail(req); in io_recvmsg()
5057 if (req->flags & REQ_F_BUFFER_SELECTED) in io_recvmsg()
5058 cflags = io_put_recv_kbuf(req); in io_recvmsg()
5062 req->flags &= ~REQ_F_NEED_CLEANUP; in io_recvmsg()
5067 __io_req_complete(req, issue_flags, ret, cflags); in io_recvmsg()
5071 static int io_recv(struct io_kiocb *req, unsigned int issue_flags) in io_recv() argument
5074 struct io_sr_msg *sr = &req->sr_msg; in io_recv()
5084 sock = sock_from_file(req->file, &ret); in io_recv()
5088 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recv()
5089 kbuf = io_recv_buffer_select(req, !force_nonblock); in io_recv()
5106 flags = req->sr_msg.msg_flags; in io_recv()
5122 req->flags |= REQ_F_PARTIAL_IO; in io_recv()
5125 req_set_fail(req); in io_recv()
5128 req_set_fail(req); in io_recv()
5130 if (req->flags & REQ_F_BUFFER_SELECTED) in io_recv()
5131 cflags = io_put_recv_kbuf(req); in io_recv()
5136 __io_req_complete(req, issue_flags, ret, cflags); in io_recv()
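Editor's note: the io_recvmsg()/io_recv() matches above centre on REQ_F_BUFFER_SELECT: io_recv_buffer_select() picks a provided kernel buffer and io_put_recv_kbuf() hands its id back through the completion flags passed to __io_req_complete(). A minimal userspace sketch of that round trip with liburing follows; the socketpair, group id 7, and the buffer count/size are illustrative choices, not taken from these matches.

	/* Sketch: IORING_OP_RECV with a provided buffer group (IOSQE_BUFFER_SELECT).
	 * Assumes liburing; group id 7, 4 buffers of 256 bytes are arbitrary choices. */
	#include <liburing.h>
	#include <sys/socket.h>
	#include <stdio.h>
	#include <unistd.h>

	#define BGID	7
	#define NBUFS	4
	#define BUFSZ	256

	int main(void)
	{
		static char bufs[NBUFS][BUFSZ];
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		int sv[2];

		if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) ||
		    io_uring_queue_init(8, &ring, 0))
			return 1;

		/* register NBUFS buffers of BUFSZ bytes under group BGID, ids 0..NBUFS-1 */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_provide_buffers(sqe, bufs, BUFSZ, NBUFS, BGID, 0);
		io_uring_submit(&ring);
		io_uring_wait_cqe(&ring, &cqe);
		io_uring_cqe_seen(&ring, cqe);

		/* recv without passing a buffer: the kernel selects one from group BGID */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_recv(sqe, sv[0], NULL, BUFSZ, 0);
		sqe->flags |= IOSQE_BUFFER_SELECT;
		sqe->buf_group = BGID;
		io_uring_submit(&ring);

		if (write(sv[1], "hello", 5) != 5)
			return 1;

		io_uring_wait_cqe(&ring, &cqe);
		if (cqe->res > 0 && (cqe->flags & IORING_CQE_F_BUFFER)) {
			int bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;

			printf("%d bytes in buffer %d: %.*s\n",
			       cqe->res, bid, cqe->res, bufs[bid]);
		}
		io_uring_cqe_seen(&ring, cqe);
		io_uring_queue_exit(&ring);
		return 0;
	}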
5140 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_accept_prep() argument
5142 struct io_accept *accept = &req->accept; in io_accept_prep()
5144 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_accept_prep()
5164 static int io_accept(struct io_kiocb *req, unsigned int issue_flags) in io_accept() argument
5166 struct io_accept *accept = &req->accept; in io_accept()
5178 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len, in io_accept()
5186 req->flags |= REQ_F_PARTIAL_IO; in io_accept()
5191 req_set_fail(req); in io_accept()
5196 ret = io_install_fixed_file(req, file, issue_flags, in io_accept()
5199 __io_req_complete(req, issue_flags, ret, 0); in io_accept()
5203 static int io_connect_prep_async(struct io_kiocb *req) in io_connect_prep_async() argument
5205 struct io_async_connect *io = req->async_data; in io_connect_prep_async()
5206 struct io_connect *conn = &req->connect; in io_connect_prep_async()
5211 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_connect_prep() argument
5213 struct io_connect *conn = &req->connect; in io_connect_prep()
5215 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_connect_prep()
5226 static int io_connect(struct io_kiocb *req, unsigned int issue_flags) in io_connect() argument
5233 if (req->async_data) { in io_connect()
5234 io = req->async_data; in io_connect()
5236 ret = move_addr_to_kernel(req->connect.addr, in io_connect()
5237 req->connect.addr_len, in io_connect()
5246 ret = __sys_connect_file(req->file, &io->address, in io_connect()
5247 req->connect.addr_len, file_flags); in io_connect()
5249 if (req->async_data) in io_connect()
5251 if (io_alloc_async_data(req)) { in io_connect()
5255 memcpy(req->async_data, &__io, sizeof(__io)); in io_connect()
5262 req_set_fail(req); in io_connect()
5263 __io_req_complete(req, issue_flags, ret, 0); in io_connect()
5268 static int io_##op(struct io_kiocb *req, unsigned int issue_flags) \
5275 static int io_##op##_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) \
5282 static int io_##op##_prep_async(struct io_kiocb *req) \
5297 struct io_kiocb *req; member
5312 static bool io_poll_get_ownership_slowpath(struct io_kiocb *req) in io_poll_get_ownership_slowpath() argument
5321 v = atomic_fetch_or(IO_POLL_RETRY_FLAG, &req->poll_refs); in io_poll_get_ownership_slowpath()
5324 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); in io_poll_get_ownership_slowpath()
5333 static inline bool io_poll_get_ownership(struct io_kiocb *req) in io_poll_get_ownership() argument
5335 if (unlikely(atomic_read(&req->poll_refs) >= IO_POLL_REF_BIAS)) in io_poll_get_ownership()
5336 return io_poll_get_ownership_slowpath(req); in io_poll_get_ownership()
5337 return !(atomic_fetch_inc(&req->poll_refs) & IO_POLL_REF_MASK); in io_poll_get_ownership()
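Editor's note: io_poll_get_ownership()/io_poll_get_ownership_slowpath() above gate poll processing on an atomic reference count: the caller that bumps poll_refs from zero owns the work, later wakeups only add pending references. A small single-threaded C11 sketch of that protocol; REF_MASK and CANCEL_FLAG are simplified stand-ins for the kernel's IO_POLL_* constants, and the retry/bias slowpath is omitted.

	/* Sketch of the poll_refs ownership protocol with C11 atomics. The mask and
	 * cancel bit are illustrative, not the kernel's IO_POLL_* values. */
	#include <stdatomic.h>
	#include <stdio.h>

	#define REF_MASK	0x0000ffffu	/* low bits count pending wakeups */
	#define CANCEL_FLAG	0x80000000u	/* request was cancelled */

	static atomic_uint poll_refs;

	/* Whoever bumps the count from zero owns processing; others just queue work. */
	static int get_ownership(void)
	{
		return !(atomic_fetch_add(&poll_refs, 1) & REF_MASK);
	}

	static void mark_cancelled(void)
	{
		atomic_fetch_or(&poll_refs, CANCEL_FLAG);
	}

	/* Owner loop: keep going while wakeups raced in during processing. */
	static void process_events(void)
	{
		unsigned int v, took;

		do {
			v = atomic_load(&poll_refs);
			if (v & CANCEL_FLAG) {
				puts("cancelled");
				return;
			}
			puts("handling poll events");
			took = v & REF_MASK;
			/* drop the refs we consumed; a nonzero remainder means retry */
		} while ((atomic_fetch_sub(&poll_refs, took) - took) & REF_MASK);
	}

	int main(void)
	{
		if (get_ownership())
			puts("first waker owns the request");
		if (!get_ownership())
			puts("second waker queues behind the owner");
		process_events();

		mark_cancelled();
		if (get_ownership())
			process_events();	/* prints "cancelled" */
		return 0;
	}

The point of the scheme, as the io_poll_wake() and io_poll_check_events() matches below suggest, is that wakeups can fire from any waitqueue context: only the winner of the increment does the processing, and the final subtraction loop ensures wakeups that arrived in the meantime are not lost.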
5340 static void io_poll_mark_cancelled(struct io_kiocb *req) in io_poll_mark_cancelled() argument
5342 atomic_or(IO_POLL_CANCEL_FLAG, &req->poll_refs); in io_poll_mark_cancelled()
5345 static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req) in io_poll_get_double() argument
5348 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_double()
5349 return req->async_data; in io_poll_get_double()
5350 return req->apoll->double_poll; in io_poll_get_double()
5353 static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req) in io_poll_get_single() argument
5355 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_single()
5356 return &req->poll; in io_poll_get_single()
5357 return &req->apoll->poll; in io_poll_get_single()
5360 static void io_poll_req_insert(struct io_kiocb *req) in io_poll_req_insert() argument
5362 struct io_ring_ctx *ctx = req->ctx; in io_poll_req_insert()
5365 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)]; in io_poll_req_insert()
5366 hlist_add_head(&req->hash_node, list); in io_poll_req_insert()
5392 static void io_poll_remove_entries(struct io_kiocb *req) in io_poll_remove_entries() argument
5394 struct io_poll_iocb *poll = io_poll_get_single(req); in io_poll_remove_entries()
5395 struct io_poll_iocb *poll_double = io_poll_get_double(req); in io_poll_remove_entries()
5425 * the request, then the mask is stored in req->result.
5427 static int io_poll_check_events(struct io_kiocb *req) in io_poll_check_events() argument
5429 struct io_ring_ctx *ctx = req->ctx; in io_poll_check_events()
5430 struct io_poll_iocb *poll = io_poll_get_single(req); in io_poll_check_events()
5433 /* req->task == current here, checking PF_EXITING is safe */ in io_poll_check_events()
5434 if (unlikely(req->task->flags & PF_EXITING)) in io_poll_check_events()
5435 io_poll_mark_cancelled(req); in io_poll_check_events()
5438 v = atomic_read(&req->poll_refs); in io_poll_check_events()
5451 req->result = 0; in io_poll_check_events()
5453 req->result = 0; in io_poll_check_events()
5459 atomic_andnot(IO_POLL_RETRY_FLAG, &req->poll_refs); in io_poll_check_events()
5463 if (!req->result) { in io_poll_check_events()
5466 req->result = vfs_poll(req->file, &pt) & poll->events; in io_poll_check_events()
5470 if (req->result && !(poll->events & EPOLLONESHOT)) { in io_poll_check_events()
5471 __poll_t mask = mangle_poll(req->result & poll->events); in io_poll_check_events()
5475 filled = io_fill_cqe_aux(ctx, req->user_data, mask, in io_poll_check_events()
5482 } else if (req->result) { in io_poll_check_events()
5487 req->result = 0; in io_poll_check_events()
5493 } while (atomic_sub_return(v & IO_POLL_REF_MASK, &req->poll_refs) & in io_poll_check_events()
5499 static void io_poll_task_func(struct io_kiocb *req, bool *locked) in io_poll_task_func() argument
5501 struct io_ring_ctx *ctx = req->ctx; in io_poll_task_func()
5504 ret = io_poll_check_events(req); in io_poll_task_func()
5509 req->result = mangle_poll(req->result & req->poll.events); in io_poll_task_func()
5511 req->result = ret; in io_poll_task_func()
5512 req_set_fail(req); in io_poll_task_func()
5515 io_poll_remove_entries(req); in io_poll_task_func()
5517 hash_del(&req->hash_node); in io_poll_task_func()
5519 io_req_complete_post(req, req->result, 0); in io_poll_task_func()
5522 static void io_apoll_task_func(struct io_kiocb *req, bool *locked) in io_apoll_task_func() argument
5524 struct io_ring_ctx *ctx = req->ctx; in io_apoll_task_func()
5527 ret = io_poll_check_events(req); in io_apoll_task_func()
5531 io_poll_remove_entries(req); in io_apoll_task_func()
5533 hash_del(&req->hash_node); in io_apoll_task_func()
5537 io_req_task_submit(req, locked); in io_apoll_task_func()
5539 io_req_complete_failed(req, ret); in io_apoll_task_func()
5542 static void __io_poll_execute(struct io_kiocb *req, int mask) in __io_poll_execute() argument
5544 req->result = mask; in __io_poll_execute()
5545 if (req->opcode == IORING_OP_POLL_ADD) in __io_poll_execute()
5546 req->io_task_work.func = io_poll_task_func; in __io_poll_execute()
5548 req->io_task_work.func = io_apoll_task_func; in __io_poll_execute()
5550 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask); in __io_poll_execute()
5551 io_req_task_work_add(req); in __io_poll_execute()
5554 static inline void io_poll_execute(struct io_kiocb *req, int res) in io_poll_execute() argument
5556 if (io_poll_get_ownership(req)) in io_poll_execute()
5557 __io_poll_execute(req, res); in io_poll_execute()
5560 static void io_poll_cancel_req(struct io_kiocb *req) in io_poll_cancel_req() argument
5562 io_poll_mark_cancelled(req); in io_poll_cancel_req()
5564 io_poll_execute(req, 0); in io_poll_cancel_req()
5570 struct io_kiocb *req = wait->private; in io_poll_wake() local
5576 io_poll_mark_cancelled(req); in io_poll_wake()
5578 io_poll_execute(req, 0); in io_poll_wake()
5591 * as req->head is NULL'ed out, the request can be in io_poll_wake()
5603 if (io_poll_get_ownership(req)) { in io_poll_wake()
5612 __io_poll_execute(req, mask); in io_poll_wake()
5621 struct io_kiocb *req = pt->req; in __io_queue_proc() local
5653 poll->wait.private = req; in __io_queue_proc()
5666 __io_queue_proc(&pt->req->poll, pt, head, in io_poll_queue_proc()
5667 (struct io_poll_iocb **) &pt->req->async_data); in io_poll_queue_proc()
5670 static int __io_arm_poll_handler(struct io_kiocb *req, in __io_arm_poll_handler() argument
5674 struct io_ring_ctx *ctx = req->ctx; in __io_arm_poll_handler()
5676 INIT_HLIST_NODE(&req->hash_node); in __io_arm_poll_handler()
5678 poll->file = req->file; in __io_arm_poll_handler()
5679 poll->wait.private = req; in __io_arm_poll_handler()
5682 ipt->req = req; in __io_arm_poll_handler()
5690 atomic_set(&req->poll_refs, 1); in __io_arm_poll_handler()
5691 mask = vfs_poll(req->file, &ipt->pt) & poll->events; in __io_arm_poll_handler()
5694 io_poll_remove_entries(req); in __io_arm_poll_handler()
5695 /* no one else has access to the req, forget about the ref */ in __io_arm_poll_handler()
5699 io_poll_remove_entries(req); in __io_arm_poll_handler()
5706 io_poll_req_insert(req); in __io_arm_poll_handler()
5715 __io_poll_execute(req, mask); in __io_arm_poll_handler()
5723 if (atomic_cmpxchg(&req->poll_refs, 1, 0) != 1) in __io_arm_poll_handler()
5724 __io_poll_execute(req, 0); in __io_arm_poll_handler()
5732 struct async_poll *apoll = pt->req->apoll; in io_async_queue_proc()
5743 static int io_arm_poll_handler(struct io_kiocb *req) in io_arm_poll_handler() argument
5745 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_arm_poll_handler()
5746 struct io_ring_ctx *ctx = req->ctx; in io_arm_poll_handler()
5752 if (!req->file || !file_can_poll(req->file)) in io_arm_poll_handler()
5754 if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED) in io_arm_poll_handler()
5763 if ((req->opcode == IORING_OP_RECVMSG) && in io_arm_poll_handler()
5764 (req->sr_msg.msg_flags & MSG_ERRQUEUE)) in io_arm_poll_handler()
5770 if (req->flags & REQ_F_POLLED) { in io_arm_poll_handler()
5771 apoll = req->apoll; in io_arm_poll_handler()
5779 req->apoll = apoll; in io_arm_poll_handler()
5780 req->flags |= REQ_F_POLLED; in io_arm_poll_handler()
5783 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask); in io_arm_poll_handler()
5787 trace_io_uring_poll_arm(ctx, req, req->opcode, req->user_data, in io_arm_poll_handler()
5799 struct io_kiocb *req; in io_poll_remove_all() local
5808 hlist_for_each_entry_safe(req, tmp, list, hash_node) { in io_poll_remove_all()
5809 if (io_match_task_safe(req, tsk, cancel_all)) { in io_poll_remove_all()
5810 hlist_del_init(&req->hash_node); in io_poll_remove_all()
5811 io_poll_cancel_req(req); in io_poll_remove_all()
5825 struct io_kiocb *req; in io_poll_find() local
5828 hlist_for_each_entry(req, list, hash_node) { in io_poll_find()
5829 if (sqe_addr != req->user_data) in io_poll_find()
5831 if (poll_only && req->opcode != IORING_OP_POLL_ADD) in io_poll_find()
5833 return req; in io_poll_find()
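Editor's note: io_poll_req_insert() above hashes req->user_data into ctx->cancel_hash, and io_poll_find() walks the same bucket to locate a request for cancellation or update. A self-contained sketch of that bucket scheme; the 16-bucket table and the multiplicative hash are arbitrary stand-ins for hash_long() and cancel_hash_bits.

	/* Sketch of a user_data-keyed cancel hash; table size and hash function
	 * are stand-ins, not the kernel's. */
	#include <stdint.h>
	#include <stdio.h>

	#define NBUCKETS 16

	struct request {
		uint64_t user_data;
		struct request *next;		/* bucket chain, like the hlist node */
	};

	static struct request *cancel_hash[NBUCKETS];

	static unsigned int hash_bucket(uint64_t user_data)
	{
		return (unsigned int)((user_data * 0x9e3779b97f4a7c15ull) >> 60);
	}

	static void insert_req(struct request *req)
	{
		unsigned int b = hash_bucket(req->user_data);

		req->next = cancel_hash[b];
		cancel_hash[b] = req;
	}

	static struct request *find_req(uint64_t user_data)
	{
		struct request *req = cancel_hash[hash_bucket(user_data)];

		for (; req; req = req->next)
			if (req->user_data == user_data)
				return req;
		return NULL;
	}

	int main(void)
	{
		struct request a = { .user_data = 0x1234 };

		insert_req(&a);
		printf("found: %s\n", find_req(0x1234) ? "yes" : "no");
		return 0;
	}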
5838 static bool io_poll_disarm(struct io_kiocb *req) in io_poll_disarm() argument
5841 if (!io_poll_get_ownership(req)) in io_poll_disarm()
5843 io_poll_remove_entries(req); in io_poll_disarm()
5844 hash_del(&req->hash_node); in io_poll_disarm()
5852 struct io_kiocb *req = io_poll_find(ctx, sqe_addr, poll_only); in io_poll_cancel() local
5854 if (!req) in io_poll_cancel()
5856 io_poll_cancel_req(req); in io_poll_cancel()
5874 static int io_poll_update_prep(struct io_kiocb *req, in io_poll_update_prep() argument
5877 struct io_poll_update *upd = &req->poll_update; in io_poll_update_prep()
5880 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_poll_update_prep()
5907 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_add_prep() argument
5909 struct io_poll_iocb *poll = &req->poll; in io_poll_add_prep()
5912 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_poll_add_prep()
5920 io_req_set_refcount(req); in io_poll_add_prep()
5925 static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags) in io_poll_add() argument
5927 struct io_poll_iocb *poll = &req->poll; in io_poll_add()
5933 ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events); in io_poll_add()
5935 req_set_fail(req); in io_poll_add()
5938 __io_req_complete(req, issue_flags, ret, 0); in io_poll_add()
5942 static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags) in io_poll_update() argument
5944 struct io_ring_ctx *ctx = req->ctx; in io_poll_update()
5951 preq = io_poll_find(ctx, req->poll_update.old_user_data, true); in io_poll_update()
5959 if (req->poll_update.update_events || req->poll_update.update_user_data) { in io_poll_update()
5961 if (req->poll_update.update_events) { in io_poll_update()
5963 preq->poll.events |= req->poll_update.events & 0xffff; in io_poll_update()
5966 if (req->poll_update.update_user_data) in io_poll_update()
5967 preq->user_data = req->poll_update.new_user_data; in io_poll_update()
5978 req_set_fail(req); in io_poll_update()
5980 io_req_complete(req, ret); in io_poll_update()
5985 static void io_req_task_timeout(struct io_kiocb *req, bool *locked) in io_req_task_timeout() argument
5987 req_set_fail(req); in io_req_task_timeout()
5988 io_req_complete_post(req, -ETIME, 0); in io_req_task_timeout()
5995 struct io_kiocb *req = data->req; in io_timeout_fn() local
5996 struct io_ring_ctx *ctx = req->ctx; in io_timeout_fn()
6000 list_del_init(&req->timeout.list); in io_timeout_fn()
6001 atomic_set(&req->ctx->cq_timeouts, in io_timeout_fn()
6002 atomic_read(&req->ctx->cq_timeouts) + 1); in io_timeout_fn()
6005 req->io_task_work.func = io_req_task_timeout; in io_timeout_fn()
6006 io_req_task_work_add(req); in io_timeout_fn()
6015 struct io_kiocb *req; in io_timeout_extract() local
6018 list_for_each_entry(req, &ctx->timeout_list, timeout.list) { in io_timeout_extract()
6019 found = user_data == req->user_data; in io_timeout_extract()
6026 io = req->async_data; in io_timeout_extract()
6029 list_del_init(&req->timeout.list); in io_timeout_extract()
6030 return req; in io_timeout_extract()
6037 struct io_kiocb *req = io_timeout_extract(ctx, user_data); in io_timeout_cancel() local
6039 if (IS_ERR(req)) in io_timeout_cancel()
6040 return PTR_ERR(req); in io_timeout_cancel()
6042 req_set_fail(req); in io_timeout_cancel()
6043 io_fill_cqe_req(req, -ECANCELED, 0); in io_timeout_cancel()
6044 io_put_req_deferred(req); in io_timeout_cancel()
6069 struct io_kiocb *req; in io_linked_timeout_update() local
6072 list_for_each_entry(req, &ctx->ltimeout_list, timeout.list) { in io_linked_timeout_update()
6073 found = user_data == req->user_data; in io_linked_timeout_update()
6080 io = req->async_data; in io_linked_timeout_update()
6093 struct io_kiocb *req = io_timeout_extract(ctx, user_data); in io_timeout_update() local
6096 if (IS_ERR(req)) in io_timeout_update()
6097 return PTR_ERR(req); in io_timeout_update()
6099 req->timeout.off = 0; /* noseq */ in io_timeout_update()
6100 data = req->async_data; in io_timeout_update()
6101 list_add_tail(&req->timeout.list, &ctx->timeout_list); in io_timeout_update()
6108 static int io_timeout_remove_prep(struct io_kiocb *req, in io_timeout_remove_prep() argument
6111 struct io_timeout_rem *tr = &req->timeout_rem; in io_timeout_remove_prep()
6113 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_timeout_remove_prep()
6115 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_timeout_remove_prep()
6149 static int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags) in io_timeout_remove() argument
6151 struct io_timeout_rem *tr = &req->timeout_rem; in io_timeout_remove()
6152 struct io_ring_ctx *ctx = req->ctx; in io_timeout_remove()
6155 if (!(req->timeout_rem.flags & IORING_TIMEOUT_UPDATE)) { in io_timeout_remove()
6173 req_set_fail(req); in io_timeout_remove()
6174 io_req_complete_post(req, ret, 0); in io_timeout_remove()
6178 static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_timeout_prep() argument
6185 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_timeout_prep()
6199 INIT_LIST_HEAD(&req->timeout.list); in io_timeout_prep()
6200 req->timeout.off = off; in io_timeout_prep()
6201 if (unlikely(off && !req->ctx->off_timeout_used)) in io_timeout_prep()
6202 req->ctx->off_timeout_used = true; in io_timeout_prep()
6204 if (!req->async_data && io_alloc_async_data(req)) in io_timeout_prep()
6207 data = req->async_data; in io_timeout_prep()
6208 data->req = req; in io_timeout_prep()
6214 INIT_LIST_HEAD(&req->timeout.list); in io_timeout_prep()
6219 struct io_submit_link *link = &req->ctx->submit_state.link; in io_timeout_prep()
6225 req->timeout.head = link->last; in io_timeout_prep()
6231 static int io_timeout(struct io_kiocb *req, unsigned int issue_flags) in io_timeout() argument
6233 struct io_ring_ctx *ctx = req->ctx; in io_timeout()
6234 struct io_timeout_data *data = req->async_data; in io_timeout()
6236 u32 tail, off = req->timeout.off; in io_timeout()
6245 if (io_is_timeout_noseq(req)) { in io_timeout()
6251 req->timeout.target_seq = tail + off; in io_timeout()
6274 list_add(&req->timeout.list, entry); in io_timeout()
6288 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_cb() local
6291 return req->ctx == cd->ctx && req->user_data == cd->user_data; in io_cancel_cb()
6320 static int io_try_cancel_userdata(struct io_kiocb *req, u64 sqe_addr) in io_try_cancel_userdata() argument
6322 struct io_ring_ctx *ctx = req->ctx; in io_try_cancel_userdata()
6325 WARN_ON_ONCE(!io_wq_current_is_worker() && req->task != current); in io_try_cancel_userdata()
6327 ret = io_async_cancel_one(req->task->io_uring, sqe_addr, ctx); in io_try_cancel_userdata()
6343 static int io_async_cancel_prep(struct io_kiocb *req, in io_async_cancel_prep() argument
6346 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_async_cancel_prep()
6348 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_async_cancel_prep()
6354 req->cancel.addr = READ_ONCE(sqe->addr); in io_async_cancel_prep()
6358 static int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags) in io_async_cancel() argument
6360 struct io_ring_ctx *ctx = req->ctx; in io_async_cancel()
6361 u64 sqe_addr = req->cancel.addr; in io_async_cancel()
6365 ret = io_try_cancel_userdata(req, sqe_addr); in io_async_cancel()
6375 ret = io_async_cancel_one(tctx, req->cancel.addr, ctx); in io_async_cancel()
6382 req_set_fail(req); in io_async_cancel()
6383 io_req_complete_post(req, ret, 0); in io_async_cancel()
6387 static int io_rsrc_update_prep(struct io_kiocb *req, in io_rsrc_update_prep() argument
6390 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_rsrc_update_prep()
6395 req->rsrc_update.offset = READ_ONCE(sqe->off); in io_rsrc_update_prep()
6396 req->rsrc_update.nr_args = READ_ONCE(sqe->len); in io_rsrc_update_prep()
6397 if (!req->rsrc_update.nr_args) in io_rsrc_update_prep()
6399 req->rsrc_update.arg = READ_ONCE(sqe->addr); in io_rsrc_update_prep()
6403 static int io_files_update(struct io_kiocb *req, unsigned int issue_flags) in io_files_update() argument
6405 struct io_ring_ctx *ctx = req->ctx; in io_files_update()
6409 up.offset = req->rsrc_update.offset; in io_files_update()
6410 up.data = req->rsrc_update.arg; in io_files_update()
6418 &up, req->rsrc_update.nr_args); in io_files_update()
6422 req_set_fail(req); in io_files_update()
6423 __io_req_complete(req, issue_flags, ret, 0); in io_files_update()
6427 static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_req_prep() argument
6429 switch (req->opcode) { in io_req_prep()
6435 return io_read_prep(req, sqe); in io_req_prep()
6439 return io_write_prep(req, sqe); in io_req_prep()
6441 return io_poll_add_prep(req, sqe); in io_req_prep()
6443 return io_poll_update_prep(req, sqe); in io_req_prep()
6445 return io_fsync_prep(req, sqe); in io_req_prep()
6447 return io_sfr_prep(req, sqe); in io_req_prep()
6450 return io_sendmsg_prep(req, sqe); in io_req_prep()
6453 return io_recvmsg_prep(req, sqe); in io_req_prep()
6455 return io_connect_prep(req, sqe); in io_req_prep()
6457 return io_timeout_prep(req, sqe, false); in io_req_prep()
6459 return io_timeout_remove_prep(req, sqe); in io_req_prep()
6461 return io_async_cancel_prep(req, sqe); in io_req_prep()
6463 return io_timeout_prep(req, sqe, true); in io_req_prep()
6465 return io_accept_prep(req, sqe); in io_req_prep()
6467 return io_fallocate_prep(req, sqe); in io_req_prep()
6469 return io_openat_prep(req, sqe); in io_req_prep()
6471 return io_close_prep(req, sqe); in io_req_prep()
6473 return io_rsrc_update_prep(req, sqe); in io_req_prep()
6475 return io_statx_prep(req, sqe); in io_req_prep()
6477 return io_fadvise_prep(req, sqe); in io_req_prep()
6479 return io_madvise_prep(req, sqe); in io_req_prep()
6481 return io_openat2_prep(req, sqe); in io_req_prep()
6483 return io_epoll_ctl_prep(req, sqe); in io_req_prep()
6485 return io_splice_prep(req, sqe); in io_req_prep()
6487 return io_provide_buffers_prep(req, sqe); in io_req_prep()
6489 return io_remove_buffers_prep(req, sqe); in io_req_prep()
6491 return io_tee_prep(req, sqe); in io_req_prep()
6493 return io_shutdown_prep(req, sqe); in io_req_prep()
6495 return io_renameat_prep(req, sqe); in io_req_prep()
6497 return io_unlinkat_prep(req, sqe); in io_req_prep()
6501 req->opcode); in io_req_prep()
6505 static int io_req_prep_async(struct io_kiocb *req) in io_req_prep_async() argument
6507 if (!io_op_defs[req->opcode].needs_async_setup) in io_req_prep_async()
6509 if (WARN_ON_ONCE(req->async_data)) in io_req_prep_async()
6511 if (io_alloc_async_data(req)) in io_req_prep_async()
6514 switch (req->opcode) { in io_req_prep_async()
6516 return io_rw_prep_async(req, READ); in io_req_prep_async()
6518 return io_rw_prep_async(req, WRITE); in io_req_prep_async()
6520 return io_sendmsg_prep_async(req); in io_req_prep_async()
6522 return io_recvmsg_prep_async(req); in io_req_prep_async()
6524 return io_connect_prep_async(req); in io_req_prep_async()
6527 req->opcode); in io_req_prep_async()
6531 static u32 io_get_sequence(struct io_kiocb *req) in io_get_sequence() argument
6533 u32 seq = req->ctx->cached_sq_head; in io_get_sequence()
6535 /* need original cached_sq_head, but it was increased for each req */ in io_get_sequence()
6536 io_for_each_link(req, req) in io_get_sequence()
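Editor's note: io_get_sequence() starts from ctx->cached_sq_head, which was advanced once per SQE, and walks the request's link chain to step the count back to the head request's original sequence. The per-link decrement itself is not among the matched lines, so it is assumed here; a sketch of the same arithmetic over a plain singly linked list:

	/* Sketch of io_get_sequence(): undo the per-SQE advance of cached_sq_head
	 * by walking the link chain. Types are simplified stand-ins. */
	#include <stdint.h>
	#include <stdio.h>

	struct request {
		struct request *link;
	};

	static uint32_t get_sequence(uint32_t cached_sq_head, const struct request *req)
	{
		uint32_t seq = cached_sq_head;

		/* cached_sq_head was incremented for each req in the chain, step it back */
		for (; req; req = req->link)
			seq--;
		return seq;
	}

	int main(void)
	{
		struct request tail = { .link = NULL };
		struct request head = { .link = &tail };

		/* two SQEs were consumed, so the head's sequence is cached_sq_head - 2 */
		printf("%u\n", get_sequence(10, &head));	/* prints 8 */
		return 0;
	}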
6541 static bool io_drain_req(struct io_kiocb *req) in io_drain_req() argument
6544 struct io_ring_ctx *ctx = req->ctx; in io_drain_req()
6549 if (req->flags & REQ_F_FAIL) { in io_drain_req()
6550 io_req_complete_fail_submit(req); in io_drain_req()
6561 req->flags |= REQ_F_IO_DRAIN; in io_drain_req()
6565 io_for_each_link(pos, req->link) { in io_drain_req()
6568 req->flags |= REQ_F_IO_DRAIN; in io_drain_req()
6573 /* Still need defer if there is pending req in defer list. */ in io_drain_req()
6576 !(req->flags & REQ_F_IO_DRAIN))) { in io_drain_req()
6583 seq = io_get_sequence(req); in io_drain_req()
6585 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) in io_drain_req()
6588 ret = io_req_prep_async(req); in io_drain_req()
6591 io_prep_async_link(req); in io_drain_req()
6596 io_req_complete_failed(req, ret); in io_drain_req()
6601 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) { in io_drain_req()
6604 io_queue_async_work(req, NULL); in io_drain_req()
6608 trace_io_uring_defer(ctx, req, req->user_data); in io_drain_req()
6609 de->req = req; in io_drain_req()
6616 static void io_clean_op(struct io_kiocb *req) in io_clean_op() argument
6618 if (req->flags & REQ_F_BUFFER_SELECTED) { in io_clean_op()
6619 switch (req->opcode) { in io_clean_op()
6623 kfree((void *)(unsigned long)req->rw.addr); in io_clean_op()
6627 kfree(req->sr_msg.kbuf); in io_clean_op()
6632 if (req->flags & REQ_F_NEED_CLEANUP) { in io_clean_op()
6633 switch (req->opcode) { in io_clean_op()
6640 struct io_async_rw *io = req->async_data; in io_clean_op()
6647 struct io_async_msghdr *io = req->async_data; in io_clean_op()
6654 if (req->open.filename) in io_clean_op()
6655 putname(req->open.filename); in io_clean_op()
6658 putname(req->rename.oldpath); in io_clean_op()
6659 putname(req->rename.newpath); in io_clean_op()
6662 putname(req->unlink.filename); in io_clean_op()
6666 if ((req->flags & REQ_F_POLLED) && req->apoll) { in io_clean_op()
6667 kfree(req->apoll->double_poll); in io_clean_op()
6668 kfree(req->apoll); in io_clean_op()
6669 req->apoll = NULL; in io_clean_op()
6671 if (req->flags & REQ_F_INFLIGHT) { in io_clean_op()
6672 struct io_uring_task *tctx = req->task->io_uring; in io_clean_op()
6676 if (req->flags & REQ_F_CREDS) in io_clean_op()
6677 put_cred(req->creds); in io_clean_op()
6679 req->flags &= ~IO_REQ_CLEAN_FLAGS; in io_clean_op()
6682 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags) in io_issue_sqe() argument
6684 struct io_ring_ctx *ctx = req->ctx; in io_issue_sqe()
6688 if ((req->flags & REQ_F_CREDS) && req->creds != current_cred()) in io_issue_sqe()
6689 creds = override_creds(req->creds); in io_issue_sqe()
6691 switch (req->opcode) { in io_issue_sqe()
6693 ret = io_nop(req, issue_flags); in io_issue_sqe()
6698 ret = io_read(req, issue_flags); in io_issue_sqe()
6703 ret = io_write(req, issue_flags); in io_issue_sqe()
6706 ret = io_fsync(req, issue_flags); in io_issue_sqe()
6709 ret = io_poll_add(req, issue_flags); in io_issue_sqe()
6712 ret = io_poll_update(req, issue_flags); in io_issue_sqe()
6715 ret = io_sync_file_range(req, issue_flags); in io_issue_sqe()
6718 ret = io_sendmsg(req, issue_flags); in io_issue_sqe()
6721 ret = io_send(req, issue_flags); in io_issue_sqe()
6724 ret = io_recvmsg(req, issue_flags); in io_issue_sqe()
6727 ret = io_recv(req, issue_flags); in io_issue_sqe()
6730 ret = io_timeout(req, issue_flags); in io_issue_sqe()
6733 ret = io_timeout_remove(req, issue_flags); in io_issue_sqe()
6736 ret = io_accept(req, issue_flags); in io_issue_sqe()
6739 ret = io_connect(req, issue_flags); in io_issue_sqe()
6742 ret = io_async_cancel(req, issue_flags); in io_issue_sqe()
6745 ret = io_fallocate(req, issue_flags); in io_issue_sqe()
6748 ret = io_openat(req, issue_flags); in io_issue_sqe()
6751 ret = io_close(req, issue_flags); in io_issue_sqe()
6754 ret = io_files_update(req, issue_flags); in io_issue_sqe()
6757 ret = io_statx(req, issue_flags); in io_issue_sqe()
6760 ret = io_fadvise(req, issue_flags); in io_issue_sqe()
6763 ret = io_madvise(req, issue_flags); in io_issue_sqe()
6766 ret = io_openat2(req, issue_flags); in io_issue_sqe()
6769 ret = io_epoll_ctl(req, issue_flags); in io_issue_sqe()
6772 ret = io_splice(req, issue_flags); in io_issue_sqe()
6775 ret = io_provide_buffers(req, issue_flags); in io_issue_sqe()
6778 ret = io_remove_buffers(req, issue_flags); in io_issue_sqe()
6781 ret = io_tee(req, issue_flags); in io_issue_sqe()
6784 ret = io_shutdown(req, issue_flags); in io_issue_sqe()
6787 ret = io_renameat(req, issue_flags); in io_issue_sqe()
6790 ret = io_unlinkat(req, issue_flags); in io_issue_sqe()
6802 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) in io_issue_sqe()
6803 io_iopoll_req_issued(req); in io_issue_sqe()
6810 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_wq_free_work() local
6812 req = io_put_req_find_next(req); in io_wq_free_work()
6813 return req ? &req->work : NULL; in io_wq_free_work()
6818 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_wq_submit_work() local
6823 if (!(req->flags & REQ_F_REFCOUNT)) in io_wq_submit_work()
6824 __io_req_set_refcount(req, 2); in io_wq_submit_work()
6826 req_ref_get(req); in io_wq_submit_work()
6828 timeout = io_prep_linked_timeout(req); in io_wq_submit_work()
6837 ret = io_issue_sqe(req, 0); in io_wq_submit_work()
6843 if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_wq_submit_work()
6851 io_req_task_queue_fail(req, ret); in io_wq_submit_work()
6882 struct io_kiocb *req, int fd, in io_file_get_fixed() argument
6897 req->flags |= (file_ptr << REQ_F_NOWAIT_READ_BIT); in io_file_get_fixed()
6898 io_req_set_rsrc_node(req); in io_file_get_fixed()
6905 struct io_kiocb *req, int fd) in io_file_get_normal() argument
6913 io_req_track_inflight(req); in io_file_get_normal()
6918 struct io_kiocb *req, int fd, bool fixed, in io_file_get() argument
6922 return io_file_get_fixed(ctx, req, fd, issue_flags); in io_file_get()
6924 return io_file_get_normal(ctx, req, fd); in io_file_get()
6927 static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked) in io_req_task_link_timeout() argument
6929 struct io_kiocb *prev = req->timeout.prev; in io_req_task_link_timeout()
6933 if (!(req->task->flags & PF_EXITING)) in io_req_task_link_timeout()
6934 ret = io_try_cancel_userdata(req, prev->user_data); in io_req_task_link_timeout()
6935 io_req_complete_post(req, ret ?: -ETIME, 0); in io_req_task_link_timeout()
6938 io_req_complete_post(req, -ETIME, 0); in io_req_task_link_timeout()
6946 struct io_kiocb *prev, *req = data->req; in io_link_timeout_fn() local
6947 struct io_ring_ctx *ctx = req->ctx; in io_link_timeout_fn()
6951 prev = req->timeout.head; in io_link_timeout_fn()
6952 req->timeout.head = NULL; in io_link_timeout_fn()
6963 list_del(&req->timeout.list); in io_link_timeout_fn()
6964 req->timeout.prev = prev; in io_link_timeout_fn()
6967 req->io_task_work.func = io_req_task_link_timeout; in io_link_timeout_fn()
6968 io_req_task_work_add(req); in io_link_timeout_fn()
6972 static void io_queue_linked_timeout(struct io_kiocb *req) in io_queue_linked_timeout() argument
6974 struct io_ring_ctx *ctx = req->ctx; in io_queue_linked_timeout()
6981 if (req->timeout.head) { in io_queue_linked_timeout()
6982 struct io_timeout_data *data = req->async_data; in io_queue_linked_timeout()
6987 list_add_tail(&req->timeout.list, &ctx->ltimeout_list); in io_queue_linked_timeout()
6991 io_put_req(req); in io_queue_linked_timeout()
6994 static void __io_queue_sqe(struct io_kiocb *req) in __io_queue_sqe() argument
6995 __must_hold(&req->ctx->uring_lock) in __io_queue_sqe()
7001 ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER); in __io_queue_sqe()
7008 if (req->flags & REQ_F_COMPLETE_INLINE) { in __io_queue_sqe()
7009 struct io_ring_ctx *ctx = req->ctx; in __io_queue_sqe()
7012 state->compl_reqs[state->compl_nr++] = req; in __io_queue_sqe()
7018 linked_timeout = io_prep_linked_timeout(req); in __io_queue_sqe()
7021 } else if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { in __io_queue_sqe()
7022 linked_timeout = io_prep_linked_timeout(req); in __io_queue_sqe()
7024 switch (io_arm_poll_handler(req)) { in __io_queue_sqe()
7034 io_queue_async_work(req, NULL); in __io_queue_sqe()
7041 io_req_complete_failed(req, ret); in __io_queue_sqe()
7045 static inline void io_queue_sqe(struct io_kiocb *req) in io_queue_sqe() argument
7046 __must_hold(&req->ctx->uring_lock) in io_queue_sqe()
7048 if (unlikely(req->ctx->drain_active) && io_drain_req(req)) in io_queue_sqe()
7051 if (likely(!(req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL)))) { in io_queue_sqe()
7052 __io_queue_sqe(req); in io_queue_sqe()
7053 } else if (req->flags & REQ_F_FAIL) { in io_queue_sqe()
7054 io_req_complete_fail_submit(req); in io_queue_sqe()
7056 int ret = io_req_prep_async(req); in io_queue_sqe()
7059 io_req_complete_failed(req, ret); in io_queue_sqe()
7061 io_queue_async_work(req, NULL); in io_queue_sqe()
7071 struct io_kiocb *req, in io_check_restriction() argument
7077 if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) in io_check_restriction()
7091 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, in io_init_req() argument
7099 /* req is partially pre-initialised, see io_preinit_req() */ in io_init_req()
7100 req->opcode = READ_ONCE(sqe->opcode); in io_init_req()
7102 req->flags = sqe_flags = READ_ONCE(sqe->flags); in io_init_req()
7103 req->user_data = READ_ONCE(sqe->user_data); in io_init_req()
7104 req->file = NULL; in io_init_req()
7105 req->fixed_rsrc_refs = NULL; in io_init_req()
7106 req->task = current; in io_init_req()
7111 if (unlikely(req->opcode >= IORING_OP_LAST)) in io_init_req()
7113 if (!io_check_restriction(ctx, req, sqe_flags)) in io_init_req()
7117 !io_op_defs[req->opcode].buffer_select) in io_init_req()
7124 req->creds = xa_load(&ctx->personalities, personality); in io_init_req()
7125 if (!req->creds) in io_init_req()
7127 get_cred(req->creds); in io_init_req()
7128 req->flags |= REQ_F_CREDS; in io_init_req()
7137 io_op_defs[req->opcode].plug) { in io_init_req()
7142 if (io_op_defs[req->opcode].needs_file) { in io_init_req()
7143 req->file = io_file_get(ctx, req, READ_ONCE(sqe->fd), in io_init_req()
7146 if (unlikely(!req->file)) in io_init_req()
7154 static int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, in io_submit_sqe() argument
7161 ret = io_init_req(ctx, req, sqe); in io_submit_sqe()
7167 * we can judge whether a link req failed or was cancelled by whether in io_submit_sqe()
7169 * it may have REQ_F_FAIL set because of another req's failure in io_submit_sqe()
7170 * so use req->result to distinguish whether a head in io_submit_sqe()
7171 * was marked REQ_F_FAIL by its own failure or by another req's in io_submit_sqe()
7177 } else if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { in io_submit_sqe()
7179 * the current req is a normal req, we should return in io_submit_sqe()
7182 io_req_complete_failed(req, ret); in io_submit_sqe()
7185 req_fail_link_node(req, ret); in io_submit_sqe()
7187 ret = io_req_prep(req, sqe); in io_submit_sqe()
7193 trace_io_uring_submit_sqe(ctx, req, req->opcode, req->user_data, in io_submit_sqe()
7194 req->flags, true, in io_submit_sqe()
7207 if (!(req->flags & REQ_F_FAIL)) { in io_submit_sqe()
7208 ret = io_req_prep_async(req); in io_submit_sqe()
7210 req_fail_link_node(req, ret); in io_submit_sqe()
7215 trace_io_uring_link(ctx, req, head); in io_submit_sqe()
7216 link->last->link = req; in io_submit_sqe()
7217 link->last = req; in io_submit_sqe()
7220 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { in io_submit_sqe()
7225 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { in io_submit_sqe()
7226 link->head = req; in io_submit_sqe()
7227 link->last = req; in io_submit_sqe()
7229 io_queue_sqe(req); in io_submit_sqe()
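Editor's note: the link->head/link->last handling above is the kernel side of IOSQE_IO_LINK: flagged SQEs are chained onto the head, and the chain is queued once an SQE without the flag closes it. A hedged liburing example of building such a chain; the file name, buffer and queue depth are placeholders.

	/* Sketch: chain a write and an fsync with IOSQE_IO_LINK so the fsync is
	 * only started after the write completes. "test.txt" is a placeholder. */
	#include <liburing.h>
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		static const char buf[] = "hello io_uring\n";
		struct io_uring ring;
		struct io_uring_sqe *sqe;
		struct io_uring_cqe *cqe;
		int fd, i;

		fd = open("test.txt", O_WRONLY | O_CREAT | O_TRUNC, 0644);
		if (fd < 0 || io_uring_queue_init(8, &ring, 0))
			return 1;

		/* first SQE carries IOSQE_IO_LINK: it becomes the head of a chain */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_write(sqe, fd, buf, strlen(buf), 0);
		io_uring_sqe_set_flags(sqe, IOSQE_IO_LINK);

		/* second SQE has no link flag, so it terminates the chain */
		sqe = io_uring_get_sqe(&ring);
		io_uring_prep_fsync(sqe, fd, 0);

		io_uring_submit(&ring);

		/* both CQEs arrive; the fsync was only issued after the write finished */
		for (i = 0; i < 2; i++) {
			io_uring_wait_cqe(&ring, &cqe);
			printf("cqe %d: res=%d\n", i, cqe->res);
			io_uring_cqe_seen(&ring, cqe);
		}
		io_uring_queue_exit(&ring);
		close(fd);
		return 0;
	}

If the write fails, the remainder of the chain is cancelled rather than run, which is roughly what the req_fail_link_node() calls in the matches above prepare for.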
7320 struct io_kiocb *req; in io_submit_sqes() local
7322 req = io_alloc_req(ctx); in io_submit_sqes()
7323 if (unlikely(!req)) { in io_submit_sqes()
7330 list_add(&req->inflight_entry, &ctx->submit_state.free_list); in io_submit_sqes()
7335 if (io_submit_sqe(ctx, req, sqe)) in io_submit_sqes()
8426 static int io_install_fixed_file(struct io_kiocb *req, struct file *file, in io_install_fixed_file() argument
8429 struct io_ring_ctx *ctx = req->ctx; in io_install_fixed_file()
8482 static int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags) in io_close_fixed() argument
8484 unsigned int offset = req->close.file_slot - 1; in io_close_fixed()
8485 struct io_ring_ctx *ctx = req->ctx; in io_close_fixed()
9297 struct io_kiocb *req, *nxt; in io_req_cache_free() local
9299 list_for_each_entry_safe(req, nxt, list, inflight_entry) { in io_req_cache_free()
9300 list_del(&req->inflight_entry); in io_req_cache_free()
9301 kmem_cache_free(req_cachep, req); in io_req_cache_free()
9455 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_ctx_cb() local
9457 return req->ctx == data; in io_cancel_ctx_cb()
9532 struct io_kiocb *req, *tmp; in io_kill_timeouts() local
9537 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { in io_kill_timeouts()
9538 if (io_match_task(req, tsk, cancel_all)) { in io_kill_timeouts()
9539 io_kill_timeout(req, -ECANCELED); in io_kill_timeouts()
9601 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_task_cb() local
9604 return io_match_task_safe(req, cancel->task, cancel->all); in io_cancel_task_cb()
9615 if (io_match_task_safe(de->req, task, cancel_all)) { in io_cancel_defer_files()
9627 io_req_complete_failed(de->req, -ECANCELED); in io_cancel_defer_files()
10180 struct io_kiocb *req; in __io_uring_show_fdinfo() local
10182 hlist_for_each_entry(req, list, hash_node) in __io_uring_show_fdinfo()
10183 seq_printf(m, " op=%d, task_works=%d\n", req->opcode, in __io_uring_show_fdinfo()
10184 req->task->task_works != NULL); in __io_uring_show_fdinfo()