Lines Matching full:req — full-text search hits for "req" in the io_uring request handling code; each entry shows the source line number, the matching line, and the enclosing symbol context (argument, member, local, or function).
417 struct io_kiocb *req; member
711 struct io_kiocb *req; member
747 /* needs req->file assigned */
958 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
960 static void io_cqring_fill_event(struct io_kiocb *req, long res);
961 static void io_put_req(struct io_kiocb *req);
962 static void io_put_req_deferred(struct io_kiocb *req, int nr);
963 static void io_double_put_req(struct io_kiocb *req);
964 static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
965 static void __io_queue_linked_timeout(struct io_kiocb *req);
966 static void io_queue_linked_timeout(struct io_kiocb *req);
970 static void __io_clean_op(struct io_kiocb *req);
972 struct io_kiocb *req, int fd, bool fixed);
973 static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs);
976 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
979 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
982 static void io_req_drop_files(struct io_kiocb *req);
983 static void io_req_task_queue(struct io_kiocb *req);
1002 static inline void io_clean_op(struct io_kiocb *req) in io_clean_op() argument
1004 if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED)) in io_clean_op()
1005 __io_clean_op(req); in io_clean_op()
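The io_clean_op() hits above reassemble into a small inline gate that only calls the heavier __io_clean_op() when the request actually has per-op state to tear down. A sketch from the hits only (braces and blank lines are not part of the listing):

static inline void io_clean_op(struct io_kiocb *req)
{
	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
		__io_clean_op(req);
}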
1008 static inline bool __io_match_files(struct io_kiocb *req, in __io_match_files() argument
1011 if (req->file && req->file->f_op == &io_uring_fops) in __io_match_files()
1014 return ((req->flags & REQ_F_WORK_INITIALIZED) && in __io_match_files()
1015 (req->work.flags & IO_WQ_WORK_FILES)) && in __io_match_files()
1016 req->work.identity->files == files; in __io_match_files()
1038 /* in terms of cancelation, always match if req task is dead */ in io_match_task()
1096 struct io_kiocb *req) in io_sq_thread_acquire_mm() argument
1098 if (!(io_op_defs[req->opcode].work_flags & IO_WQ_WORK_MM)) in io_sq_thread_acquire_mm()
1123 static inline void req_set_fail_links(struct io_kiocb *req) in req_set_fail_links() argument
1125 if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK) in req_set_fail_links()
1126 req->flags |= REQ_F_FAIL_LINK; in req_set_fail_links()
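The req_set_fail_links() hits form a complete helper: a request is marked REQ_F_FAIL_LINK only when it is a soft link (REQ_F_LINK set, REQ_F_HARDLINK clear), so hard links are not failed by an earlier error. Reassembled from the hits:

static inline void req_set_fail_links(struct io_kiocb *req)
{
	if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
		req->flags |= REQ_F_FAIL_LINK;
}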
1154 static inline void __io_req_init_async(struct io_kiocb *req) in __io_req_init_async() argument
1156 memset(&req->work, 0, sizeof(req->work)); in __io_req_init_async()
1157 req->flags |= REQ_F_WORK_INITIALIZED; in __io_req_init_async()
1164 static inline void io_req_init_async(struct io_kiocb *req) in io_req_init_async() argument
1166 struct io_uring_task *tctx = req->task->io_uring; in io_req_init_async()
1168 if (req->flags & REQ_F_WORK_INITIALIZED) in io_req_init_async()
1171 __io_req_init_async(req); in io_req_init_async()
1174 req->work.identity = tctx->identity; in io_req_init_async()
1176 refcount_inc(&req->work.identity->count); in io_req_init_async()
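__io_req_init_async() is shown in full above, while io_req_init_async() is missing its non-matching lines (the early return and the identity check). The sketch below fills those from context and marks them as assumptions:

static inline void __io_req_init_async(struct io_kiocb *req)
{
	memset(&req->work, 0, sizeof(req->work));
	req->flags |= REQ_F_WORK_INITIALIZED;
}

static inline void io_req_init_async(struct io_kiocb *req)
{
	struct io_uring_task *tctx = req->task->io_uring;

	if (req->flags & REQ_F_WORK_INITIALIZED)
		return;		/* assumption: the early-return line is not a 'req' hit */

	__io_req_init_async(req);

	req->work.identity = tctx->identity;
	if (tctx->identity != &tctx->__identity)	/* assumption: only non-static identities are refcounted */
		refcount_inc(&req->work.identity->count);
}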
1191 static inline bool io_is_timeout_noseq(struct io_kiocb *req) in io_is_timeout_noseq() argument
1193 return !req->timeout.off; in io_is_timeout_noseq()
1256 static bool req_need_defer(struct io_kiocb *req, u32 seq) in req_need_defer() argument
1258 if (unlikely(req->flags & REQ_F_IO_DRAIN)) { in req_need_defer()
1259 struct io_ring_ctx *ctx = req->ctx; in req_need_defer()
1276 static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req) in io_put_identity() argument
1278 if (req->work.identity == &tctx->__identity) in io_put_identity()
1280 if (refcount_dec_and_test(&req->work.identity->count)) in io_put_identity()
1281 kfree(req->work.identity); in io_put_identity()
1284 static void io_req_clean_work(struct io_kiocb *req) in io_req_clean_work() argument
1286 if (!(req->flags & REQ_F_WORK_INITIALIZED)) in io_req_clean_work()
1289 req->flags &= ~REQ_F_WORK_INITIALIZED; in io_req_clean_work()
1291 if (req->work.flags & IO_WQ_WORK_MM) { in io_req_clean_work()
1292 mmdrop(req->work.identity->mm); in io_req_clean_work()
1293 req->work.flags &= ~IO_WQ_WORK_MM; in io_req_clean_work()
1296 if (req->work.flags & IO_WQ_WORK_BLKCG) { in io_req_clean_work()
1297 css_put(req->work.identity->blkcg_css); in io_req_clean_work()
1298 req->work.flags &= ~IO_WQ_WORK_BLKCG; in io_req_clean_work()
1301 if (req->work.flags & IO_WQ_WORK_CREDS) { in io_req_clean_work()
1302 put_cred(req->work.identity->creds); in io_req_clean_work()
1303 req->work.flags &= ~IO_WQ_WORK_CREDS; in io_req_clean_work()
1305 if (req->work.flags & IO_WQ_WORK_FS) { in io_req_clean_work()
1306 struct fs_struct *fs = req->work.identity->fs; in io_req_clean_work()
1308 spin_lock(&req->work.identity->fs->lock); in io_req_clean_work()
1311 spin_unlock(&req->work.identity->fs->lock); in io_req_clean_work()
1314 req->work.flags &= ~IO_WQ_WORK_FS; in io_req_clean_work()
1316 if (req->flags & REQ_F_INFLIGHT) in io_req_clean_work()
1317 io_req_drop_files(req); in io_req_clean_work()
1319 io_put_identity(req->task->io_uring, req); in io_req_clean_work()
1326 static bool io_identity_cow(struct io_kiocb *req) in io_identity_cow() argument
1328 struct io_uring_task *tctx = req->task->io_uring; in io_identity_cow()
1332 if (req->work.flags & IO_WQ_WORK_CREDS) in io_identity_cow()
1333 creds = req->work.identity->creds; in io_identity_cow()
1335 id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL); in io_identity_cow()
1337 req->work.flags |= IO_WQ_WORK_CANCEL; in io_identity_cow()
1354 /* drop tctx and req identity references, if needed */ in io_identity_cow()
1358 if (req->work.identity != &tctx->__identity && in io_identity_cow()
1359 refcount_dec_and_test(&req->work.identity->count)) in io_identity_cow()
1360 kfree(req->work.identity); in io_identity_cow()
1362 req->work.identity = id; in io_identity_cow()
1367 static bool io_grab_identity(struct io_kiocb *req) in io_grab_identity() argument
1369 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_grab_identity()
1370 struct io_identity *id = req->work.identity; in io_grab_identity()
1371 struct io_ring_ctx *ctx = req->ctx; in io_grab_identity()
1376 req->work.flags |= IO_WQ_WORK_FSIZE; in io_grab_identity()
1379 if (!(req->work.flags & IO_WQ_WORK_BLKCG) && in io_grab_identity()
1391 req->work.flags |= IO_WQ_WORK_BLKCG; in io_grab_identity()
1395 if (!(req->work.flags & IO_WQ_WORK_CREDS)) { in io_grab_identity()
1399 req->work.flags |= IO_WQ_WORK_CREDS; in io_grab_identity()
1406 if (!(req->work.flags & IO_WQ_WORK_FS) && in io_grab_identity()
1413 req->work.flags |= IO_WQ_WORK_FS; in io_grab_identity()
1415 req->work.flags |= IO_WQ_WORK_CANCEL; in io_grab_identity()
1419 if (!(req->work.flags & IO_WQ_WORK_FILES) && in io_grab_identity()
1421 !(req->flags & REQ_F_NO_FILE_TABLE)) { in io_grab_identity()
1428 if (!(req->flags & REQ_F_INFLIGHT)) { in io_grab_identity()
1429 req->flags |= REQ_F_INFLIGHT; in io_grab_identity()
1432 list_add(&req->inflight_entry, &ctx->inflight_list); in io_grab_identity()
1435 req->work.flags |= IO_WQ_WORK_FILES; in io_grab_identity()
1437 if (!(req->work.flags & IO_WQ_WORK_MM) && in io_grab_identity()
1442 req->work.flags |= IO_WQ_WORK_MM; in io_grab_identity()
1448 static void io_prep_async_work(struct io_kiocb *req) in io_prep_async_work() argument
1450 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_prep_async_work()
1451 struct io_ring_ctx *ctx = req->ctx; in io_prep_async_work()
1454 io_req_init_async(req); in io_prep_async_work()
1455 id = req->work.identity; in io_prep_async_work()
1457 if (req->flags & REQ_F_FORCE_ASYNC) in io_prep_async_work()
1458 req->work.flags |= IO_WQ_WORK_CONCURRENT; in io_prep_async_work()
1460 if (req->flags & REQ_F_ISREG) { in io_prep_async_work()
1462 io_wq_hash_work(&req->work, file_inode(req->file)); in io_prep_async_work()
1463 } else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) { in io_prep_async_work()
1465 req->work.flags |= IO_WQ_WORK_UNBOUND; in io_prep_async_work()
1469 if (io_grab_identity(req)) in io_prep_async_work()
1472 if (!io_identity_cow(req)) in io_prep_async_work()
1476 if (!io_grab_identity(req)) in io_prep_async_work()
1480 static void io_prep_async_link(struct io_kiocb *req) in io_prep_async_link() argument
1484 io_prep_async_work(req); in io_prep_async_link()
1485 if (req->flags & REQ_F_LINK_HEAD) in io_prep_async_link()
1486 list_for_each_entry(cur, &req->link_list, link_list) in io_prep_async_link()
1490 static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req) in __io_queue_async_work() argument
1492 struct io_ring_ctx *ctx = req->ctx; in __io_queue_async_work()
1493 struct io_kiocb *link = io_prep_linked_timeout(req); in __io_queue_async_work()
1495 trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, in __io_queue_async_work()
1496 &req->work, req->flags); in __io_queue_async_work()
1497 io_wq_enqueue(ctx->io_wq, &req->work); in __io_queue_async_work()
1501 static void io_queue_async_work(struct io_kiocb *req) in io_queue_async_work() argument
1506 io_prep_async_link(req); in io_queue_async_work()
1507 link = __io_queue_async_work(req); in io_queue_async_work()
1513 static void io_kill_timeout(struct io_kiocb *req, int status) in io_kill_timeout() argument
1515 struct io_timeout_data *io = req->async_data; in io_kill_timeout()
1521 req_set_fail_links(req); in io_kill_timeout()
1522 atomic_set(&req->ctx->cq_timeouts, in io_kill_timeout()
1523 atomic_read(&req->ctx->cq_timeouts) + 1); in io_kill_timeout()
1524 list_del_init(&req->timeout.list); in io_kill_timeout()
1525 io_cqring_fill_event(req, status); in io_kill_timeout()
1526 io_put_req_deferred(req, 1); in io_kill_timeout()
1536 struct io_kiocb *req, *tmp; in io_kill_timeouts() local
1540 list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) { in io_kill_timeouts()
1541 if (io_match_task(req, tsk, files)) { in io_kill_timeouts()
1542 io_kill_timeout(req, -ECANCELED); in io_kill_timeouts()
1556 if (req_need_defer(de->req, de->seq)) in __io_queue_deferred()
1559 io_req_task_queue(de->req); in __io_queue_deferred()
1575 struct io_kiocb *req = list_first_entry(&ctx->timeout_list, in io_flush_timeouts() local
1578 if (io_is_timeout_noseq(req)) in io_flush_timeouts()
1588 events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush; in io_flush_timeouts()
1593 list_del_init(&req->timeout.list); in io_flush_timeouts()
1594 io_kill_timeout(req, 0); in io_flush_timeouts()
1674 struct io_kiocb *req, *tmp; in __io_cqring_overflow_flush() local
1688 list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) { in __io_cqring_overflow_flush()
1689 if (!io_match_task(req, tsk, files)) in __io_cqring_overflow_flush()
1696 list_move(&req->compl.list, &list); in __io_cqring_overflow_flush()
1698 WRITE_ONCE(cqe->user_data, req->user_data); in __io_cqring_overflow_flush()
1699 WRITE_ONCE(cqe->res, req->result); in __io_cqring_overflow_flush()
1700 WRITE_ONCE(cqe->flags, req->compl.cflags); in __io_cqring_overflow_flush()
1715 req = list_first_entry(&list, struct io_kiocb, compl.list); in __io_cqring_overflow_flush()
1716 list_del(&req->compl.list); in __io_cqring_overflow_flush()
1717 io_put_req(req); in __io_cqring_overflow_flush()
1737 static void __io_cqring_fill_event(struct io_kiocb *req, long res, in __io_cqring_fill_event() argument
1740 struct io_ring_ctx *ctx = req->ctx; in __io_cqring_fill_event()
1743 trace_io_uring_complete(ctx, req->user_data, res); in __io_cqring_fill_event()
1752 WRITE_ONCE(cqe->user_data, req->user_data); in __io_cqring_fill_event()
1756 atomic_read(&req->task->io_uring->in_idle)) { in __io_cqring_fill_event()
1770 io_clean_op(req); in __io_cqring_fill_event()
1771 req->result = res; in __io_cqring_fill_event()
1772 req->compl.cflags = cflags; in __io_cqring_fill_event()
1773 refcount_inc(&req->refs); in __io_cqring_fill_event()
1774 list_add_tail(&req->compl.list, &ctx->cq_overflow_list); in __io_cqring_fill_event()
1778 static void io_cqring_fill_event(struct io_kiocb *req, long res) in io_cqring_fill_event() argument
1780 __io_cqring_fill_event(req, res, 0); in io_cqring_fill_event()
1783 static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags) in io_cqring_add_event() argument
1785 struct io_ring_ctx *ctx = req->ctx; in io_cqring_add_event()
1789 __io_cqring_fill_event(req, res, cflags); in io_cqring_add_event()
1802 struct io_kiocb *req; in io_submit_flush_completions() local
1804 req = list_first_entry(&cs->list, struct io_kiocb, compl.list); in io_submit_flush_completions()
1805 list_del(&req->compl.list); in io_submit_flush_completions()
1806 __io_cqring_fill_event(req, req->result, req->compl.cflags); in io_submit_flush_completions()
1811 * because of a potential deadlock with req->work.fs->lock in io_submit_flush_completions()
1813 if (req->flags & (REQ_F_FAIL_LINK|REQ_F_LINK_TIMEOUT in io_submit_flush_completions()
1816 io_put_req(req); in io_submit_flush_completions()
1819 io_put_req(req); in io_submit_flush_completions()
1829 static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags, in __io_req_complete() argument
1833 io_cqring_add_event(req, res, cflags); in __io_req_complete()
1834 io_put_req(req); in __io_req_complete()
1836 io_clean_op(req); in __io_req_complete()
1837 req->result = res; in __io_req_complete()
1838 req->compl.cflags = cflags; in __io_req_complete()
1839 list_add_tail(&req->compl.list, &cs->list); in __io_req_complete()
1845 static void io_req_complete(struct io_kiocb *req, long res) in io_req_complete() argument
1847 __io_req_complete(req, res, 0, NULL); in io_req_complete()
1850 static inline bool io_is_fallback_req(struct io_kiocb *req) in io_is_fallback_req() argument
1852 return req == (struct io_kiocb *) in io_is_fallback_req()
1853 ((unsigned long) req->ctx->fallback_req & ~1UL); in io_is_fallback_req()
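io_is_fallback_req() compares the request against ctx->fallback_req with the low pointer bit masked off; that bit appears to serve as the in-use marker for the single fallback request, hence the ~1UL. Reassembled from the hits:

static inline bool io_is_fallback_req(struct io_kiocb *req)
{
	return req == (struct io_kiocb *)
			((unsigned long) req->ctx->fallback_req & ~1UL);
}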
1858 struct io_kiocb *req; in io_get_fallback_req() local
1860 req = ctx->fallback_req; in io_get_fallback_req()
1862 return req; in io_get_fallback_req()
1897 static inline void io_put_file(struct io_kiocb *req, struct file *file, in io_put_file() argument
1901 percpu_ref_put(req->fixed_file_refs); in io_put_file()
1906 static void io_dismantle_req(struct io_kiocb *req) in io_dismantle_req() argument
1908 io_clean_op(req); in io_dismantle_req()
1910 if (req->async_data) in io_dismantle_req()
1911 kfree(req->async_data); in io_dismantle_req()
1912 if (req->file) in io_dismantle_req()
1913 io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE)); in io_dismantle_req()
1915 io_req_clean_work(req); in io_dismantle_req()
1918 static void __io_free_req(struct io_kiocb *req) in __io_free_req() argument
1920 struct io_uring_task *tctx = req->task->io_uring; in __io_free_req()
1921 struct io_ring_ctx *ctx = req->ctx; in __io_free_req()
1923 io_dismantle_req(req); in __io_free_req()
1928 put_task_struct(req->task); in __io_free_req()
1930 if (likely(!io_is_fallback_req(req))) in __io_free_req()
1931 kmem_cache_free(req_cachep, req); in __io_free_req()
1937 static void io_kill_linked_timeout(struct io_kiocb *req) in io_kill_linked_timeout() argument
1939 struct io_ring_ctx *ctx = req->ctx; in io_kill_linked_timeout()
1945 link = list_first_entry_or_null(&req->link_list, struct io_kiocb, in io_kill_linked_timeout()
1949 * req -> link t-out -> link t-out [-> ...] in io_kill_linked_timeout()
1963 req->flags &= ~REQ_F_LINK_TIMEOUT; in io_kill_linked_timeout()
1972 static struct io_kiocb *io_req_link_next(struct io_kiocb *req) in io_req_link_next() argument
1981 if (unlikely(list_empty(&req->link_list))) in io_req_link_next()
1984 nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list); in io_req_link_next()
1985 list_del_init(&req->link_list); in io_req_link_next()
1994 static void io_fail_links(struct io_kiocb *req) in io_fail_links() argument
1996 struct io_ring_ctx *ctx = req->ctx; in io_fail_links()
2000 while (!list_empty(&req->link_list)) { in io_fail_links()
2001 struct io_kiocb *link = list_first_entry(&req->link_list, in io_fail_links()
2005 trace_io_uring_fail_link(req, link); in io_fail_links()
2026 static struct io_kiocb *__io_req_find_next(struct io_kiocb *req) in __io_req_find_next() argument
2028 req->flags &= ~REQ_F_LINK_HEAD; in __io_req_find_next()
2029 if (req->flags & REQ_F_LINK_TIMEOUT) in __io_req_find_next()
2030 io_kill_linked_timeout(req); in __io_req_find_next()
2038 if (likely(!(req->flags & REQ_F_FAIL_LINK))) in __io_req_find_next()
2039 return io_req_link_next(req); in __io_req_find_next()
2040 io_fail_links(req); in __io_req_find_next()
2044 static struct io_kiocb *io_req_find_next(struct io_kiocb *req) in io_req_find_next() argument
2046 if (likely(!(req->flags & REQ_F_LINK_HEAD))) in io_req_find_next()
2048 return __io_req_find_next(req); in io_req_find_next()
2051 static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok) in io_req_task_work_add() argument
2053 struct task_struct *tsk = req->task; in io_req_task_work_add()
2054 struct io_ring_ctx *ctx = req->ctx; in io_req_task_work_add()
2071 ret = task_work_add(tsk, &req->task_work, notify); in io_req_task_work_add()
2078 static void io_req_task_work_add_fallback(struct io_kiocb *req, in io_req_task_work_add_fallback() argument
2081 struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq); in io_req_task_work_add_fallback()
2083 init_task_work(&req->task_work, cb); in io_req_task_work_add_fallback()
2084 task_work_add(tsk, &req->task_work, TWA_NONE); in io_req_task_work_add_fallback()
2088 static void __io_req_task_cancel(struct io_kiocb *req, int error) in __io_req_task_cancel() argument
2090 struct io_ring_ctx *ctx = req->ctx; in __io_req_task_cancel()
2093 io_cqring_fill_event(req, error); in __io_req_task_cancel()
2098 req_set_fail_links(req); in __io_req_task_cancel()
2099 io_double_put_req(req); in __io_req_task_cancel()
2104 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); in io_req_task_cancel() local
2105 struct io_ring_ctx *ctx = req->ctx; in io_req_task_cancel()
2108 __io_req_task_cancel(req, -ECANCELED); in io_req_task_cancel()
2113 static void __io_req_task_submit(struct io_kiocb *req) in __io_req_task_submit() argument
2115 struct io_ring_ctx *ctx = req->ctx; in __io_req_task_submit()
2119 __io_queue_sqe(req, NULL); in __io_req_task_submit()
2121 __io_req_task_cancel(req, -EFAULT); in __io_req_task_submit()
2130 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); in io_req_task_submit() local
2131 struct io_ring_ctx *ctx = req->ctx; in io_req_task_submit()
2133 __io_req_task_submit(req); in io_req_task_submit()
2137 static void io_req_task_queue(struct io_kiocb *req) in io_req_task_queue() argument
2141 init_task_work(&req->task_work, io_req_task_submit); in io_req_task_queue()
2142 percpu_ref_get(&req->ctx->refs); in io_req_task_queue()
2144 ret = io_req_task_work_add(req, true); in io_req_task_queue()
2146 io_req_task_work_add_fallback(req, io_req_task_cancel); in io_req_task_queue()
2149 static void io_queue_next(struct io_kiocb *req) in io_queue_next() argument
2151 struct io_kiocb *nxt = io_req_find_next(req); in io_queue_next()
2157 static void io_free_req(struct io_kiocb *req) in io_free_req() argument
2159 io_queue_next(req); in io_free_req()
2160 __io_free_req(req); in io_free_req()
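io_free_req() is fully visible in the hits: it first queues the next request in the link chain, then releases this one. Reassembled:

static void io_free_req(struct io_kiocb *req)
{
	io_queue_next(req);
	__io_free_req(req);
}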
2202 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req) in io_req_free_batch() argument
2204 if (unlikely(io_is_fallback_req(req))) { in io_req_free_batch()
2205 io_free_req(req); in io_req_free_batch()
2208 if (req->flags & REQ_F_LINK_HEAD) in io_req_free_batch()
2209 io_queue_next(req); in io_req_free_batch()
2211 if (req->task != rb->task) { in io_req_free_batch()
2220 rb->task = req->task; in io_req_free_batch()
2225 io_dismantle_req(req); in io_req_free_batch()
2226 rb->reqs[rb->to_free++] = req; in io_req_free_batch()
2228 __io_req_free_batch_flush(req->ctx, rb); in io_req_free_batch()
2235 static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) in io_put_req_find_next() argument
2239 if (refcount_dec_and_test(&req->refs)) { in io_put_req_find_next()
2240 nxt = io_req_find_next(req); in io_put_req_find_next()
2241 __io_free_req(req); in io_put_req_find_next()
2246 static void io_put_req(struct io_kiocb *req) in io_put_req() argument
2248 if (refcount_dec_and_test(&req->refs)) in io_put_req()
2249 io_free_req(req); in io_put_req()
2254 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); in io_put_req_deferred_cb() local
2256 io_free_req(req); in io_put_req_deferred_cb()
2259 static void io_free_req_deferred(struct io_kiocb *req) in io_free_req_deferred() argument
2263 init_task_work(&req->task_work, io_put_req_deferred_cb); in io_free_req_deferred()
2264 ret = io_req_task_work_add(req, true); in io_free_req_deferred()
2266 io_req_task_work_add_fallback(req, io_put_req_deferred_cb); in io_free_req_deferred()
2269 static inline void io_put_req_deferred(struct io_kiocb *req, int refs) in io_put_req_deferred() argument
2271 if (refcount_sub_and_test(refs, &req->refs)) in io_put_req_deferred()
2272 io_free_req_deferred(req); in io_put_req_deferred()
2275 static struct io_wq_work *io_steal_work(struct io_kiocb *req) in io_steal_work() argument
2284 if (refcount_read(&req->refs) != 1) in io_steal_work()
2287 nxt = io_req_find_next(req); in io_steal_work()
2291 static void io_double_put_req(struct io_kiocb *req) in io_double_put_req() argument
2294 if (refcount_sub_and_test(2, &req->refs)) in io_double_put_req()
2295 io_free_req(req); in io_double_put_req()
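The three reference-drop helpers are each complete in the hits above: io_put_req() drops a single reference, io_put_req_deferred() drops an arbitrary count and defers the actual free to task work, and io_double_put_req() drops two at once (the submission and completion references). Reassembled:

static void io_put_req(struct io_kiocb *req)
{
	if (refcount_dec_and_test(&req->refs))
		io_free_req(req);
}

static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
{
	if (refcount_sub_and_test(refs, &req->refs))
		io_free_req_deferred(req);
}

static void io_double_put_req(struct io_kiocb *req)
{
	/* drop both the submit and complete references */
	if (refcount_sub_and_test(2, &req->refs))
		io_free_req(req);
}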
2315 static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf) in io_put_kbuf() argument
2321 req->flags &= ~REQ_F_BUFFER_SELECTED; in io_put_kbuf()
2326 static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req) in io_put_rw_kbuf() argument
2330 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_put_rw_kbuf()
2331 return io_put_kbuf(req, kbuf); in io_put_rw_kbuf()
2353 struct io_kiocb *req; in io_iopoll_queue() local
2356 req = list_first_entry(again, struct io_kiocb, iopoll_entry); in io_iopoll_queue()
2357 list_del(&req->iopoll_entry); in io_iopoll_queue()
2358 __io_complete_rw(req, -EAGAIN, 0, NULL); in io_iopoll_queue()
2369 struct io_kiocb *req; in io_iopoll_complete() local
2379 req = list_first_entry(done, struct io_kiocb, iopoll_entry); in io_iopoll_complete()
2380 if (READ_ONCE(req->result) == -EAGAIN) { in io_iopoll_complete()
2381 req->result = 0; in io_iopoll_complete()
2382 req->iopoll_completed = 0; in io_iopoll_complete()
2383 list_move_tail(&req->iopoll_entry, &again); in io_iopoll_complete()
2386 list_del(&req->iopoll_entry); in io_iopoll_complete()
2388 if (req->flags & REQ_F_BUFFER_SELECTED) in io_iopoll_complete()
2389 cflags = io_put_rw_kbuf(req); in io_iopoll_complete()
2391 __io_cqring_fill_event(req, req->result, cflags); in io_iopoll_complete()
2394 if (refcount_dec_and_test(&req->refs)) in io_iopoll_complete()
2395 io_req_free_batch(&rb, req); in io_iopoll_complete()
2410 struct io_kiocb *req, *tmp; in io_do_iopoll() local
2422 list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, iopoll_entry) { in io_do_iopoll()
2423 struct kiocb *kiocb = &req->rw.kiocb; in io_do_iopoll()
2430 if (READ_ONCE(req->iopoll_completed)) { in io_do_iopoll()
2431 list_move_tail(&req->iopoll_entry, &done); in io_do_iopoll()
2441 /* iopoll may have completed current req */ in io_do_iopoll()
2442 if (READ_ONCE(req->iopoll_completed)) in io_do_iopoll()
2443 list_move_tail(&req->iopoll_entry, &done); in io_do_iopoll()
2557 static void kiocb_end_write(struct io_kiocb *req) in kiocb_end_write() argument
2563 if (req->flags & REQ_F_ISREG) { in kiocb_end_write()
2564 struct inode *inode = file_inode(req->file); in kiocb_end_write()
2568 file_end_write(req->file); in kiocb_end_write()
2574 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in io_complete_rw_common() local
2578 kiocb_end_write(req); in io_complete_rw_common()
2580 if (res != req->result) in io_complete_rw_common()
2581 req_set_fail_links(req); in io_complete_rw_common()
2582 if (req->flags & REQ_F_BUFFER_SELECTED) in io_complete_rw_common()
2583 cflags = io_put_rw_kbuf(req); in io_complete_rw_common()
2584 __io_req_complete(req, res, cflags, cs); in io_complete_rw_common()
2588 static bool io_resubmit_prep(struct io_kiocb *req, int error) in io_resubmit_prep() argument
2590 req_set_fail_links(req); in io_resubmit_prep()
2595 static bool io_rw_reissue(struct io_kiocb *req, long res) in io_rw_reissue() argument
2598 umode_t mode = file_inode(req->file)->i_mode; in io_rw_reissue()
2610 if (percpu_ref_is_dying(&req->ctx->refs)) in io_rw_reissue()
2613 ret = io_sq_thread_acquire_mm(req->ctx, req); in io_rw_reissue()
2615 if (io_resubmit_prep(req, ret)) { in io_rw_reissue()
2616 refcount_inc(&req->refs); in io_rw_reissue()
2617 io_queue_async_work(req); in io_rw_reissue()
2625 static void __io_complete_rw(struct io_kiocb *req, long res, long res2, in __io_complete_rw() argument
2628 if (!io_rw_reissue(req, res)) in __io_complete_rw()
2629 io_complete_rw_common(&req->rw.kiocb, res, cs); in __io_complete_rw()
2634 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in io_complete_rw() local
2636 __io_complete_rw(req, res, res2, NULL); in io_complete_rw()
2641 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in io_complete_rw_iopoll() local
2644 kiocb_end_write(req); in io_complete_rw_iopoll()
2646 if (res != -EAGAIN && res != req->result) in io_complete_rw_iopoll()
2647 req_set_fail_links(req); in io_complete_rw_iopoll()
2649 WRITE_ONCE(req->result, res); in io_complete_rw_iopoll()
2652 WRITE_ONCE(req->iopoll_completed, 1); in io_complete_rw_iopoll()
2661 static void io_iopoll_req_issued(struct io_kiocb *req) in io_iopoll_req_issued() argument
2663 struct io_ring_ctx *ctx = req->ctx; in io_iopoll_req_issued()
2677 if (list_req->file != req->file) in io_iopoll_req_issued()
2685 if (READ_ONCE(req->iopoll_completed)) in io_iopoll_req_issued()
2686 list_add(&req->iopoll_entry, &ctx->iopoll_list); in io_iopoll_req_issued()
2688 list_add_tail(&req->iopoll_entry, &ctx->iopoll_list); in io_iopoll_req_issued()
2779 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_rw() argument
2781 struct io_ring_ctx *ctx = req->ctx; in io_prep_rw()
2782 struct kiocb *kiocb = &req->rw.kiocb; in io_prep_rw()
2786 if (S_ISREG(file_inode(req->file)->i_mode)) in io_prep_rw()
2787 req->flags |= REQ_F_ISREG; in io_prep_rw()
2790 if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) { in io_prep_rw()
2791 req->flags |= REQ_F_CUR_POS; in io_prep_rw()
2792 kiocb->ki_pos = req->file->f_pos; in io_prep_rw()
2812 req->flags |= REQ_F_NOWAIT; in io_prep_rw()
2821 req->iopoll_completed = 0; in io_prep_rw()
2828 req->rw.addr = READ_ONCE(sqe->addr); in io_prep_rw()
2829 req->rw.len = READ_ONCE(sqe->len); in io_prep_rw()
2830 req->buf_index = READ_ONCE(sqe->buf_index); in io_prep_rw()
2858 struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); in kiocb_done() local
2859 struct io_async_rw *io = req->async_data; in kiocb_done()
2869 if (req->flags & REQ_F_CUR_POS) in kiocb_done()
2870 req->file->f_pos = kiocb->ki_pos; in kiocb_done()
2872 __io_complete_rw(req, ret, 0, cs); in kiocb_done()
2877 static ssize_t io_import_fixed(struct io_kiocb *req, int rw, in io_import_fixed() argument
2880 struct io_ring_ctx *ctx = req->ctx; in io_import_fixed()
2881 size_t len = req->rw.len; in io_import_fixed()
2883 u16 index, buf_index = req->buf_index; in io_import_fixed()
2891 buf_addr = req->rw.addr; in io_import_fixed()
2963 static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len, in io_buffer_select() argument
2969 if (req->flags & REQ_F_BUFFER_SELECTED) in io_buffer_select()
2972 io_ring_submit_lock(req->ctx, needs_lock); in io_buffer_select()
2974 lockdep_assert_held(&req->ctx->uring_lock); in io_buffer_select()
2976 head = xa_load(&req->ctx->io_buffers, bgid); in io_buffer_select()
2984 xa_erase(&req->ctx->io_buffers, bgid); in io_buffer_select()
2992 io_ring_submit_unlock(req->ctx, needs_lock); in io_buffer_select()
2997 static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len, in io_rw_buffer_select() argument
3003 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_rw_buffer_select()
3004 bgid = req->buf_index; in io_rw_buffer_select()
3005 kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock); in io_rw_buffer_select()
3008 req->rw.addr = (u64) (unsigned long) kbuf; in io_rw_buffer_select()
3009 req->flags |= REQ_F_BUFFER_SELECTED; in io_rw_buffer_select()
3014 static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov, in io_compat_import() argument
3022 uiov = u64_to_user_ptr(req->rw.addr); in io_compat_import()
3031 buf = io_rw_buffer_select(req, &len, needs_lock); in io_compat_import()
3040 static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, in __io_iov_buffer_select() argument
3043 struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr); in __io_iov_buffer_select()
3053 buf = io_rw_buffer_select(req, &len, needs_lock); in __io_iov_buffer_select()
3061 static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, in io_iov_buffer_select() argument
3064 if (req->flags & REQ_F_BUFFER_SELECTED) { in io_iov_buffer_select()
3067 kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; in io_iov_buffer_select()
3072 if (req->rw.len != 1) in io_iov_buffer_select()
3076 if (req->ctx->compat) in io_iov_buffer_select()
3077 return io_compat_import(req, iov, needs_lock); in io_iov_buffer_select()
3080 return __io_iov_buffer_select(req, iov, needs_lock); in io_iov_buffer_select()
3083 static ssize_t __io_import_iovec(int rw, struct io_kiocb *req, in __io_import_iovec() argument
3087 void __user *buf = u64_to_user_ptr(req->rw.addr); in __io_import_iovec()
3088 size_t sqe_len = req->rw.len; in __io_import_iovec()
3092 opcode = req->opcode; in __io_import_iovec()
3095 return io_import_fixed(req, rw, iter); in __io_import_iovec()
3099 if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT)) in __io_import_iovec()
3103 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_import_iovec()
3104 buf = io_rw_buffer_select(req, &sqe_len, needs_lock); in __io_import_iovec()
3107 req->rw.len = sqe_len; in __io_import_iovec()
3115 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_import_iovec()
3116 ret = io_iov_buffer_select(req, *iovec, needs_lock); in __io_import_iovec()
3126 req->ctx->compat); in __io_import_iovec()
3129 static ssize_t io_import_iovec(int rw, struct io_kiocb *req, in io_import_iovec() argument
3133 struct io_async_rw *iorw = req->async_data; in io_import_iovec()
3136 return __io_import_iovec(rw, req, iovec, iter, needs_lock); in io_import_iovec()
3150 static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter) in loop_rw_iter() argument
3152 struct kiocb *kiocb = &req->rw.kiocb; in loop_rw_iter()
3153 struct file *file = req->file; in loop_rw_iter()
3173 iovec.iov_base = u64_to_user_ptr(req->rw.addr); in loop_rw_iter()
3174 iovec.iov_len = req->rw.len; in loop_rw_iter()
3193 req->rw.len -= nr; in loop_rw_iter()
3194 req->rw.addr += nr; in loop_rw_iter()
3204 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, in io_req_map_rw() argument
3207 struct io_async_rw *rw = req->async_data; in io_req_map_rw()
3227 req->flags |= REQ_F_NEED_CLEANUP; in io_req_map_rw()
3231 static inline int __io_alloc_async_data(struct io_kiocb *req) in __io_alloc_async_data() argument
3233 WARN_ON_ONCE(!io_op_defs[req->opcode].async_size); in __io_alloc_async_data()
3234 req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL); in __io_alloc_async_data()
3235 return req->async_data == NULL; in __io_alloc_async_data()
3238 static int io_alloc_async_data(struct io_kiocb *req) in io_alloc_async_data() argument
3240 if (!io_op_defs[req->opcode].needs_async_data) in io_alloc_async_data()
3243 return __io_alloc_async_data(req); in io_alloc_async_data()
3246 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, in io_setup_async_rw() argument
3250 if (!force && !io_op_defs[req->opcode].needs_async_data) in io_setup_async_rw()
3252 if (!req->async_data) { in io_setup_async_rw()
3253 if (__io_alloc_async_data(req)) in io_setup_async_rw()
3256 io_req_map_rw(req, iovec, fast_iov, iter); in io_setup_async_rw()
3261 static inline int io_rw_prep_async(struct io_kiocb *req, int rw) in io_rw_prep_async() argument
3263 struct io_async_rw *iorw = req->async_data; in io_rw_prep_async()
3267 ret = __io_import_iovec(rw, req, &iov, &iorw->iter, false); in io_rw_prep_async()
3274 req->flags |= REQ_F_NEED_CLEANUP; in io_rw_prep_async()
3278 static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_read_prep() argument
3282 ret = io_prep_rw(req, sqe); in io_read_prep()
3286 if (unlikely(!(req->file->f_mode & FMODE_READ))) in io_read_prep()
3290 if (!req->async_data) in io_read_prep()
3292 return io_rw_prep_async(req, READ); in io_read_prep()
3309 struct io_kiocb *req = wait->private; in io_async_buf_func() local
3318 req->rw.kiocb.ki_flags &= ~IOCB_WAITQ; in io_async_buf_func()
3321 init_task_work(&req->task_work, io_req_task_submit); in io_async_buf_func()
3322 percpu_ref_get(&req->ctx->refs); in io_async_buf_func()
3325 refcount_inc(&req->refs); in io_async_buf_func()
3326 ret = io_req_task_work_add(req, true); in io_async_buf_func()
3328 io_req_task_work_add_fallback(req, io_req_task_cancel); in io_async_buf_func()
3344 static bool io_rw_should_retry(struct io_kiocb *req) in io_rw_should_retry() argument
3346 struct io_async_rw *rw = req->async_data; in io_rw_should_retry()
3348 struct kiocb *kiocb = &req->rw.kiocb; in io_rw_should_retry()
3351 if (req->flags & REQ_F_NOWAIT) in io_rw_should_retry()
3362 if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC)) in io_rw_should_retry()
3366 wait->wait.private = req; in io_rw_should_retry()
3375 static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) in io_iter_do_read() argument
3377 if (req->file->f_op->read_iter) in io_iter_do_read()
3378 return call_read_iter(req->file, &req->rw.kiocb, iter); in io_iter_do_read()
3379 else if (req->file->f_op->read) in io_iter_do_read()
3380 return loop_rw_iter(READ, req, iter); in io_iter_do_read()
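io_iter_do_read() prefers the file's ->read_iter method and falls back to the per-segment loop_rw_iter() path for files that only implement ->read. The final error return does not contain "req" and so is not listed; it is filled in below as an assumption:

static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
{
	if (req->file->f_op->read_iter)
		return call_read_iter(req->file, &req->rw.kiocb, iter);
	else if (req->file->f_op->read)
		return loop_rw_iter(READ, req, iter);
	else
		return -EINVAL;		/* assumption: fallback return is not a listed hit */
}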
3385 static int io_read(struct io_kiocb *req, bool force_nonblock, in io_read() argument
3389 struct kiocb *kiocb = &req->rw.kiocb; in io_read()
3392 struct io_async_rw *rw = req->async_data; in io_read()
3399 ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); in io_read()
3404 req->result = io_size; in io_read()
3415 no_async = force_nonblock && !io_file_supports_async(req->file, READ); in io_read()
3419 ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size); in io_read()
3423 ret = io_iter_do_read(req, iter); in io_read()
3432 if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_read()
3435 if (req->file->f_flags & O_NONBLOCK) in io_read()
3448 (req->file->f_flags & O_NONBLOCK) || !(req->flags & REQ_F_ISREG)) in io_read()
3453 ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true); in io_read()
3460 rw = req->async_data; in io_read()
3468 if (!io_rw_should_retry(req)) { in io_read()
3479 ret = io_iter_do_read(req, iter); in io_read()
3498 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_write_prep() argument
3502 ret = io_prep_rw(req, sqe); in io_write_prep()
3506 if (unlikely(!(req->file->f_mode & FMODE_WRITE))) in io_write_prep()
3510 if (!req->async_data) in io_write_prep()
3512 return io_rw_prep_async(req, WRITE); in io_write_prep()
3515 static int io_write(struct io_kiocb *req, bool force_nonblock, in io_write() argument
3519 struct kiocb *kiocb = &req->rw.kiocb; in io_write()
3522 struct io_async_rw *rw = req->async_data; in io_write()
3528 ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); in io_write()
3533 req->result = io_size; in io_write()
3542 if (force_nonblock && !io_file_supports_async(req->file, WRITE)) in io_write()
3547 (req->flags & REQ_F_ISREG)) in io_write()
3550 ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size); in io_write()
3561 if (req->flags & REQ_F_ISREG) { in io_write()
3562 sb_start_write(file_inode(req->file)->i_sb); in io_write()
3563 __sb_writers_release(file_inode(req->file)->i_sb, in io_write()
3568 if (req->file->f_op->write_iter) in io_write()
3569 ret2 = call_write_iter(req->file, kiocb, iter); in io_write()
3570 else if (req->file->f_op->write) in io_write()
3571 ret2 = loop_rw_iter(WRITE, req, iter); in io_write()
3582 if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK)) in io_write()
3586 if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) in io_write()
3594 ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); in io_write()
3605 static int __io_splice_prep(struct io_kiocb *req, in __io_splice_prep() argument
3608 struct io_splice* sp = &req->splice; in __io_splice_prep()
3611 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in __io_splice_prep()
3621 sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in), in __io_splice_prep()
3625 req->flags |= REQ_F_NEED_CLEANUP; in __io_splice_prep()
3632 io_req_init_async(req); in __io_splice_prep()
3633 req->work.flags |= IO_WQ_WORK_UNBOUND; in __io_splice_prep()
3639 static int io_tee_prep(struct io_kiocb *req, in io_tee_prep() argument
3644 return __io_splice_prep(req, sqe); in io_tee_prep()
3647 static int io_tee(struct io_kiocb *req, bool force_nonblock) in io_tee() argument
3649 struct io_splice *sp = &req->splice; in io_tee()
3660 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED)); in io_tee()
3661 req->flags &= ~REQ_F_NEED_CLEANUP; in io_tee()
3664 req_set_fail_links(req); in io_tee()
3665 io_req_complete(req, ret); in io_tee()
3669 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_splice_prep() argument
3671 struct io_splice* sp = &req->splice; in io_splice_prep()
3675 return __io_splice_prep(req, sqe); in io_splice_prep()
3678 static int io_splice(struct io_kiocb *req, bool force_nonblock) in io_splice() argument
3680 struct io_splice *sp = &req->splice; in io_splice()
3696 io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED)); in io_splice()
3697 req->flags &= ~REQ_F_NEED_CLEANUP; in io_splice()
3700 req_set_fail_links(req); in io_splice()
3701 io_req_complete(req, ret); in io_splice()
3708 static int io_nop(struct io_kiocb *req, struct io_comp_state *cs) in io_nop() argument
3710 struct io_ring_ctx *ctx = req->ctx; in io_nop()
3715 __io_req_complete(req, 0, 0, cs); in io_nop()
3719 static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_fsync() argument
3721 struct io_ring_ctx *ctx = req->ctx; in io_prep_fsync()
3723 if (!req->file) in io_prep_fsync()
3732 req->sync.flags = READ_ONCE(sqe->fsync_flags); in io_prep_fsync()
3733 if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC)) in io_prep_fsync()
3736 req->sync.off = READ_ONCE(sqe->off); in io_prep_fsync()
3737 req->sync.len = READ_ONCE(sqe->len); in io_prep_fsync()
3741 static int io_fsync(struct io_kiocb *req, bool force_nonblock) in io_fsync() argument
3743 loff_t end = req->sync.off + req->sync.len; in io_fsync()
3750 ret = vfs_fsync_range(req->file, req->sync.off, in io_fsync()
3752 req->sync.flags & IORING_FSYNC_DATASYNC); in io_fsync()
3754 req_set_fail_links(req); in io_fsync()
3755 io_req_complete(req, ret); in io_fsync()
3759 static int io_fallocate_prep(struct io_kiocb *req, in io_fallocate_prep() argument
3765 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_fallocate_prep()
3768 req->sync.off = READ_ONCE(sqe->off); in io_fallocate_prep()
3769 req->sync.len = READ_ONCE(sqe->addr); in io_fallocate_prep()
3770 req->sync.mode = READ_ONCE(sqe->len); in io_fallocate_prep()
3774 static int io_fallocate(struct io_kiocb *req, bool force_nonblock) in io_fallocate() argument
3781 ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off, in io_fallocate()
3782 req->sync.len); in io_fallocate()
3784 req_set_fail_links(req); in io_fallocate()
3785 io_req_complete(req, ret); in io_fallocate()
3789 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in __io_openat_prep() argument
3796 if (unlikely(req->flags & REQ_F_FIXED_FILE)) in __io_openat_prep()
3800 if (!(req->open.how.flags & O_PATH) && force_o_largefile()) in __io_openat_prep()
3801 req->open.how.flags |= O_LARGEFILE; in __io_openat_prep()
3803 req->open.dfd = READ_ONCE(sqe->fd); in __io_openat_prep()
3805 req->open.filename = getname(fname); in __io_openat_prep()
3806 if (IS_ERR(req->open.filename)) { in __io_openat_prep()
3807 ret = PTR_ERR(req->open.filename); in __io_openat_prep()
3808 req->open.filename = NULL; in __io_openat_prep()
3811 req->open.nofile = rlimit(RLIMIT_NOFILE); in __io_openat_prep()
3812 req->open.ignore_nonblock = false; in __io_openat_prep()
3813 req->flags |= REQ_F_NEED_CLEANUP; in __io_openat_prep()
3817 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat_prep() argument
3821 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) in io_openat_prep()
3825 req->open.how = build_open_how(flags, mode); in io_openat_prep()
3826 return __io_openat_prep(req, sqe); in io_openat_prep()
3829 static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_openat2_prep() argument
3835 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) in io_openat2_prep()
3842 ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how, in io_openat2_prep()
3847 return __io_openat_prep(req, sqe); in io_openat2_prep()
3850 static int io_openat2(struct io_kiocb *req, bool force_nonblock) in io_openat2() argument
3856 if (force_nonblock && !req->open.ignore_nonblock) in io_openat2()
3859 ret = build_open_flags(&req->open.how, &op); in io_openat2()
3863 ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile); in io_openat2()
3867 file = do_filp_open(req->open.dfd, req->open.filename, &op); in io_openat2()
3881 req->open.ignore_nonblock = true; in io_openat2()
3882 refcount_inc(&req->refs); in io_openat2()
3883 io_req_task_queue(req); in io_openat2()
3891 putname(req->open.filename); in io_openat2()
3892 req->flags &= ~REQ_F_NEED_CLEANUP; in io_openat2()
3894 req_set_fail_links(req); in io_openat2()
3895 io_req_complete(req, ret); in io_openat2()
3899 static int io_openat(struct io_kiocb *req, bool force_nonblock) in io_openat() argument
3901 return io_openat2(req, force_nonblock); in io_openat()
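io_openat() is a thin wrapper; the open_how-based io_openat2() path does the work for both opcodes. From the hits:

static int io_openat(struct io_kiocb *req, bool force_nonblock)
{
	return io_openat2(req, force_nonblock);
}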
3904 static int io_remove_buffers_prep(struct io_kiocb *req, in io_remove_buffers_prep() argument
3907 struct io_provide_buf *p = &req->pbuf; in io_remove_buffers_prep()
3950 static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock, in io_remove_buffers() argument
3953 struct io_provide_buf *p = &req->pbuf; in io_remove_buffers()
3954 struct io_ring_ctx *ctx = req->ctx; in io_remove_buffers()
3967 req_set_fail_links(req); in io_remove_buffers()
3971 __io_req_complete(req, ret, 0, cs); in io_remove_buffers()
3975 __io_req_complete(req, ret, 0, cs); in io_remove_buffers()
3980 static int io_provide_buffers_prep(struct io_kiocb *req, in io_provide_buffers_prep() argument
3984 struct io_provide_buf *p = &req->pbuf; in io_provide_buffers_prep()
4042 static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock, in io_provide_buffers() argument
4045 struct io_provide_buf *p = &req->pbuf; in io_provide_buffers()
4046 struct io_ring_ctx *ctx = req->ctx; in io_provide_buffers()
4063 req_set_fail_links(req); in io_provide_buffers()
4067 __io_req_complete(req, ret, 0, cs); in io_provide_buffers()
4071 __io_req_complete(req, ret, 0, cs); in io_provide_buffers()
4076 static int io_epoll_ctl_prep(struct io_kiocb *req, in io_epoll_ctl_prep() argument
4082 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL))) in io_epoll_ctl_prep()
4085 req->epoll.epfd = READ_ONCE(sqe->fd); in io_epoll_ctl_prep()
4086 req->epoll.op = READ_ONCE(sqe->len); in io_epoll_ctl_prep()
4087 req->epoll.fd = READ_ONCE(sqe->off); in io_epoll_ctl_prep()
4089 if (ep_op_has_event(req->epoll.op)) { in io_epoll_ctl_prep()
4093 if (copy_from_user(&req->epoll.event, ev, sizeof(*ev))) in io_epoll_ctl_prep()
4103 static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock, in io_epoll_ctl() argument
4107 struct io_epoll *ie = &req->epoll; in io_epoll_ctl()
4115 req_set_fail_links(req); in io_epoll_ctl()
4116 __io_req_complete(req, ret, 0, cs); in io_epoll_ctl()
4123 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_madvise_prep() argument
4128 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_madvise_prep()
4131 req->madvise.addr = READ_ONCE(sqe->addr); in io_madvise_prep()
4132 req->madvise.len = READ_ONCE(sqe->len); in io_madvise_prep()
4133 req->madvise.advice = READ_ONCE(sqe->fadvise_advice); in io_madvise_prep()
4140 static int io_madvise(struct io_kiocb *req, bool force_nonblock) in io_madvise() argument
4143 struct io_madvise *ma = &req->madvise; in io_madvise()
4151 req_set_fail_links(req); in io_madvise()
4152 io_req_complete(req, ret); in io_madvise()
4159 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_fadvise_prep() argument
4163 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_fadvise_prep()
4166 req->fadvise.offset = READ_ONCE(sqe->off); in io_fadvise_prep()
4167 req->fadvise.len = READ_ONCE(sqe->len); in io_fadvise_prep()
4168 req->fadvise.advice = READ_ONCE(sqe->fadvise_advice); in io_fadvise_prep()
4172 static int io_fadvise(struct io_kiocb *req, bool force_nonblock) in io_fadvise() argument
4174 struct io_fadvise *fa = &req->fadvise; in io_fadvise()
4188 ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice); in io_fadvise()
4190 req_set_fail_links(req); in io_fadvise()
4191 io_req_complete(req, ret); in io_fadvise()
4195 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_statx_prep() argument
4197 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL))) in io_statx_prep()
4201 if (req->flags & REQ_F_FIXED_FILE) in io_statx_prep()
4204 req->statx.dfd = READ_ONCE(sqe->fd); in io_statx_prep()
4205 req->statx.mask = READ_ONCE(sqe->len); in io_statx_prep()
4206 req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_statx_prep()
4207 req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2)); in io_statx_prep()
4208 req->statx.flags = READ_ONCE(sqe->statx_flags); in io_statx_prep()
4213 static int io_statx(struct io_kiocb *req, bool force_nonblock) in io_statx() argument
4215 struct io_statx *ctx = &req->statx; in io_statx()
4221 req->flags |= REQ_F_NO_FILE_TABLE; in io_statx()
4229 req_set_fail_links(req); in io_statx()
4230 io_req_complete(req, ret); in io_statx()
4234 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_close_prep() argument
4241 io_req_init_async(req); in io_close_prep()
4243 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) in io_close_prep()
4248 if (req->flags & REQ_F_FIXED_FILE) in io_close_prep()
4251 req->close.fd = READ_ONCE(sqe->fd); in io_close_prep()
4252 if ((req->file && req->file->f_op == &io_uring_fops)) in io_close_prep()
4255 req->close.put_file = NULL; in io_close_prep()
4259 static int io_close(struct io_kiocb *req, bool force_nonblock, in io_close() argument
4262 struct io_close *close = &req->close; in io_close()
4275 req->work.flags |= IO_WQ_WORK_NO_CANCEL; in io_close()
4277 req->flags &= ~REQ_F_NOWAIT; in io_close()
4279 req->flags |= REQ_F_NO_FILE_TABLE; in io_close()
4284 ret = filp_close(close->put_file, req->work.identity->files); in io_close()
4286 req_set_fail_links(req); in io_close()
4289 __io_req_complete(req, ret, 0, cs); in io_close()
4293 static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_sfr() argument
4295 struct io_ring_ctx *ctx = req->ctx; in io_prep_sfr()
4297 if (!req->file) in io_prep_sfr()
4306 req->sync.off = READ_ONCE(sqe->off); in io_prep_sfr()
4307 req->sync.len = READ_ONCE(sqe->len); in io_prep_sfr()
4308 req->sync.flags = READ_ONCE(sqe->sync_range_flags); in io_prep_sfr()
4312 static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock) in io_sync_file_range() argument
4320 ret = sync_file_range(req->file, req->sync.off, req->sync.len, in io_sync_file_range()
4321 req->sync.flags); in io_sync_file_range()
4323 req_set_fail_links(req); in io_sync_file_range()
4324 io_req_complete(req, ret); in io_sync_file_range()
4329 static int io_setup_async_msg(struct io_kiocb *req, in io_setup_async_msg() argument
4332 struct io_async_msghdr *async_msg = req->async_data; in io_setup_async_msg()
4336 if (io_alloc_async_data(req)) { in io_setup_async_msg()
4341 async_msg = req->async_data; in io_setup_async_msg()
4342 req->flags |= REQ_F_NEED_CLEANUP; in io_setup_async_msg()
4347 static int io_sendmsg_copy_hdr(struct io_kiocb *req, in io_sendmsg_copy_hdr() argument
4352 return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg, in io_sendmsg_copy_hdr()
4353 req->sr_msg.msg_flags, &iomsg->iov); in io_sendmsg_copy_hdr()
4356 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sendmsg_prep() argument
4358 struct io_async_msghdr *async_msg = req->async_data; in io_sendmsg_prep()
4359 struct io_sr_msg *sr = &req->sr_msg; in io_sendmsg_prep()
4362 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_sendmsg_prep()
4370 if (req->ctx->compat) in io_sendmsg_prep()
4374 if (!async_msg || !io_op_defs[req->opcode].needs_async_data) in io_sendmsg_prep()
4376 ret = io_sendmsg_copy_hdr(req, async_msg); in io_sendmsg_prep()
4378 req->flags |= REQ_F_NEED_CLEANUP; in io_sendmsg_prep()
4382 static int io_sendmsg(struct io_kiocb *req, bool force_nonblock, in io_sendmsg() argument
4391 sock = sock_from_file(req->file, &ret); in io_sendmsg()
4395 if (req->async_data) { in io_sendmsg()
4396 kmsg = req->async_data; in io_sendmsg()
4403 ret = io_sendmsg_copy_hdr(req, &iomsg); in io_sendmsg()
4409 flags = req->sr_msg.msg_flags | MSG_NOSIGNAL; in io_sendmsg()
4411 req->flags |= REQ_F_NOWAIT; in io_sendmsg()
4420 return io_setup_async_msg(req, kmsg); in io_sendmsg()
4426 req->flags &= ~REQ_F_NEED_CLEANUP; in io_sendmsg()
4428 req_set_fail_links(req); in io_sendmsg()
4429 __io_req_complete(req, ret, 0, cs); in io_sendmsg()
4433 static int io_send(struct io_kiocb *req, bool force_nonblock, in io_send() argument
4436 struct io_sr_msg *sr = &req->sr_msg; in io_send()
4444 sock = sock_from_file(req->file, &ret); in io_send()
4457 flags = req->sr_msg.msg_flags | MSG_NOSIGNAL; in io_send()
4459 req->flags |= REQ_F_NOWAIT; in io_send()
4474 req_set_fail_links(req); in io_send()
4475 __io_req_complete(req, ret, 0, cs); in io_send()
4479 static int __io_recvmsg_copy_hdr(struct io_kiocb *req, in __io_recvmsg_copy_hdr() argument
4482 struct io_sr_msg *sr = &req->sr_msg; in __io_recvmsg_copy_hdr()
4492 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_recvmsg_copy_hdr()
4513 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, in __io_compat_recvmsg_copy_hdr() argument
4517 struct io_sr_msg *sr = &req->sr_msg; in __io_compat_recvmsg_copy_hdr()
4530 if (req->flags & REQ_F_BUFFER_SELECT) { in __io_compat_recvmsg_copy_hdr()
4556 static int io_recvmsg_copy_hdr(struct io_kiocb *req, in io_recvmsg_copy_hdr() argument
4563 if (req->ctx->compat) in io_recvmsg_copy_hdr()
4564 return __io_compat_recvmsg_copy_hdr(req, iomsg); in io_recvmsg_copy_hdr()
4567 return __io_recvmsg_copy_hdr(req, iomsg); in io_recvmsg_copy_hdr()
4570 static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req, in io_recv_buffer_select() argument
4573 struct io_sr_msg *sr = &req->sr_msg; in io_recv_buffer_select()
4576 kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock); in io_recv_buffer_select()
4581 req->flags |= REQ_F_BUFFER_SELECTED; in io_recv_buffer_select()
4585 static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req) in io_put_recv_kbuf() argument
4587 return io_put_kbuf(req, req->sr_msg.kbuf); in io_put_recv_kbuf()
4590 static int io_recvmsg_prep(struct io_kiocb *req, in io_recvmsg_prep() argument
4593 struct io_async_msghdr *async_msg = req->async_data; in io_recvmsg_prep()
4594 struct io_sr_msg *sr = &req->sr_msg; in io_recvmsg_prep()
4597 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_recvmsg_prep()
4606 if (req->ctx->compat) in io_recvmsg_prep()
4610 if (!async_msg || !io_op_defs[req->opcode].needs_async_data) in io_recvmsg_prep()
4612 ret = io_recvmsg_copy_hdr(req, async_msg); in io_recvmsg_prep()
4614 req->flags |= REQ_F_NEED_CLEANUP; in io_recvmsg_prep()
4618 static int io_recvmsg(struct io_kiocb *req, bool force_nonblock, in io_recvmsg() argument
4628 sock = sock_from_file(req->file, &ret); in io_recvmsg()
4632 if (req->async_data) { in io_recvmsg()
4633 kmsg = req->async_data; in io_recvmsg()
4640 ret = io_recvmsg_copy_hdr(req, &iomsg); in io_recvmsg()
4646 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recvmsg()
4647 kbuf = io_recv_buffer_select(req, !force_nonblock); in io_recvmsg()
4652 1, req->sr_msg.len); in io_recvmsg()
4655 flags = req->sr_msg.msg_flags | MSG_NOSIGNAL; in io_recvmsg()
4657 req->flags |= REQ_F_NOWAIT; in io_recvmsg()
4664 ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg, in io_recvmsg()
4667 return io_setup_async_msg(req, kmsg); in io_recvmsg()
4671 if (req->flags & REQ_F_BUFFER_SELECTED) in io_recvmsg()
4672 cflags = io_put_recv_kbuf(req); in io_recvmsg()
4675 req->flags &= ~REQ_F_NEED_CLEANUP; in io_recvmsg()
4677 req_set_fail_links(req); in io_recvmsg()
4678 __io_req_complete(req, ret, cflags, cs); in io_recvmsg()
4682 static int io_recv(struct io_kiocb *req, bool force_nonblock, in io_recv() argument
4686 struct io_sr_msg *sr = &req->sr_msg; in io_recv()
4695 sock = sock_from_file(req->file, &ret); in io_recv()
4699 if (req->flags & REQ_F_BUFFER_SELECT) { in io_recv()
4700 kbuf = io_recv_buffer_select(req, !force_nonblock); in io_recv()
4717 flags = req->sr_msg.msg_flags | MSG_NOSIGNAL; in io_recv()
4719 req->flags |= REQ_F_NOWAIT; in io_recv()
4732 if (req->flags & REQ_F_BUFFER_SELECTED) in io_recv()
4733 cflags = io_put_recv_kbuf(req); in io_recv()
4735 req_set_fail_links(req); in io_recv()
4736 __io_req_complete(req, ret, cflags, cs); in io_recv()
4740 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_accept_prep() argument
4742 struct io_accept *accept = &req->accept; in io_accept_prep()
4744 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) in io_accept_prep()
4756 static int io_accept(struct io_kiocb *req, bool force_nonblock, in io_accept() argument
4759 struct io_accept *accept = &req->accept; in io_accept()
4763 if (req->file->f_flags & O_NONBLOCK) in io_accept()
4764 req->flags |= REQ_F_NOWAIT; in io_accept()
4766 ret = __sys_accept4_file(req->file, file_flags, accept->addr, in io_accept()
4774 req_set_fail_links(req); in io_accept()
4776 __io_req_complete(req, ret, 0, cs); in io_accept()
4780 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_connect_prep() argument
4782 struct io_connect *conn = &req->connect; in io_connect_prep()
4783 struct io_async_connect *io = req->async_data; in io_connect_prep()
4785 if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) in io_connect_prep()
4801 static int io_connect(struct io_kiocb *req, bool force_nonblock, in io_connect() argument
4808 if (req->async_data) { in io_connect()
4809 io = req->async_data; in io_connect()
4811 ret = move_addr_to_kernel(req->connect.addr, in io_connect()
4812 req->connect.addr_len, in io_connect()
4821 ret = __sys_connect_file(req->file, &io->address, in io_connect()
4822 req->connect.addr_len, file_flags); in io_connect()
4824 if (req->async_data) in io_connect()
4826 if (io_alloc_async_data(req)) { in io_connect()
4830 io = req->async_data; in io_connect()
4831 memcpy(req->async_data, &__io, sizeof(__io)); in io_connect()
4838 req_set_fail_links(req); in io_connect()
4839 __io_req_complete(req, ret, 0, cs); in io_connect()
4843 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_sendmsg_prep() argument
4848 static int io_sendmsg(struct io_kiocb *req, bool force_nonblock, in io_sendmsg() argument
4854 static int io_send(struct io_kiocb *req, bool force_nonblock, in io_send() argument
4860 static int io_recvmsg_prep(struct io_kiocb *req, in io_recvmsg_prep() argument
4866 static int io_recvmsg(struct io_kiocb *req, bool force_nonblock, in io_recvmsg() argument
4872 static int io_recv(struct io_kiocb *req, bool force_nonblock, in io_recv() argument
4878 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_accept_prep() argument
4883 static int io_accept(struct io_kiocb *req, bool force_nonblock, in io_accept() argument
4889 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_connect_prep() argument
4894 static int io_connect(struct io_kiocb *req, bool force_nonblock, in io_connect() argument
4903 struct io_kiocb *req; member
4908 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, in __io_async_wake() argument
4918 trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask); in __io_async_wake()
4922 req->result = mask; in __io_async_wake()
4923 init_task_work(&req->task_work, func); in __io_async_wake()
4924 percpu_ref_get(&req->ctx->refs); in __io_async_wake()
4932 twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh); in __io_async_wake()
4940 ret = io_req_task_work_add(req, twa_signal_ok); in __io_async_wake()
4943 io_req_task_work_add_fallback(req, func); in __io_async_wake()
4948 static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll) in io_poll_rewait() argument
4949 __acquires(&req->ctx->completion_lock) in io_poll_rewait()
4951 struct io_ring_ctx *ctx = req->ctx; in io_poll_rewait()
4953 if (!req->result && !READ_ONCE(poll->canceled)) { in io_poll_rewait()
4956 req->result = vfs_poll(req->file, &pt) & poll->events; in io_poll_rewait()
4960 if (!req->result && !READ_ONCE(poll->canceled)) { in io_poll_rewait()
4968 static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req) in io_poll_get_double() argument
4971 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_double()
4972 return req->async_data; in io_poll_get_double()
4973 return req->apoll->double_poll; in io_poll_get_double()
4976 static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req) in io_poll_get_single() argument
4978 if (req->opcode == IORING_OP_POLL_ADD) in io_poll_get_single()
4979 return &req->poll; in io_poll_get_single()
4980 return &req->apoll->poll; in io_poll_get_single()
4983 static void io_poll_remove_double(struct io_kiocb *req) in io_poll_remove_double() argument
4985 struct io_poll_iocb *poll = io_poll_get_double(req); in io_poll_remove_double()
4987 lockdep_assert_held(&req->ctx->completion_lock); in io_poll_remove_double()
4995 refcount_dec(&req->refs); in io_poll_remove_double()
5001 static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error) in io_poll_complete() argument
5003 struct io_ring_ctx *ctx = req->ctx; in io_poll_complete()
5005 io_poll_remove_double(req); in io_poll_complete()
5006 req->poll.done = true; in io_poll_complete()
5007 io_cqring_fill_event(req, error ? error : mangle_poll(mask)); in io_poll_complete()
5013 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); in io_poll_task_func() local
5014 struct io_ring_ctx *ctx = req->ctx; in io_poll_task_func()
5017 if (io_poll_rewait(req, &req->poll)) { in io_poll_task_func()
5020 hash_del(&req->hash_node); in io_poll_task_func()
5021 io_poll_complete(req, req->result, 0); in io_poll_task_func()
5024 nxt = io_put_req_find_next(req); in io_poll_task_func()
5036 struct io_kiocb *req = wait->private; in io_poll_double_wake() local
5037 struct io_poll_iocb *poll = io_poll_get_single(req); in io_poll_double_wake()
5061 refcount_dec(&req->refs); in io_poll_double_wake()
5080 struct io_kiocb *req = pt->req; in __io_queue_proc() local
5104 refcount_inc(&req->refs); in __io_queue_proc()
5105 poll->wait.private = req; in __io_queue_proc()
5122 struct async_poll *apoll = pt->req->apoll; in io_async_queue_proc()
5129 struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); in io_async_task_func() local
5130 struct async_poll *apoll = req->apoll; in io_async_task_func()
5131 struct io_ring_ctx *ctx = req->ctx; in io_async_task_func()
5133 trace_io_uring_task_run(req->ctx, req->opcode, req->user_data); in io_async_task_func()
5135 if (io_poll_rewait(req, &apoll->poll)) { in io_async_task_func()
5141 /* If req is still hashed, it cannot have been canceled. Don't check. */ in io_async_task_func()
5142 if (hash_hashed(&req->hash_node)) in io_async_task_func()
5143 hash_del(&req->hash_node); in io_async_task_func()
5145 io_poll_remove_double(req); in io_async_task_func()
5149 __io_req_task_submit(req); in io_async_task_func()
5151 __io_req_task_cancel(req, -ECANCELED); in io_async_task_func()
5161 struct io_kiocb *req = wait->private; in io_async_wake() local
5162 struct io_poll_iocb *poll = &req->apoll->poll; in io_async_wake()
5164 trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data, in io_async_wake()
5167 return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func); in io_async_wake()
5170 static void io_poll_req_insert(struct io_kiocb *req) in io_poll_req_insert() argument
5172 struct io_ring_ctx *ctx = req->ctx; in io_poll_req_insert()
5175 list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)]; in io_poll_req_insert()
5176 hlist_add_head(&req->hash_node, list); in io_poll_req_insert()
5179 static __poll_t __io_arm_poll_handler(struct io_kiocb *req, in __io_arm_poll_handler() argument
5185 struct io_ring_ctx *ctx = req->ctx; in __io_arm_poll_handler()
5188 if (req->file->f_op->may_pollfree) { in __io_arm_poll_handler()
5193 INIT_HLIST_NODE(&req->hash_node); in __io_arm_poll_handler()
5195 poll->file = req->file; in __io_arm_poll_handler()
5196 poll->wait.private = req; in __io_arm_poll_handler()
5199 ipt->req = req; in __io_arm_poll_handler()
5203 mask = vfs_poll(req->file, &ipt->pt) & poll->events; in __io_arm_poll_handler()
5209 io_poll_remove_double(req); in __io_arm_poll_handler()
5223 io_poll_req_insert(req); in __io_arm_poll_handler()
5230 static bool io_arm_poll_handler(struct io_kiocb *req) in io_arm_poll_handler() argument
5232 const struct io_op_def *def = &io_op_defs[req->opcode]; in io_arm_poll_handler()
5233 struct io_ring_ctx *ctx = req->ctx; in io_arm_poll_handler()
5239 if (!req->file || !file_can_poll(req->file)) in io_arm_poll_handler()
5241 if (req->flags & REQ_F_POLLED) in io_arm_poll_handler()
5250 if (!io_file_supports_async(req->file, rw)) in io_arm_poll_handler()
5258 req->flags |= REQ_F_POLLED; in io_arm_poll_handler()
5259 req->apoll = apoll; in io_arm_poll_handler()
5268 if ((req->opcode == IORING_OP_RECVMSG) && in io_arm_poll_handler()
5269 (req->sr_msg.msg_flags & MSG_ERRQUEUE)) in io_arm_poll_handler()
5276 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, in io_arm_poll_handler()
5279 io_poll_remove_double(req); in io_arm_poll_handler()
5286 trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask, in io_arm_poll_handler()
5291 static bool __io_poll_remove_one(struct io_kiocb *req, in __io_poll_remove_one() argument
5303 hash_del(&req->hash_node); in __io_poll_remove_one()
5307 static bool io_poll_remove_one(struct io_kiocb *req) in io_poll_remove_one() argument
5311 io_poll_remove_double(req); in io_poll_remove_one()
5313 if (req->opcode == IORING_OP_POLL_ADD) { in io_poll_remove_one()
5314 do_complete = __io_poll_remove_one(req, &req->poll); in io_poll_remove_one()
5316 struct async_poll *apoll = req->apoll; in io_poll_remove_one()
5319 do_complete = __io_poll_remove_one(req, &apoll->poll); in io_poll_remove_one()
5321 io_put_req(req); in io_poll_remove_one()
5328 io_cqring_fill_event(req, -ECANCELED); in io_poll_remove_one()
5329 io_commit_cqring(req->ctx); in io_poll_remove_one()
5330 req_set_fail_links(req); in io_poll_remove_one()
5331 io_put_req_deferred(req, 1); in io_poll_remove_one()
5344 struct io_kiocb *req; in io_poll_remove_all() local
5352 hlist_for_each_entry_safe(req, tmp, list, hash_node) { in io_poll_remove_all()
5353 if (io_match_task(req, tsk, files)) in io_poll_remove_all()
5354 posted += io_poll_remove_one(req); in io_poll_remove_all()
5368 struct io_kiocb *req; in io_poll_cancel() local
5371 hlist_for_each_entry(req, list, hash_node) { in io_poll_cancel()
5372 if (sqe_addr != req->user_data) in io_poll_cancel()
5374 if (io_poll_remove_one(req)) in io_poll_cancel()
5382 static int io_poll_remove_prep(struct io_kiocb *req, in io_poll_remove_prep() argument
5385 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_poll_remove_prep()
5391 req->poll.addr = READ_ONCE(sqe->addr); in io_poll_remove_prep()
5399 static int io_poll_remove(struct io_kiocb *req) in io_poll_remove() argument
5401 struct io_ring_ctx *ctx = req->ctx; in io_poll_remove()
5405 addr = req->poll.addr; in io_poll_remove()
5411 req_set_fail_links(req); in io_poll_remove()
5412 io_req_complete(req, ret); in io_poll_remove()
5419 struct io_kiocb *req = wait->private; in io_poll_wake() local
5420 struct io_poll_iocb *poll = &req->poll; in io_poll_wake()
5422 return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func); in io_poll_wake()
5430 __io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data); in io_poll_queue_proc()
5433 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_add_prep() argument
5435 struct io_poll_iocb *poll = &req->poll; in io_poll_add_prep()
5438 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_poll_add_prep()
5452 static int io_poll_add(struct io_kiocb *req) in io_poll_add() argument
5454 struct io_poll_iocb *poll = &req->poll; in io_poll_add()
5455 struct io_ring_ctx *ctx = req->ctx; in io_poll_add()
5461 mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events, in io_poll_add()
5466 io_poll_complete(req, mask, 0); in io_poll_add()
5472 io_put_req(req); in io_poll_add()
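
__io_arm_poll_handler()/io_poll_add() above implement IORING_OP_POLL_ADD: the request is armed on the file's waitqueue via vfs_poll() (5203) and completes once with the triggered mask (io_poll_complete(), 5007). A minimal userspace sketch with liburing (the fd and ring are assumptions, not taken from this file):

#include <errno.h>
#include <poll.h>
#include <liburing.h>

/* Arm a one-shot IORING_OP_POLL_ADD for readability on 'fd'.  The CQE
 * res carries the triggered poll mask (cf. mangle_poll() in
 * io_poll_complete()) or a negative error such as -ECANCELED. */
static int wait_readable(struct io_uring *ring, int fd)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        if (!sqe)
                return -EBUSY;
        io_uring_prep_poll_add(sqe, fd, POLLIN);
        ret = io_uring_submit(ring);
        if (ret < 0)
                return ret;
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
                return ret;
        ret = cqe->res;
        io_uring_cqe_seen(ring, cqe);
        return ret;
}
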
5481 struct io_kiocb *req = data->req; in io_timeout_fn() local
5482 struct io_ring_ctx *ctx = req->ctx; in io_timeout_fn()
5486 list_del_init(&req->timeout.list); in io_timeout_fn()
5487 atomic_set(&req->ctx->cq_timeouts, in io_timeout_fn()
5488 atomic_read(&req->ctx->cq_timeouts) + 1); in io_timeout_fn()
5490 io_cqring_fill_event(req, -ETIME); in io_timeout_fn()
5495 req_set_fail_links(req); in io_timeout_fn()
5496 io_put_req(req); in io_timeout_fn()
5500 static int __io_timeout_cancel(struct io_kiocb *req) in __io_timeout_cancel() argument
5502 struct io_timeout_data *io = req->async_data; in __io_timeout_cancel()
5508 list_del_init(&req->timeout.list); in __io_timeout_cancel()
5510 req_set_fail_links(req); in __io_timeout_cancel()
5511 io_cqring_fill_event(req, -ECANCELED); in __io_timeout_cancel()
5512 io_put_req_deferred(req, 1); in __io_timeout_cancel()
5518 struct io_kiocb *req; in io_timeout_cancel() local
5521 list_for_each_entry(req, &ctx->timeout_list, timeout.list) { in io_timeout_cancel()
5522 if (user_data == req->user_data) { in io_timeout_cancel()
5531 return __io_timeout_cancel(req); in io_timeout_cancel()
5534 static int io_timeout_remove_prep(struct io_kiocb *req, in io_timeout_remove_prep() argument
5537 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_timeout_remove_prep()
5539 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_timeout_remove_prep()
5545 req->timeout_rem.addr = READ_ONCE(sqe->addr); in io_timeout_remove_prep()
5552 static int io_timeout_remove(struct io_kiocb *req) in io_timeout_remove() argument
5554 struct io_ring_ctx *ctx = req->ctx; in io_timeout_remove()
5558 ret = io_timeout_cancel(ctx, req->timeout_rem.addr); in io_timeout_remove()
5560 io_cqring_fill_event(req, ret); in io_timeout_remove()
5565 req_set_fail_links(req); in io_timeout_remove()
5566 io_put_req(req); in io_timeout_remove()
5570 static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_timeout_prep() argument
5577 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_timeout_prep()
5588 req->timeout.off = off; in io_timeout_prep()
5590 if (!req->async_data && io_alloc_async_data(req)) in io_timeout_prep()
5593 data = req->async_data; in io_timeout_prep()
5594 data->req = req; in io_timeout_prep()
5608 static int io_timeout(struct io_kiocb *req) in io_timeout() argument
5610 struct io_ring_ctx *ctx = req->ctx; in io_timeout()
5611 struct io_timeout_data *data = req->async_data; in io_timeout()
5613 u32 tail, off = req->timeout.off; in io_timeout()
5622 if (io_is_timeout_noseq(req)) { in io_timeout()
5628 req->timeout.target_seq = tail + off; in io_timeout()
5651 list_add(&req->timeout.list, entry); in io_timeout()
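
io_timeout_prep()/io_timeout() above implement IORING_OP_TIMEOUT; req->timeout.off (5588/5613) is the completion-count offset, and a timeout with no count completes with -ETIME from io_timeout_fn() (5490). A liburing sketch of the pure-timer case (the millisecond value and ring setup are assumptions):

#include <errno.h>
#include <liburing.h>

/* Submit a pure IORING_OP_TIMEOUT that fires after 'msec' milliseconds.
 * With a count of 0 it expires with -ETIME (see io_timeout_fn()). */
static int sleep_on_ring(struct io_uring *ring, unsigned msec)
{
        struct __kernel_timespec ts = {
                .tv_sec  = msec / 1000,
                .tv_nsec = (msec % 1000) * 1000000LL,
        };
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        if (!sqe)
                return -EBUSY;
        io_uring_prep_timeout(sqe, &ts, 0, 0);
        ret = io_uring_submit(ring);
        if (ret < 0)
                return ret;
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret < 0)
                return ret;
        ret = cqe->res;                 /* -ETIME on expiry */
        io_uring_cqe_seen(ring, cqe);
        return ret;
}
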
5660 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_cb() local
5662 return req->user_data == (unsigned long) data; in io_cancel_cb()
5687 struct io_kiocb *req, __u64 sqe_addr, in io_async_find_and_cancel() argument
5707 io_cqring_fill_event(req, ret); in io_async_find_and_cancel()
5713 req_set_fail_links(req); in io_async_find_and_cancel()
5714 io_put_req(req); in io_async_find_and_cancel()
5717 static int io_async_cancel_prep(struct io_kiocb *req, in io_async_cancel_prep() argument
5720 if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) in io_async_cancel_prep()
5722 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_async_cancel_prep()
5728 req->cancel.addr = READ_ONCE(sqe->addr); in io_async_cancel_prep()
5732 static int io_async_cancel(struct io_kiocb *req) in io_async_cancel() argument
5734 struct io_ring_ctx *ctx = req->ctx; in io_async_cancel()
5736 io_async_find_and_cancel(ctx, req, req->cancel.addr, 0); in io_async_cancel()
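
io_async_cancel_prep()/io_async_cancel() above implement IORING_OP_ASYNC_CANCEL: the victim is matched by its user_data (req->cancel.addr, 5728) inside io_async_find_and_cancel(). A liburing sketch of the submitting side (the 'tag' pointer is whatever was previously passed to io_uring_sqe_set_data(); everything here is an assumption, not code from this file):

#include <errno.h>
#include <liburing.h>

/* Ask the kernel to cancel an earlier request that was tagged with
 * io_uring_sqe_set_data(sqe, tag).  Two CQEs may result: the cancel op
 * posts 0 / -ENOENT / -EALREADY, and a found victim posts -ECANCELED.
 * CQE reaping is left to the caller. */
static int cancel_by_tag(struct io_uring *ring, void *tag)
{
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
                return -EBUSY;
        io_uring_prep_cancel(sqe, tag, 0);
        return io_uring_submit(ring);
}
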
5740 static int io_files_update_prep(struct io_kiocb *req, in io_files_update_prep() argument
5743 if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL)) in io_files_update_prep()
5745 if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) in io_files_update_prep()
5750 req->files_update.offset = READ_ONCE(sqe->off); in io_files_update_prep()
5751 req->files_update.nr_args = READ_ONCE(sqe->len); in io_files_update_prep()
5752 if (!req->files_update.nr_args) in io_files_update_prep()
5754 req->files_update.arg = READ_ONCE(sqe->addr); in io_files_update_prep()
5758 static int io_files_update(struct io_kiocb *req, bool force_nonblock, in io_files_update() argument
5761 struct io_ring_ctx *ctx = req->ctx; in io_files_update()
5768 up.offset = req->files_update.offset; in io_files_update()
5769 up.fds = req->files_update.arg; in io_files_update()
5772 ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args); in io_files_update()
5776 req_set_fail_links(req); in io_files_update()
5777 __io_req_complete(req, ret, 0, cs); in io_files_update()
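
io_files_update_prep()/io_files_update() above back IORING_OP_FILES_UPDATE, which patches slots of an already-registered fixed-file table through __io_sqe_files_update() (5772). liburing also exposes the same update via the register syscall; a hedged sketch of that variant (slot number and descriptor are assumptions):

#include <liburing.h>

/* Replace slot 'slot' of a registered fixed-file table with 'new_fd'
 * (pass -1 to clear the slot).  Returns the number of files updated
 * or -errno; the IORING_OP_FILES_UPDATE path above ends up in the same
 * __io_sqe_files_update() helper. */
static int swap_fixed_file(struct io_uring *ring, unsigned slot, int new_fd)
{
        int fds[1] = { new_fd };

        return io_uring_register_files_update(ring, slot, fds, 1);
}
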
5781 static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_req_prep() argument
5783 switch (req->opcode) { in io_req_prep()
5789 return io_read_prep(req, sqe); in io_req_prep()
5793 return io_write_prep(req, sqe); in io_req_prep()
5795 return io_poll_add_prep(req, sqe); in io_req_prep()
5797 return io_poll_remove_prep(req, sqe); in io_req_prep()
5799 return io_prep_fsync(req, sqe); in io_req_prep()
5801 return io_prep_sfr(req, sqe); in io_req_prep()
5804 return io_sendmsg_prep(req, sqe); in io_req_prep()
5807 return io_recvmsg_prep(req, sqe); in io_req_prep()
5809 return io_connect_prep(req, sqe); in io_req_prep()
5811 return io_timeout_prep(req, sqe, false); in io_req_prep()
5813 return io_timeout_remove_prep(req, sqe); in io_req_prep()
5815 return io_async_cancel_prep(req, sqe); in io_req_prep()
5817 return io_timeout_prep(req, sqe, true); in io_req_prep()
5819 return io_accept_prep(req, sqe); in io_req_prep()
5821 return io_fallocate_prep(req, sqe); in io_req_prep()
5823 return io_openat_prep(req, sqe); in io_req_prep()
5825 return io_close_prep(req, sqe); in io_req_prep()
5827 return io_files_update_prep(req, sqe); in io_req_prep()
5829 return io_statx_prep(req, sqe); in io_req_prep()
5831 return io_fadvise_prep(req, sqe); in io_req_prep()
5833 return io_madvise_prep(req, sqe); in io_req_prep()
5835 return io_openat2_prep(req, sqe); in io_req_prep()
5837 return io_epoll_ctl_prep(req, sqe); in io_req_prep()
5839 return io_splice_prep(req, sqe); in io_req_prep()
5841 return io_provide_buffers_prep(req, sqe); in io_req_prep()
5843 return io_remove_buffers_prep(req, sqe); in io_req_prep()
5845 return io_tee_prep(req, sqe); in io_req_prep()
5849 req->opcode); in io_req_prep()
5853 static int io_req_defer_prep(struct io_kiocb *req, in io_req_defer_prep() argument
5858 if (io_alloc_async_data(req)) in io_req_defer_prep()
5860 return io_req_prep(req, sqe); in io_req_defer_prep()
5863 static u32 io_get_sequence(struct io_kiocb *req) in io_get_sequence() argument
5866 struct io_ring_ctx *ctx = req->ctx; in io_get_sequence()
5869 if (req->flags & REQ_F_LINK_HEAD) in io_get_sequence()
5870 list_for_each_entry(pos, &req->link_list, link_list) in io_get_sequence()
5877 static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_req_defer() argument
5879 struct io_ring_ctx *ctx = req->ctx; in io_req_defer()
5884 /* Still need defer if there is pending req in defer list. */ in io_req_defer()
5886 !(req->flags & REQ_F_IO_DRAIN))) in io_req_defer()
5889 seq = io_get_sequence(req); in io_req_defer()
5891 if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) in io_req_defer()
5894 if (!req->async_data) { in io_req_defer()
5895 ret = io_req_defer_prep(req, sqe); in io_req_defer()
5899 io_prep_async_link(req); in io_req_defer()
5905 if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) { in io_req_defer()
5908 io_queue_async_work(req); in io_req_defer()
5912 trace_io_uring_defer(ctx, req, req->user_data); in io_req_defer()
5913 de->req = req; in io_req_defer()
5920 static void io_req_drop_files(struct io_kiocb *req) in io_req_drop_files() argument
5922 struct io_ring_ctx *ctx = req->ctx; in io_req_drop_files()
5923 struct io_uring_task *tctx = req->task->io_uring; in io_req_drop_files()
5926 if (req->work.flags & IO_WQ_WORK_FILES) { in io_req_drop_files()
5927 put_files_struct(req->work.identity->files); in io_req_drop_files()
5928 put_nsproxy(req->work.identity->nsproxy); in io_req_drop_files()
5931 list_del(&req->inflight_entry); in io_req_drop_files()
5933 req->flags &= ~REQ_F_INFLIGHT; in io_req_drop_files()
5934 req->work.flags &= ~IO_WQ_WORK_FILES; in io_req_drop_files()
5939 static void __io_clean_op(struct io_kiocb *req) in __io_clean_op() argument
5941 if (req->flags & REQ_F_BUFFER_SELECTED) { in __io_clean_op()
5942 switch (req->opcode) { in __io_clean_op()
5946 kfree((void *)(unsigned long)req->rw.addr); in __io_clean_op()
5950 kfree(req->sr_msg.kbuf); in __io_clean_op()
5953 req->flags &= ~REQ_F_BUFFER_SELECTED; in __io_clean_op()
5956 if (req->flags & REQ_F_NEED_CLEANUP) { in __io_clean_op()
5957 switch (req->opcode) { in __io_clean_op()
5964 struct io_async_rw *io = req->async_data; in __io_clean_op()
5971 struct io_async_msghdr *io = req->async_data; in __io_clean_op()
5978 io_put_file(req, req->splice.file_in, in __io_clean_op()
5979 (req->splice.flags & SPLICE_F_FD_IN_FIXED)); in __io_clean_op()
5983 if (req->open.filename) in __io_clean_op()
5984 putname(req->open.filename); in __io_clean_op()
5987 req->flags &= ~REQ_F_NEED_CLEANUP; in __io_clean_op()
5991 static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock, in io_issue_sqe() argument
5994 struct io_ring_ctx *ctx = req->ctx; in io_issue_sqe()
5997 switch (req->opcode) { in io_issue_sqe()
5999 ret = io_nop(req, cs); in io_issue_sqe()
6004 ret = io_read(req, force_nonblock, cs); in io_issue_sqe()
6009 ret = io_write(req, force_nonblock, cs); in io_issue_sqe()
6012 ret = io_fsync(req, force_nonblock); in io_issue_sqe()
6015 ret = io_poll_add(req); in io_issue_sqe()
6018 ret = io_poll_remove(req); in io_issue_sqe()
6021 ret = io_sync_file_range(req, force_nonblock); in io_issue_sqe()
6024 ret = io_sendmsg(req, force_nonblock, cs); in io_issue_sqe()
6027 ret = io_send(req, force_nonblock, cs); in io_issue_sqe()
6030 ret = io_recvmsg(req, force_nonblock, cs); in io_issue_sqe()
6033 ret = io_recv(req, force_nonblock, cs); in io_issue_sqe()
6036 ret = io_timeout(req); in io_issue_sqe()
6039 ret = io_timeout_remove(req); in io_issue_sqe()
6042 ret = io_accept(req, force_nonblock, cs); in io_issue_sqe()
6045 ret = io_connect(req, force_nonblock, cs); in io_issue_sqe()
6048 ret = io_async_cancel(req); in io_issue_sqe()
6051 ret = io_fallocate(req, force_nonblock); in io_issue_sqe()
6054 ret = io_openat(req, force_nonblock); in io_issue_sqe()
6057 ret = io_close(req, force_nonblock, cs); in io_issue_sqe()
6060 ret = io_files_update(req, force_nonblock, cs); in io_issue_sqe()
6063 ret = io_statx(req, force_nonblock); in io_issue_sqe()
6066 ret = io_fadvise(req, force_nonblock); in io_issue_sqe()
6069 ret = io_madvise(req, force_nonblock); in io_issue_sqe()
6072 ret = io_openat2(req, force_nonblock); in io_issue_sqe()
6075 ret = io_epoll_ctl(req, force_nonblock, cs); in io_issue_sqe()
6078 ret = io_splice(req, force_nonblock); in io_issue_sqe()
6081 ret = io_provide_buffers(req, force_nonblock, cs); in io_issue_sqe()
6084 ret = io_remove_buffers(req, force_nonblock, cs); in io_issue_sqe()
6087 ret = io_tee(req, force_nonblock); in io_issue_sqe()
6098 if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) { in io_issue_sqe()
6105 io_iopoll_req_issued(req); in io_issue_sqe()
6116 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_wq_submit_work() local
6120 timeout = io_prep_linked_timeout(req); in io_wq_submit_work()
6128 refcount_inc(&req->refs); in io_wq_submit_work()
6129 percpu_ref_get(&req->ctx->refs); in io_wq_submit_work()
6130 io_req_task_work_add_fallback(req, io_req_task_cancel); in io_wq_submit_work()
6131 return io_steal_work(req); in io_wq_submit_work()
6136 ret = io_issue_sqe(req, false, NULL); in io_wq_submit_work()
6151 if (req->ctx->flags & IORING_SETUP_IOPOLL) in io_wq_submit_work()
6152 lock_ctx = req->ctx; in io_wq_submit_work()
6166 req_set_fail_links(req); in io_wq_submit_work()
6167 io_req_complete(req, ret); in io_wq_submit_work()
6173 return io_steal_work(req); in io_wq_submit_work()
6186 struct io_kiocb *req, int fd, bool fixed) in io_file_get() argument
6188 struct io_ring_ctx *ctx = req->ctx; in io_file_get()
6197 req->fixed_file_refs = &ctx->file_data->node->refs; in io_file_get()
6198 percpu_ref_get(req->fixed_file_refs); in io_file_get()
6206 !(req->flags & REQ_F_INFLIGHT)) { in io_file_get()
6207 io_req_init_async(req); in io_file_get()
6208 req->flags |= REQ_F_INFLIGHT; in io_file_get()
6211 list_add(&req->inflight_entry, &ctx->inflight_list); in io_file_get()
6218 static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req, in io_req_set_file() argument
6223 fixed = (req->flags & REQ_F_FIXED_FILE) != 0; in io_req_set_file()
6224 if (unlikely(!fixed && io_async_submit(req->ctx))) in io_req_set_file()
6227 req->file = io_file_get(state, req, fd, fixed); in io_req_set_file()
6228 if (req->file || io_op_defs[req->opcode].needs_file_no_error) in io_req_set_file()
6237 struct io_kiocb *req = data->req; in io_link_timeout_fn() local
6238 struct io_ring_ctx *ctx = req->ctx; in io_link_timeout_fn()
6248 if (!list_empty(&req->link_list)) { in io_link_timeout_fn()
6249 prev = list_entry(req->link_list.prev, struct io_kiocb, in io_link_timeout_fn()
6252 list_del_init(&req->link_list); in io_link_timeout_fn()
6260 io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME); in io_link_timeout_fn()
6263 io_cqring_add_event(req, -ETIME, 0); in io_link_timeout_fn()
6264 io_put_req_deferred(req, 1); in io_link_timeout_fn()
6269 static void __io_queue_linked_timeout(struct io_kiocb *req) in __io_queue_linked_timeout() argument
6275 if (!list_empty(&req->link_list)) { in __io_queue_linked_timeout()
6276 struct io_timeout_data *data = req->async_data; in __io_queue_linked_timeout()
6284 static void io_queue_linked_timeout(struct io_kiocb *req) in io_queue_linked_timeout() argument
6286 struct io_ring_ctx *ctx = req->ctx; in io_queue_linked_timeout()
6289 __io_queue_linked_timeout(req); in io_queue_linked_timeout()
6293 io_put_req(req); in io_queue_linked_timeout()
6296 static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) in io_prep_linked_timeout() argument
6300 if (!(req->flags & REQ_F_LINK_HEAD)) in io_prep_linked_timeout()
6302 if (req->flags & REQ_F_LINK_TIMEOUT) in io_prep_linked_timeout()
6305 nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, in io_prep_linked_timeout()
6311 req->flags |= REQ_F_LINK_TIMEOUT; in io_prep_linked_timeout()
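
io_link_timeout_fn()/io_queue_linked_timeout()/io_prep_linked_timeout() above implement IORING_OP_LINK_TIMEOUT, a timer attached to the preceding linked SQE that cancels it on expiry (io_async_find_and_cancel() at 6260). A liburing sketch of the userspace pairing (fd, buffer and deadline are assumptions):

#include <errno.h>
#include <stddef.h>
#include <liburing.h>

/* Submit a recv that is aborted with -ECANCELED if it has not completed
 * within '*ts'.  The link-timeout SQE must immediately follow the SQE it
 * guards (see io_prep_linked_timeout() above); two CQEs will be posted
 * and are reaped by the caller. */
static int recv_with_deadline(struct io_uring *ring, int fd, void *buf,
                              size_t len, struct __kernel_timespec *ts)
{
        struct io_uring_sqe *sqe;

        sqe = io_uring_get_sqe(ring);
        if (!sqe)
                return -EBUSY;
        io_uring_prep_recv(sqe, fd, buf, len, 0);
        sqe->flags |= IOSQE_IO_LINK;    /* link to the next SQE */

        sqe = io_uring_get_sqe(ring);
        if (!sqe)
                return -EBUSY;
        io_uring_prep_link_timeout(sqe, ts, 0);

        return io_uring_submit(ring);
}
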
6315 static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs) in __io_queue_sqe() argument
6322 linked_timeout = io_prep_linked_timeout(req); in __io_queue_sqe()
6324 if ((req->flags & REQ_F_WORK_INITIALIZED) && in __io_queue_sqe()
6325 (req->work.flags & IO_WQ_WORK_CREDS) && in __io_queue_sqe()
6326 req->work.identity->creds != current_cred()) { in __io_queue_sqe()
6329 if (old_creds == req->work.identity->creds) in __io_queue_sqe()
6332 old_creds = override_creds(req->work.identity->creds); in __io_queue_sqe()
6335 ret = io_issue_sqe(req, true, cs); in __io_queue_sqe()
6341 if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { in __io_queue_sqe()
6342 if (!io_arm_poll_handler(req)) { in __io_queue_sqe()
6347 io_queue_async_work(req); in __io_queue_sqe()
6354 req = io_put_req_find_next(req); in __io_queue_sqe()
6358 if (req) { in __io_queue_sqe()
6359 if (!(req->flags & REQ_F_FORCE_ASYNC)) in __io_queue_sqe()
6361 io_queue_async_work(req); in __io_queue_sqe()
6365 req->flags &= ~REQ_F_LINK_TIMEOUT; in __io_queue_sqe()
6366 req_set_fail_links(req); in __io_queue_sqe()
6367 io_put_req(req); in __io_queue_sqe()
6368 io_req_complete(req, ret); in __io_queue_sqe()
6375 static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_queue_sqe() argument
6380 ret = io_req_defer(req, sqe); in io_queue_sqe()
6384 req_set_fail_links(req); in io_queue_sqe()
6385 io_put_req(req); in io_queue_sqe()
6386 io_req_complete(req, ret); in io_queue_sqe()
6388 } else if (req->flags & REQ_F_FORCE_ASYNC) { in io_queue_sqe()
6389 if (!req->async_data) { in io_queue_sqe()
6390 ret = io_req_defer_prep(req, sqe); in io_queue_sqe()
6394 io_queue_async_work(req); in io_queue_sqe()
6397 ret = io_req_prep(req, sqe); in io_queue_sqe()
6401 __io_queue_sqe(req, cs); in io_queue_sqe()
6405 static inline void io_queue_link_head(struct io_kiocb *req, in io_queue_link_head() argument
6408 if (unlikely(req->flags & REQ_F_FAIL_LINK)) { in io_queue_link_head()
6409 io_put_req(req); in io_queue_link_head()
6410 io_req_complete(req, -ECANCELED); in io_queue_link_head()
6412 io_queue_sqe(req, NULL, cs); in io_queue_link_head()
6415 static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_submit_sqe() argument
6418 struct io_ring_ctx *ctx = req->ctx; in io_submit_sqe()
6438 if (req->flags & REQ_F_IO_DRAIN) { in io_submit_sqe()
6442 ret = io_req_defer_prep(req, sqe); in io_submit_sqe()
6448 trace_io_uring_link(ctx, req, head); in io_submit_sqe()
6449 list_add_tail(&req->link_list, &head->link_list); in io_submit_sqe()
6452 if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { in io_submit_sqe()
6458 req->flags |= REQ_F_IO_DRAIN; in io_submit_sqe()
6461 if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) { in io_submit_sqe()
6462 req->flags |= REQ_F_LINK_HEAD; in io_submit_sqe()
6463 INIT_LIST_HEAD(&req->link_list); in io_submit_sqe()
6465 ret = io_req_defer_prep(req, sqe); in io_submit_sqe()
6467 req->flags |= REQ_F_FAIL_LINK; in io_submit_sqe()
6468 *link = req; in io_submit_sqe()
6470 io_queue_sqe(req, sqe, cs); in io_submit_sqe()
6559 struct io_kiocb *req, in io_check_restriction() argument
6565 if (!test_bit(req->opcode, ctx->restrictions.sqe_op)) in io_check_restriction()
6583 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, in io_init_req() argument
6590 req->opcode = READ_ONCE(sqe->opcode); in io_init_req()
6591 req->user_data = READ_ONCE(sqe->user_data); in io_init_req()
6592 req->async_data = NULL; in io_init_req()
6593 req->file = NULL; in io_init_req()
6594 req->ctx = ctx; in io_init_req()
6595 req->flags = 0; in io_init_req()
6597 refcount_set(&req->refs, 2); in io_init_req()
6598 req->task = current; in io_init_req()
6599 req->result = 0; in io_init_req()
6601 if (unlikely(req->opcode >= IORING_OP_LAST)) in io_init_req()
6604 if (unlikely(io_sq_thread_acquire_mm(ctx, req))) in io_init_req()
6612 if (unlikely(!io_check_restriction(ctx, req, sqe_flags))) in io_init_req()
6616 !io_op_defs[req->opcode].buffer_select) in io_init_req()
6628 __io_req_init_async(req); in io_init_req()
6630 req->work.identity = iod; in io_init_req()
6631 req->work.flags |= IO_WQ_WORK_CREDS; in io_init_req()
6635 req->flags |= sqe_flags; in io_init_req()
6637 if (!io_op_defs[req->opcode].needs_file) in io_init_req()
6640 ret = io_req_set_file(state, req, READ_ONCE(sqe->fd)); in io_init_req()
6670 struct io_kiocb *req; in io_submit_sqes() local
6678 req = io_alloc_req(ctx, &state); in io_submit_sqes()
6679 if (unlikely(!req)) { in io_submit_sqes()
6688 err = io_init_req(ctx, req, sqe, &state); in io_submit_sqes()
6691 io_put_req(req); in io_submit_sqes()
6692 io_req_complete(req, err); in io_submit_sqes()
6696 trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data, in io_submit_sqes()
6698 err = io_submit_sqe(req, sqe, &link, &state.comp); in io_submit_sqes()
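
io_init_req()/io_submit_sqe()/io_submit_sqes() above are the kernel side of io_uring_enter(): each SQE becomes an io_kiocb, is prepped, and is either issued inline or deferred/punted to io-wq. A complete minimal round trip from userspace, using IORING_OP_NOP as the simplest request (a liburing sketch, not taken from this file):

#include <stdio.h>
#include <liburing.h>

int main(void)
{
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        int ret;

        ret = io_uring_queue_init(8, &ring, 0);
        if (ret < 0) {
                fprintf(stderr, "queue_init: %d\n", ret);
                return 1;
        }

        sqe = io_uring_get_sqe(&ring);          /* one SQE ... */
        io_uring_prep_nop(sqe);
        io_uring_sqe_set_data(sqe, (void *)0x1);

        ret = io_uring_submit(&ring);           /* ... enters io_submit_sqes() */
        if (ret < 0)
                goto out;

        ret = io_uring_wait_cqe(&ring, &cqe);   /* posted via io_req_complete() */
        if (ret == 0) {
                printf("user_data=%p res=%d\n",
                       io_uring_cqe_get_data(cqe), cqe->res);
                io_uring_cqe_seen(&ring, cqe);
        }
out:
        io_uring_queue_exit(&ring);
        return ret < 0;
}
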
7827 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_free_work() local
7830 io_put_req(req); in io_free_work()
8530 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_ctx_cb() local
8532 return req->ctx == data; in io_cancel_ctx_cb()
8597 struct io_kiocb *req = container_of(work, struct io_kiocb, work); in io_cancel_task_cb() local
8601 if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) { in io_cancel_task_cb()
8603 struct io_ring_ctx *ctx = req->ctx; in io_cancel_task_cb()
8607 ret = io_match_task(req, cancel->task, cancel->files); in io_cancel_task_cb()
8610 ret = io_match_task(req, cancel->task, cancel->files); in io_cancel_task_cb()
8624 if (io_match_task(de->req, task, files)) { in io_cancel_defer_files()
8634 req_set_fail_links(de->req); in io_cancel_defer_files()
8635 io_put_req(de->req); in io_cancel_defer_files()
8636 io_req_complete(de->req, -ECANCELED); in io_cancel_defer_files()
8645 struct io_kiocb *req; in io_uring_count_inflight() local
8649 list_for_each_entry(req, &ctx->inflight_list, inflight_entry) in io_uring_count_inflight()
8650 cnt += io_match_task(req, task, files); in io_uring_count_inflight()
9208 struct io_kiocb *req; in __io_uring_show_fdinfo() local
9210 hlist_for_each_entry(req, list, hash_node) in __io_uring_show_fdinfo()
9211 seq_printf(m, " op=%d, task_works=%d\n", req->opcode, in __io_uring_show_fdinfo()
9212 req->task->task_works != NULL); in __io_uring_show_fdinfo()