Lines Matching refs:sqe
271 const struct io_uring_sqe *sqe; member
506 if (req->submit.sqe) { in io_queue_async_work()
1099 const struct io_uring_sqe *sqe = s->sqe; in io_prep_rw() local
1123 kiocb->ki_pos = READ_ONCE(sqe->off); in io_prep_rw()
1127 ioprio = READ_ONCE(sqe->ioprio); in io_prep_rw()
1137 ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags)); in io_prep_rw()
1187 const struct io_uring_sqe *sqe, in io_import_fixed() argument
1190 size_t len = READ_ONCE(sqe->len); in io_import_fixed()
1200 buf_index = READ_ONCE(sqe->buf_index); in io_import_fixed()
1206 buf_addr = READ_ONCE(sqe->addr); in io_import_fixed()
1264 const struct io_uring_sqe *sqe = req->submit.sqe; in io_import_iovec() local
1265 void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr)); in io_import_iovec()
1266 size_t sqe_len = READ_ONCE(sqe->len); in io_import_iovec()
1272 ssize_t ret = io_import_fixed(ctx, rw, sqe, iter); in io_import_iovec()
1573 static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_fsync() argument
1582 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index)) in io_prep_fsync()
1588 static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_fsync() argument
1591 loff_t sqe_off = READ_ONCE(sqe->off); in io_fsync()
1592 loff_t sqe_len = READ_ONCE(sqe->len); in io_fsync()
1597 fsync_flags = READ_ONCE(sqe->fsync_flags); in io_fsync()
1601 ret = io_prep_fsync(req, sqe); in io_fsync()
1615 io_cqring_add_event(req->ctx, sqe->user_data, ret); in io_fsync()
1620 static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_prep_sfr() argument
1630 if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index)) in io_prep_sfr()
1637 const struct io_uring_sqe *sqe, in io_sync_file_range() argument
1645 ret = io_prep_sfr(req, sqe); in io_sync_file_range()
1653 sqe_off = READ_ONCE(sqe->off); in io_sync_file_range()
1654 sqe_len = READ_ONCE(sqe->len); in io_sync_file_range()
1655 flags = READ_ONCE(sqe->sync_range_flags); in io_sync_file_range()
1661 io_cqring_add_event(req->ctx, sqe->user_data, ret); in io_sync_file_range()
1667 static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_send_recvmsg() argument
1683 flags = READ_ONCE(sqe->msg_flags); in io_send_recvmsg()
1695 READ_ONCE(sqe->addr); in io_send_recvmsg()
1705 io_cqring_add_event(req->ctx, sqe->user_data, ret); in io_send_recvmsg()
1711 static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_sendmsg() argument
1715 return io_send_recvmsg(req, sqe, force_nonblock, __sys_sendmsg_sock); in io_sendmsg()
1721 static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe, in io_recvmsg() argument
1725 return io_send_recvmsg(req, sqe, force_nonblock, __sys_recvmsg_sock); in io_recvmsg()
1762 static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_remove() argument
1770 if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index || in io_poll_remove()
1771 sqe->poll_events) in io_poll_remove()
1776 if (READ_ONCE(sqe->addr) == poll_req->user_data) { in io_poll_remove()
1784 io_cqring_add_event(req->ctx, sqe->user_data, ret); in io_poll_remove()
1885 static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_poll_add() argument
1896 if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index) in io_poll_add()
1901 req->submit.sqe = NULL; in io_poll_add()
1903 events = READ_ONCE(sqe->poll_events); in io_poll_add()
1985 static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe) in io_timeout() argument
1995 if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags || in io_timeout()
1996 sqe->len != 1) in io_timeout()
1999 if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr))) in io_timeout()
2009 count = READ_ONCE(sqe->off); in io_timeout()
2091 memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy)); in io_req_defer()
2092 req->submit.sqe = sqe_copy; in io_req_defer()
2105 req->user_data = READ_ONCE(s->sqe->user_data); in __io_submit_sqe()
2115 if (unlikely(s->sqe->buf_index)) in __io_submit_sqe()
2120 if (unlikely(s->sqe->buf_index)) in __io_submit_sqe()
2131 ret = io_fsync(req, s->sqe, force_nonblock); in __io_submit_sqe()
2134 ret = io_poll_add(req, s->sqe); in __io_submit_sqe()
2137 ret = io_poll_remove(req, s->sqe); in __io_submit_sqe()
2140 ret = io_sync_file_range(req, s->sqe, force_nonblock); in __io_submit_sqe()
2143 ret = io_sendmsg(req, s->sqe, force_nonblock); in __io_submit_sqe()
2146 ret = io_recvmsg(req, s->sqe, force_nonblock); in __io_submit_sqe()
2149 ret = io_timeout(req, s->sqe); in __io_submit_sqe()
2214 const struct io_uring_sqe *sqe = s->sqe; in io_sq_wq_submit_work() local
2280 io_cqring_add_event(ctx, sqe->user_data, ret); in io_sq_wq_submit_work()
2285 kfree(sqe); in io_sq_wq_submit_work()
2413 flags = READ_ONCE(s->sqe->flags); in io_req_set_file()
2414 fd = READ_ONCE(s->sqe->fd); in io_req_set_file()
2460 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL); in __io_queue_sqe()
2464 s->sqe = sqe_copy; in __io_queue_sqe()
2505 io_cqring_add_event(ctx, s->sqe->user_data, ret); in io_queue_sqe()
2533 io_cqring_add_event(ctx, s->sqe->user_data, ret); in io_queue_link_head()
2565 if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) { in io_submit_sqe()
2582 io_cqring_add_event(ctx, s->sqe->user_data, ret); in io_submit_sqe()
2586 req->user_data = s->sqe->user_data; in io_submit_sqe()
2615 sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL); in io_submit_sqe()
2621 s->sqe = sqe_copy; in io_submit_sqe()
2624 } else if (s->sqe->flags & IOSQE_IO_LINK) { in io_submit_sqe()
2703 s->sqe = &ctx->sq_sqes[head]; in io_get_sqring()
2704 s->opcode = READ_ONCE(s->sqe->opcode); in io_get_sqring()
2746 prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0; in io_submit_sqes()
2748 if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) { in io_submit_sqes()
2761 io_cqring_add_event(ctx, s.sqe->user_data, in io_submit_sqes()
2940 prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0; in io_ring_submit()
2942 if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) { in io_ring_submit()
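
Every hit above is the kernel side of the submission contract: io_uring pulls each field out of the shared submission queue entry with READ_ONCE(), rejects fields the opcode does not use (io_prep_fsync(), io_poll_remove(), io_timeout()), and echoes sqe->user_data back through io_cqring_add_event(). As orientation only, here is a minimal userspace sketch of the other side of that contract, filling the same fields (fd, fsync_flags, user_data) via liburing for the fsync case shown in io_prep_fsync()/io_fsync() above. This is my own illustration, not a line from this file; the file name and user_data value are placeholders, and it assumes liburing is installed (build with: cc fsync_demo.c -o fsync_demo -luring).

/* Sketch: submit one IORING_OP_FSYNC and read its completion. */
#include <liburing.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fd, ret;

	/* "demo.txt" is a placeholder file name. */
	fd = open("demo.txt", O_WRONLY | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	if (io_uring_queue_init(8, &ring, 0) < 0) {
		perror("io_uring_queue_init");
		return 1;
	}

	/*
	 * Grab an sqe and fill it: the opcode, fd and fsync_flags land in the
	 * fields the kernel reads with READ_ONCE() in io_fsync(); user_data is
	 * copied into the completion, as the io_cqring_add_event() hits show.
	 */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
	io_uring_sqe_set_data(sqe, (void *)0x1234);  /* placeholder user_data */

	ret = io_uring_submit(&ring);
	if (ret < 0) {
		fprintf(stderr, "io_uring_submit: %d\n", ret);
		return 1;
	}

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (ret < 0) {
		fprintf(stderr, "io_uring_wait_cqe: %d\n", ret);
		return 1;
	}
	printf("fsync result %d, user_data %p\n", cqe->res,
	       io_uring_cqe_get_data(cqe));
	io_uring_cqe_seen(&ring, cqe);

	io_uring_queue_exit(&ring);
	close(fd);
	return 0;
}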