// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>
#include <linux/security.h>
#include <linux/nospec.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "uring_cmd.h"

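/*
 * Task-work callback: invoke the driver-supplied completion handler in task
 * context, indicating via issue_flags whether the ring lock is held.
 */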
static void io_uring_cmd_work(struct io_kiocb *req, bool *locked)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;

	ioucmd->task_work_cb(ioucmd, issue_flags);
}

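/*
 * Schedule @task_work_cb to run via task_work in the context of the
 * submitting task, instead of completing the command inline.
 */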
void io_uring_cmd_complete_in_task(struct io_uring_cmd *ioucmd,
			void (*task_work_cb)(struct io_uring_cmd *, unsigned))
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	ioucmd->task_work_cb = task_work_cb;
	req->io_task_work.func = io_uring_cmd_work;
	io_req_task_work_add(req);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_complete_in_task);

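/* Stash the two extra CQE32 result words on the request for later posting. */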
static inline void io_req_set_cqe32_extra(struct io_kiocb *req,
					  u64 extra1, u64 extra2)
{
	req->extra1 = extra1;
	req->extra2 = extra2;
	req->flags |= REQ_F_CQE32_INIT;
}

/*
 * Called by consumers of io_uring_cmd, if they originally returned
 * -EIOCBQUEUED upon receiving the command.
 */
void io_uring_cmd_done(struct io_uring_cmd *ioucmd, ssize_t ret, ssize_t res2,
		       unsigned issue_flags)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	if (ret < 0)
		req_set_fail(req);

	io_req_set_res(req, ret, 0);
	if (req->ctx->flags & IORING_SETUP_CQE32)
		io_req_set_cqe32_extra(req, res2, 0);
	if (req->ctx->flags & IORING_SETUP_IOPOLL)
		/* order with io_iopoll_req_issued() checking ->iopoll_complete */
		smp_store_release(&req->iopoll_completed, 1);
	else
		__io_req_complete(req, issue_flags);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_done);

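/*
 * Copy the SQE-embedded command payload into the request's async_data so it
 * remains valid once the original SQE slot is reused.
 */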
int io_uring_cmd_prep_async(struct io_kiocb *req)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	size_t cmd_size;

	BUILD_BUG_ON(uring_cmd_pdu_size(0) != 16);
	BUILD_BUG_ON(uring_cmd_pdu_size(1) != 80);

	cmd_size = uring_cmd_pdu_size(req->ctx->flags & IORING_SETUP_SQE128);

	memcpy(req->async_data, ioucmd->cmd, cmd_size);
	return 0;
}

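/*
 * Prep time: validate the SQE, pick up the command opcode and flags, and
 * resolve the registered buffer if IORING_URING_CMD_FIXED is set.
 */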
int io_uring_cmd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);

	if (sqe->__pad1)
		return -EINVAL;

	ioucmd->flags = READ_ONCE(sqe->uring_cmd_flags);
	if (ioucmd->flags & ~IORING_URING_CMD_FIXED)
		return -EINVAL;

	if (ioucmd->flags & IORING_URING_CMD_FIXED) {
		struct io_ring_ctx *ctx = req->ctx;
		u16 index;

		req->buf_index = READ_ONCE(sqe->buf_index);
		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
			return -EFAULT;
		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
		req->imu = ctx->user_bufs[index];
		io_req_set_rsrc_node(req, ctx, 0);
	}
	ioucmd->cmd = sqe->cmd;
	ioucmd->cmd_op = READ_ONCE(sqe->cmd_op);
	return 0;
}

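/*
 * Issue path: translate ring setup flags into issue_flags and hand the
 * command to the file's ->uring_cmd() handler. -EAGAIN triggers an async
 * retry (after copying the SQE payload to async_data), while -EIOCBQUEUED
 * defers completion to a later io_uring_cmd_done() call.
 */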
int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = req->file;
	int ret;

	if (!file->f_op->uring_cmd)
		return -EOPNOTSUPP;

	ret = security_uring_cmd(ioucmd);
	if (ret)
		return ret;

	if (ctx->flags & IORING_SETUP_SQE128)
		issue_flags |= IO_URING_F_SQE128;
	if (ctx->flags & IORING_SETUP_CQE32)
		issue_flags |= IO_URING_F_CQE32;
	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!file->f_op->uring_cmd_iopoll)
			return -EOPNOTSUPP;
		issue_flags |= IO_URING_F_IOPOLL;
		req->iopoll_completed = 0;
		WRITE_ONCE(ioucmd->cookie, NULL);
	}

	if (req_has_async_data(req))
		ioucmd->cmd = req->async_data;

	ret = file->f_op->uring_cmd(ioucmd, issue_flags);
	if (ret == -EAGAIN) {
		if (!req_has_async_data(req)) {
			if (io_alloc_async_data(req))
				return -ENOMEM;
			io_uring_cmd_prep_async(req);
		}
		return -EAGAIN;
	}

	if (ret != -EIOCBQUEUED) {
		if (ret < 0)
			req_set_fail(req);
		io_req_set_res(req, ret, 0);
		return ret;
	}

	return IOU_ISSUE_SKIP_COMPLETE;
}

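/*
 * Helper for drivers using IORING_URING_CMD_FIXED: build an iov_iter over
 * the pre-registered buffer that was selected at prep time.
 */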
int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
			      struct iov_iter *iter, void *ioucmd)
{
	struct io_kiocb *req = cmd_to_io_kiocb(ioucmd);

	return io_import_fixed(rw, iter, req->imu, ubuf, len);
}
EXPORT_SYMBOL_GPL(io_uring_cmd_import_fixed);