// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "rsrc.h"
#include "filetable.h"
#include "alloc_cache.h"
#include "msg_ring.h"

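/*
 * MSG_RING lets one io_uring send a message to another, either posting a
 * plain CQE (IORING_MSG_DATA) or passing a fixed file (IORING_MSG_SEND_FD).
 * A minimal userspace sketch via liburing (illustrative only; 'ring' and
 * 'target_fd' are assumed to be an initialized ring and the fd of the ring
 * being messaged):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_msg_ring(sqe, target_fd, 0x20, 0xcafe, 0);
 *	io_uring_submit(&ring);
 *
 * The target ring then observes a CQE with user_data == 0xcafe and
 * res == 0x20.
 */
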
/* All valid flags for MSG_RING */
#define IORING_MSG_RING_MASK		(IORING_MSG_RING_CQE_SKIP | \
					 IORING_MSG_RING_FLAGS_PASS)

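/*
 * Per-request MSG_RING state, overlaid on the io_kiocb command area.
 * ->src_file holds a file reference while an fd pass is in flight; ->dst_fd
 * is reused as the CQE flags to post when IORING_MSG_RING_FLAGS_PASS is
 * set, hence the union with ->cqe_flags.
 */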
struct io_msg {
	struct file			*file;
	struct file			*src_file;
	struct callback_head		tw;
	u64 user_data;
	u32 len;
	u32 cmd;
	u32 src_fd;
	union {
		u32 dst_fd;
		u32 cqe_flags;
	};
	u32 flags;
};

static void io_double_unlock_ctx(struct io_ring_ctx *octx)
{
	mutex_unlock(&octx->uring_lock);
}

static int io_double_lock_ctx(struct io_ring_ctx *octx,
			      unsigned int issue_flags)
{
	/*
	 * To ensure proper ordering between the two ctxs, we can only
	 * attempt a trylock on the target. If that fails and we already have
	 * the source ctx lock, punt to io-wq.
	 */
	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
		if (!mutex_trylock(&octx->uring_lock))
			return -EAGAIN;
		return 0;
	}
	mutex_lock(&octx->uring_lock);
	return 0;
}

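/*
 * Called if the request is torn down with the file reference still held,
 * e.g. when an fd pass was prepared but never installed in the target.
 */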
void io_msg_ring_cleanup(struct io_kiocb *req)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (WARN_ON_ONCE(!msg->src_file))
		return;

	fput(msg->src_file);
	msg->src_file = NULL;
}

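/*
 * Rings with ->task_complete set (e.g. IORING_SETUP_DEFER_TASKRUN) may only
 * have CQEs posted by their submitter task, so messages must be bounced to
 * that task rather than posted directly.
 */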
static inline bool io_msg_need_remote(struct io_ring_ctx *target_ctx)
{
	return target_ctx->task_complete;
}

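/*
 * Runs in the target ring's task context: post the prepared CQE, then try
 * to recycle the carrier request into the target's message cache (falling
 * back to kfree_rcu()) and drop the ctx reference taken at post time.
 */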
static void io_msg_tw_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_add_aux_cqe(ctx, req->cqe.user_data, req->cqe.res, req->cqe.flags);
	if (spin_trylock(&ctx->msg_lock)) {
		if (io_alloc_cache_put(&ctx->msg_cache, req))
			req = NULL;
		spin_unlock(&ctx->msg_lock);
	}
	if (req)
		kfree_rcu(req, rcu_head);
	percpu_ref_put(&ctx->refs);
}

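/*
 * Hand a prepared dummy (NOP) request to the target ring so its submitter
 * task posts the CQE. A reference on the target ctx is held until the
 * task_work has run, and is dropped in io_msg_tw_complete().
 */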
static int io_msg_remote_post(struct io_ring_ctx *ctx, struct io_kiocb *req,
			      int res, u32 cflags, u64 user_data)
{
	req->task = READ_ONCE(ctx->submitter_task);
	if (!req->task) {
		kfree_rcu(req, rcu_head);
		return -EOWNERDEAD;
	}
	req->opcode = IORING_OP_NOP;
	req->cqe.user_data = user_data;
	io_req_set_res(req, res, cflags);
	percpu_ref_get(&ctx->refs);
	req->ctx = ctx;
	req->io_task_work.func = io_msg_tw_complete;
	io_req_task_work_add_remote(req, ctx, IOU_F_TWQ_LAZY_WAKE);
	return 0;
}

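/*
 * Grab a carrier request from @ctx's message cache if possible, falling
 * back to a fresh zeroed allocation if the cache lock is contended or the
 * cache is empty.
 */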
static struct io_kiocb *io_msg_get_kiocb(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req = NULL;

	if (spin_trylock(&ctx->msg_lock)) {
		req = io_alloc_cache_get(&ctx->msg_cache);
		spin_unlock(&ctx->msg_lock);
		if (req)
			return req;
	}
	return kmem_cache_alloc(req_cachep, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
}

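/*
 * Remote path for IORING_MSG_DATA: package user_data/len/flags into a
 * freshly grabbed carrier request and queue it on the target ring.
 */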
static int io_msg_data_remote(struct io_kiocb *req)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_kiocb *target;
	u32 flags = 0;

	target = io_msg_get_kiocb(req->ctx);
	if (unlikely(!target))
		return -ENOMEM;

	if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
		flags = msg->cqe_flags;

	return io_msg_remote_post(target_ctx, target, msg->len, flags,
				  msg->user_data);
}

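/*
 * IORING_MSG_DATA: post a CQE carrying ->user_data and ->len to the target
 * ring. With IORING_MSG_RING_FLAGS_PASS, ->dst_fd is reinterpreted as the
 * CQE flags to set on the posted completion.
 */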
static int io_msg_ring_data(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	u32 flags = 0;
	int ret;

	if (msg->src_fd || msg->flags & ~IORING_MSG_RING_FLAGS_PASS)
		return -EINVAL;
	if (!(msg->flags & IORING_MSG_RING_FLAGS_PASS) && msg->dst_fd)
		return -EINVAL;
	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
		return -EBADFD;

	if (io_msg_need_remote(target_ctx))
		return io_msg_data_remote(req);

	if (msg->flags & IORING_MSG_RING_FLAGS_PASS)
		flags = msg->cqe_flags;

	ret = -EOVERFLOW;
	if (target_ctx->flags & IORING_SETUP_IOPOLL) {
		if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
			return -EAGAIN;
	}
	if (io_post_aux_cqe(target_ctx, msg->user_data, msg->len, flags))
		ret = 0;
	if (target_ctx->flags & IORING_SETUP_IOPOLL)
		io_double_unlock_ctx(target_ctx);
	return ret;
}

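/*
 * Look up the sender's fixed file at ->src_fd and take a reference on it,
 * with the index sanitized against speculation.
 */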
static struct file *io_msg_grab_file(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *file = NULL;
	int idx = msg->src_fd;

	io_ring_submit_lock(ctx, issue_flags);
	if (likely(idx < ctx->nr_user_files)) {
		idx = array_index_nospec(idx, ctx->nr_user_files);
		file = io_file_from_index(&ctx->file_table, idx);
		if (file)
			get_file(file);
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return file;
}

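/*
 * Install the grabbed file into the target's fixed file table, then, unless
 * CQE_SKIP was requested, post a CQE so the target learns about the new
 * descriptor.
 */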
static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct file *src_file = msg->src_file;
	int ret;

	if (unlikely(io_double_lock_ctx(target_ctx, issue_flags)))
		return -EAGAIN;

	ret = __io_fixed_fd_install(target_ctx, src_file, msg->dst_fd);
	if (ret < 0)
		goto out_unlock;

	msg->src_file = NULL;
	req->flags &= ~REQ_F_NEED_CLEANUP;

	if (msg->flags & IORING_MSG_RING_CQE_SKIP)
		goto out_unlock;
	/*
	 * If this fails, the target still received the file descriptor but
	 * wasn't notified of the fact. This means that if this request
	 * completes with -EOVERFLOW, then the sender must ensure that a
	 * later IORING_OP_MSG_RING delivers the message.
	 */
	if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0))
		ret = -EOVERFLOW;
out_unlock:
	io_double_unlock_ctx(target_ctx);
	return ret;
}

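/*
 * Runs via task_work in the target's submitter task; skip the install if
 * that task is already exiting.
 */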
static void io_msg_tw_fd_complete(struct callback_head *head)
{
	struct io_msg *msg = container_of(head, struct io_msg, tw);
	struct io_kiocb *req = cmd_to_io_kiocb(msg);
	int ret = -EOWNERDEAD;

	if (!(current->flags & PF_EXITING))
		ret = io_msg_install_complete(req, IO_URING_F_UNLOCKED);
	if (ret < 0)
		req_set_fail(req);
	io_req_queue_tw_complete(req, ret);
}

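/*
 * The target ring requires submitter-task completions, so queue the file
 * install as task_work on that task and complete the request later.
 */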
static int io_msg_fd_remote(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct task_struct *task = READ_ONCE(ctx->submitter_task);

	if (unlikely(!task))
		return -EOWNERDEAD;

	init_task_work(&msg->tw, io_msg_tw_fd_complete);
	if (task_work_add(task, &msg->tw, TWA_SIGNAL))
		return -EOWNERDEAD;

	return IOU_ISSUE_SKIP_COMPLETE;
}

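/*
 * IORING_MSG_SEND_FD: pass one of the sender's fixed files to the target
 * ring's fixed file table. Sending to one's own ring is rejected.
 */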
static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *target_ctx = req->file->private_data;
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	struct io_ring_ctx *ctx = req->ctx;
	struct file *src_file = msg->src_file;

	if (msg->len)
		return -EINVAL;
	if (target_ctx == ctx)
		return -EINVAL;
	if (target_ctx->flags & IORING_SETUP_R_DISABLED)
		return -EBADFD;
	if (!src_file) {
		src_file = io_msg_grab_file(req, issue_flags);
		if (!src_file)
			return -EBADF;
		msg->src_file = src_file;
		req->flags |= REQ_F_NEED_CLEANUP;
	}

	if (io_msg_need_remote(target_ctx))
		return io_msg_fd_remote(req);
	return io_msg_install_complete(req, issue_flags);
}

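/*
 * SQE layout for MSG_RING: ->off carries the user_data to post, ->addr the
 * command (IORING_MSG_DATA or IORING_MSG_SEND_FD), ->addr3 the source fixed
 * file index and ->file_index the destination slot or CQE flags.
 */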
int io_msg_ring_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);

	if (unlikely(sqe->buf_index || sqe->personality))
		return -EINVAL;

	msg->src_file = NULL;
	msg->user_data = READ_ONCE(sqe->off);
	msg->len = READ_ONCE(sqe->len);
	msg->cmd = READ_ONCE(sqe->addr);
	msg->src_fd = READ_ONCE(sqe->addr3);
	msg->dst_fd = READ_ONCE(sqe->file_index);
	msg->flags = READ_ONCE(sqe->msg_ring_flags);
	if (msg->flags & ~IORING_MSG_RING_MASK)
		return -EINVAL;

	return 0;
}

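/*
 * Entry point for IORING_OP_MSG_RING: the request's file must itself be an
 * io_uring instance, which becomes the target of the message.
 */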
int io_msg_ring(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_msg *msg = io_kiocb_to_cmd(req, struct io_msg);
	int ret;

	ret = -EBADFD;
	if (!io_is_uring_fops(req->file))
		goto done;

	switch (msg->cmd) {
	case IORING_MSG_DATA:
		ret = io_msg_ring_data(req, issue_flags);
		break;
	case IORING_MSG_SEND_FD:
		ret = io_msg_send_fd(req, issue_flags);
		break;
	default:
		ret = -EINVAL;
		break;
	}

done:
	if (ret < 0) {
		if (ret == -EAGAIN || ret == IOU_ISSUE_SKIP_COMPLETE)
			return ret;
		req_set_fail(req);
	}
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

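/* Release a cached carrier request when the message cache is torn down. */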
void io_msg_cache_free(const void *entry)
{
	struct io_kiocb *req = (struct io_kiocb *) entry;

	kmem_cache_free(req_cachep, req);
}