// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "tctx.h"

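/*
 * Lazily allocate the ring's shared io_wq_hash (used to serialize hashed
 * work items) and create an io-wq instance for @task whose concurrency is
 * capped by the SQ size.
 */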
static struct io_wq *io_init_wq_offload(struct io_ring_ctx *ctx,
					struct task_struct *task)
{
	struct io_wq_hash *hash;
	struct io_wq_data data;
	unsigned int concurrency;

	mutex_lock(&ctx->uring_lock);
	hash = ctx->hash_map;
	if (!hash) {
		hash = kzalloc(sizeof(*hash), GFP_KERNEL);
		if (!hash) {
			mutex_unlock(&ctx->uring_lock);
			return ERR_PTR(-ENOMEM);
		}
		refcount_set(&hash->refs, 1);
		init_waitqueue_head(&hash->wait);
		ctx->hash_map = hash;
	}
	mutex_unlock(&ctx->uring_lock);

	data.hash = hash;
	data.task = task;
	data.free_work = io_wq_free_work;
	data.do_work = io_wq_submit_work;

	/* Use the queue depth, or 4 * number of online CPUs, whichever is smaller */
	concurrency = min(ctx->sq_entries, 4 * num_online_cpus());

	return io_wq_create(concurrency, &data);
}
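/*
 * Free a task's io_uring context. By the time this is called the task must
 * have dropped all its ctx nodes and shut down its io-wq, hence the
 * WARN_ON_ONCE() checks.
 */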
void __io_uring_free(struct task_struct *tsk)
{
	struct io_uring_task *tctx = tsk->io_uring;

	WARN_ON_ONCE(!xa_empty(&tctx->xa));
	WARN_ON_ONCE(tctx->io_wq);
	WARN_ON_ONCE(tctx->cached_refs);

	percpu_counter_destroy(&tctx->inflight);
	kfree(tctx);
	tsk->io_uring = NULL;
}
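/*
 * Allocate and initialize the io_uring_task that hangs off task->io_uring:
 * the inflight counter, the per-task io-wq, the xarray of ctx nodes and
 * the task_work machinery.
 */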
__cold int io_uring_alloc_task_context(struct task_struct *task,
				       struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx;
	int ret;

	tctx = kzalloc(sizeof(*tctx), GFP_KERNEL);
	if (unlikely(!tctx))
		return -ENOMEM;

	ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
	if (unlikely(ret)) {
		kfree(tctx);
		return ret;
	}

	tctx->io_wq = io_init_wq_offload(ctx, task);
	if (IS_ERR(tctx->io_wq)) {
		ret = PTR_ERR(tctx->io_wq);
		percpu_counter_destroy(&tctx->inflight);
		kfree(tctx);
		return ret;
	}

	xa_init(&tctx->xa);
	init_waitqueue_head(&tctx->wait);
	atomic_set(&tctx->in_idle, 0);
	atomic_set(&tctx->inflight_tracked, 0);
	task->io_uring = tctx;
	init_llist_head(&tctx->task_list);
	init_task_work(&tctx->task_work, tctx_task_work);
	return 0;
}
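/*
 * Ensure current has an io_uring_task and a node linking it to @ctx. Nodes
 * live in tctx->xa keyed by the ctx pointer, and are also linked into
 * ctx->tctx_list under uring_lock so the ring side can walk its users.
 */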
int __io_uring_add_tctx_node(struct io_ring_ctx *ctx)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;
	int ret;

	if (unlikely(!tctx)) {
		ret = io_uring_alloc_task_context(current, ctx);
		if (unlikely(ret))
			return ret;

		tctx = current->io_uring;
		if (ctx->iowq_limits_set) {
			unsigned int limits[2] = { ctx->iowq_limits[0],
						   ctx->iowq_limits[1], };

			ret = io_wq_max_workers(tctx->io_wq, limits);
			if (ret)
				return ret;
		}
	}
	if (!xa_load(&tctx->xa, (unsigned long)ctx)) {
		node = kmalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOMEM;
		node->ctx = ctx;
		node->task = current;

		ret = xa_err(xa_store(&tctx->xa, (unsigned long)ctx,
					node, GFP_KERNEL));
		if (ret) {
			kfree(node);
			return ret;
		}

		mutex_lock(&ctx->uring_lock);
		list_add(&node->ctx_node, &ctx->tctx_list);
		mutex_unlock(&ctx->uring_lock);
	}
	return 0;
}
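/*
 * Submission-path variant of the above: additionally enforces
 * IORING_SETUP_SINGLE_ISSUER and caches the ctx in tctx->last.
 */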
int __io_uring_add_tctx_node_from_submit(struct io_ring_ctx *ctx)
{
	int ret;

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
	    && ctx->submitter_task != current)
		return -EEXIST;

	ret = __io_uring_add_tctx_node(ctx);
	if (ret)
		return ret;

	current->io_uring->last = ctx;
	return 0;
}

/*
 * Remove this io_uring_file -> task mapping.
 */
__cold void io_uring_del_tctx_node(unsigned long index)
{
	struct io_uring_task *tctx = current->io_uring;
	struct io_tctx_node *node;

	if (!tctx)
		return;
	node = xa_erase(&tctx->xa, index);
	if (!node)
		return;

	WARN_ON_ONCE(current != node->task);
	WARN_ON_ONCE(list_empty(&node->ctx_node));

	mutex_lock(&node->ctx->uring_lock);
	list_del(&node->ctx_node);
	mutex_unlock(&node->ctx->uring_lock);

	if (tctx->last == node->ctx)
		tctx->last = NULL;
	kfree(node);
}
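/*
 * Drop every ctx <-> task node for @tctx and shut down its io-wq.
 */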
__cold void io_uring_clean_tctx(struct io_uring_task *tctx)
{
	struct io_wq *wq = tctx->io_wq;
	struct io_tctx_node *node;
	unsigned long index;

	xa_for_each(&tctx->xa, index, node) {
		io_uring_del_tctx_node(index);
		cond_resched();
	}
	if (wq) {
		/*
		 * Must be after io_uring_del_tctx_node() (removes nodes under
		 * uring_lock) to avoid race with io_uring_try_cancel_iowq().
		 */
		io_wq_put_and_exit(wq);
		tctx->io_wq = NULL;
	}
}
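/*
 * Drop every ring fd the current task has registered, putting the file
 * references taken at registration time (teardown path, e.g. task exit).
 */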
void io_uring_unreg_ringfd(void)
{
	struct io_uring_task *tctx = current->io_uring;
	int i;

	for (i = 0; i < IO_RINGFD_REG_MAX; i++) {
		if (tctx->registered_rings[i]) {
			fput(tctx->registered_rings[i]);
			tctx->registered_rings[i] = NULL;
		}
	}
}
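/*
 * Stash the file behind @fd into the first free slot in [start, end) of
 * tctx->registered_rings. Returns the slot index on success, -EBADF or
 * -EOPNOTSUPP for an invalid fd, or -EBUSY if no slot is free.
 */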
static int io_ring_add_registered_fd(struct io_uring_task *tctx, int fd,
				     int start, int end)
{
	struct file *file;
	int offset;

	for (offset = start; offset < end; offset++) {
		offset = array_index_nospec(offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[offset])
			continue;

		file = fget(fd);
		if (!file) {
			return -EBADF;
		} else if (!io_is_uring_fops(file)) {
			fput(file);
			return -EOPNOTSUPP;
		}
		tctx->registered_rings[offset] = file;
		return offset;
	}

	return -EBUSY;
}

/*
 * Register a ring fd to avoid fdget/fdput for each io_uring_enter()
 * invocation. User passes in an array of struct io_uring_rsrc_update
 * with ->data set to the ring_fd, and ->offset given for the desired
 * index. If no index is desired, application may set ->offset == -1U
 * and we'll find an available index. Returns number of entries
 * successfully processed, or < 0 on error if none were processed.
 * A hypothetical userspace sketch follows the function below.
 */
int io_ringfd_register(struct io_ring_ctx *ctx, void __user *__arg,
		       unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_rsrc_update reg;
	struct io_uring_task *tctx;
	int ret, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;

	mutex_unlock(&ctx->uring_lock);
	ret = __io_uring_add_tctx_node(ctx);
	mutex_lock(&ctx->uring_lock);
	if (ret)
		return ret;

	tctx = current->io_uring;
	for (i = 0; i < nr_args; i++) {
		int start, end;

		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}

		if (reg.resv) {
			ret = -EINVAL;
			break;
		}

		if (reg.offset == -1U) {
			start = 0;
			end = IO_RINGFD_REG_MAX;
		} else {
			if (reg.offset >= IO_RINGFD_REG_MAX) {
				ret = -EINVAL;
				break;
			}
			start = reg.offset;
			end = start + 1;
		}

		ret = io_ring_add_registered_fd(tctx, reg.data, start, end);
		if (ret < 0)
			break;

		reg.offset = ret;
		if (copy_to_user(&arg[i], &reg, sizeof(reg))) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
			ret = -EFAULT;
			break;
		}
	}

	return i ? i : ret;
}
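
/*
 * Minimal userspace sketch of the flow above, from the syscall side; this
 * is illustrative only and not part of this file (liburing wraps the same
 * call as io_uring_register_ring_fd()):
 *
 *	struct io_uring_rsrc_update up = {
 *		.offset = -1U,			// let the kernel pick a free slot
 *		.data   = (__u64)ring_fd,	// fd returned by io_uring_setup()
 *	};
 *	int ret = syscall(__NR_io_uring_register, ring_fd,
 *			  IORING_REGISTER_RING_FDS, &up, 1);
 *	// ret == 1 on success; up.offset now holds the registered index,
 *	// usable with IORING_ENTER_REGISTERED_RING in io_uring_enter().
 */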
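/*
 * Counterpart of io_ringfd_register(): ->offset names the index to drop
 * and ->data must be zero. Returns the number of entries processed, or
 * < 0 on error if none were.
 */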
int io_ringfd_unregister(struct io_ring_ctx *ctx, void __user *__arg,
			 unsigned nr_args)
{
	struct io_uring_rsrc_update __user *arg = __arg;
	struct io_uring_task *tctx = current->io_uring;
	struct io_uring_rsrc_update reg;
	int ret = 0, i;

	if (!nr_args || nr_args > IO_RINGFD_REG_MAX)
		return -EINVAL;
	if (!tctx)
		return 0;

	for (i = 0; i < nr_args; i++) {
		if (copy_from_user(&reg, &arg[i], sizeof(reg))) {
			ret = -EFAULT;
			break;
		}
		if (reg.resv || reg.data || reg.offset >= IO_RINGFD_REG_MAX) {
			ret = -EINVAL;
			break;
		}

		reg.offset = array_index_nospec(reg.offset, IO_RINGFD_REG_MAX);
		if (tctx->registered_rings[reg.offset]) {
			fput(tctx->registered_rings[reg.offset]);
			tctx->registered_rings[reg.offset] = NULL;
		}
	}

	return i ? i : ret;
}