// SPDX-License-Identifier: GPL-2.0
/*
 * Contains the core associated with submission side polling of the SQ
 * ring, offloading submissions from the application to a kernel thread.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "sqpoll.h"

#define IORING_SQPOLL_CAP_ENTRIES_VALUE 8

enum {
	IO_SQ_THREAD_SHOULD_STOP = 0,
	IO_SQ_THREAD_SHOULD_PARK,
};

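/*
 * Undo a previous io_sq_thread_park() and release sqd->lock. If other
 * parkers are still pending, re-set the PARK bit so the SQPOLL thread
 * stays parked for them.
 */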
void io_sq_thread_unpark(struct io_sq_data *sqd)
	__releases(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	/*
	 * Do the dance but not conditional clear_bit() because it'd race with
	 * other threads incrementing park_pending and setting the bit.
	 */
	clear_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	if (atomic_dec_return(&sqd->park_pending))
		set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_unlock(&sqd->lock);
}

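/*
 * Ask the SQPOLL thread to park and take sqd->lock, which the thread only
 * drops while it is not actively processing its rings. With the lock held,
 * the caller can safely update sqd state (e.g. the ctx list) before calling
 * io_sq_thread_unpark().
 */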
void io_sq_thread_park(struct io_sq_data *sqd)
	__acquires(&sqd->lock)
{
	WARN_ON_ONCE(sqd->thread == current);

	atomic_inc(&sqd->park_pending);
	set_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
}

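/*
 * Tell the SQPOLL thread to exit and wait for it to signal sqd->exited.
 */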
void io_sq_thread_stop(struct io_sq_data *sqd)
{
	WARN_ON_ONCE(sqd->thread == current);
	WARN_ON_ONCE(test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state));

	set_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
	mutex_lock(&sqd->lock);
	if (sqd->thread)
		wake_up_process(sqd->thread);
	mutex_unlock(&sqd->lock);
	wait_for_completion(&sqd->exited);
}

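/*
 * Drop a reference to the sqd; the final put stops the SQPOLL thread and
 * frees the structure.
 */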
void io_put_sq_data(struct io_sq_data *sqd)
{
	if (refcount_dec_and_test(&sqd->refs)) {
		WARN_ON_ONCE(atomic_read(&sqd->park_pending));

		io_sq_thread_stop(sqd);
		kfree(sqd);
	}
}

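/*
 * The SQPOLL thread's idle period is the maximum sq_thread_idle across all
 * rings it services.
 */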
static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd)
{
	struct io_ring_ctx *ctx;
	unsigned sq_thread_idle = 0;

	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle);
	sqd->sq_thread_idle = sq_thread_idle;
}

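/*
 * Detach this ring from its SQPOLL backend: unlink the ctx while the thread
 * is parked, recompute the idle timeout, and drop the ctx's sqd reference.
 */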
void io_sq_thread_finish(struct io_ring_ctx *ctx)
{
	struct io_sq_data *sqd = ctx->sq_data;

	if (sqd) {
		io_sq_thread_park(sqd);
		list_del_init(&ctx->sqd_list);
		io_sqd_update_thread_idle(sqd);
		io_sq_thread_unpark(sqd);

		io_put_sq_data(sqd);
		ctx->sq_data = NULL;
	}
}

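/*
 * IORING_SETUP_ATTACH_WQ: share the SQPOLL backend of an existing ring,
 * identified by p->wq_fd. Only rings owned by the same thread group may
 * attach.
 */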
static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx_attach;
	struct io_sq_data *sqd;
	struct fd f;

	f = fdget(p->wq_fd);
	if (!f.file)
		return ERR_PTR(-ENXIO);
	if (!io_is_uring_fops(f.file)) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	ctx_attach = f.file->private_data;
	sqd = ctx_attach->sq_data;
	if (!sqd) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}
	if (sqd->task_tgid != current->tgid) {
		fdput(f);
		return ERR_PTR(-EPERM);
	}

	refcount_inc(&sqd->refs);
	fdput(f);
	return sqd;
}

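/*
 * Find or allocate the io_sq_data for this ring. If IORING_SETUP_ATTACH_WQ
 * was requested, try to attach to an existing sqd first; on -EPERM fall
 * back to setting up a fresh sqd and thread.
 */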
static struct io_sq_data *io_get_sq_data(struct io_uring_params *p,
					 bool *attached)
{
	struct io_sq_data *sqd;

	*attached = false;
	if (p->flags & IORING_SETUP_ATTACH_WQ) {
		sqd = io_attach_sq_data(p);
		if (!IS_ERR(sqd)) {
			*attached = true;
			return sqd;
		}
		/* fall through for EPERM case, setup new sqd/task */
		if (PTR_ERR(sqd) != -EPERM)
			return sqd;
	}

	sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
	if (!sqd)
		return ERR_PTR(-ENOMEM);

	atomic_set(&sqd->park_pending, 0);
	refcount_set(&sqd->refs, 1);
	INIT_LIST_HEAD(&sqd->ctx_list);
	mutex_init(&sqd->lock);
	init_waitqueue_head(&sqd->wait);
	init_completion(&sqd->exited);
	return sqd;
}

static inline bool io_sqd_events_pending(struct io_sq_data *sqd)
{
	return READ_ONCE(sqd->state);
}

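/*
 * Run one submission pass for a single ring: reap pending iopoll completions
 * and submit up to 'to_submit' SQEs, capped for fairness when the thread is
 * servicing multiple rings.
 */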
static int __io_sq_thread(struct io_ring_ctx *ctx, bool cap_entries)
{
	unsigned int to_submit;
	int ret = 0;

	to_submit = io_sqring_entries(ctx);
	/* if we're handling multiple rings, cap submit size for fairness */
	if (cap_entries && to_submit > IORING_SQPOLL_CAP_ENTRIES_VALUE)
		to_submit = IORING_SQPOLL_CAP_ENTRIES_VALUE;

	if (!wq_list_empty(&ctx->iopoll_list) || to_submit) {
		const struct cred *creds = NULL;

		if (ctx->sq_creds != current_cred())
			creds = override_creds(ctx->sq_creds);

		mutex_lock(&ctx->uring_lock);
		if (!wq_list_empty(&ctx->iopoll_list))
			io_do_iopoll(ctx, true);

		/*
		 * Don't submit if refs are dying, good for io_uring_register(),
		 * but also it is relied upon by io_ring_exit_work()
		 */
		if (to_submit && likely(!percpu_ref_is_dying(&ctx->refs)) &&
		    !(ctx->flags & IORING_SETUP_R_DISABLED))
			ret = io_submit_sqes(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (to_submit && wq_has_sleeper(&ctx->sqo_sq_wait))
			wake_up(&ctx->sqo_sq_wait);
		if (creds)
			revert_creds(creds);
	}

	return ret;
}

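/*
 * Handle park/stop requests and pending signals. Drops and re-acquires
 * sqd->lock so parkers can make progress; returns true if the thread
 * should exit.
 */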
static bool io_sqd_handle_event(struct io_sq_data *sqd)
{
	bool did_sig = false;
	struct ksignal ksig;

	if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) ||
	    signal_pending(current)) {
		mutex_unlock(&sqd->lock);
		if (signal_pending(current))
			did_sig = get_signal(&ksig);
		cond_resched();
		mutex_lock(&sqd->lock);
	}
	return did_sig || test_bit(IO_SQ_THREAD_SHOULD_STOP, &sqd->state);
}

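/*
 * Main loop of the SQPOLL kernel thread. Busy-polls the SQ ring of every
 * attached ctx; once sq_thread_idle has elapsed with no work it sets
 * IORING_SQ_NEED_WAKEUP (so userspace knows to call io_uring_enter()) and
 * goes to sleep until woken.
 */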
static int io_sq_thread(void *data)
{
	struct io_sq_data *sqd = data;
	struct io_ring_ctx *ctx;
	unsigned long timeout = 0;
	char buf[TASK_COMM_LEN];
	DEFINE_WAIT(wait);

	snprintf(buf, sizeof(buf), "iou-sqp-%d", sqd->task_pid);
	set_task_comm(current, buf);

	if (sqd->sq_cpu != -1)
		set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu));
	else
		set_cpus_allowed_ptr(current, cpu_online_mask);
	current->flags |= PF_NO_SETAFFINITY;

	mutex_lock(&sqd->lock);
	while (1) {
		bool cap_entries, sqt_spin = false;

		if (io_sqd_events_pending(sqd) || signal_pending(current)) {
			if (io_sqd_handle_event(sqd))
				break;
			timeout = jiffies + sqd->sq_thread_idle;
		}

		cap_entries = !list_is_singular(&sqd->ctx_list);
		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
			int ret = __io_sq_thread(ctx, cap_entries);

			if (!sqt_spin && (ret > 0 || !wq_list_empty(&ctx->iopoll_list)))
				sqt_spin = true;
		}
		if (io_run_task_work())
			sqt_spin = true;

		if (sqt_spin || !time_after(jiffies, timeout)) {
			if (sqt_spin)
				timeout = jiffies + sqd->sq_thread_idle;
			if (unlikely(need_resched())) {
				mutex_unlock(&sqd->lock);
				cond_resched();
				mutex_lock(&sqd->lock);
			}
			continue;
		}

		prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
		if (!io_sqd_events_pending(sqd) && !task_work_pending(current)) {
			bool needs_sched = true;

			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
				atomic_or(IORING_SQ_NEED_WAKEUP,
					  &ctx->rings->sq_flags);
				if ((ctx->flags & IORING_SETUP_IOPOLL) &&
				    !wq_list_empty(&ctx->iopoll_list)) {
					needs_sched = false;
					break;
				}

				/*
				 * Ensure the store of the wakeup flag is not
				 * reordered with the load of the SQ tail
				 */
				smp_mb__after_atomic();

				if (io_sqring_entries(ctx)) {
					needs_sched = false;
					break;
				}
			}

			if (needs_sched) {
				mutex_unlock(&sqd->lock);
				schedule();
				mutex_lock(&sqd->lock);
			}
			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
				atomic_andnot(IORING_SQ_NEED_WAKEUP,
					      &ctx->rings->sq_flags);
		}

		finish_wait(&sqd->wait, &wait);
		timeout = jiffies + sqd->sq_thread_idle;
	}

	io_uring_cancel_generic(true, sqd);
	sqd->thread = NULL;
	list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
		atomic_or(IORING_SQ_NEED_WAKEUP, &ctx->rings->sq_flags);
	io_run_task_work();
	mutex_unlock(&sqd->lock);

	complete(&sqd->exited);
	do_exit(0);
}

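/*
 * Wait until the SQ ring is no longer full (i.e. the SQPOLL thread has
 * consumed entries) or a signal is pending, then return to the submitter.
 */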
int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
{
	DEFINE_WAIT(wait);

	do {
		if (!io_sqring_full(ctx))
			break;
		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);

		if (!io_sqring_full(ctx))
			break;
		schedule();
	} while (!signal_pending(current));

	finish_wait(&ctx->sqo_sq_wait, &wait);
	return 0;
}

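/*
 * Set up SQPOLL offload for a new ring: validate the setup flags, find or
 * create the io_sq_data, configure CPU affinity and idle time, and spawn
 * the iou-sqp-<pid> kernel thread unless we attached to an existing one.
 */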
__cold int io_sq_offload_create(struct io_ring_ctx *ctx,
				struct io_uring_params *p)
{
	int ret;

	/* Retain compatibility with failing for an invalid attach attempt */
	if ((ctx->flags & (IORING_SETUP_ATTACH_WQ | IORING_SETUP_SQPOLL)) ==
				IORING_SETUP_ATTACH_WQ) {
		struct fd f;

		f = fdget(p->wq_fd);
		if (!f.file)
			return -ENXIO;
		if (!io_is_uring_fops(f.file)) {
			fdput(f);
			return -EINVAL;
		}
		fdput(f);
	}
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		struct task_struct *tsk;
		struct io_sq_data *sqd;
		bool attached;

		ret = security_uring_sqpoll();
		if (ret)
			return ret;

		sqd = io_get_sq_data(p, &attached);
		if (IS_ERR(sqd)) {
			ret = PTR_ERR(sqd);
			goto err;
		}

		ctx->sq_creds = get_current_cred();
		ctx->sq_data = sqd;
		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
		if (!ctx->sq_thread_idle)
			ctx->sq_thread_idle = HZ;

		io_sq_thread_park(sqd);
		list_add(&ctx->sqd_list, &sqd->ctx_list);
		io_sqd_update_thread_idle(sqd);
		/* don't attach to a dying SQPOLL thread, would be racy */
		ret = (attached && !sqd->thread) ? -ENXIO : 0;
		io_sq_thread_unpark(sqd);

		if (ret < 0)
			goto err;
		if (attached)
			return 0;

		if (p->flags & IORING_SETUP_SQ_AFF) {
			int cpu = p->sq_thread_cpu;

			ret = -EINVAL;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				goto err_sqpoll;
			sqd->sq_cpu = cpu;
		} else {
			sqd->sq_cpu = -1;
		}

		sqd->task_pid = current->pid;
		sqd->task_tgid = current->tgid;
		tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE);
		if (IS_ERR(tsk)) {
			ret = PTR_ERR(tsk);
			goto err_sqpoll;
		}

		sqd->thread = tsk;
		ret = io_uring_alloc_task_context(tsk, ctx);
		wake_up_new_task(tsk);
		if (ret)
			goto err;
	} else if (p->flags & IORING_SETUP_SQ_AFF) {
		/* Can't have SQ_AFF without SQPOLL */
		ret = -EINVAL;
		goto err;
	}

	return 0;
err_sqpoll:
	complete(&ctx->sq_data->exited);
err:
	io_sq_thread_finish(ctx);
	return ret;
}

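/*
 * Apply a CPU affinity mask to the io-wq backend of the SQPOLL task's
 * io_uring context, with the thread parked so it cannot exit underneath us.
 */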
__cold int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx,
				     cpumask_var_t mask)
{
	struct io_sq_data *sqd = ctx->sq_data;
	int ret = -EINVAL;

	if (sqd) {
		io_sq_thread_park(sqd);
		/* Don't set affinity for a dying thread */
		if (sqd->thread)
			ret = io_wq_cpu_affinity(sqd->thread->io_uring, mask);
		io_sq_thread_unpark(sqd);
	}

	return ret;
}