// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqe (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
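 * As a minimal userspace sketch of the CQ side (illustrative only, not the
 * liburing API; smp_load_acquire()/smp_store_release() stand in for the
 * application-side barrier helpers, and the ring field names are assumed):
 *
 *	unsigned head = *cq_head;
 *	while (head != smp_load_acquire(cq_tail)) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *		handle_cqe(cqe);
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);
 *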
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <net/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>
#include <linux/bits.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/task_work.h>
#include <linux/io_uring.h>
#include <linux/io_uring/cmd.h>
#include <linux/audit.h>
#include <linux/security.h>
#include <asm/shmparam.h>

#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io-wq.h"

#include "io_uring.h"
#include "opdef.h"
#include "refs.h"
#include "tctx.h"
#include "register.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "kbuf.h"
#include "rsrc.h"
#include "cancel.h"
#include "net.h"
#include "notif.h"
#include "waitid.h"
#include "futex.h"
#include "napi.h"
#include "uring_cmd.h"
#include "msg_ring.h"
#include "memmap.h"

#include "timeout.h"
#include "poll.h"
#include "rw.h"
#include "alloc_cache.h"
#include "eventfd.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)

#define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
			  IOSQE_IO_HARDLINK | IOSQE_ASYNC)

#define SQE_VALID_FLAGS	(SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
			IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)

#define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
				REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
				REQ_F_ASYNC_DATA)

#define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
				 IO_REQ_CLEAN_FLAGS)

#define IO_TCTX_REFS_CACHE_NR	(1U << 10)

#define IO_COMPL_BATCH			32
#define IO_REQ_ALLOC_BATCH		8

struct io_defer_entry {
	struct list_head	list;
	struct io_kiocb		*req;
	u32			seq;
};

/* requests with any of those set should undergo io_disarm_next() */
#define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
#define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)

/*
 * No waiters. It's larger than any valid value of the tw counter
 * so that tests against ->cq_wait_nr would fail and skip wake_up().
 */
#define IO_CQ_WAKE_INIT		(-1U)
/* Forced wake up if there is a waiter regardless of ->cq_wait_nr */
#define IO_CQ_WAKE_FORCE	(IO_CQ_WAKE_INIT >> 1)

static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
					 struct task_struct *task,
					 bool cancel_all);

static void io_queue_sqe(struct io_kiocb *req);

struct kmem_cache *req_cachep;
static struct workqueue_struct *iou_wq __ro_after_init;

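/*
 * io_uring_disabled: 0 == rings can be created by anyone, 2 == creation is
 * disabled entirely; 1 restricts creation to CAP_SYS_ADMIN and members of
 * the gid in io_uring_group, where -1 means no group is allowed.
 */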
static int __read_mostly sysctl_io_uring_disabled;
static int __read_mostly sysctl_io_uring_group = -1;

#ifdef CONFIG_SYSCTL
static struct ctl_table kernel_io_uring_disabled_table[] = {
	{
		.procname	= "io_uring_disabled",
		.data		= &sysctl_io_uring_disabled,
		.maxlen		= sizeof(sysctl_io_uring_disabled),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_TWO,
	},
	{
		.procname	= "io_uring_group",
		.data		= &sysctl_io_uring_group,
		.maxlen		= sizeof(gid_t),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
};
#endif

static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
{
	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
}

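/*
 * Like __io_cqring_events(), but based on the user-visible ring tail rather
 * than the kernel-side cached tail, i.e. only counting CQEs already posted.
 */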
static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
{
	return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
}

static bool io_match_linked(struct io_kiocb *head)
{
	struct io_kiocb *req;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

/*
 * As io_match_task() but protected against racing with linked timeouts.
 * User must not hold timeout_lock.
 */
bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all)
{
	bool matched;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	if (head->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = head->ctx;

		/* protect against races with linked timeouts */
		spin_lock_irq(&ctx->timeout_lock);
		matched = io_match_linked(head);
		spin_unlock_irq(&ctx->timeout_lock);
	} else {
		matched = io_match_linked(head);
	}
	return matched;
}

static inline void req_fail_link_node(struct io_kiocb *req, int res)
{
	req_set_fail(req);
	io_req_set_res(req, res, 0);
}

static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
}

static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ref_comp);
}

static __cold void io_fallback_req_func(struct work_struct *work)
{
	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
						fallback_work.work);
	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
	struct io_kiocb *req, *tmp;
	struct io_tw_state ts = {};

	percpu_ref_get(&ctx->refs);
	mutex_lock(&ctx->uring_lock);
	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
		req->io_task_work.func(req, &ts);
	io_submit_flush_completions(ctx);
	mutex_unlock(&ctx->uring_lock);
	percpu_ref_put(&ctx->refs);
}

static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
{
	unsigned hash_buckets = 1U << bits;
	size_t hash_size = hash_buckets * sizeof(table->hbs[0]);

	table->hbs = kmalloc(hash_size, GFP_KERNEL);
	if (!table->hbs)
		return -ENOMEM;

	table->hash_bits = bits;
	init_hash_table(table, hash_buckets);
	return 0;
}

static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int hash_bits;
	bool ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	xa_init(&ctx->io_bl_xa);

	/*
	 * Use 5 bits less than the max cq entries, that should give us around
	 * 32 entries per hash list if totally full and uniformly spread, but
	 * don't keep too many buckets to not overconsume memory.
	 */
	hash_bits = ilog2(p->cq_entries) - 5;
	hash_bits = clamp(hash_bits, 1, 8);
	if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
		goto err;
	if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
		goto err;
	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    0, GFP_KERNEL))
		goto err;

	ctx->flags = p->flags;
	atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
	init_waitqueue_head(&ctx->sqo_sq_wait);
	INIT_LIST_HEAD(&ctx->sqd_list);
	INIT_LIST_HEAD(&ctx->cq_overflow_list);
	INIT_LIST_HEAD(&ctx->io_buffers_cache);
	ret = io_alloc_cache_init(&ctx->rsrc_node_cache, IO_NODE_ALLOC_CACHE_MAX,
			    sizeof(struct io_rsrc_node));
	ret |= io_alloc_cache_init(&ctx->apoll_cache, IO_POLL_ALLOC_CACHE_MAX,
			    sizeof(struct async_poll));
	ret |= io_alloc_cache_init(&ctx->netmsg_cache, IO_ALLOC_CACHE_MAX,
			    sizeof(struct io_async_msghdr));
	ret |= io_alloc_cache_init(&ctx->rw_cache, IO_ALLOC_CACHE_MAX,
			    sizeof(struct io_async_rw));
	ret |= io_alloc_cache_init(&ctx->uring_cache, IO_ALLOC_CACHE_MAX,
			    sizeof(struct uring_cache));
	spin_lock_init(&ctx->msg_lock);
	ret |= io_alloc_cache_init(&ctx->msg_cache, IO_ALLOC_CACHE_MAX,
			    sizeof(struct io_kiocb));
	ret |= io_futex_cache_init(ctx);
	if (ret)
		goto free_ref;
	init_completion(&ctx->ref_comp);
	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->cq_wait);
	init_waitqueue_head(&ctx->poll_wq);
	init_waitqueue_head(&ctx->rsrc_quiesce_wq);
	spin_lock_init(&ctx->completion_lock);
	spin_lock_init(&ctx->timeout_lock);
	INIT_WQ_LIST(&ctx->iopoll_list);
	INIT_LIST_HEAD(&ctx->io_buffers_comp);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	INIT_LIST_HEAD(&ctx->ltimeout_list);
	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
	init_llist_head(&ctx->work_llist);
	INIT_LIST_HEAD(&ctx->tctx_list);
	ctx->submit_state.free_list.next = NULL;
	INIT_HLIST_HEAD(&ctx->waitid_list);
#ifdef CONFIG_FUTEX
	INIT_HLIST_HEAD(&ctx->futex_list);
#endif
	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
	INIT_HLIST_HEAD(&ctx->cancelable_uring_cmd);
	io_napi_init(ctx);

	return ctx;

free_ref:
	percpu_ref_exit(&ctx->refs);
err:
	io_alloc_cache_free(&ctx->rsrc_node_cache, kfree);
	io_alloc_cache_free(&ctx->apoll_cache, kfree);
	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
	io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
	io_alloc_cache_free(&ctx->uring_cache, kfree);
	io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
	io_futex_cache_free(ctx);
	kfree(ctx->cancel_table.hbs);
	kfree(ctx->cancel_table_locked.hbs);
	xa_destroy(&ctx->io_bl_xa);
	kfree(ctx);
	return NULL;
}

static void io_account_cq_overflow(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
	ctx->cq_extra--;
}

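/*
 * A drained request may only run once everything submitted before it has
 * completed. "seq" is the SQ position it was submitted at, and cq_extra
 * accounts for CQEs that don't map one-to-one onto SQEs, so the check below
 * amounts to "have all prior submissions reached the CQ yet".
 */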
static bool req_need_defer(struct io_kiocb *req, u32 seq)
{
	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
		struct io_ring_ctx *ctx = req->ctx;

		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
	}

	return false;
}

static void io_clean_op(struct io_kiocb *req)
{
	if (req->flags & REQ_F_BUFFER_SELECTED) {
		spin_lock(&req->ctx->completion_lock);
		io_kbuf_drop(req);
		spin_unlock(&req->ctx->completion_lock);
	}

	if (req->flags & REQ_F_NEED_CLEANUP) {
		const struct io_cold_def *def = &io_cold_defs[req->opcode];

		if (def->cleanup)
			def->cleanup(req);
	}
	if ((req->flags & REQ_F_POLLED) && req->apoll) {
		kfree(req->apoll->double_poll);
		kfree(req->apoll);
		req->apoll = NULL;
	}
	if (req->flags & REQ_F_INFLIGHT) {
		struct io_uring_task *tctx = req->task->io_uring;

		atomic_dec(&tctx->inflight_tracked);
	}
	if (req->flags & REQ_F_CREDS)
		put_cred(req->creds);
	if (req->flags & REQ_F_ASYNC_DATA) {
		kfree(req->async_data);
		req->async_data = NULL;
	}
	req->flags &= ~IO_REQ_CLEAN_FLAGS;
}

static inline void io_req_track_inflight(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_INFLIGHT)) {
		req->flags |= REQ_F_INFLIGHT;
		atomic_inc(&req->task->io_uring->inflight_tracked);
	}
}

static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
{
	if (WARN_ON_ONCE(!req->link))
		return NULL;

	req->flags &= ~REQ_F_ARM_LTIMEOUT;
	req->flags |= REQ_F_LINK_TIMEOUT;

	/* linked timeouts should have two refs once prep'ed */
	io_req_set_refcount(req);
	__io_req_set_refcount(req->link, 2);
	return req->link;
}

static void io_prep_async_work(struct io_kiocb *req)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];
	struct io_ring_ctx *ctx = req->ctx;

	if (!(req->flags & REQ_F_CREDS)) {
		req->flags |= REQ_F_CREDS;
		req->creds = get_current_cred();
	}

	req->work.list.next = NULL;
	atomic_set(&req->work.flags, 0);
	if (req->flags & REQ_F_FORCE_ASYNC)
		atomic_or(IO_WQ_WORK_CONCURRENT, &req->work.flags);

	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
		req->flags |= io_file_get_flags(req->file);

	if (req->file && (req->flags & REQ_F_ISREG)) {
		bool should_hash = def->hash_reg_file;

		/* don't serialize this request if the fs doesn't need it */
		if (should_hash && (req->file->f_flags & O_DIRECT) &&
		    (req->file->f_op->fop_flags & FOP_DIO_PARALLEL_WRITE))
			should_hash = false;
		if (should_hash || (ctx->flags & IORING_SETUP_IOPOLL))
			io_wq_hash_work(&req->work, file_inode(req->file));
	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
		if (def->unbound_nonreg_file)
			atomic_or(IO_WQ_WORK_UNBOUND, &req->work.flags);
	}
}

static void io_prep_async_link(struct io_kiocb *req)
{
	struct io_kiocb *cur;

	if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock_irq(&ctx->timeout_lock);
		io_for_each_link(cur, req)
			io_prep_async_work(cur);
		spin_unlock_irq(&ctx->timeout_lock);
	} else {
		io_for_each_link(cur, req)
			io_prep_async_work(cur);
	}
}

static void io_queue_iowq(struct io_kiocb *req)
{
	struct io_uring_task *tctx = req->task->io_uring;

	BUG_ON(!tctx);

	if ((current->flags & PF_KTHREAD) || !tctx->io_wq) {
		io_req_task_queue_fail(req, -ECANCELED);
		return;
	}

	/* init ->work of the whole link before punting */
	io_prep_async_link(req);

	/*
	 * Not expected to happen, but if we do have a bug where this _can_
	 * happen, catch it here and ensure the request is marked as
	 * canceled. That will make io-wq go through the usual work cancel
	 * procedure rather than attempt to run this request (or create a new
	 * worker for it).
	 */
	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
		atomic_or(IO_WQ_WORK_CANCEL, &req->work.flags);

	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
	io_wq_enqueue(tctx->io_wq, &req->work);
}

static void io_req_queue_iowq_tw(struct io_kiocb *req, struct io_tw_state *ts)
{
	io_queue_iowq(req);
}

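/* arrange for the io-wq punt to happen from the request's own task_work */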
void io_req_queue_iowq(struct io_kiocb *req)
{
	req->io_task_work.func = io_req_queue_iowq_tw;
	io_req_task_work_add(req);
}

static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
{
	while (!list_empty(&ctx->defer_list)) {
		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
						struct io_defer_entry, list);

		if (req_need_defer(de->req, de->seq))
			break;
		list_del_init(&de->list);
		io_req_task_queue(de->req);
		kfree(de);
	}
}

void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (ctx->poll_activated)
		io_poll_wq_wake(ctx);
	if (ctx->off_timeout_used)
		io_flush_timeouts(ctx);
	if (ctx->drain_active) {
		spin_lock(&ctx->completion_lock);
		io_queue_deferred(ctx);
		spin_unlock(&ctx->completion_lock);
	}
	if (ctx->has_evfd)
		io_eventfd_flush_signal(ctx);
}

static inline void __io_cq_lock(struct io_ring_ctx *ctx)
{
	if (!ctx->lockless_cq)
		spin_lock(&ctx->completion_lock);
}

static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
{
	io_commit_cqring(ctx);
	if (!ctx->task_complete) {
		if (!ctx->lockless_cq)
			spin_unlock(&ctx->completion_lock);
		/* IOPOLL rings only need to wake up if it's also SQPOLL */
		if (!ctx->syscall_iopoll)
			io_cqring_wake(ctx);
	}
	io_commit_cqring_flush(ctx);
}

static void io_cq_unlock_post(struct io_ring_ctx *ctx)
	__releases(ctx->completion_lock)
{
	io_commit_cqring(ctx);
	spin_unlock(&ctx->completion_lock);
	io_cqring_wake(ctx);
	io_commit_cqring_flush(ctx);
}

static void __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool dying)
{
	size_t cqe_size = sizeof(struct io_uring_cqe);

	lockdep_assert_held(&ctx->uring_lock);

	/* don't abort if we're dying, entries must get freed */
	if (!dying && __io_cqring_events(ctx) == ctx->cq_entries)
		return;

	if (ctx->flags & IORING_SETUP_CQE32)
		cqe_size <<= 1;

	io_cq_lock(ctx);
	while (!list_empty(&ctx->cq_overflow_list)) {
		struct io_uring_cqe *cqe;
		struct io_overflow_cqe *ocqe;

		ocqe = list_first_entry(&ctx->cq_overflow_list,
					struct io_overflow_cqe, list);

		if (!dying) {
			if (!io_get_cqe_overflow(ctx, &cqe, true))
				break;
			memcpy(cqe, &ocqe->cqe, cqe_size);
		}
		list_del(&ocqe->list);
		kfree(ocqe);

		/*
		 * For silly syzbot cases that deliberately overflow by huge
		 * amounts, check if we need to resched and drop and
		 * reacquire the locks if so. Nothing real would ever hit this.
		 * Ideally we'd have a non-posting unlock for this, but hard
		 * to care for a non-real case.
		 */
		if (need_resched()) {
			ctx->cqe_sentinel = ctx->cqe_cached;
			io_cq_unlock_post(ctx);
			mutex_unlock(&ctx->uring_lock);
			cond_resched();
			mutex_lock(&ctx->uring_lock);
			io_cq_lock(ctx);
		}
	}

	if (list_empty(&ctx->cq_overflow_list)) {
		clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
		atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
	}
	io_cq_unlock_post(ctx);
}

static void io_cqring_overflow_kill(struct io_ring_ctx *ctx)
{
	if (ctx->rings)
		__io_cqring_overflow_flush(ctx, true);
}

static void io_cqring_do_overflow_flush(struct io_ring_ctx *ctx)
{
	mutex_lock(&ctx->uring_lock);
	__io_cqring_overflow_flush(ctx, false);
	mutex_unlock(&ctx->uring_lock);
}

/* can be called by any task */
static void io_put_task_remote(struct task_struct *task)
{
	struct io_uring_task *tctx = task->io_uring;

	percpu_counter_sub(&tctx->inflight, 1);
	if (unlikely(atomic_read(&tctx->in_cancel)))
		wake_up(&tctx->wait);
	put_task_struct(task);
}

/* used by a task to put its own references */
static void io_put_task_local(struct task_struct *task)
{
	task->io_uring->cached_refs++;
}

/* must be called somewhat shortly after putting a request */
static inline void io_put_task(struct task_struct *task)
{
	if (likely(task == current))
		io_put_task_local(task);
	else
		io_put_task_remote(task);
}

void io_task_refs_refill(struct io_uring_task *tctx)
{
	unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;

	percpu_counter_add(&tctx->inflight, refill);
	refcount_add(refill, &current->usage);
	tctx->cached_refs += refill;
}

static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
{
	struct io_uring_task *tctx = task->io_uring;
	unsigned int refs = tctx->cached_refs;

	if (refs) {
		tctx->cached_refs = 0;
		percpu_counter_sub(&tctx->inflight, refs);
		put_task_struct_many(task, refs);
	}
}

static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
				     s32 res, u32 cflags, u64 extra1, u64 extra2)
{
	struct io_overflow_cqe *ocqe;
	size_t ocq_size = sizeof(struct io_overflow_cqe);
	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);

	lockdep_assert_held(&ctx->completion_lock);

	if (is_cqe32)
		ocq_size += sizeof(struct io_uring_cqe);

	ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
	trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
	if (!ocqe) {
		/*
		 * If we're in ring overflow flush mode, or in task cancel mode,
		 * or cannot allocate an overflow entry, then we need to drop it
		 * on the floor.
		 */
		io_account_cq_overflow(ctx);
		set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
		return false;
	}
	if (list_empty(&ctx->cq_overflow_list)) {
		set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
		atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);

	}
	ocqe->cqe.user_data = user_data;
	ocqe->cqe.res = res;
	ocqe->cqe.flags = cflags;
	if (is_cqe32) {
		ocqe->cqe.big_cqe[0] = extra1;
		ocqe->cqe.big_cqe[1] = extra2;
	}
	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
	return true;
}

static void io_req_cqe_overflow(struct io_kiocb *req)
{
	io_cqring_event_overflow(req->ctx, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				req->big_cqe.extra1, req->big_cqe.extra2);
	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}

/*
 * writes to the cq entry need to come after reading head; the
 * control dependency is enough as we're using WRITE_ONCE to
 * fill the cq entry
 */
bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow)
{
	struct io_rings *rings = ctx->rings;
	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
	unsigned int free, queued, len;

	/*
	 * Posting into the CQ when there are pending overflowed CQEs may break
	 * ordering guarantees, which will affect links, F_MORE users and more.
	 * Force overflow the completion.
	 */
	if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
		return false;

	/* userspace may cheat modifying the tail, be safe and do min */
	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
	free = ctx->cq_entries - queued;
	/* we need a contiguous range, limit based on the current array offset */
	len = min(free, ctx->cq_entries - off);
	if (!len)
		return false;

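	/* 32-byte CQEs take two slots in cqes[], scale offset and length */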
	if (ctx->flags & IORING_SETUP_CQE32) {
		off <<= 1;
		len <<= 1;
	}

	ctx->cqe_cached = &rings->cqes[off];
	ctx->cqe_sentinel = ctx->cqe_cached + len;
	return true;
}

static bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res,
			      u32 cflags)
{
	struct io_uring_cqe *cqe;

	ctx->cq_extra++;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (likely(io_get_cqe(ctx, &cqe))) {
		trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);

		WRITE_ONCE(cqe->user_data, user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, cflags);

		if (ctx->flags & IORING_SETUP_CQE32) {
			WRITE_ONCE(cqe->big_cqe[0], 0);
			WRITE_ONCE(cqe->big_cqe[1], 0);
		}
		return true;
	}
	return false;
}

static bool __io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res,
			      u32 cflags)
{
	bool filled;

	filled = io_fill_cqe_aux(ctx, user_data, res, cflags);
	if (!filled)
		filled = io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);

	return filled;
}

bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
{
	bool filled;

	io_cq_lock(ctx);
	filled = __io_post_aux_cqe(ctx, user_data, res, cflags);
	io_cq_unlock_post(ctx);
	return filled;
}

/*
 * Must be called from inline task_work so we know a flush will happen later,
 * and obviously with ctx->uring_lock held (tw always has that).
 */
void io_add_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags)
{
	if (!io_fill_cqe_aux(ctx, user_data, res, cflags)) {
		spin_lock(&ctx->completion_lock);
		io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
		spin_unlock(&ctx->completion_lock);
	}
	ctx->submit_state.cq_flush = true;
}

/*
 * A helper for multishot requests posting additional CQEs.
 * Should only be used from a task_work including IO_URING_F_MULTISHOT.
 */
bool io_req_post_cqe(struct io_kiocb *req, s32 res, u32 cflags)
{
	struct io_ring_ctx *ctx = req->ctx;
	bool posted;

	/*
	 * If multishot has already posted deferred completions, ensure that
	 * those are flushed first before posting this one. If not, CQEs
	 * could get reordered.
	 */
	if (!wq_list_empty(&ctx->submit_state.compl_reqs))
		__io_submit_flush_completions(ctx);

	lockdep_assert(!io_wq_current_is_worker());
	lockdep_assert_held(&ctx->uring_lock);

	if (!ctx->lockless_cq) {
		spin_lock(&ctx->completion_lock);
		posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
		spin_unlock(&ctx->completion_lock);
	} else {
		posted = io_fill_cqe_aux(ctx, req->cqe.user_data, res, cflags);
	}

	ctx->submit_state.cq_flush = true;
	return posted;
}

static void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * All execution paths but io-wq use the deferred completions by
	 * passing IO_URING_F_COMPLETE_DEFER and thus should not end up here.
	 */
	if (WARN_ON_ONCE(!(issue_flags & IO_URING_F_IOWQ)))
		return;

	/*
	 * Handle special CQ sync cases via task_work. DEFER_TASKRUN requires
	 * the submitter task context, IOPOLL protects with uring_lock.
	 */
	if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL)) {
		req->io_task_work.func = io_req_task_complete;
		io_req_task_work_add(req);
		return;
	}

	io_cq_lock(ctx);
	if (!(req->flags & REQ_F_CQE_SKIP)) {
		if (!io_fill_cqe_req(ctx, req))
			io_req_cqe_overflow(req);
	}
	io_cq_unlock_post(ctx);

	/*
	 * We don't free the request here because we know it's called from
	 * io-wq only, which holds a reference, so it cannot be the last put.
	 */
	req_ref_put(req);
}

void io_req_defer_failed(struct io_kiocb *req, s32 res)
	__must_hold(&ctx->uring_lock)
{
	const struct io_cold_def *def = &io_cold_defs[req->opcode];

	lockdep_assert_held(&req->ctx->uring_lock);

	req_set_fail(req);
	io_req_set_res(req, res, io_put_kbuf(req, res, IO_URING_F_UNLOCKED));
	if (def->fail)
		def->fail(req);
	io_req_complete_defer(req);
}

/*
 * Don't initialise the fields below on every allocation, but do that in
 * advance and keep them valid across allocations.
 */
static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
{
	req->ctx = ctx;
	req->link = NULL;
	req->async_data = NULL;
	/* not necessary, but safer to zero */
	memset(&req->cqe, 0, sizeof(req->cqe));
	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}

/*
 * A request might get retired back into the request caches even before opcode
 * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
 * Because of that, io_alloc_req() should be called only under ->uring_lock
 * and with extra caution to not get a request that is still worked on.
 */
__cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	void *reqs[IO_REQ_ALLOC_BATCH];
	int ret;

	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);

	/*
	 * Bulk alloc is all-or-nothing. If we fail to get a batch,
	 * retry single alloc to be on the safe side.
	 */
	if (unlikely(ret <= 0)) {
		reqs[0] = kmem_cache_alloc(req_cachep, gfp);
		if (!reqs[0])
			return false;
		ret = 1;
	}

	percpu_ref_get_many(&ctx->refs, ret);
	while (ret--) {
		struct io_kiocb *req = reqs[ret];

		io_preinit_req(req, ctx);
		io_req_add_to_cache(req, ctx);
	}
	return true;
}

__cold void io_free_req(struct io_kiocb *req)
{
	/* refs were already put, restore them for io_req_task_complete() */
	req->flags &= ~REQ_F_REFCOUNT;
	/* we only want to free it, don't post CQEs */
	req->flags |= REQ_F_CQE_SKIP;
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

static void __io_req_find_next_prep(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock(&ctx->completion_lock);
	io_disarm_next(req);
	spin_unlock(&ctx->completion_lock);
}

static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt;

	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (unlikely(req->flags & IO_DISARM_MASK))
		__io_req_find_next_prep(req);
	nxt = req->link;
	req->link = NULL;
	return nxt;
}

static void ctx_flush_and_put(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	if (!ctx)
		return;
	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);

	io_submit_flush_completions(ctx);
	mutex_unlock(&ctx->uring_lock);
	percpu_ref_put(&ctx->refs);
}

/*
 * Run queued task_work, returning the number of entries processed in *count.
 * If more entries than max_entries are available, stop processing once this
 * is reached and return the rest of the list.
 */
struct llist_node *io_handle_tw_list(struct llist_node *node,
				     unsigned int *count,
				     unsigned int max_entries)
{
	struct io_ring_ctx *ctx = NULL;
	struct io_tw_state ts = { };

	do {
		struct llist_node *next = node->next;
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);

		if (req->ctx != ctx) {
			ctx_flush_and_put(ctx, &ts);
			ctx = req->ctx;
			mutex_lock(&ctx->uring_lock);
			percpu_ref_get(&ctx->refs);
		}
		INDIRECT_CALL_2(req->io_task_work.func,
				io_poll_task_func, io_req_rw_complete,
				req, &ts);
		node = next;
		(*count)++;
		if (unlikely(need_resched())) {
			ctx_flush_and_put(ctx, &ts);
			ctx = NULL;
			cond_resched();
		}
	} while (node && *count < max_entries);

	ctx_flush_and_put(ctx, &ts);
	return node;
}

/**
 * io_llist_xchg - swap all entries in a lock-less list
 * @head:	the head of lock-less list to delete all entries
 * @new:	new entry as the head of the list
 *
 * If list is empty, return NULL, otherwise, return the pointer to the first entry.
 * The order of entries returned is from the newest to the oldest added one.
 */
static inline struct llist_node *io_llist_xchg(struct llist_head *head,
					       struct llist_node *new)
{
	return xchg(&head->first, new);
}

static __cold void io_fallback_tw(struct io_uring_task *tctx, bool sync)
{
	struct llist_node *node = llist_del_all(&tctx->task_list);
	struct io_ring_ctx *last_ctx = NULL;
	struct io_kiocb *req;

	while (node) {
		req = container_of(node, struct io_kiocb, io_task_work.node);
		node = node->next;
		if (last_ctx != req->ctx) {
			if (last_ctx) {
				if (sync)
					flush_delayed_work(&last_ctx->fallback_work);
				percpu_ref_put(&last_ctx->refs);
			}
			last_ctx = req->ctx;
			percpu_ref_get(&last_ctx->refs);
		}
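		/* llist_add() returns true only when the list was empty, i.e.
		 * the first queued entry (re)arms the fallback work */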
		if (llist_add(&req->io_task_work.node, &last_ctx->fallback_llist))
			schedule_delayed_work(&last_ctx->fallback_work, 1);
	}

	if (last_ctx) {
		if (sync)
			flush_delayed_work(&last_ctx->fallback_work);
		percpu_ref_put(&last_ctx->refs);
	}
}

struct llist_node *tctx_task_work_run(struct io_uring_task *tctx,
				      unsigned int max_entries,
				      unsigned int *count)
{
	struct llist_node *node;

	if (unlikely(current->flags & PF_EXITING)) {
		io_fallback_tw(tctx, true);
		return NULL;
	}

	node = llist_del_all(&tctx->task_list);
	if (node) {
		node = llist_reverse_order(node);
		node = io_handle_tw_list(node, count, max_entries);
	}

	/* relaxed read is enough as only the task itself sets ->in_cancel */
	if (unlikely(atomic_read(&tctx->in_cancel)))
		io_uring_drop_tctx_refs(current);

	trace_io_uring_task_work_run(tctx, *count);
	return node;
}

void tctx_task_work(struct callback_head *cb)
{
	struct io_uring_task *tctx;
	struct llist_node *ret;
	unsigned int count = 0;

	tctx = container_of(cb, struct io_uring_task, task_work);
	ret = tctx_task_work_run(tctx, UINT_MAX, &count);
	/* can't happen */
	WARN_ON_ONCE(ret);
}

static inline void io_req_local_work_add(struct io_kiocb *req,
					 struct io_ring_ctx *ctx,
					 unsigned flags)
{
	unsigned nr_wait, nr_tw, nr_tw_prev;
	struct llist_node *head;

	/* See comment above IO_CQ_WAKE_INIT */
	BUILD_BUG_ON(IO_CQ_WAKE_FORCE <= IORING_MAX_CQ_ENTRIES);

	/*
	 * We don't know how many requests there are in the link and whether
	 * they can even be queued lazily, fall back to non-lazy.
	 */
	if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK))
		flags &= ~IOU_F_TWQ_LAZY_WAKE;

	guard(rcu)();

	head = READ_ONCE(ctx->work_llist.first);
	do {
		nr_tw_prev = 0;
		if (head) {
			struct io_kiocb *first_req = container_of(head,
							struct io_kiocb,
							io_task_work.node);
			/*
			 * Might be executed at any moment, rely on
			 * SLAB_TYPESAFE_BY_RCU to keep it alive.
			 */
			nr_tw_prev = READ_ONCE(first_req->nr_tw);
		}

		/*
		 * Theoretically, it can overflow, but that's fine as one of
		 * the previous adds should've tried to wake the task.
		 */
		nr_tw = nr_tw_prev + 1;
		if (!(flags & IOU_F_TWQ_LAZY_WAKE))
			nr_tw = IO_CQ_WAKE_FORCE;

		req->nr_tw = nr_tw;
		req->io_task_work.node.next = head;
	} while (!try_cmpxchg(&ctx->work_llist.first, &head,
			      &req->io_task_work.node));

	/*
	 * cmpxchg implies a full barrier, which pairs with the barrier
	 * in set_current_state() on the io_cqring_wait() side. It's used
	 * to ensure that either we see updated ->cq_wait_nr, or waiters
	 * going to sleep will observe the work added to the list, which
	 * is similar to the wait/wake task state sync.
	 */

	if (!head) {
		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
			atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
		if (ctx->has_evfd)
			io_eventfd_signal(ctx);
	}

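	/*
	 * E.g. a waiter batching N completions sets ->cq_wait_nr to N: the
	 * wakeup below fires once nr_tw reaches N, and only once thanks to
	 * the nr_tw_prev check. IO_CQ_WAKE_FORCE exceeds any valid waiter
	 * count, so it always wakes an actual waiter.
	 */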
	nr_wait = atomic_read(&ctx->cq_wait_nr);
	/* not enough or no one is waiting */
	if (nr_tw < nr_wait)
		return;
	/* the previous add has already woken it up */
	if (nr_tw_prev >= nr_wait)
		return;
	wake_up_state(ctx->submitter_task, TASK_INTERRUPTIBLE);
}

static void io_req_normal_work_add(struct io_kiocb *req)
{
	struct io_uring_task *tctx = req->task->io_uring;
	struct io_ring_ctx *ctx = req->ctx;

	/* task_work already pending, we're done */
	if (!llist_add(&req->io_task_work.node, &tctx->task_list))
		return;

	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);

	/* SQPOLL doesn't need the task_work added, it'll run it itself */
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		__set_notify_signal(req->task);
		return;
	}

	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
		return;

	io_fallback_tw(tctx, false);
}

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags)
{
	if (req->ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		io_req_local_work_add(req, req->ctx, flags);
	else
		io_req_normal_work_add(req);
}

void io_req_task_work_add_remote(struct io_kiocb *req, struct io_ring_ctx *ctx,
				 unsigned flags)
{
	if (WARN_ON_ONCE(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN)))
		return;
	io_req_local_work_add(req, ctx, flags);
}

static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
{
	struct llist_node *node;

	node = llist_del_all(&ctx->work_llist);
	while (node) {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);

		node = node->next;
		io_req_normal_work_add(req);
	}
}

static bool io_run_local_work_continue(struct io_ring_ctx *ctx, int events,
				       int min_events)
{
	if (llist_empty(&ctx->work_llist))
		return false;
	if (events < min_events)
		return true;
	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
	return false;
}

static int __io_run_local_work(struct io_ring_ctx *ctx, struct io_tw_state *ts,
			       int min_events)
{
	struct llist_node *node;
	unsigned int loops = 0;
	int ret = 0;

	if (WARN_ON_ONCE(ctx->submitter_task != current))
		return -EEXIST;
	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
again:
	/*
	 * llists are in reverse order, flip it back the right way before
	 * running the pending items.
	 */
	node = llist_reverse_order(io_llist_xchg(&ctx->work_llist, NULL));
	while (node) {
		struct llist_node *next = node->next;
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    io_task_work.node);
		INDIRECT_CALL_2(req->io_task_work.func,
				io_poll_task_func, io_req_rw_complete,
				req, ts);
		ret++;
		node = next;
	}
	loops++;

	if (io_run_local_work_continue(ctx, ret, min_events))
		goto again;
	io_submit_flush_completions(ctx);
	if (io_run_local_work_continue(ctx, ret, min_events))
		goto again;

	trace_io_uring_local_work_run(ctx, ret, loops);
	return ret;
}

static inline int io_run_local_work_locked(struct io_ring_ctx *ctx,
					   int min_events)
{
	struct io_tw_state ts = {};

	if (llist_empty(&ctx->work_llist))
		return 0;
	return __io_run_local_work(ctx, &ts, min_events);
}

static int io_run_local_work(struct io_ring_ctx *ctx, int min_events)
{
	struct io_tw_state ts = {};
	int ret;

	mutex_lock(&ctx->uring_lock);
	ret = __io_run_local_work(ctx, &ts, min_events);
	mutex_unlock(&ctx->uring_lock);
	return ret;
}

static void io_req_task_cancel(struct io_kiocb *req, struct io_tw_state *ts)
{
	io_tw_lock(req->ctx, ts);
	io_req_defer_failed(req, req->cqe.res);
}

void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_ring_ctx *ctx = req->ctx;

	io_tw_lock(ctx, ts);
	if (unlikely(io_should_terminate_tw(ctx)))
		io_req_defer_failed(req, -EFAULT);
	else if (req->flags & REQ_F_FORCE_ASYNC)
		io_queue_iowq(req);
	else
		io_queue_sqe(req);
}

void io_req_task_queue_fail(struct io_kiocb *req, int ret)
{
	io_req_set_res(req, ret, 0);
	req->io_task_work.func = io_req_task_cancel;
	io_req_task_work_add(req);
}

void io_req_task_queue(struct io_kiocb *req)
{
	req->io_task_work.func = io_req_task_submit;
	io_req_task_work_add(req);
}

void io_queue_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt = io_req_find_next(req);

	if (nxt)
		io_req_task_queue(nxt);
}

static void io_free_batch_list(struct io_ring_ctx *ctx,
			       struct io_wq_work_node *node)
	__must_hold(&ctx->uring_lock)
{
	do {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
						    comp_list);

		if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
			if (req->flags & REQ_F_REFCOUNT) {
				node = req->comp_list.next;
				if (!req_ref_put_and_test(req))
					continue;
			}
			if ((req->flags & REQ_F_POLLED) && req->apoll) {
				struct async_poll *apoll = req->apoll;

				if (apoll->double_poll)
					kfree(apoll->double_poll);
				if (!io_alloc_cache_put(&ctx->apoll_cache, apoll))
					kfree(apoll);
				req->flags &= ~REQ_F_POLLED;
			}
			if (req->flags & IO_REQ_LINK_FLAGS)
				io_queue_next(req);
			if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
				io_clean_op(req);
		}
		io_put_file(req);
		io_put_rsrc_node(ctx, req->rsrc_node);
		io_put_task(req->task);

		node = req->comp_list.next;
		io_req_add_to_cache(req, ctx);
	} while (node);
}

void __io_submit_flush_completions(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_submit_state *state = &ctx->submit_state;
	struct io_wq_work_node *node;

	__io_cq_lock(ctx);
	__wq_list_for_each(node, &state->compl_reqs) {
		struct io_kiocb *req = container_of(node, struct io_kiocb,
					    comp_list);

		if (!(req->flags & REQ_F_CQE_SKIP) &&
		    unlikely(!io_fill_cqe_req(ctx, req))) {
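			/* ->cq_overflow_list is completion_lock protected
			 * even on lockless_cq rings */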
			if (ctx->lockless_cq) {
				spin_lock(&ctx->completion_lock);
				io_req_cqe_overflow(req);
				spin_unlock(&ctx->completion_lock);
			} else {
				io_req_cqe_overflow(req);
			}
		}
	}
	__io_cq_unlock_post(ctx);

	if (!wq_list_empty(&state->compl_reqs)) {
		io_free_batch_list(ctx, state->compl_reqs.first);
		INIT_WQ_LIST(&state->compl_reqs);
	}
	ctx->submit_state.cq_flush = false;
}

static unsigned io_cqring_events(struct io_ring_ctx *ctx)
{
	/* See comment at the top of this file */
	smp_rmb();
	return __io_cqring_events(ctx);
}

/*
 * We can't just wait for polled events to come to us, we have to actively
 * find and complete them.
 */
static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_IOPOLL))
		return;

	mutex_lock(&ctx->uring_lock);
	while (!wq_list_empty(&ctx->iopoll_list)) {
		/* let it sleep and repeat later if can't complete a request */
		if (io_do_iopoll(ctx, true) == 0)
			break;
		/*
		 * Ensure we allow local-to-the-cpu processing to take place,
		 * in this case we need to ensure that we reap all events.
		 * Also let task_work, etc. to progress by releasing the mutex
		 */
		if (need_resched()) {
			mutex_unlock(&ctx->uring_lock);
			cond_resched();
			mutex_lock(&ctx->uring_lock);
		}
	}
	mutex_unlock(&ctx->uring_lock);
}

static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
{
	unsigned int nr_events = 0;
	unsigned long check_cq;

	lockdep_assert_held(&ctx->uring_lock);

	if (!io_allowed_run_tw(ctx))
		return -EEXIST;

	check_cq = READ_ONCE(ctx->check_cq);
	if (unlikely(check_cq)) {
		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
			__io_cqring_overflow_flush(ctx, false);
		/*
		 * Similarly do not spin if we have not informed the user of any
		 * dropped CQE.
		 */
		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
			return -EBADR;
	}
	/*
	 * Don't enter poll loop if we already have events pending.
	 * If we do, we can potentially be spinning for commands that
	 * already triggered a CQE (eg in error).
	 */
	if (io_cqring_events(ctx))
		return 0;

	do {
		int ret = 0;

		/*
		 * If a submit got punted to a workqueue, we can have the
		 * application entering polling for a command before it gets
		 * issued. That app will hold the uring_lock for the duration
		 * of the poll right here, so we need to take a breather every
		 * now and then to ensure that the issue has a chance to add
		 * the poll to the issued list. Otherwise we can spin here
		 * forever, while the workqueue is stuck trying to acquire the
		 * very same mutex.
		 */
		if (wq_list_empty(&ctx->iopoll_list) ||
		    io_task_work_pending(ctx)) {
			u32 tail = ctx->cached_cq_tail;

			(void) io_run_local_work_locked(ctx, min);

			if (task_work_pending(current) ||
			    wq_list_empty(&ctx->iopoll_list)) {
				mutex_unlock(&ctx->uring_lock);
				io_run_task_work();
				mutex_lock(&ctx->uring_lock);
			}
			/* some requests don't go through iopoll_list */
			if (tail != ctx->cached_cq_tail ||
			    wq_list_empty(&ctx->iopoll_list))
				break;
		}
		ret = io_do_iopoll(ctx, !min);
		if (unlikely(ret < 0))
			return ret;

		if (task_sigpending(current))
			return -EINTR;
		if (need_resched())
			break;

		nr_events += ret;
	} while (nr_events < min);

	return 0;
}

void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	io_req_complete_defer(req);
}

/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from a io_do_iopoll() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_ring_ctx *ctx = req->ctx;
	const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;

	/* workqueue context doesn't hold uring_lock, grab it now */
	if (unlikely(needs_lock))
		mutex_lock(&ctx->uring_lock);

	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */
	if (wq_list_empty(&ctx->iopoll_list)) {
		ctx->poll_multi_queue = false;
	} else if (!ctx->poll_multi_queue) {
		struct io_kiocb *list_req;

		list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
					comp_list);
		if (list_req->file != req->file)
			ctx->poll_multi_queue = true;
	}

	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */
	if (READ_ONCE(req->iopoll_completed))
		wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
	else
		wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);

	if (unlikely(needs_lock)) {
		/*
		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
		 * in the sq thread task context or in an io worker task
		 * context. If the current task context is the sq thread, we
		 * don't need to check whether we should wake it up.
		 */
		if ((ctx->flags & IORING_SETUP_SQPOLL) &&
		    wq_has_sleeper(&ctx->sq_data->wait))
			wake_up(&ctx->sq_data->wait);

		mutex_unlock(&ctx->uring_lock);
	}
}

io_req_flags_t io_file_get_flags(struct file *file)
{
	io_req_flags_t res = 0;

	if (S_ISREG(file_inode(file)->i_mode))
		res |= REQ_F_ISREG;
	if ((file->f_flags & O_NONBLOCK) || (file->f_mode & FMODE_NOWAIT))
		res |= REQ_F_SUPPORT_NOWAIT;
	return res;
}

bool io_alloc_async_data(struct io_kiocb *req)
{
	const struct io_issue_def *def = &io_issue_defs[req->opcode];

	WARN_ON_ONCE(!def->async_size);
	req->async_data = kmalloc(def->async_size, GFP_KERNEL);
	if (req->async_data) {
		req->flags |= REQ_F_ASYNC_DATA;
		return false;
	}
	return true;
}

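/*
 * Recover the SQ position this request was submitted at: cached_sq_head was
 * bumped once per picked request, so walking the link chain backs those
 * increments out (e.g. a head submitted at H with two linked requests sees
 * cached_sq_head == H + 3 and gets H back).
 */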
io_get_sequence(struct io_kiocb * req)1654 static u32 io_get_sequence(struct io_kiocb *req)
1655 {
1656 	u32 seq = req->ctx->cached_sq_head;
1657 	struct io_kiocb *cur;
1658 
1659 	/* need original cached_sq_head, but it was increased for each req */
1660 	io_for_each_link(cur, req)
1661 		seq--;
1662 	return seq;
1663 }
1664 
io_drain_req(struct io_kiocb * req)1665 static __cold void io_drain_req(struct io_kiocb *req)
1666 	__must_hold(&ctx->uring_lock)
1667 {
1668 	struct io_ring_ctx *ctx = req->ctx;
1669 	struct io_defer_entry *de;
1670 	int ret;
1671 	u32 seq = io_get_sequence(req);
1672 
1673 	/* Still need defer if there is pending req in defer list. */
1674 	spin_lock(&ctx->completion_lock);
1675 	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
1676 		spin_unlock(&ctx->completion_lock);
1677 queue:
1678 		ctx->drain_active = false;
1679 		io_req_task_queue(req);
1680 		return;
1681 	}
1682 	spin_unlock(&ctx->completion_lock);
1683 
1684 	io_prep_async_link(req);
1685 	de = kmalloc(sizeof(*de), GFP_KERNEL_ACCOUNT);
1686 	if (!de) {
1687 		ret = -ENOMEM;
1688 		io_req_defer_failed(req, ret);
1689 		return;
1690 	}
1691 
1692 	spin_lock(&ctx->completion_lock);
1693 	if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
1694 		spin_unlock(&ctx->completion_lock);
1695 		kfree(de);
1696 		goto queue;
1697 	}
1698 
1699 	trace_io_uring_defer(req);
1700 	de->req = req;
1701 	de->seq = seq;
1702 	list_add_tail(&de->list, &ctx->defer_list);
1703 	spin_unlock(&ctx->completion_lock);
1704 }
1705 
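/*
 * Late file assignment: resolve req->file from the fixed file table or via
 * a regular fd lookup, if the opcode needs a file and none is set yet.
 */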
1706 static bool io_assign_file(struct io_kiocb *req, const struct io_issue_def *def,
1707 			   unsigned int issue_flags)
1708 {
1709 	if (req->file || !def->needs_file)
1710 		return true;
1711 
1712 	if (req->flags & REQ_F_FIXED_FILE)
1713 		req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
1714 	else
1715 		req->file = io_file_get_normal(req, req->cqe.fd);
1716 
1717 	return !!req->file;
1718 }
1719 
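/* request flags that need extra setup/teardown around ->issue() */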
1720 #define REQ_ISSUE_SLOW_FLAGS	(REQ_F_CREDS | REQ_F_ARM_LTIMEOUT)
1721 
1722 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
1723 {
1724 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
1725 	const struct cred *creds = NULL;
1726 	struct io_kiocb *link = NULL;
1727 	int ret;
1728 
1729 	if (unlikely(!io_assign_file(req, def, issue_flags)))
1730 		return -EBADF;
1731 
1732 	if (unlikely(req->flags & REQ_ISSUE_SLOW_FLAGS)) {
1733 		if ((req->flags & REQ_F_CREDS) && req->creds != current_cred())
1734 			creds = override_creds(req->creds);
1735 		if (req->flags & REQ_F_ARM_LTIMEOUT)
1736 			link = __io_prep_linked_timeout(req);
1737 	}
1738 
1739 	if (!def->audit_skip)
1740 		audit_uring_entry(req->opcode);
1741 
1742 	ret = def->issue(req, issue_flags);
1743 
1744 	if (!def->audit_skip)
1745 		audit_uring_exit(!ret, ret);
1746 
1747 	if (unlikely(creds || link)) {
1748 		if (creds)
1749 			revert_creds(creds);
1750 		if (link)
1751 			io_queue_linked_timeout(link);
1752 	}
1753 
1754 	if (ret == IOU_OK) {
1755 		if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1756 			io_req_complete_defer(req);
1757 		else
1758 			io_req_complete_post(req, issue_flags);
1759 
1760 		return 0;
1761 	}
1762 
1763 	if (ret == IOU_ISSUE_SKIP_COMPLETE) {
1764 		ret = 0;
1765 
1766 		/* If the op doesn't have a file, we're not polling for it */
1767 		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
1768 			io_iopoll_req_issued(req, issue_flags);
1769 	}
1770 	return ret;
1771 }
1772 
1773 int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts)
1774 {
1775 	io_tw_lock(req->ctx, ts);
1776 	return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT|
1777 				 IO_URING_F_COMPLETE_DEFER);
1778 }
1779 
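/*
 * io-wq is done with this work item: drop its request reference and, if that
 * was the last one, free the request and return the next linked work item
 * for io-wq to run.
 */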
1780 struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
1781 {
1782 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1783 	struct io_kiocb *nxt = NULL;
1784 
1785 	if (req_ref_put_and_test_atomic(req)) {
1786 		if (req->flags & IO_REQ_LINK_FLAGS)
1787 			nxt = io_req_find_next(req);
1788 		io_free_req(req);
1789 	}
1790 	return nxt ? &nxt->work : NULL;
1791 }
1792 
1793 void io_wq_submit_work(struct io_wq_work *work)
1794 {
1795 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1796 	const struct io_issue_def *def = &io_issue_defs[req->opcode];
1797 	unsigned int issue_flags = IO_URING_F_UNLOCKED | IO_URING_F_IOWQ;
1798 	bool needs_poll = false;
1799 	int ret = 0, err = -ECANCELED;
1800 
1801 	/* one will be dropped by ->io_wq_free_work() after returning to io-wq */
1802 	if (!(req->flags & REQ_F_REFCOUNT))
1803 		__io_req_set_refcount(req, 2);
1804 	else
1805 		req_ref_get(req);
1806 
1807 	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
1808 	if (atomic_read(&work->flags) & IO_WQ_WORK_CANCEL) {
1809 fail:
1810 		io_req_task_queue_fail(req, err);
1811 		return;
1812 	}
1813 	if (!io_assign_file(req, def, issue_flags)) {
1814 		err = -EBADF;
1815 		atomic_or(IO_WQ_WORK_CANCEL, &work->flags);
1816 		goto fail;
1817 	}
1818 
1819 	/*
1820 	 * If DEFER_TASKRUN is set, it's only allowed to post CQEs from the
1821 	 * submitter task context. Final request completions are handed to the
1822 	 * right context, however this is not the case for auxiliary CQEs,
1823 	 * which are the main means of operation for multishot requests.
1824 	 * Don't allow any multishot execution from io-wq. It's more restrictive
1825 	 * than necessary and also cleaner.
1826 	 */
1827 	if (req->flags & (REQ_F_MULTISHOT|REQ_F_APOLL_MULTISHOT)) {
1828 		err = -EBADFD;
1829 		if (!io_file_can_poll(req))
1830 			goto fail;
1831 		if (req->file->f_flags & O_NONBLOCK ||
1832 		    req->file->f_mode & FMODE_NOWAIT) {
1833 			err = -ECANCELED;
1834 			if (io_arm_poll_handler(req, issue_flags) != IO_APOLL_OK)
1835 				goto fail;
1836 			return;
1837 		} else {
1838 			req->flags &= ~(REQ_F_APOLL_MULTISHOT|REQ_F_MULTISHOT);
1839 		}
1840 	}
1841 
1842 	if (req->flags & REQ_F_FORCE_ASYNC) {
1843 		bool opcode_poll = def->pollin || def->pollout;
1844 
1845 		if (opcode_poll && io_file_can_poll(req)) {
1846 			needs_poll = true;
1847 			issue_flags |= IO_URING_F_NONBLOCK;
1848 		}
1849 	}
1850 
1851 	do {
1852 		ret = io_issue_sqe(req, issue_flags);
1853 		if (ret != -EAGAIN)
1854 			break;
1855 
1856 		/*
1857 		 * If REQ_F_NOWAIT is set, then don't wait or retry with
1858 		 * poll. -EAGAIN is final for that case.
1859 		 */
1860 		if (req->flags & REQ_F_NOWAIT)
1861 			break;
1862 
1863 		/*
1864 		 * We can get EAGAIN for iopolled IO even though we're
1865 		 * forcing a sync submission from here, since we can't
1866 		 * wait for request slots on the block side.
1867 		 */
1868 		if (!needs_poll) {
1869 			if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
1870 				break;
1871 			if (io_wq_worker_stopped())
1872 				break;
1873 			cond_resched();
1874 			continue;
1875 		}
1876 
1877 		if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
1878 			return;
1879 		/* aborted or ready, in either case retry blocking */
1880 		needs_poll = false;
1881 		issue_flags &= ~IO_URING_F_NONBLOCK;
1882 	} while (1);
1883 
1884 	/* avoid locking problems by failing it from a clean context */
1885 	if (ret)
1886 		io_req_task_queue_fail(req, ret);
1887 }
1888 
1889 inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
1890 				      unsigned int issue_flags)
1891 {
1892 	struct io_ring_ctx *ctx = req->ctx;
1893 	struct io_fixed_file *slot;
1894 	struct file *file = NULL;
1895 
1896 	io_ring_submit_lock(ctx, issue_flags);
1897 
1898 	if (unlikely((unsigned int)fd >= ctx->nr_user_files))
1899 		goto out;
1900 	fd = array_index_nospec(fd, ctx->nr_user_files);
1901 	slot = io_fixed_file_slot(&ctx->file_table, fd);
1902 	if (!req->rsrc_node)
1903 		__io_req_set_rsrc_node(req, ctx);
1904 	req->flags |= io_slot_flags(slot);
1905 	file = io_slot_file(slot);
1906 out:
1907 	io_ring_submit_unlock(ctx, issue_flags);
1908 	return file;
1909 }
1910 
1911 struct file *io_file_get_normal(struct io_kiocb *req, int fd)
1912 {
1913 	struct file *file = fget(fd);
1914 
1915 	trace_io_uring_file_get(req, fd);
1916 
1917 	/* we don't allow fixed io_uring files */
1918 	if (file && io_is_uring_fops(file))
1919 		io_req_track_inflight(req);
1920 	return file;
1921 }
1922 
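/*
 * Inline issue didn't complete: fail the request unless it returned -EAGAIN
 * and may block, otherwise try to arm poll for the file and fall back to
 * the io-wq worker pool if arming was aborted.
 */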
1923 static void io_queue_async(struct io_kiocb *req, int ret)
1924 	__must_hold(&req->ctx->uring_lock)
1925 {
1926 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
1927 		io_req_defer_failed(req, ret);
1928 		return;
1929 	}
1930 
1931 	switch (io_arm_poll_handler(req, 0)) {
1932 	case IO_APOLL_READY:
1933 		io_kbuf_recycle(req, 0);
1934 		io_req_task_queue(req);
1935 		break;
1936 	case IO_APOLL_ABORTED:
1937 		io_kbuf_recycle(req, 0);
1938 		io_queue_iowq(req);
1939 		break;
1940 	case IO_APOLL_OK:
1941 		break;
1942 	}
1943 }
1944 
1945 static inline void io_queue_sqe(struct io_kiocb *req)
1946 	__must_hold(&req->ctx->uring_lock)
1947 {
1948 	int ret;
1949 
1950 	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
1951 
1952 	/*
1953 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
1954 	 * doesn't support non-blocking read/write attempts
1955 	 */
1956 	if (unlikely(ret))
1957 		io_queue_async(req, ret);
1958 }
1959 
1960 static void io_queue_sqe_fallback(struct io_kiocb *req)
1961 	__must_hold(&req->ctx->uring_lock)
1962 {
1963 	if (unlikely(req->flags & REQ_F_FAIL)) {
1964 		/*
1965 		 * We don't submit, fail them all; to do that, replace hardlinks
1966 		 * with normal links. An extra REQ_F_LINK is tolerated.
1967 		 */
1968 		req->flags &= ~REQ_F_HARDLINK;
1969 		req->flags |= REQ_F_LINK;
1970 		io_req_defer_failed(req, req->cqe.res);
1971 	} else {
1972 		if (unlikely(req->ctx->drain_active))
1973 			io_drain_req(req);
1974 		else
1975 			io_queue_iowq(req);
1976 	}
1977 }
1978 
1979 /*
1980  * Check SQE restrictions (opcode and flags).
1981  *
1982  * Returns 'true' if SQE is allowed, 'false' otherwise.
1983  */
1984 static inline bool io_check_restriction(struct io_ring_ctx *ctx,
1985 					struct io_kiocb *req,
1986 					unsigned int sqe_flags)
1987 {
1988 	if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
1989 		return false;
1990 
1991 	if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
1992 	    ctx->restrictions.sqe_flags_required)
1993 		return false;
1994 
1995 	if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
1996 			  ctx->restrictions.sqe_flags_required))
1997 		return false;
1998 
1999 	return true;
2000 }
2001 
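/*
 * An IOSQE_IO_DRAIN request was seen: mark draining active, and if we are in
 * the middle of assembling a link, force the link head async and draining so
 * the whole chain observes the drain.
 */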
2002 static void io_init_req_drain(struct io_kiocb *req)
2003 {
2004 	struct io_ring_ctx *ctx = req->ctx;
2005 	struct io_kiocb *head = ctx->submit_state.link.head;
2006 
2007 	ctx->drain_active = true;
2008 	if (head) {
2009 		/*
2010 		 * If we need to drain a request in the middle of a link, drain
2011 		 * the head request and the next request/link after the current
2012 		 * link. Considering sequential execution of links,
2013 		 * REQ_F_IO_DRAIN will be maintained for every request of our
2014 		 * link.
2015 		 */
2016 		head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2017 		ctx->drain_next = true;
2018 	}
2019 }
2020 
2021 static __cold int io_init_fail_req(struct io_kiocb *req, int err)
2022 {
2023 	/* ensure per-opcode data is cleared if we fail before prep */
2024 	memset(&req->cmd.data, 0, sizeof(req->cmd.data));
2025 	return err;
2026 }
2027 
2028 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
2029 		       const struct io_uring_sqe *sqe)
2030 	__must_hold(&ctx->uring_lock)
2031 {
2032 	const struct io_issue_def *def;
2033 	unsigned int sqe_flags;
2034 	int personality;
2035 	u8 opcode;
2036 
2037 	/* req is partially pre-initialised, see io_preinit_req() */
2038 	req->opcode = opcode = READ_ONCE(sqe->opcode);
2039 	/* same numerical values with corresponding REQ_F_*, safe to copy */
2040 	sqe_flags = READ_ONCE(sqe->flags);
2041 	req->flags = (__force io_req_flags_t) sqe_flags;
2042 	req->cqe.user_data = READ_ONCE(sqe->user_data);
2043 	req->file = NULL;
2044 	req->rsrc_node = NULL;
2045 	req->task = current;
2046 	req->cancel_seq_set = false;
2047 
2048 	if (unlikely(opcode >= IORING_OP_LAST)) {
2049 		req->opcode = 0;
2050 		return io_init_fail_req(req, -EINVAL);
2051 	}
2052 	opcode = array_index_nospec(opcode, IORING_OP_LAST);
2053 
2054 	def = &io_issue_defs[opcode];
2055 	if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
2056 		/* enforce forwards compatibility on users */
2057 		if (sqe_flags & ~SQE_VALID_FLAGS)
2058 			return io_init_fail_req(req, -EINVAL);
2059 		if (sqe_flags & IOSQE_BUFFER_SELECT) {
2060 			if (!def->buffer_select)
2061 				return io_init_fail_req(req, -EOPNOTSUPP);
2062 			req->buf_index = READ_ONCE(sqe->buf_group);
2063 		}
2064 		if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
2065 			ctx->drain_disabled = true;
2066 		if (sqe_flags & IOSQE_IO_DRAIN) {
2067 			if (ctx->drain_disabled)
2068 				return io_init_fail_req(req, -EOPNOTSUPP);
2069 			io_init_req_drain(req);
2070 		}
2071 	}
2072 	if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
2073 		if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
2074 			return io_init_fail_req(req, -EACCES);
2075 		/* knock it to the slow queue path, will be drained there */
2076 		if (ctx->drain_active)
2077 			req->flags |= REQ_F_FORCE_ASYNC;
2078 		/* if there is no link, we're at "next" request and need to drain */
2079 		if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
2080 			ctx->drain_next = false;
2081 			ctx->drain_active = true;
2082 			req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2083 		}
2084 	}
2085 
2086 	if (!def->ioprio && sqe->ioprio)
2087 		return io_init_fail_req(req, -EINVAL);
2088 	if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
2089 		return io_init_fail_req(req, -EINVAL);
2090 
2091 	if (def->needs_file) {
2092 		struct io_submit_state *state = &ctx->submit_state;
2093 
2094 		req->cqe.fd = READ_ONCE(sqe->fd);
2095 
2096 		/*
2097 		 * Plug now if we have more than 2 IO left after this, and the
2098 		 * target is potentially a read/write to block based storage.
2099 		 */
2100 		if (state->need_plug && def->plug) {
2101 			state->plug_started = true;
2102 			state->need_plug = false;
2103 			blk_start_plug_nr_ios(&state->plug, state->submit_nr);
2104 		}
2105 	}
2106 
2107 	personality = READ_ONCE(sqe->personality);
2108 	if (personality) {
2109 		int ret;
2110 
2111 		req->creds = xa_load(&ctx->personalities, personality);
2112 		if (!req->creds)
2113 			return io_init_fail_req(req, -EINVAL);
2114 		get_cred(req->creds);
2115 		ret = security_uring_override_creds(req->creds);
2116 		if (ret) {
2117 			put_cred(req->creds);
2118 			return io_init_fail_req(req, ret);
2119 		}
2120 		req->flags |= REQ_F_CREDS;
2121 	}
2122 
2123 	return def->prep(req, sqe);
2124 }
2125 
2126 static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
2127 				      struct io_kiocb *req, int ret)
2128 {
2129 	struct io_ring_ctx *ctx = req->ctx;
2130 	struct io_submit_link *link = &ctx->submit_state.link;
2131 	struct io_kiocb *head = link->head;
2132 
2133 	trace_io_uring_req_failed(sqe, req, ret);
2134 
2135 	/*
2136 	 * Avoid breaking links in the middle as it renders links with SQPOLL
2137 	 * unusable. Instead of failing eagerly, continue assembling the link if
2138 	 * applicable and mark the head with REQ_F_FAIL. The link flushing code
2139 	 * should find the flag and handle the rest.
2140 	 */
2141 	req_fail_link_node(req, ret);
2142 	if (head && !(head->flags & REQ_F_FAIL))
2143 		req_fail_link_node(head, -ECANCELED);
2144 
2145 	if (!(req->flags & IO_REQ_LINK_FLAGS)) {
2146 		if (head) {
2147 			link->last->link = req;
2148 			link->head = NULL;
2149 			req = head;
2150 		}
2151 		io_queue_sqe_fallback(req);
2152 		return ret;
2153 	}
2154 
2155 	if (head)
2156 		link->last->link = req;
2157 	else
2158 		link->head = req;
2159 	link->last = req;
2160 	return 0;
2161 }
2162 
2163 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2164 			 const struct io_uring_sqe *sqe)
2165 	__must_hold(&ctx->uring_lock)
2166 {
2167 	struct io_submit_link *link = &ctx->submit_state.link;
2168 	int ret;
2169 
2170 	ret = io_init_req(ctx, req, sqe);
2171 	if (unlikely(ret))
2172 		return io_submit_fail_init(sqe, req, ret);
2173 
2174 	trace_io_uring_submit_req(req);
2175 
2176 	/*
2177 	 * If we already have a head request, queue this one for async
2178 	 * submittal once the head completes. If we don't have a head but
2179 	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2180 	 * submitted sync once the chain is complete. If none of those
2181 	 * conditions are true (normal request), then just queue it.
2182 	 */
2183 	if (unlikely(link->head)) {
2184 		trace_io_uring_link(req, link->last);
2185 		link->last->link = req;
2186 		link->last = req;
2187 
2188 		if (req->flags & IO_REQ_LINK_FLAGS)
2189 			return 0;
2190 		/* last request of the link, flush it */
2191 		req = link->head;
2192 		link->head = NULL;
2193 		if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))
2194 			goto fallback;
2195 
2196 	} else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS |
2197 					  REQ_F_FORCE_ASYNC | REQ_F_FAIL))) {
2198 		if (req->flags & IO_REQ_LINK_FLAGS) {
2199 			link->head = req;
2200 			link->last = req;
2201 		} else {
2202 fallback:
2203 			io_queue_sqe_fallback(req);
2204 		}
2205 		return 0;
2206 	}
2207 
2208 	io_queue_sqe(req);
2209 	return 0;
2210 }
2211 
2212 /*
2213  * Batched submission is done, ensure local IO is flushed out.
2214  */
2215 static void io_submit_state_end(struct io_ring_ctx *ctx)
2216 {
2217 	struct io_submit_state *state = &ctx->submit_state;
2218 
2219 	if (unlikely(state->link.head))
2220 		io_queue_sqe_fallback(state->link.head);
2221 	/* flush only after queuing links as they can generate completions */
2222 	io_submit_flush_completions(ctx);
2223 	if (state->plug_started)
2224 		blk_finish_plug(&state->plug);
2225 }
2226 
2227 /*
2228  * Start submission side cache.
2229  */
2230 static void io_submit_state_start(struct io_submit_state *state,
2231 				  unsigned int max_ios)
2232 {
2233 	state->plug_started = false;
2234 	state->need_plug = max_ios > 2;
2235 	state->submit_nr = max_ios;
2236 	/* set only head, no need to init link_last in advance */
2237 	state->link.head = NULL;
2238 }
2239 
2240 static void io_commit_sqring(struct io_ring_ctx *ctx)
2241 {
2242 	struct io_rings *rings = ctx->rings;
2243 
2244 	/*
2245 	 * Ensure any loads from the SQEs are done at this point,
2246 	 * since once we write the new head, the application could
2247 	 * write new data to them.
2248 	 */
2249 	smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2250 }
2251 
2252 /*
2253  * Fetch an sqe, if one is available. Note this returns a pointer to memory
2254  * that is mapped by userspace. This means that care needs to be taken to
2255  * ensure that reads are stable, as we cannot rely on userspace always
2256  * being a good citizen. If members of the sqe are validated and then later
2257  * used, it's important that those reads are done through READ_ONCE() to
2258  * prevent a re-load down the line.
2259  */
2260 static bool io_get_sqe(struct io_ring_ctx *ctx, const struct io_uring_sqe **sqe)
2261 {
2262 	unsigned mask = ctx->sq_entries - 1;
2263 	unsigned head = ctx->cached_sq_head++ & mask;
2264 
2265 	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY)) {
2266 		head = READ_ONCE(ctx->sq_array[head]);
2267 		if (unlikely(head >= ctx->sq_entries)) {
2268 			/* drop invalid entries */
2269 			spin_lock(&ctx->completion_lock);
2270 			ctx->cq_extra--;
2271 			spin_unlock(&ctx->completion_lock);
2272 			WRITE_ONCE(ctx->rings->sq_dropped,
2273 				   READ_ONCE(ctx->rings->sq_dropped) + 1);
2274 			return false;
2275 		}
2276 	}
2277 
2278 	/*
2279 	 * The cached sq head (or cq tail) serves two purposes:
2280 	 *
2281 	 * 1) allows us to batch the cost of updating the user visible
2282 	 *    head.
2283 	 * 2) allows the kernel side to track the head on its own, even
2284 	 *    though the application is the one updating it.
2285 	 */
2286 
2287 	/* double index for 128-byte SQEs, twice as long */
2288 	if (ctx->flags & IORING_SETUP_SQE128)
2289 		head <<= 1;
2290 	*sqe = &ctx->sq_sqes[head];
2291 	return true;
2292 }
2293 
2294 int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
2295 	__must_hold(&ctx->uring_lock)
2296 {
2297 	unsigned int entries = io_sqring_entries(ctx);
2298 	unsigned int left;
2299 	int ret;
2300 
2301 	if (unlikely(!entries))
2302 		return 0;
2303 	/* make sure SQ entry isn't read before tail */
2304 	ret = left = min(nr, entries);
2305 	io_get_task_refs(left);
2306 	io_submit_state_start(&ctx->submit_state, left);
2307 
2308 	do {
2309 		const struct io_uring_sqe *sqe;
2310 		struct io_kiocb *req;
2311 
2312 		if (unlikely(!io_alloc_req(ctx, &req)))
2313 			break;
2314 		if (unlikely(!io_get_sqe(ctx, &sqe))) {
2315 			io_req_add_to_cache(req, ctx);
2316 			break;
2317 		}
2318 
2319 		/*
2320 		 * Continue submitting even for sqe failure if the
2321 		 * ring was setup with IORING_SETUP_SUBMIT_ALL
2322 		 */
2323 		if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
2324 		    !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
2325 			left--;
2326 			break;
2327 		}
2328 	} while (--left);
2329 
2330 	if (unlikely(left)) {
2331 		ret -= left;
2332 		/* try again if it submitted nothing and can't allocate a req */
2333 		if (!ret && io_req_cache_empty(ctx))
2334 			ret = -EAGAIN;
2335 		current->io_uring->cached_refs += left;
2336 	}
2337 
2338 	io_submit_state_end(ctx);
2339 	 /* Commit SQ ring head once we've consumed and submitted all SQEs */
2340 	io_commit_sqring(ctx);
2341 	return ret;
2342 }
2343 
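/*
 * Wait-queue callback for CQ ring waiters: only wake the waiting task if it
 * has enough completions to satisfy its wait, or there is work it must run.
 */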
2344 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
2345 			    int wake_flags, void *key)
2346 {
2347 	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, wq);
2348 
2349 	/*
2350 	 * Cannot safely flush overflowed CQEs from here, ensure we wake up
2351 	 * the task, and the next invocation will do it.
2352 	 */
2353 	if (io_should_wake(iowq) || io_has_work(iowq->ctx))
2354 		return autoremove_wake_function(curr, mode, wake_flags, key);
2355 	return -1;
2356 }
2357 
2358 int io_run_task_work_sig(struct io_ring_ctx *ctx)
2359 {
2360 	if (!llist_empty(&ctx->work_llist)) {
2361 		__set_current_state(TASK_RUNNING);
2362 		if (io_run_local_work(ctx, INT_MAX) > 0)
2363 			return 0;
2364 	}
2365 	if (io_run_task_work() > 0)
2366 		return 0;
2367 	if (task_sigpending(current))
2368 		return -EINTR;
2369 	return 0;
2370 }
2371 
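/* does the current task have io_uring requests in flight? */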
2372 static bool current_pending_io(void)
2373 {
2374 	struct io_uring_task *tctx = current->io_uring;
2375 
2376 	if (!tctx)
2377 		return false;
2378 	return percpu_counter_read_positive(&tctx->inflight);
2379 }
2380 
2381 static enum hrtimer_restart io_cqring_timer_wakeup(struct hrtimer *timer)
2382 {
2383 	struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t);
2384 
2385 	WRITE_ONCE(iowq->hit_timeout, 1);
2386 	iowq->min_timeout = 0;
2387 	wake_up_process(iowq->wq.private);
2388 	return HRTIMER_NORESTART;
2389 }
2390 
2391 /*
2392  * Doing min_timeout portion. If we saw any timeouts, events, or have work,
2393  * wake up. If not, and we have a normal timeout, switch to that and keep
2394  * sleeping.
2395  */
2396 static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer)
2397 {
2398 	struct io_wait_queue *iowq = container_of(timer, struct io_wait_queue, t);
2399 	struct io_ring_ctx *ctx = iowq->ctx;
2400 
2401 	/* no general timeout, or shorter (or equal), we are done */
2402 	if (iowq->timeout == KTIME_MAX ||
2403 	    ktime_compare(iowq->min_timeout, iowq->timeout) >= 0)
2404 		goto out_wake;
2405 	/* work we may need to run, wake function will see if we need to wake */
2406 	if (io_has_work(ctx))
2407 		goto out_wake;
2408 	/* got events since we started waiting, min timeout is done */
2409 	if (iowq->cq_min_tail != READ_ONCE(ctx->rings->cq.tail))
2410 		goto out_wake;
2411 	/* if we have any events and min timeout expired, we're done */
2412 	if (io_cqring_events(ctx))
2413 		goto out_wake;
2414 
2415 	/*
2416 	 * If using deferred task_work running and application is waiting on
2417 	 * more than one request, ensure we reset it now where we are switching
2418 	 * to normal sleeps. Any request completion post min_wait should wake
2419 	 * the task and return.
2420 	 */
2421 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
2422 		atomic_set(&ctx->cq_wait_nr, 1);
2423 		smp_mb();
2424 		if (!llist_empty(&ctx->work_llist))
2425 			goto out_wake;
2426 	}
2427 
2428 	iowq->t.function = io_cqring_timer_wakeup;
2429 	hrtimer_set_expires(timer, iowq->timeout);
2430 	return HRTIMER_RESTART;
2431 out_wake:
2432 	return io_cqring_timer_wakeup(timer);
2433 }
2434 
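/*
 * Sleep with an hrtimer armed. With a min_timeout, start with the min-wait
 * handler, which may re-arm the timer for the full timeout; returns -ETIME
 * if the timeout fired, 0 otherwise.
 */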
2435 static int io_cqring_schedule_timeout(struct io_wait_queue *iowq,
2436 				      clockid_t clock_id, ktime_t start_time)
2437 {
2438 	ktime_t timeout;
2439 
2440 	hrtimer_init_on_stack(&iowq->t, clock_id, HRTIMER_MODE_ABS);
2441 	if (iowq->min_timeout) {
2442 		timeout = ktime_add_ns(iowq->min_timeout, start_time);
2443 		iowq->t.function = io_cqring_min_timer_wakeup;
2444 	} else {
2445 		timeout = iowq->timeout;
2446 		iowq->t.function = io_cqring_timer_wakeup;
2447 	}
2448 
2449 	hrtimer_set_expires_range_ns(&iowq->t, timeout, 0);
2450 	hrtimer_start_expires(&iowq->t, HRTIMER_MODE_ABS);
2451 
2452 	if (!READ_ONCE(iowq->hit_timeout))
2453 		schedule();
2454 
2455 	hrtimer_cancel(&iowq->t);
2456 	destroy_hrtimer_on_stack(&iowq->t);
2457 	__set_current_state(TASK_RUNNING);
2458 
2459 	return READ_ONCE(iowq->hit_timeout) ? -ETIME : 0;
2460 }
2461 
2462 static int __io_cqring_wait_schedule(struct io_ring_ctx *ctx,
2463 				     struct io_wait_queue *iowq,
2464 				     ktime_t start_time)
2465 {
2466 	int ret = 0;
2467 
2468 	/*
2469 	 * Mark us as being in io_wait if we have pending requests, so cpufreq
2470 	 * can take into account that the task is waiting for IO - turns out
2471 	 * to be important for low QD IO.
2472 	 */
2473 	if (current_pending_io())
2474 		current->in_iowait = 1;
2475 	if (iowq->timeout != KTIME_MAX || iowq->min_timeout)
2476 		ret = io_cqring_schedule_timeout(iowq, ctx->clockid, start_time);
2477 	else
2478 		schedule();
2479 	current->in_iowait = 0;
2480 	return ret;
2481 }
2482 
2483 /* If this returns > 0, the caller should retry */
2484 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
2485 					  struct io_wait_queue *iowq,
2486 					  ktime_t start_time)
2487 {
2488 	if (unlikely(READ_ONCE(ctx->check_cq)))
2489 		return 1;
2490 	if (unlikely(!llist_empty(&ctx->work_llist)))
2491 		return 1;
2492 	if (unlikely(task_work_pending(current)))
2493 		return 1;
2494 	if (unlikely(task_sigpending(current)))
2495 		return -EINTR;
2496 	if (unlikely(io_should_wake(iowq)))
2497 		return 0;
2498 
2499 	return __io_cqring_wait_schedule(ctx, iowq, start_time);
2500 }
2501 
2502 struct ext_arg {
2503 	size_t argsz;
2504 	struct __kernel_timespec __user *ts;
2505 	const sigset_t __user *sig;
2506 	ktime_t min_time;
2507 };
2508 
2509 /*
2510  * Wait until events become available, if we don't already have some. The
2511  * application must reap them itself, as they reside on the shared cq ring.
2512  */
2513 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, u32 flags,
2514 			  struct ext_arg *ext_arg)
2515 {
2516 	struct io_wait_queue iowq;
2517 	struct io_rings *rings = ctx->rings;
2518 	ktime_t start_time;
2519 	int ret;
2520 
2521 	if (!io_allowed_run_tw(ctx))
2522 		return -EEXIST;
2523 	if (!llist_empty(&ctx->work_llist))
2524 		io_run_local_work(ctx, min_events);
2525 	io_run_task_work();
2526 
2527 	if (unlikely(test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)))
2528 		io_cqring_do_overflow_flush(ctx);
2529 	if (__io_cqring_events_user(ctx) >= min_events)
2530 		return 0;
2531 
2532 	init_waitqueue_func_entry(&iowq.wq, io_wake_function);
2533 	iowq.wq.private = current;
2534 	INIT_LIST_HEAD(&iowq.wq.entry);
2535 	iowq.ctx = ctx;
2536 	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
2537 	iowq.cq_min_tail = READ_ONCE(ctx->rings->cq.tail);
2538 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
2539 	iowq.hit_timeout = 0;
2540 	iowq.min_timeout = ext_arg->min_time;
2541 	iowq.timeout = KTIME_MAX;
2542 	start_time = io_get_time(ctx);
2543 
2544 	if (ext_arg->ts) {
2545 		struct timespec64 ts;
2546 
2547 		if (get_timespec64(&ts, ext_arg->ts))
2548 			return -EFAULT;
2549 
2550 		iowq.timeout = timespec64_to_ktime(ts);
2551 		if (!(flags & IORING_ENTER_ABS_TIMER))
2552 			iowq.timeout = ktime_add(iowq.timeout, start_time);
2553 	}
2554 
2555 	if (ext_arg->sig) {
2556 #ifdef CONFIG_COMPAT
2557 		if (in_compat_syscall())
2558 			ret = set_compat_user_sigmask((const compat_sigset_t __user *)ext_arg->sig,
2559 						      ext_arg->argsz);
2560 		else
2561 #endif
2562 			ret = set_user_sigmask(ext_arg->sig, ext_arg->argsz);
2563 
2564 		if (ret)
2565 			return ret;
2566 	}
2567 
2568 	io_napi_busy_loop(ctx, &iowq);
2569 
2570 	trace_io_uring_cqring_wait(ctx, min_events);
2571 	do {
2572 		unsigned long check_cq;
2573 		int nr_wait;
2574 
2575 		/* if min timeout has been hit, don't reset wait count */
2576 		if (!iowq.hit_timeout)
2577 			nr_wait = (int) iowq.cq_tail -
2578 					READ_ONCE(ctx->rings->cq.tail);
2579 		else
2580 			nr_wait = 1;
2581 
2582 		if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
2583 			atomic_set(&ctx->cq_wait_nr, nr_wait);
2584 			set_current_state(TASK_INTERRUPTIBLE);
2585 		} else {
2586 			prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
2587 							TASK_INTERRUPTIBLE);
2588 		}
2589 
2590 		ret = io_cqring_wait_schedule(ctx, &iowq, start_time);
2591 		__set_current_state(TASK_RUNNING);
2592 		atomic_set(&ctx->cq_wait_nr, IO_CQ_WAKE_INIT);
2593 
2594 		/*
2595 		 * Run task_work after scheduling and before io_should_wake().
2596 		 * If we got woken because of task_work being processed, run it
2597 		 * now rather than let the caller do another wait loop.
2598 		 */
2599 		if (!llist_empty(&ctx->work_llist))
2600 			io_run_local_work(ctx, nr_wait);
2601 		io_run_task_work();
2602 
2603 		/*
2604 		 * Non-local task_work will be run on exit to userspace, but
2605 		 * if we're using DEFER_TASKRUN, then we could have waited
2606 		 * with a timeout for a number of requests. If the timeout
2607 		 * hits, we could have some requests ready to process. Ensure
2608 		 * this break is _after_ we have run task_work, to avoid
2609 		 * deferring running potentially pending requests until the
2610 		 * next time we wait for events.
2611 		 */
2612 		if (ret < 0)
2613 			break;
2614 
2615 		check_cq = READ_ONCE(ctx->check_cq);
2616 		if (unlikely(check_cq)) {
2617 			/* let the caller flush overflows, retry */
2618 			if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
2619 				io_cqring_do_overflow_flush(ctx);
2620 			if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT)) {
2621 				ret = -EBADR;
2622 				break;
2623 			}
2624 		}
2625 
2626 		if (io_should_wake(&iowq)) {
2627 			ret = 0;
2628 			break;
2629 		}
2630 		cond_resched();
2631 	} while (1);
2632 
2633 	if (!(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
2634 		finish_wait(&ctx->cq_wait, &iowq.wq);
2635 	restore_saved_sigmask_unless(ret == -EINTR);
2636 
2637 	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2638 }
2639 
2640 static void *io_rings_map(struct io_ring_ctx *ctx, unsigned long uaddr,
2641 			  size_t size)
2642 {
2643 	return __io_uaddr_map(&ctx->ring_pages, &ctx->n_ring_pages, uaddr,
2644 				size);
2645 }
2646 
2647 static void *io_sqes_map(struct io_ring_ctx *ctx, unsigned long uaddr,
2648 			 size_t size)
2649 {
2650 	return __io_uaddr_map(&ctx->sqe_pages, &ctx->n_sqe_pages, uaddr,
2651 				size);
2652 }
2653 
2654 static void io_rings_free(struct io_ring_ctx *ctx)
2655 {
2656 	if (!(ctx->flags & IORING_SETUP_NO_MMAP)) {
2657 		io_pages_unmap(ctx->rings, &ctx->ring_pages, &ctx->n_ring_pages,
2658 				true);
2659 		io_pages_unmap(ctx->sq_sqes, &ctx->sqe_pages, &ctx->n_sqe_pages,
2660 				true);
2661 	} else {
2662 		io_pages_free(&ctx->ring_pages, ctx->n_ring_pages);
2663 		ctx->n_ring_pages = 0;
2664 		io_pages_free(&ctx->sqe_pages, ctx->n_sqe_pages);
2665 		ctx->n_sqe_pages = 0;
2666 		vunmap(ctx->rings);
2667 		vunmap(ctx->sq_sqes);
2668 	}
2669 
2670 	ctx->rings = NULL;
2671 	ctx->sq_sqes = NULL;
2672 }
2673 
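/*
 * Size of the rings allocation: the io_rings struct plus the CQE array
 * (doubled for CQE32), optionally followed by the SQ index array whose
 * offset is returned in *sq_offset. Returns SIZE_MAX on overflow.
 */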
2674 static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
2675 				unsigned int cq_entries, size_t *sq_offset)
2676 {
2677 	struct io_rings *rings;
2678 	size_t off, sq_array_size;
2679 
2680 	off = struct_size(rings, cqes, cq_entries);
2681 	if (off == SIZE_MAX)
2682 		return SIZE_MAX;
2683 	if (ctx->flags & IORING_SETUP_CQE32) {
2684 		if (check_shl_overflow(off, 1, &off))
2685 			return SIZE_MAX;
2686 	}
2687 
2688 #ifdef CONFIG_SMP
2689 	off = ALIGN(off, SMP_CACHE_BYTES);
2690 	if (off == 0)
2691 		return SIZE_MAX;
2692 #endif
2693 
2694 	if (ctx->flags & IORING_SETUP_NO_SQARRAY) {
2695 		*sq_offset = SIZE_MAX;
2696 		return off;
2697 	}
2698 
2699 	*sq_offset = off;
2700 
2701 	sq_array_size = array_size(sizeof(u32), sq_entries);
2702 	if (sq_array_size == SIZE_MAX)
2703 		return SIZE_MAX;
2704 
2705 	if (check_add_overflow(off, sq_array_size, &off))
2706 		return SIZE_MAX;
2707 
2708 	return off;
2709 }
2710 
2711 static void io_req_caches_free(struct io_ring_ctx *ctx)
2712 {
2713 	struct io_kiocb *req;
2714 	int nr = 0;
2715 
2716 	mutex_lock(&ctx->uring_lock);
2717 
2718 	while (!io_req_cache_empty(ctx)) {
2719 		req = io_extract_req(ctx);
2720 		kmem_cache_free(req_cachep, req);
2721 		nr++;
2722 	}
2723 	if (nr)
2724 		percpu_ref_put_many(&ctx->refs, nr);
2725 	mutex_unlock(&ctx->uring_lock);
2726 }
2727 
2728 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
2729 {
2730 	io_sq_thread_finish(ctx);
2731 	/* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
2732 	if (WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list)))
2733 		return;
2734 
2735 	mutex_lock(&ctx->uring_lock);
2736 	if (ctx->buf_data)
2737 		__io_sqe_buffers_unregister(ctx);
2738 	if (ctx->file_data)
2739 		__io_sqe_files_unregister(ctx);
2740 	io_cqring_overflow_kill(ctx);
2741 	io_eventfd_unregister(ctx);
2742 	io_alloc_cache_free(&ctx->apoll_cache, kfree);
2743 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
2744 	io_alloc_cache_free(&ctx->rw_cache, io_rw_cache_free);
2745 	io_alloc_cache_free(&ctx->uring_cache, kfree);
2746 	io_alloc_cache_free(&ctx->msg_cache, io_msg_cache_free);
2747 	io_futex_cache_free(ctx);
2748 	io_destroy_buffers(ctx);
2749 	mutex_unlock(&ctx->uring_lock);
2750 	if (ctx->sq_creds)
2751 		put_cred(ctx->sq_creds);
2752 	if (ctx->submitter_task)
2753 		put_task_struct(ctx->submitter_task);
2754 
2755 	/* there are no registered resources left, nobody uses it */
2756 	if (ctx->rsrc_node)
2757 		io_rsrc_node_destroy(ctx, ctx->rsrc_node);
2758 
2759 	WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
2760 	WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
2761 
2762 	io_alloc_cache_free(&ctx->rsrc_node_cache, kfree);
2763 	if (ctx->mm_account) {
2764 		mmdrop(ctx->mm_account);
2765 		ctx->mm_account = NULL;
2766 	}
2767 	io_rings_free(ctx);
2768 
2769 	percpu_ref_exit(&ctx->refs);
2770 	free_uid(ctx->user);
2771 	io_req_caches_free(ctx);
2772 	if (ctx->hash_map)
2773 		io_wq_put_hash(ctx->hash_map);
2774 	io_napi_free(ctx);
2775 	kfree(ctx->cancel_table.hbs);
2776 	kfree(ctx->cancel_table_locked.hbs);
2777 	xa_destroy(&ctx->io_bl_xa);
2778 	kfree(ctx);
2779 }
2780 
2781 static __cold void io_activate_pollwq_cb(struct callback_head *cb)
2782 {
2783 	struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx,
2784 					       poll_wq_task_work);
2785 
2786 	mutex_lock(&ctx->uring_lock);
2787 	ctx->poll_activated = true;
2788 	mutex_unlock(&ctx->uring_lock);
2789 
2790 	/*
2791 	 * Wake ups for some events between start of polling and activation
2792 	 * might've been lost due to loose synchronisation.
2793 	 */
2794 	wake_up_all(&ctx->poll_wq);
2795 	percpu_ref_put(&ctx->refs);
2796 }
2797 
2798 __cold void io_activate_pollwq(struct io_ring_ctx *ctx)
2799 {
2800 	spin_lock(&ctx->completion_lock);
2801 	/* already activated or in progress */
2802 	if (ctx->poll_activated || ctx->poll_wq_task_work.func)
2803 		goto out;
2804 	if (WARN_ON_ONCE(!ctx->task_complete))
2805 		goto out;
2806 	if (!ctx->submitter_task)
2807 		goto out;
2808 	/*
2809 	 * With ->submitter_task only the submitter task completes requests, so
2810 	 * we only need to sync with it, which is done by injecting a task_work.
2811 	 */
2812 	init_task_work(&ctx->poll_wq_task_work, io_activate_pollwq_cb);
2813 	percpu_ref_get(&ctx->refs);
2814 	if (task_work_add(ctx->submitter_task, &ctx->poll_wq_task_work, TWA_SIGNAL))
2815 		percpu_ref_put(&ctx->refs);
2816 out:
2817 	spin_unlock(&ctx->completion_lock);
2818 }
2819 
2820 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
2821 {
2822 	struct io_ring_ctx *ctx = file->private_data;
2823 	__poll_t mask = 0;
2824 
2825 	if (unlikely(!ctx->poll_activated))
2826 		io_activate_pollwq(ctx);
2827 
2828 	poll_wait(file, &ctx->poll_wq, wait);
2829 	/*
2830 	 * synchronizes with barrier from wq_has_sleeper call in
2831 	 * io_commit_cqring
2832 	 */
2833 	smp_rmb();
2834 	if (!io_sqring_full(ctx))
2835 		mask |= EPOLLOUT | EPOLLWRNORM;
2836 
2837 	/*
2838 	 * Don't flush cqring overflow list here, just do a simple check.
2839 	 * Otherwise there could possibly be an ABBA deadlock:
2840 	 *      CPU0                    CPU1
2841 	 *      ----                    ----
2842 	 * lock(&ctx->uring_lock);
2843 	 *                              lock(&ep->mtx);
2844 	 *                              lock(&ctx->uring_lock);
2845 	 * lock(&ep->mtx);
2846 	 *
2847 	 * Users may get EPOLLIN while seeing nothing in the cqring; this
2848 	 * pushes them to do the flush.
2849 	 */
2850 
2851 	if (__io_cqring_events_user(ctx) || io_has_work(ctx))
2852 		mask |= EPOLLIN | EPOLLRDNORM;
2853 
2854 	return mask;
2855 }
2856 
2857 struct io_tctx_exit {
2858 	struct callback_head		task_work;
2859 	struct completion		completion;
2860 	struct io_ring_ctx		*ctx;
2861 };
2862 
2863 static __cold void io_tctx_exit_cb(struct callback_head *cb)
2864 {
2865 	struct io_uring_task *tctx = current->io_uring;
2866 	struct io_tctx_exit *work;
2867 
2868 	work = container_of(cb, struct io_tctx_exit, task_work);
2869 	/*
2870 	 * When @in_cancel, we're in cancellation and it's racy to remove the
2871 	 * node. It'll be removed by the end of cancellation, just ignore it.
2872 	 * tctx can be NULL if the queueing of this task_work raced with
2873 	 * work cancelation off the exec path.
2874 	 */
2875 	if (tctx && !atomic_read(&tctx->in_cancel))
2876 		io_uring_del_tctx_node((unsigned long)work->ctx);
2877 	complete(&work->completion);
2878 }
2879 
2880 static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
2881 {
2882 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2883 
2884 	return req->ctx == data;
2885 }
2886 
2887 static __cold void io_ring_exit_work(struct work_struct *work)
2888 {
2889 	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
2890 	unsigned long timeout = jiffies + HZ * 60 * 5;
2891 	unsigned long interval = HZ / 20;
2892 	struct io_tctx_exit exit;
2893 	struct io_tctx_node *node;
2894 	int ret;
2895 
2896 	/*
2897 	 * If we're doing polled IO and end up having requests being
2898 	 * submitted async (out-of-line), then completions can come in while
2899 	 * we're waiting for refs to drop. We need to reap these manually,
2900 	 * as nobody else will be looking for them.
2901 	 */
2902 	do {
2903 		if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
2904 			mutex_lock(&ctx->uring_lock);
2905 			io_cqring_overflow_kill(ctx);
2906 			mutex_unlock(&ctx->uring_lock);
2907 		}
2908 
2909 		if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
2910 			io_move_task_work_from_local(ctx);
2911 
2912 		while (io_uring_try_cancel_requests(ctx, NULL, true))
2913 			cond_resched();
2914 
2915 		if (ctx->sq_data) {
2916 			struct io_sq_data *sqd = ctx->sq_data;
2917 			struct task_struct *tsk;
2918 
2919 			io_sq_thread_park(sqd);
2920 			tsk = sqpoll_task_locked(sqd);
2921 			if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
2922 				io_wq_cancel_cb(tsk->io_uring->io_wq,
2923 						io_cancel_ctx_cb, ctx, true);
2924 			io_sq_thread_unpark(sqd);
2925 		}
2926 
2927 		io_req_caches_free(ctx);
2928 
2929 		if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
2930 			/* there is little hope left, don't run it too often */
2931 			interval = HZ * 60;
2932 		}
2933 		/*
2934 		 * This is really an uninterruptible wait, as it has to be
2935 		 * complete. But it's also run from a kworker, which doesn't
2936 		 * take signals, so it's fine to make it interruptible. This
2937 		 * avoids scenarios where we knowingly can wait much longer
2938 		 * on completions, for example if someone does a SIGSTOP on
2939 		 * a task that needs to finish task_work to make this loop
2940 		 * complete. That's a synthetic situation that should not
2941 		 * cause a stuck task backtrace, and hence a potential panic
2942 		 * on stuck tasks if that is enabled.
2943 		 */
2944 	} while (!wait_for_completion_interruptible_timeout(&ctx->ref_comp, interval));
2945 
2946 	init_completion(&exit.completion);
2947 	init_task_work(&exit.task_work, io_tctx_exit_cb);
2948 	exit.ctx = ctx;
2949 
2950 	mutex_lock(&ctx->uring_lock);
2951 	while (!list_empty(&ctx->tctx_list)) {
2952 		WARN_ON_ONCE(time_after(jiffies, timeout));
2953 
2954 		node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
2955 					ctx_node);
2956 		/* don't spin on a single task if cancellation failed */
2957 		list_rotate_left(&ctx->tctx_list);
2958 		ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
2959 		if (WARN_ON_ONCE(ret))
2960 			continue;
2961 
2962 		mutex_unlock(&ctx->uring_lock);
2963 		/*
2964 		 * See comment above for
2965 		 * wait_for_completion_interruptible_timeout() on why this
2966 		 * wait is marked as interruptible.
2967 		 */
2968 		wait_for_completion_interruptible(&exit.completion);
2969 		mutex_lock(&ctx->uring_lock);
2970 	}
2971 	mutex_unlock(&ctx->uring_lock);
2972 	spin_lock(&ctx->completion_lock);
2973 	spin_unlock(&ctx->completion_lock);
2974 
2975 	/* pairs with RCU read section in io_req_local_work_add() */
2976 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
2977 		synchronize_rcu();
2978 
2979 	io_ring_ctx_free(ctx);
2980 }
2981 
2982 static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
2983 {
2984 	unsigned long index;
2985 	struct creds *creds;
2986 
2987 	mutex_lock(&ctx->uring_lock);
2988 	percpu_ref_kill(&ctx->refs);
2989 	xa_for_each(&ctx->personalities, index, creds)
2990 		io_unregister_personality(ctx, index);
2991 	mutex_unlock(&ctx->uring_lock);
2992 
2993 	flush_delayed_work(&ctx->fallback_work);
2994 
2995 	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
2996 	/*
2997 	 * Use system_unbound_wq to avoid spawning tons of event kworkers
2998 	 * if we're exiting a ton of rings at the same time. It just adds
2999 	 * noise and overhead, there's no discernable change in runtime
3000 	 * over using system_wq.
3001 	 */
3002 	queue_work(iou_wq, &ctx->exit_work);
3003 }
3004 
3005 static int io_uring_release(struct inode *inode, struct file *file)
3006 {
3007 	struct io_ring_ctx *ctx = file->private_data;
3008 
3009 	file->private_data = NULL;
3010 	io_ring_ctx_wait_and_kill(ctx);
3011 	return 0;
3012 }
3013 
3014 struct io_task_cancel {
3015 	struct task_struct *task;
3016 	bool all;
3017 };
3018 
3019 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
3020 {
3021 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
3022 	struct io_task_cancel *cancel = data;
3023 
3024 	return io_match_task_safe(req, cancel->task, cancel->all);
3025 }
3026 
3027 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
3028 					 struct task_struct *task,
3029 					 bool cancel_all)
3030 {
3031 	struct io_defer_entry *de;
3032 	LIST_HEAD(list);
3033 
3034 	spin_lock(&ctx->completion_lock);
3035 	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
3036 		if (io_match_task_safe(de->req, task, cancel_all)) {
3037 			list_cut_position(&list, &ctx->defer_list, &de->list);
3038 			break;
3039 		}
3040 	}
3041 	spin_unlock(&ctx->completion_lock);
3042 	if (list_empty(&list))
3043 		return false;
3044 
3045 	while (!list_empty(&list)) {
3046 		de = list_first_entry(&list, struct io_defer_entry, list);
3047 		list_del_init(&de->list);
3048 		io_req_task_queue_fail(de->req, -ECANCELED);
3049 		kfree(de);
3050 	}
3051 	return true;
3052 }
3053 
3054 static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
3055 {
3056 	struct io_tctx_node *node;
3057 	enum io_wq_cancel cret;
3058 	bool ret = false;
3059 
3060 	mutex_lock(&ctx->uring_lock);
3061 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
3062 		struct io_uring_task *tctx = node->task->io_uring;
3063 
3064 		/*
3065 		 * io_wq will stay alive while we hold uring_lock, because it's
3066 		 * killed after ctx nodes, which requires taking the lock.
3067 		 */
3068 		if (!tctx || !tctx->io_wq)
3069 			continue;
3070 		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
3071 		ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
3072 	}
3073 	mutex_unlock(&ctx->uring_lock);
3074 
3075 	return ret;
3076 }
3077 
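/*
 * Try to cancel requests on this ring that match @task (all requests if
 * @task is NULL). Returns true if anything was found, so callers know to
 * loop and retry.
 */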
3078 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
3079 						struct task_struct *task,
3080 						bool cancel_all)
3081 {
3082 	struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
3083 	struct io_uring_task *tctx = task ? task->io_uring : NULL;
3084 	enum io_wq_cancel cret;
3085 	bool ret = false;
3086 
3087 	/* set it so io_req_local_work_add() would wake us up */
3088 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
3089 		atomic_set(&ctx->cq_wait_nr, 1);
3090 		smp_mb();
3091 	}
3092 
3093 	/* failed during ring init, it couldn't have issued any requests */
3094 	if (!ctx->rings)
3095 		return false;
3096 
3097 	if (!task) {
3098 		ret |= io_uring_try_cancel_iowq(ctx);
3099 	} else if (tctx && tctx->io_wq) {
3100 		/*
3101 		 * Cancels requests of all rings, not only @ctx, but
3102 		 * it's fine as the task is in exit/exec.
3103 		 */
3104 		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
3105 				       &cancel, true);
3106 		ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
3107 	}
3108 
3109 	/* SQPOLL thread does its own polling */
3110 	if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
3111 	    (ctx->sq_data && ctx->sq_data->thread == current)) {
3112 		while (!wq_list_empty(&ctx->iopoll_list)) {
3113 			io_iopoll_try_reap_events(ctx);
3114 			ret = true;
3115 			cond_resched();
3116 		}
3117 	}
3118 
3119 	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
3120 	    io_allowed_defer_tw_run(ctx))
3121 		ret |= io_run_local_work(ctx, INT_MAX) > 0;
3122 	ret |= io_cancel_defer_files(ctx, task, cancel_all);
3123 	mutex_lock(&ctx->uring_lock);
3124 	ret |= io_poll_remove_all(ctx, task, cancel_all);
3125 	ret |= io_waitid_remove_all(ctx, task, cancel_all);
3126 	ret |= io_futex_remove_all(ctx, task, cancel_all);
3127 	ret |= io_uring_try_cancel_uring_cmd(ctx, task, cancel_all);
3128 	mutex_unlock(&ctx->uring_lock);
3129 	ret |= io_kill_timeouts(ctx, task, cancel_all);
3130 	if (task)
3131 		ret |= io_run_task_work() > 0;
3132 	else
3133 		ret |= flush_delayed_work(&ctx->fallback_work);
3134 	return ret;
3135 }
3136 
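/* number of inflight requests for this task, optionally only the tracked ones */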
3137 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
3138 {
3139 	if (tracked)
3140 		return atomic_read(&tctx->inflight_tracked);
3141 	return percpu_counter_sum(&tctx->inflight);
3142 }
3143 
3144 /*
3145  * Find any io_uring ctx that this task has registered or done IO on, and cancel
3146  * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
3147  */
3148 __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
3149 {
3150 	struct io_uring_task *tctx = current->io_uring;
3151 	struct io_ring_ctx *ctx;
3152 	struct io_tctx_node *node;
3153 	unsigned long index;
3154 	s64 inflight;
3155 	DEFINE_WAIT(wait);
3156 
3157 	WARN_ON_ONCE(sqd && sqpoll_task_locked(sqd) != current);
3158 
3159 	if (!current->io_uring)
3160 		return;
3161 	if (tctx->io_wq)
3162 		io_wq_exit_start(tctx->io_wq);
3163 
3164 	atomic_inc(&tctx->in_cancel);
3165 	do {
3166 		bool loop = false;
3167 
3168 		io_uring_drop_tctx_refs(current);
3169 		if (!tctx_inflight(tctx, !cancel_all))
3170 			break;
3171 
3172 		/* read completions before cancelations */
3173 		inflight = tctx_inflight(tctx, false);
3174 		if (!inflight)
3175 			break;
3176 
3177 		if (!sqd) {
3178 			xa_for_each(&tctx->xa, index, node) {
3179 				/* sqpoll task will cancel all its requests */
3180 				if (node->ctx->sq_data)
3181 					continue;
3182 				loop |= io_uring_try_cancel_requests(node->ctx,
3183 							current, cancel_all);
3184 			}
3185 		} else {
3186 			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
3187 				loop |= io_uring_try_cancel_requests(ctx,
3188 								     current,
3189 								     cancel_all);
3190 		}
3191 
3192 		if (loop) {
3193 			cond_resched();
3194 			continue;
3195 		}
3196 
3197 		prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
3198 		io_run_task_work();
3199 		io_uring_drop_tctx_refs(current);
3200 		xa_for_each(&tctx->xa, index, node) {
3201 			if (!llist_empty(&node->ctx->work_llist)) {
3202 				WARN_ON_ONCE(node->ctx->submitter_task &&
3203 					     node->ctx->submitter_task != current);
3204 				goto end_wait;
3205 			}
3206 		}
3207 		/*
3208 		 * If we've seen completions, retry without waiting. This
3209 		 * avoids a race where a completion comes in before we did
3210 		 * prepare_to_wait().
3211 		 */
3212 		if (inflight == tctx_inflight(tctx, !cancel_all))
3213 			schedule();
3214 end_wait:
3215 		finish_wait(&tctx->wait, &wait);
3216 	} while (1);
3217 
3218 	io_uring_clean_tctx(tctx);
3219 	if (cancel_all) {
3220 		/*
3221 		 * We shouldn't run task_works after cancel, so just leave
3222 		 * ->in_cancel set for normal exit.
3223 		 */
3224 		atomic_dec(&tctx->in_cancel);
3225 		/* for exec all current's requests should be gone, kill tctx */
3226 		__io_uring_free(current);
3227 	}
3228 }
3229 
3230 void __io_uring_cancel(bool cancel_all)
3231 {
3232 	io_uring_unreg_ringfd();
3233 	io_uring_cancel_generic(cancel_all, NULL);
3234 }
3235 
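/*
 * For IORING_ENTER_EXT_ARG, argp/argsz must describe an io_uring_getevents_arg;
 * the iopoll wait path only validates it here and ignores the contents.
 */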
3236 static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
3237 {
3238 	if (flags & IORING_ENTER_EXT_ARG) {
3239 		struct io_uring_getevents_arg arg;
3240 
3241 		if (argsz != sizeof(arg))
3242 			return -EINVAL;
3243 		if (copy_from_user(&arg, argp, sizeof(arg)))
3244 			return -EFAULT;
3245 	}
3246 	return 0;
3247 }
3248 
3249 static int io_get_ext_arg(unsigned flags, const void __user *argp,
3250 			  struct ext_arg *ext_arg)
3251 {
3252 	struct io_uring_getevents_arg arg;
3253 
3254 	/*
3255 	 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
3256 	 * is just a pointer to the sigset_t.
3257 	 */
3258 	if (!(flags & IORING_ENTER_EXT_ARG)) {
3259 		ext_arg->sig = (const sigset_t __user *) argp;
3260 		ext_arg->ts = NULL;
3261 		return 0;
3262 	}
3263 
3264 	/*
3265 	 * EXT_ARG is set - ensure we agree on the size of it and copy in our
3266 	 * timespec and sigset_t pointers if good.
3267 	 */
3268 	if (ext_arg->argsz != sizeof(arg))
3269 		return -EINVAL;
3270 	if (copy_from_user(&arg, argp, sizeof(arg)))
3271 		return -EFAULT;
3272 	ext_arg->min_time = arg.min_wait_usec * NSEC_PER_USEC;
3273 	ext_arg->sig = u64_to_user_ptr(arg.sigmask);
3274 	ext_arg->argsz = arg.sigmask_sz;
3275 	ext_arg->ts = u64_to_user_ptr(arg.ts);
3276 	return 0;
3277 }
3278 
3279 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3280 		u32, min_complete, u32, flags, const void __user *, argp,
3281 		size_t, argsz)
3282 {
3283 	struct io_ring_ctx *ctx;
3284 	struct file *file;
3285 	long ret;
3286 
3287 	if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
3288 			       IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
3289 			       IORING_ENTER_REGISTERED_RING |
3290 			       IORING_ENTER_ABS_TIMER)))
3291 		return -EINVAL;
3292 
3293 	/*
3294 	 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we
3295 	 * need only dereference our task private array to find it.
3296 	 */
	if (flags & IORING_ENTER_REGISTERED_RING) {
		struct io_uring_task *tctx = current->io_uring;

		if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
			return -EINVAL;
		fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
		file = tctx->registered_rings[fd];
		if (unlikely(!file))
			return -EBADF;
	} else {
		file = fget(fd);
		if (unlikely(!file))
			return -EBADF;
		ret = -EOPNOTSUPP;
		if (unlikely(!io_is_uring_fops(file)))
			goto out;
	}

	ctx = file->private_data;
	ret = -EBADFD;
	if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
		goto out;

	/*
	 * For SQ polling, the thread will do all submissions and completions.
	 * Just return the requested submit count, and wake the thread if
	 * we were asked to.
	 */
	ret = 0;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (unlikely(ctx->sq_data->thread == NULL)) {
			ret = -EOWNERDEAD;
			goto out;
		}
		if (flags & IORING_ENTER_SQ_WAKEUP)
			wake_up(&ctx->sq_data->wait);
		if (flags & IORING_ENTER_SQ_WAIT)
			io_sqpoll_wait_sq(ctx);

		ret = to_submit;
	} else if (to_submit) {
		ret = io_uring_add_tctx_node(ctx);
		if (unlikely(ret))
			goto out;

		mutex_lock(&ctx->uring_lock);
		ret = io_submit_sqes(ctx, to_submit);
		if (ret != to_submit) {
			mutex_unlock(&ctx->uring_lock);
			goto out;
		}
		if (flags & IORING_ENTER_GETEVENTS) {
			if (ctx->syscall_iopoll)
				goto iopoll_locked;
			/*
			 * Ignore errors, we'll soon call io_cqring_wait() and
			 * it should handle ownership problems if any.
			 */
			if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
				(void)io_run_local_work_locked(ctx, min_complete);
		}
		mutex_unlock(&ctx->uring_lock);
	}

	if (flags & IORING_ENTER_GETEVENTS) {
		int ret2;

		if (ctx->syscall_iopoll) {
			/*
			 * We disallow the app entering submit/complete with
			 * polling, but we still need to lock the ring to
			 * prevent racing with polled issue that got punted to
			 * a workqueue.
			 */
			mutex_lock(&ctx->uring_lock);
iopoll_locked:
			ret2 = io_validate_ext_arg(flags, argp, argsz);
			if (likely(!ret2)) {
				min_complete = min(min_complete,
						   ctx->cq_entries);
				ret2 = io_iopoll_check(ctx, min_complete);
			}
			mutex_unlock(&ctx->uring_lock);
		} else {
			struct ext_arg ext_arg = { .argsz = argsz };

			ret2 = io_get_ext_arg(flags, argp, &ext_arg);
			if (likely(!ret2)) {
				min_complete = min(min_complete,
						   ctx->cq_entries);
				ret2 = io_cqring_wait(ctx, min_complete, flags,
						      &ext_arg);
			}
		}

		if (!ret) {
			ret = ret2;

			/*
			 * EBADR indicates that one or more CQEs were dropped.
			 * Once the user has been informed, we can clear the
			 * bit, as they are obviously OK with those drops.
			 */
			if (unlikely(ret2 == -EBADR))
				clear_bit(IO_CHECK_CQ_DROPPED_BIT,
					  &ctx->check_cq);
		}
	}
out:
	if (!(flags & IORING_ENTER_REGISTERED_RING))
		fput(file);
	return ret;
}
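
/*
 * A minimal (non-SQPOLL) userspace sketch of the syscall above, assuming a
 * raw syscall wrapper: submit everything queued in the SQ ring and wait for
 * at least one completion in the same call:
 *
 *	ret = syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
 *		      IORING_ENTER_GETEVENTS, NULL, _NSIG / 8);
 *
 * With IORING_SETUP_SQPOLL the kernel thread performs the submissions, so
 * the application only enters the kernel to wake the thread
 * (IORING_ENTER_SQ_WAKEUP) or to wait for SQ ring space
 * (IORING_ENTER_SQ_WAIT).
 */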

static const struct file_operations io_uring_fops = {
	.release	= io_uring_release,
	.mmap		= io_uring_mmap,
	.get_unmapped_area = io_uring_get_unmapped_area,
#ifndef CONFIG_MMU
	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
#endif
	.poll		= io_uring_poll,
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= io_uring_show_fdinfo,
#endif
};

bool io_is_uring_fops(struct file *file)
{
	return file->f_op == &io_uring_fops;
}

static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
					 struct io_uring_params *p)
{
	struct io_rings *rings;
	size_t size, sq_array_offset;
	void *ptr;

	/* make sure these are sane, as we already accounted them */
	ctx->sq_entries = p->sq_entries;
	ctx->cq_entries = p->cq_entries;

	size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
		rings = io_pages_map(&ctx->ring_pages, &ctx->n_ring_pages, size);
	else
		rings = io_rings_map(ctx, p->cq_off.user_addr, size);

	if (IS_ERR(rings))
		return PTR_ERR(rings);

	ctx->rings = rings;
	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
		ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
	rings->sq_ring_mask = p->sq_entries - 1;
	rings->cq_ring_mask = p->cq_entries - 1;
	rings->sq_ring_entries = p->sq_entries;
	rings->cq_ring_entries = p->cq_entries;

	if (p->flags & IORING_SETUP_SQE128)
		size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries);
	else
		size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
	if (size == SIZE_MAX) {
		io_rings_free(ctx);
		return -EOVERFLOW;
	}

	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
		ptr = io_pages_map(&ctx->sqe_pages, &ctx->n_sqe_pages, size);
	else
		ptr = io_sqes_map(ctx, p->sq_off.user_addr, size);

	if (IS_ERR(ptr)) {
		io_rings_free(ctx);
		return PTR_ERR(ptr);
	}

	ctx->sq_sqes = ptr;
	return 0;
}
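
/*
 * The rings allocated above are exposed to userspace by mmap()ing the ring
 * fd at fixed uapi offsets. A rough sketch of the application side, assuming
 * IORING_FEAT_SINGLE_MMAP (SQ and CQ rings share one mapping) and no
 * IORING_SETUP_SQE128 (which would double the sqe size):
 *
 *	sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
 *	rings = mmap(NULL, sq_sz > cq_sz ? sq_sz : cq_sz,
 *		     PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		     ring_fd, IORING_OFF_SQ_RING);
 *	sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
 *		    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *		    ring_fd, IORING_OFF_SQES);
 *
 * With IORING_SETUP_NO_MMAP the application instead passes its own memory
 * in through p.sq_off.user_addr/p.cq_off.user_addr, as handled above.
 */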

static int io_uring_install_fd(struct file *file)
{
	int fd;

	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return fd;
	fd_install(fd, file);
	return fd;
}

/*
 * Allocate an anonymous fd; this is what constitutes the application-
 * visible backing of an io_uring instance. The application mmaps this
 * fd to gain access to the SQ/CQ ring details.
 */
static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
{
	/* Create a new inode so that the LSM can block the creation.  */
	return anon_inode_create_getfile("[io_uring]", &io_uring_fops, ctx,
					 O_RDWR | O_CLOEXEC, NULL);
}

static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
				  struct io_uring_params __user *params)
{
	struct io_ring_ctx *ctx;
	struct io_uring_task *tctx;
	struct file *file;
	int ret;

	if (!entries)
		return -EINVAL;
	if (entries > IORING_MAX_ENTRIES) {
		if (!(p->flags & IORING_SETUP_CLAMP))
			return -EINVAL;
		entries = IORING_MAX_ENTRIES;
	}

	if ((p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
	    && !(p->flags & IORING_SETUP_NO_MMAP))
		return -EINVAL;

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit. If the application has
	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
	 * of CQ ring entries manually.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	if (p->flags & IORING_SETUP_CQSIZE) {
		/*
		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
		 * to a power-of-two, if it isn't already. Beyond requiring
		 * the CQ ring to be no smaller than the SQ ring, we do NOT
		 * impose any cq vs sq ring sizing.
		 */
		if (!p->cq_entries)
			return -EINVAL;
		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
			if (!(p->flags & IORING_SETUP_CLAMP))
				return -EINVAL;
			p->cq_entries = IORING_MAX_CQ_ENTRIES;
		}
		p->cq_entries = roundup_pow_of_two(p->cq_entries);
		if (p->cq_entries < p->sq_entries)
			return -EINVAL;
	} else {
		p->cq_entries = 2 * p->sq_entries;
	}
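
	/*
	 * Worked example of the sizing above: entries == 100 rounds up to
	 * sq_entries == 128 and, without IORING_SETUP_CQSIZE, yields
	 * cq_entries == 256. With IORING_SETUP_CQSIZE and cq_entries == 100,
	 * the CQ ring is rounded up to 128, and is only rejected if the
	 * rounded value ends up smaller than sq_entries.
	 */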

	ctx = io_ring_ctx_alloc(p);
	if (!ctx)
		return -ENOMEM;

	ctx->clockid = CLOCK_MONOTONIC;
	ctx->clock_offset = 0;

	if ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
	    !(ctx->flags & IORING_SETUP_IOPOLL) &&
	    !(ctx->flags & IORING_SETUP_SQPOLL))
		ctx->task_complete = true;

	if (ctx->task_complete || (ctx->flags & IORING_SETUP_IOPOLL))
		ctx->lockless_cq = true;

	/*
	 * lazy poll_wq activation relies on ->task_complete for synchronisation
	 * purposes, see io_activate_pollwq()
	 */
	if (!ctx->task_complete)
		ctx->poll_activated = true;

	/*
	 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user space
	 * applications don't need to poll for completion events themselves;
	 * they can rely on io_sq_thread to do that polling work, which
	 * reduces cpu usage and uring_lock contention.
	 */
	if (ctx->flags & IORING_SETUP_IOPOLL &&
	    !(ctx->flags & IORING_SETUP_SQPOLL))
		ctx->syscall_iopoll = 1;

	ctx->compat = in_compat_syscall();
	if (!ns_capable_noaudit(&init_user_ns, CAP_IPC_LOCK))
		ctx->user = get_uid(current_user());

	/*
	 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
	 * COOP_TASKRUN is set, then IPIs are never needed by the app.
	 */
	ret = -EINVAL;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		/* IPI related flags don't make sense with SQPOLL */
		if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
				  IORING_SETUP_TASKRUN_FLAG |
				  IORING_SETUP_DEFER_TASKRUN))
			goto err;
		ctx->notify_method = TWA_SIGNAL_NO_IPI;
	} else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
		ctx->notify_method = TWA_SIGNAL_NO_IPI;
	} else {
		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG &&
		    !(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
			goto err;
		ctx->notify_method = TWA_SIGNAL;
	}

	/*
	 * For DEFER_TASKRUN we require the completion task to be the same as
	 * the submission task. This implies that there is only one submitter,
	 * so enforce that.
	 */
	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
	    !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) {
		goto err;
	}
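
	/*
	 * E.g. IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN is a
	 * valid combination here, while DEFER_TASKRUN on its own fails the
	 * check above with -EINVAL.
	 */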

	/*
	 * This is just grabbed for accounting purposes. When a process exits,
	 * the mm is exited and dropped before the files, hence we need to hang
	 * on to this mm purely for the purposes of being able to unaccount
	 * memory (locked/pinned vm). It's not used for anything else.
	 */
	mmgrab(current->mm);
	ctx->mm_account = current->mm;

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_create(ctx, p);
	if (ret)
		goto err;

	ret = io_rsrc_init(ctx);
	if (ret)
		goto err;

	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
		p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
	p->sq_off.resv1 = 0;
	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
		p->sq_off.user_addr = 0;

	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);
	p->cq_off.flags = offsetof(struct io_rings, cq_flags);
	p->cq_off.resv1 = 0;
	if (!(ctx->flags & IORING_SETUP_NO_MMAP))
		p->cq_off.user_addr = 0;

	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
			IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
			IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
			IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
			IORING_FEAT_LINKED_FILE | IORING_FEAT_REG_REG_RING |
			IORING_FEAT_RECVSEND_BUNDLE | IORING_FEAT_MIN_TIMEOUT;

	if (copy_to_user(params, p, sizeof(*p))) {
		ret = -EFAULT;
		goto err;
	}

	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
	    && !(ctx->flags & IORING_SETUP_R_DISABLED))
		WRITE_ONCE(ctx->submitter_task, get_task_struct(current));

	file = io_uring_get_file(ctx);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err;
	}

	ret = __io_uring_add_tctx_node(ctx);
	if (ret)
		goto err_fput;
	tctx = current->io_uring;

	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	if (p->flags & IORING_SETUP_REGISTERED_FD_ONLY)
		ret = io_ring_add_registered_file(tctx, file, 0, IO_RINGFD_REG_MAX);
	else
		ret = io_uring_install_fd(file);
	if (ret < 0)
		goto err_fput;

	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
err_fput:
	fput(file);
	return ret;
}

/*
 * Sets up an io_uring context, and returns the fd. The application asks
 * for a ring size; we return the actual sq/cq ring sizes (among other
 * things) in the params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
			IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
			IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
			IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
			IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN |
			IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY |
			IORING_SETUP_NO_SQARRAY))
		return -EINVAL;

	return io_uring_create(entries, &p, params);
}
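
/*
 * A minimal userspace sketch of the setup path above (assuming a raw
 * syscall wrapper). All reserved fields are rejected if non-zero, so the
 * params struct must be zero-initialized before setting any flags:
 *
 *	struct io_uring_params p = { };
 *
 *	p.flags = IORING_SETUP_CQSIZE | IORING_SETUP_CLAMP;
 *	p.cq_entries = 4096;
 *	ring_fd = syscall(__NR_io_uring_setup, 128, &p);
 *
 * On return, p.sq_entries/p.cq_entries hold the actual (rounded and
 * possibly clamped) ring sizes, and p.sq_off/p.cq_off describe the ring
 * layout for the subsequent mmap() calls.
 */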

static inline bool io_uring_allowed(void)
{
	int disabled = READ_ONCE(sysctl_io_uring_disabled);
	kgid_t io_uring_group;

	if (disabled == 2)
		return false;

	if (disabled == 0 || capable(CAP_SYS_ADMIN))
		return true;

	io_uring_group = make_kgid(&init_user_ns, sysctl_io_uring_group);
	if (!gid_valid(io_uring_group))
		return false;

	return in_group_p(io_uring_group);
}
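
/*
 * The policy above is driven by the kernel.io_uring_disabled sysctl: 0
 * allows io_uring for everyone, 2 disables it entirely, and 1 restricts
 * ring creation to processes with CAP_SYS_ADMIN or membership in the group
 * named by kernel.io_uring_group. For example (gid 1042 is just an
 * illustration):
 *
 *	sysctl -w kernel.io_uring_disabled=1
 *	sysctl -w kernel.io_uring_group=1042
 */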

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	if (!io_uring_allowed())
		return -EPERM;

	return io_uring_setup(entries, params);
}

static int __init io_uring_init(void)
{
	struct kmem_cache_args kmem_args = {
		.useroffset = offsetof(struct io_kiocb, cmd.data),
		.usersize = sizeof_field(struct io_kiocb, cmd.data),
	};

#define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
	BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
} while (0)

#define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename)
#define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \
	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename)
	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
	BUILD_BUG_SQE_ELEM(8,  __u32,  cmd_op);
	BUILD_BUG_SQE_ELEM(12, __u32, __pad1);
	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
	BUILD_BUG_SQE_ELEM(24, __u32,  len);
	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  rename_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  unlink_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  hardlink_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  xattr_flags);
	BUILD_BUG_SQE_ELEM(28, __u32,  msg_ring_flags);
	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
	BUILD_BUG_SQE_ELEM(40, __u16,  buf_group);
	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
	BUILD_BUG_SQE_ELEM(44, __u32,  file_index);
	BUILD_BUG_SQE_ELEM(44, __u16,  addr_len);
	BUILD_BUG_SQE_ELEM(46, __u16,  __pad3[0]);
	BUILD_BUG_SQE_ELEM(48, __u64,  addr3);
	BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
	BUILD_BUG_SQE_ELEM(56, __u64,  __pad2);

	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
		     sizeof(struct io_uring_rsrc_update));
	BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
		     sizeof(struct io_uring_rsrc_update2));

	/* ->buf_index is u16 */
	BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0);
	BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) !=
		     offsetof(struct io_uring_buf_ring, tail));

	/* should fit into one byte */
	BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
	BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
	BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);

	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof_field(struct io_kiocb, flags));

	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));

	/* top 8 bits are for internal use */
	BUILD_BUG_ON((IORING_URING_CMD_MASK & 0xff000000) != 0);

	io_uring_optable_init();

	/*
	 * Allow user copy in the per-command field, which starts after the
	 * file field in io_kiocb and runs up to the opcode field. The
	 * openat2 handling requires copying user memory into the io_kiocb
	 * object in that range, and HARDENED_USERCOPY will complain if we
	 * haven't correctly annotated this range.
	 */
	req_cachep = kmem_cache_create("io_kiocb", sizeof(struct io_kiocb), &kmem_args,
				SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT |
				SLAB_TYPESAFE_BY_RCU);
	io_buf_cachep = KMEM_CACHE(io_buffer,
					  SLAB_HWCACHE_ALIGN | SLAB_PANIC | SLAB_ACCOUNT);

	iou_wq = alloc_workqueue("iou_exit", WQ_UNBOUND, 64);

#ifdef CONFIG_SYSCTL
	register_sysctl_init("kernel", kernel_io_uring_disabled_table);
#endif

	return 0;
}
__initcall(io_uring_init);