1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Shared application/kernel submission and completion ring pairs, for
4  * supporting fast/efficient IO.
5  *
6  * A note on the read/write ordering memory barriers that are matched between
7  * the application and kernel side.
8  *
9  * After the application reads the CQ ring tail, it must use an
10  * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11  * before writing the tail (using smp_load_acquire to read the tail will
12  * do). It also needs a smp_mb() before updating CQ head (ordering the
13  * entry load(s) with the head store), pairing with an implicit barrier
14  * through a control-dependency in io_get_cqe (smp_store_release to
15  * store head will do). Failure to do so could lead to reading invalid
16  * CQ entries.
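 *
 * Purely as an illustrative sketch of the userspace side of the CQ reaping
 * described above (the field and helper names are liburing-style assumptions,
 * not a definitive API): read the tail with an acquire load, consume the
 * entries, then publish the new head with a release store:
 *
 *	unsigned head = *cq->khead;
 *	unsigned tail = smp_load_acquire(cq->ktail);
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cq->cqes[head & *cq->kring_mask];
 *		consume_cqe(cqe);
 *		head++;
 *	}
 *	smp_store_release(cq->khead, head);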
17  *
18  * Likewise, the application must use an appropriate smp_wmb() before
19  * writing the SQ tail (ordering SQ entry stores with the tail store),
20  * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21  * to store the tail will do). And it needs a barrier ordering the SQ
22  * head load before writing new SQ entries (smp_load_acquire to read
23  * head will do).
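 *
 * The corresponding submission-side sketch, with the same caveat that the
 * names are assumptions: load the head with acquire semantics to see how much
 * room is free, fill the SQE, then publish the new tail with a release store:
 *
 *	unsigned head = smp_load_acquire(sq->khead);
 *	unsigned tail = *sq->ktail;
 *	if (tail - head < *sq->kring_entries) {
 *		sq->sqes[tail & *sq->kring_mask] = *sqe;
 *		smp_store_release(sq->ktail, tail + 1);
 *	}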
24  *
25  * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26  * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27  * updating the SQ tail; a full memory barrier smp_mb() is needed
28  * between.
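 *
 * A sketch of that SQPOLL wakeup check (IORING_SQ_NEED_WAKEUP and
 * IORING_ENTER_SQ_WAKEUP are the real UAPI flags; the surrounding code is
 * illustrative only):
 *
 *	smp_store_release(sq->ktail, tail + 1);
 *	smp_mb();
 *	if (READ_ONCE(*sq->kflags) & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP, NULL);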
29  *
30  * Also see the examples in the liburing library:
31  *
32  *	git://git.kernel.dk/liburing
33  *
34  * io_uring also uses READ/WRITE_ONCE() for _any_ store or load performed on
35  * data shared between the kernel and application. This is done both for
36  * ordering purposes and to ensure that once a value is loaded from data
37  * that the application could potentially modify, it remains stable.
38  *
39  * Copyright (C) 2018-2019 Jens Axboe
40  * Copyright (c) 2018-2019 Christoph Hellwig
41  */
42 #include <linux/kernel.h>
43 #include <linux/init.h>
44 #include <linux/errno.h>
45 #include <linux/syscalls.h>
46 #include <net/compat.h>
47 #include <linux/refcount.h>
48 #include <linux/uio.h>
49 #include <linux/bits.h>
50 
51 #include <linux/sched/signal.h>
52 #include <linux/fs.h>
53 #include <linux/file.h>
54 #include <linux/fdtable.h>
55 #include <linux/mm.h>
56 #include <linux/mman.h>
57 #include <linux/percpu.h>
58 #include <linux/slab.h>
59 #include <linux/bvec.h>
60 #include <linux/net.h>
61 #include <net/sock.h>
62 #include <net/af_unix.h>
63 #include <net/scm.h>
64 #include <linux/anon_inodes.h>
65 #include <linux/sched/mm.h>
66 #include <linux/uaccess.h>
67 #include <linux/nospec.h>
68 #include <linux/highmem.h>
69 #include <linux/fsnotify.h>
70 #include <linux/fadvise.h>
71 #include <linux/task_work.h>
72 #include <linux/io_uring.h>
73 #include <linux/audit.h>
74 #include <linux/security.h>
75 #include <asm/shmparam.h>
76 
77 #define CREATE_TRACE_POINTS
78 #include <trace/events/io_uring.h>
79 
80 #include <uapi/linux/io_uring.h>
81 
82 #include "io-wq.h"
83 
84 #include "io_uring.h"
85 #include "opdef.h"
86 #include "refs.h"
87 #include "tctx.h"
88 #include "sqpoll.h"
89 #include "fdinfo.h"
90 #include "kbuf.h"
91 #include "rsrc.h"
92 #include "cancel.h"
93 #include "net.h"
94 #include "notif.h"
95 
96 #include "timeout.h"
97 #include "poll.h"
98 #include "alloc_cache.h"
99 
100 #define IORING_MAX_ENTRIES	32768
101 #define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
102 
103 #define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
104 				 IORING_REGISTER_LAST + IORING_OP_LAST)
105 
106 #define SQE_COMMON_FLAGS (IOSQE_FIXED_FILE | IOSQE_IO_LINK | \
107 			  IOSQE_IO_HARDLINK | IOSQE_ASYNC)
108 
109 #define SQE_VALID_FLAGS	(SQE_COMMON_FLAGS | IOSQE_BUFFER_SELECT | \
110 			IOSQE_IO_DRAIN | IOSQE_CQE_SKIP_SUCCESS)
111 
112 #define IO_REQ_CLEAN_FLAGS (REQ_F_BUFFER_SELECTED | REQ_F_NEED_CLEANUP | \
113 				REQ_F_POLLED | REQ_F_INFLIGHT | REQ_F_CREDS | \
114 				REQ_F_ASYNC_DATA)
115 
116 #define IO_REQ_CLEAN_SLOW_FLAGS (REQ_F_REFCOUNT | REQ_F_LINK | REQ_F_HARDLINK |\
117 				 IO_REQ_CLEAN_FLAGS)
118 
119 #define IO_TCTX_REFS_CACHE_NR	(1U << 10)
120 
121 #define IO_COMPL_BATCH			32
122 #define IO_REQ_ALLOC_BATCH		8
123 
124 enum {
125 	IO_CHECK_CQ_OVERFLOW_BIT,
126 	IO_CHECK_CQ_DROPPED_BIT,
127 };
128 
129 enum {
130 	IO_EVENTFD_OP_SIGNAL_BIT,
131 	IO_EVENTFD_OP_FREE_BIT,
132 };
133 
134 struct io_defer_entry {
135 	struct list_head	list;
136 	struct io_kiocb		*req;
137 	u32			seq;
138 };
139 
140 /* requests with any of those set should undergo io_disarm_next() */
141 #define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL)
142 #define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK)
143 
144 static bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
145 					 struct task_struct *task,
146 					 bool cancel_all);
147 
148 static void io_dismantle_req(struct io_kiocb *req);
149 static void io_clean_op(struct io_kiocb *req);
150 static void io_queue_sqe(struct io_kiocb *req);
151 static void io_move_task_work_from_local(struct io_ring_ctx *ctx);
152 static void __io_submit_flush_completions(struct io_ring_ctx *ctx);
153 
154 static struct kmem_cache *req_cachep;
155 
156 struct sock *io_uring_get_socket(struct file *file)
157 {
158 #if defined(CONFIG_UNIX)
159 	if (io_is_uring_fops(file)) {
160 		struct io_ring_ctx *ctx = file->private_data;
161 
162 		return ctx->ring_sock->sk;
163 	}
164 #endif
165 	return NULL;
166 }
167 EXPORT_SYMBOL(io_uring_get_socket);
168 
169 static inline void io_submit_flush_completions(struct io_ring_ctx *ctx)
170 {
171 	if (!wq_list_empty(&ctx->submit_state.compl_reqs))
172 		__io_submit_flush_completions(ctx);
173 }
174 
175 static inline unsigned int __io_cqring_events(struct io_ring_ctx *ctx)
176 {
177 	return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head);
178 }
179 
180 static inline unsigned int __io_cqring_events_user(struct io_ring_ctx *ctx)
181 {
182 	return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head);
183 }
184 
185 static bool io_match_linked(struct io_kiocb *head)
186 {
187 	struct io_kiocb *req;
188 
189 	io_for_each_link(req, head) {
190 		if (req->flags & REQ_F_INFLIGHT)
191 			return true;
192 	}
193 	return false;
194 }
195 
196 /*
197  * As io_match_task() but protected against racing with linked timeouts.
198  * User must not hold timeout_lock.
199  */
200 bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
201 			bool cancel_all)
202 {
203 	bool matched;
204 
205 	if (task && head->task != task)
206 		return false;
207 	if (cancel_all)
208 		return true;
209 
210 	if (head->flags & REQ_F_LINK_TIMEOUT) {
211 		struct io_ring_ctx *ctx = head->ctx;
212 
213 		/* protect against races with linked timeouts */
214 		spin_lock_irq(&ctx->timeout_lock);
215 		matched = io_match_linked(head);
216 		spin_unlock_irq(&ctx->timeout_lock);
217 	} else {
218 		matched = io_match_linked(head);
219 	}
220 	return matched;
221 }
222 
223 static inline void req_fail_link_node(struct io_kiocb *req, int res)
224 {
225 	req_set_fail(req);
226 	io_req_set_res(req, res, 0);
227 }
228 
229 static inline void io_req_add_to_cache(struct io_kiocb *req, struct io_ring_ctx *ctx)
230 {
231 	wq_stack_add_head(&req->comp_list, &ctx->submit_state.free_list);
232 }
233 
234 static __cold void io_ring_ctx_ref_free(struct percpu_ref *ref)
235 {
236 	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
237 
238 	complete(&ctx->ref_comp);
239 }
240 
241 static __cold void io_fallback_req_func(struct work_struct *work)
242 {
243 	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
244 						fallback_work.work);
245 	struct llist_node *node = llist_del_all(&ctx->fallback_llist);
246 	struct io_kiocb *req, *tmp;
247 	bool locked = false;
248 
249 	percpu_ref_get(&ctx->refs);
250 	llist_for_each_entry_safe(req, tmp, node, io_task_work.node)
251 		req->io_task_work.func(req, &locked);
252 
253 	if (locked) {
254 		io_submit_flush_completions(ctx);
255 		mutex_unlock(&ctx->uring_lock);
256 	}
257 	percpu_ref_put(&ctx->refs);
258 }
259 
260 static int io_alloc_hash_table(struct io_hash_table *table, unsigned bits)
261 {
262 	unsigned hash_buckets = 1U << bits;
263 	size_t hash_size = hash_buckets * sizeof(table->hbs[0]);
264 
265 	table->hbs = kmalloc(hash_size, GFP_KERNEL);
266 	if (!table->hbs)
267 		return -ENOMEM;
268 
269 	table->hash_bits = bits;
270 	init_hash_table(table, hash_buckets);
271 	return 0;
272 }
273 
274 static __cold struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
275 {
276 	struct io_ring_ctx *ctx;
277 	int hash_bits;
278 
279 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
280 	if (!ctx)
281 		return NULL;
282 
283 	xa_init(&ctx->io_bl_xa);
284 
285 	/*
286 	 * Use 5 bits less than the max cq entries; that should give us around
287 	 * 32 entries per hash list if totally full and uniformly spread, but
288 	 * don't keep too many buckets, so as not to overconsume memory.
289 	 */
290 	hash_bits = ilog2(p->cq_entries) - 5;
291 	hash_bits = clamp(hash_bits, 1, 8);
292 	if (io_alloc_hash_table(&ctx->cancel_table, hash_bits))
293 		goto err;
294 	if (io_alloc_hash_table(&ctx->cancel_table_locked, hash_bits))
295 		goto err;
296 
297 	ctx->dummy_ubuf = kzalloc(sizeof(*ctx->dummy_ubuf), GFP_KERNEL);
298 	if (!ctx->dummy_ubuf)
299 		goto err;
300 	/* set an invalid range, so io_import_fixed() fails when it meets it */
301 	ctx->dummy_ubuf->ubuf = -1UL;
302 
303 	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
304 			    0, GFP_KERNEL))
305 		goto err;
306 
307 	ctx->flags = p->flags;
308 	init_waitqueue_head(&ctx->sqo_sq_wait);
309 	INIT_LIST_HEAD(&ctx->sqd_list);
310 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
311 	INIT_LIST_HEAD(&ctx->io_buffers_cache);
312 	io_alloc_cache_init(&ctx->apoll_cache);
313 	io_alloc_cache_init(&ctx->netmsg_cache);
314 	init_completion(&ctx->ref_comp);
315 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
316 	mutex_init(&ctx->uring_lock);
317 	init_waitqueue_head(&ctx->cq_wait);
318 	spin_lock_init(&ctx->completion_lock);
319 	spin_lock_init(&ctx->timeout_lock);
320 	INIT_WQ_LIST(&ctx->iopoll_list);
321 	INIT_LIST_HEAD(&ctx->io_buffers_pages);
322 	INIT_LIST_HEAD(&ctx->io_buffers_comp);
323 	INIT_LIST_HEAD(&ctx->defer_list);
324 	INIT_LIST_HEAD(&ctx->timeout_list);
325 	INIT_LIST_HEAD(&ctx->ltimeout_list);
326 	spin_lock_init(&ctx->rsrc_ref_lock);
327 	INIT_LIST_HEAD(&ctx->rsrc_ref_list);
328 	INIT_DELAYED_WORK(&ctx->rsrc_put_work, io_rsrc_put_work);
329 	init_llist_head(&ctx->rsrc_put_llist);
330 	init_llist_head(&ctx->work_llist);
331 	INIT_LIST_HEAD(&ctx->tctx_list);
332 	ctx->submit_state.free_list.next = NULL;
333 	INIT_WQ_LIST(&ctx->locked_free_list);
334 	INIT_DELAYED_WORK(&ctx->fallback_work, io_fallback_req_func);
335 	INIT_WQ_LIST(&ctx->submit_state.compl_reqs);
336 	return ctx;
337 err:
338 	kfree(ctx->dummy_ubuf);
339 	kfree(ctx->cancel_table.hbs);
340 	kfree(ctx->cancel_table_locked.hbs);
341 	kfree(ctx->io_bl);
342 	xa_destroy(&ctx->io_bl_xa);
343 	kfree(ctx);
344 	return NULL;
345 }
346 
347 static void io_account_cq_overflow(struct io_ring_ctx *ctx)
348 {
349 	struct io_rings *r = ctx->rings;
350 
351 	WRITE_ONCE(r->cq_overflow, READ_ONCE(r->cq_overflow) + 1);
352 	ctx->cq_extra--;
353 }
354 
355 static bool req_need_defer(struct io_kiocb *req, u32 seq)
356 {
357 	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
358 		struct io_ring_ctx *ctx = req->ctx;
359 
360 		return seq + READ_ONCE(ctx->cq_extra) != ctx->cached_cq_tail;
361 	}
362 
363 	return false;
364 }
365 
366 static inline void io_req_track_inflight(struct io_kiocb *req)
367 {
368 	if (!(req->flags & REQ_F_INFLIGHT)) {
369 		req->flags |= REQ_F_INFLIGHT;
370 		atomic_inc(&req->task->io_uring->inflight_tracked);
371 	}
372 }
373 
374 static struct io_kiocb *__io_prep_linked_timeout(struct io_kiocb *req)
375 {
376 	if (WARN_ON_ONCE(!req->link))
377 		return NULL;
378 
379 	req->flags &= ~REQ_F_ARM_LTIMEOUT;
380 	req->flags |= REQ_F_LINK_TIMEOUT;
381 
382 	/* linked timeouts should have two refs once prep'ed */
383 	io_req_set_refcount(req);
384 	__io_req_set_refcount(req->link, 2);
385 	return req->link;
386 }
387 
388 static inline struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
389 {
390 	if (likely(!(req->flags & REQ_F_ARM_LTIMEOUT)))
391 		return NULL;
392 	return __io_prep_linked_timeout(req);
393 }
394 
395 static noinline void __io_arm_ltimeout(struct io_kiocb *req)
396 {
397 	io_queue_linked_timeout(__io_prep_linked_timeout(req));
398 }
399 
400 static inline void io_arm_ltimeout(struct io_kiocb *req)
401 {
402 	if (unlikely(req->flags & REQ_F_ARM_LTIMEOUT))
403 		__io_arm_ltimeout(req);
404 }
405 
406 static void io_prep_async_work(struct io_kiocb *req)
407 {
408 	const struct io_op_def *def = &io_op_defs[req->opcode];
409 	struct io_ring_ctx *ctx = req->ctx;
410 
411 	if (!(req->flags & REQ_F_CREDS)) {
412 		req->flags |= REQ_F_CREDS;
413 		req->creds = get_current_cred();
414 	}
415 
416 	req->work.list.next = NULL;
417 	req->work.flags = 0;
418 	req->work.cancel_seq = atomic_read(&ctx->cancel_seq);
419 	if (req->flags & REQ_F_FORCE_ASYNC)
420 		req->work.flags |= IO_WQ_WORK_CONCURRENT;
421 
422 	if (req->file && !io_req_ffs_set(req))
423 		req->flags |= io_file_get_flags(req->file) << REQ_F_SUPPORT_NOWAIT_BIT;
424 
425 	if (req->flags & REQ_F_ISREG) {
426 		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
427 			io_wq_hash_work(&req->work, file_inode(req->file));
428 	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
429 		if (def->unbound_nonreg_file)
430 			req->work.flags |= IO_WQ_WORK_UNBOUND;
431 	}
432 }
433 
434 static void io_prep_async_link(struct io_kiocb *req)
435 {
436 	struct io_kiocb *cur;
437 
438 	if (req->flags & REQ_F_LINK_TIMEOUT) {
439 		struct io_ring_ctx *ctx = req->ctx;
440 
441 		spin_lock_irq(&ctx->timeout_lock);
442 		io_for_each_link(cur, req)
443 			io_prep_async_work(cur);
444 		spin_unlock_irq(&ctx->timeout_lock);
445 	} else {
446 		io_for_each_link(cur, req)
447 			io_prep_async_work(cur);
448 	}
449 }
450 
451 void io_queue_iowq(struct io_kiocb *req, bool *dont_use)
452 {
453 	struct io_kiocb *link = io_prep_linked_timeout(req);
454 	struct io_uring_task *tctx = req->task->io_uring;
455 
456 	BUG_ON(!tctx);
457 	BUG_ON(!tctx->io_wq);
458 
459 	/* init ->work of the whole link before punting */
460 	io_prep_async_link(req);
461 
462 	/*
463 	 * Not expected to happen, but if we do have a bug where this _can_
464 	 * happen, catch it here and ensure the request is marked as
465 	 * canceled. That will make io-wq go through the usual work cancel
466 	 * procedure rather than attempt to run this request (or create a new
467 	 * worker for it).
468 	 */
469 	if (WARN_ON_ONCE(!same_thread_group(req->task, current)))
470 		req->work.flags |= IO_WQ_WORK_CANCEL;
471 
472 	trace_io_uring_queue_async_work(req, io_wq_is_hashed(&req->work));
473 	io_wq_enqueue(tctx->io_wq, &req->work);
474 	if (link)
475 		io_queue_linked_timeout(link);
476 }
477 
478 static __cold void io_queue_deferred(struct io_ring_ctx *ctx)
479 {
480 	while (!list_empty(&ctx->defer_list)) {
481 		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
482 						struct io_defer_entry, list);
483 
484 		if (req_need_defer(de->req, de->seq))
485 			break;
486 		list_del_init(&de->list);
487 		io_req_task_queue(de->req);
488 		kfree(de);
489 	}
490 }
491 
492 
493 static void io_eventfd_ops(struct rcu_head *rcu)
494 {
495 	struct io_ev_fd *ev_fd = container_of(rcu, struct io_ev_fd, rcu);
496 	int ops = atomic_xchg(&ev_fd->ops, 0);
497 
498 	if (ops & BIT(IO_EVENTFD_OP_SIGNAL_BIT))
499 		eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
500 
501 	/* IO_EVENTFD_OP_FREE_BIT may not be set here depending on callback
502 	 * ordering in a race, but if references are 0 we know we have to free
503 	 * it regardless.
504 	 */
505 	if (atomic_dec_and_test(&ev_fd->refs)) {
506 		eventfd_ctx_put(ev_fd->cq_ev_fd);
507 		kfree(ev_fd);
508 	}
509 }
510 
511 static void io_eventfd_signal(struct io_ring_ctx *ctx)
512 {
513 	struct io_ev_fd *ev_fd = NULL;
514 
515 	rcu_read_lock();
516 	/*
517 	 * rcu_dereference ctx->io_ev_fd once and use it both for the check
518 	 * and for eventfd_signal
519 	 */
520 	ev_fd = rcu_dereference(ctx->io_ev_fd);
521 
522 	/*
523 	 * Check again if ev_fd exists in case an io_eventfd_unregister call
524 	 * completed between the NULL check of ctx->io_ev_fd at the start of
525 	 * the function and rcu_read_lock.
526 	 */
527 	if (unlikely(!ev_fd))
528 		goto out;
529 	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
530 		goto out;
531 	if (ev_fd->eventfd_async && !io_wq_current_is_worker())
532 		goto out;
533 
534 	if (likely(eventfd_signal_allowed())) {
535 		eventfd_signal_mask(ev_fd->cq_ev_fd, 1, EPOLL_URING_WAKE);
536 	} else {
537 		atomic_inc(&ev_fd->refs);
538 		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_SIGNAL_BIT), &ev_fd->ops))
539 			call_rcu_hurry(&ev_fd->rcu, io_eventfd_ops);
540 		else
541 			atomic_dec(&ev_fd->refs);
542 	}
543 
544 out:
545 	rcu_read_unlock();
546 }
547 
548 static void io_eventfd_flush_signal(struct io_ring_ctx *ctx)
549 {
550 	bool skip;
551 
552 	spin_lock(&ctx->completion_lock);
553 
554 	/*
555 	 * Eventfd should only get triggered when at least one event has been
556 	 * posted. Some applications rely on the eventfd notification count
557 	 * only changing IFF a new CQE has been added to the CQ ring. There's
558 	 * no dependency on a 1:1 relationship between how many times this
559 	 * function is called (and hence the eventfd count) and the number of CQEs
560 	 * posted to the CQ ring.
561 	 */
562 	skip = ctx->cached_cq_tail == ctx->evfd_last_cq_tail;
563 	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
564 	spin_unlock(&ctx->completion_lock);
565 	if (skip)
566 		return;
567 
568 	io_eventfd_signal(ctx);
569 }
570 
571 void __io_commit_cqring_flush(struct io_ring_ctx *ctx)
572 {
573 	if (ctx->off_timeout_used || ctx->drain_active) {
574 		spin_lock(&ctx->completion_lock);
575 		if (ctx->off_timeout_used)
576 			io_flush_timeouts(ctx);
577 		if (ctx->drain_active)
578 			io_queue_deferred(ctx);
579 		spin_unlock(&ctx->completion_lock);
580 	}
581 	if (ctx->has_evfd)
582 		io_eventfd_flush_signal(ctx);
583 }
584 
585 static inline void io_cqring_ev_posted(struct io_ring_ctx *ctx)
586 {
587 	io_commit_cqring_flush(ctx);
588 	io_cqring_wake(ctx);
589 }
590 
591 static inline void __io_cq_unlock_post(struct io_ring_ctx *ctx)
592 	__releases(ctx->completion_lock)
593 {
594 	io_commit_cqring(ctx);
595 	spin_unlock(&ctx->completion_lock);
596 	io_cqring_ev_posted(ctx);
597 }
598 
599 void io_cq_unlock_post(struct io_ring_ctx *ctx)
600 {
601 	__io_cq_unlock_post(ctx);
602 }
603 
604 /* Returns true if there are no backlogged entries after the flush */
605 static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
606 {
607 	bool all_flushed;
608 	size_t cqe_size = sizeof(struct io_uring_cqe);
609 
610 	if (!force && __io_cqring_events(ctx) == ctx->cq_entries)
611 		return false;
612 
613 	if (ctx->flags & IORING_SETUP_CQE32)
614 		cqe_size <<= 1;
615 
616 	io_cq_lock(ctx);
617 	while (!list_empty(&ctx->cq_overflow_list)) {
618 		struct io_uring_cqe *cqe = io_get_cqe_overflow(ctx, true);
619 		struct io_overflow_cqe *ocqe;
620 
621 		if (!cqe && !force)
622 			break;
623 		ocqe = list_first_entry(&ctx->cq_overflow_list,
624 					struct io_overflow_cqe, list);
625 		if (cqe)
626 			memcpy(cqe, &ocqe->cqe, cqe_size);
627 		else
628 			io_account_cq_overflow(ctx);
629 
630 		list_del(&ocqe->list);
631 		kfree(ocqe);
632 	}
633 
634 	all_flushed = list_empty(&ctx->cq_overflow_list);
635 	if (all_flushed) {
636 		clear_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
637 		atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
638 	}
639 
640 	io_cq_unlock_post(ctx);
641 	return all_flushed;
642 }
643 
644 static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx)
645 {
646 	bool ret = true;
647 
648 	if (test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq)) {
649 		/* iopoll syncs against uring_lock, not completion_lock */
650 		if (ctx->flags & IORING_SETUP_IOPOLL)
651 			mutex_lock(&ctx->uring_lock);
652 		ret = __io_cqring_overflow_flush(ctx, false);
653 		if (ctx->flags & IORING_SETUP_IOPOLL)
654 			mutex_unlock(&ctx->uring_lock);
655 	}
656 
657 	return ret;
658 }
659 
660 void __io_put_task(struct task_struct *task, int nr)
661 {
662 	struct io_uring_task *tctx = task->io_uring;
663 
664 	percpu_counter_sub(&tctx->inflight, nr);
665 	if (unlikely(atomic_read(&tctx->in_idle)))
666 		wake_up(&tctx->wait);
667 	put_task_struct_many(task, nr);
668 }
669 
670 void io_task_refs_refill(struct io_uring_task *tctx)
671 {
672 	unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
673 
674 	percpu_counter_add(&tctx->inflight, refill);
675 	refcount_add(refill, &current->usage);
676 	tctx->cached_refs += refill;
677 }
678 
679 static __cold void io_uring_drop_tctx_refs(struct task_struct *task)
680 {
681 	struct io_uring_task *tctx = task->io_uring;
682 	unsigned int refs = tctx->cached_refs;
683 
684 	if (refs) {
685 		tctx->cached_refs = 0;
686 		percpu_counter_sub(&tctx->inflight, refs);
687 		put_task_struct_many(task, refs);
688 	}
689 }
690 
691 static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
692 				     s32 res, u32 cflags, u64 extra1, u64 extra2)
693 {
694 	struct io_overflow_cqe *ocqe;
695 	size_t ocq_size = sizeof(struct io_overflow_cqe);
696 	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
697 
698 	if (is_cqe32)
699 		ocq_size += sizeof(struct io_uring_cqe);
700 
701 	ocqe = kmalloc(ocq_size, GFP_ATOMIC | __GFP_ACCOUNT);
702 	trace_io_uring_cqe_overflow(ctx, user_data, res, cflags, ocqe);
703 	if (!ocqe) {
704 		/*
705 		 * If we're in ring overflow flush mode, or in task cancel mode,
706 		 * or cannot allocate an overflow entry, then we need to drop it
707 		 * on the floor.
708 		 */
709 		io_account_cq_overflow(ctx);
710 		set_bit(IO_CHECK_CQ_DROPPED_BIT, &ctx->check_cq);
711 		return false;
712 	}
713 	if (list_empty(&ctx->cq_overflow_list)) {
714 		set_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq);
715 		atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags);
716 
717 	}
718 	ocqe->cqe.user_data = user_data;
719 	ocqe->cqe.res = res;
720 	ocqe->cqe.flags = cflags;
721 	if (is_cqe32) {
722 		ocqe->cqe.big_cqe[0] = extra1;
723 		ocqe->cqe.big_cqe[1] = extra2;
724 	}
725 	list_add_tail(&ocqe->list, &ctx->cq_overflow_list);
726 	return true;
727 }
728 
729 bool io_req_cqe_overflow(struct io_kiocb *req)
730 {
731 	if (!(req->flags & REQ_F_CQE32_INIT)) {
732 		req->extra1 = 0;
733 		req->extra2 = 0;
734 	}
735 	return io_cqring_event_overflow(req->ctx, req->cqe.user_data,
736 					req->cqe.res, req->cqe.flags,
737 					req->extra1, req->extra2);
738 }
739 
740 /*
741  * writes to the cq entry need to come after reading head; the
742  * control dependency is enough as we're using WRITE_ONCE to
743  * fill the cq entry
744  */
745 struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow)
746 {
747 	struct io_rings *rings = ctx->rings;
748 	unsigned int off = ctx->cached_cq_tail & (ctx->cq_entries - 1);
749 	unsigned int free, queued, len;
750 
751 	/*
752 	 * Posting into the CQ when there are pending overflowed CQEs may break
753 	 * ordering guarantees, which will affect links, F_MORE users and more.
754 	 * Force overflow the completion.
755 	 */
756 	if (!overflow && (ctx->check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT)))
757 		return NULL;
758 
759 	/* userspace may cheat by modifying the tail; be safe and do min */
760 	queued = min(__io_cqring_events(ctx), ctx->cq_entries);
761 	free = ctx->cq_entries - queued;
762 	/* we need a contiguous range, limit based on the current array offset */
763 	len = min(free, ctx->cq_entries - off);
764 	if (!len)
765 		return NULL;
766 
767 	if (ctx->flags & IORING_SETUP_CQE32) {
768 		off <<= 1;
769 		len <<= 1;
770 	}
771 
772 	ctx->cqe_cached = &rings->cqes[off];
773 	ctx->cqe_sentinel = ctx->cqe_cached + len;
774 
775 	ctx->cached_cq_tail++;
776 	ctx->cqe_cached++;
777 	if (ctx->flags & IORING_SETUP_CQE32)
778 		ctx->cqe_cached++;
779 	return &rings->cqes[off];
780 }
781 
782 bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
783 		     bool allow_overflow)
784 {
785 	struct io_uring_cqe *cqe;
786 
787 	ctx->cq_extra++;
788 
789 	/*
790 	 * If we can't get a cq entry, userspace overflowed the
791 	 * submission (by quite a lot). Increment the overflow count in
792 	 * the ring.
793 	 */
794 	cqe = io_get_cqe(ctx);
795 	if (likely(cqe)) {
796 		trace_io_uring_complete(ctx, NULL, user_data, res, cflags, 0, 0);
797 
798 		WRITE_ONCE(cqe->user_data, user_data);
799 		WRITE_ONCE(cqe->res, res);
800 		WRITE_ONCE(cqe->flags, cflags);
801 
802 		if (ctx->flags & IORING_SETUP_CQE32) {
803 			WRITE_ONCE(cqe->big_cqe[0], 0);
804 			WRITE_ONCE(cqe->big_cqe[1], 0);
805 		}
806 		return true;
807 	}
808 
809 	if (allow_overflow)
810 		return io_cqring_event_overflow(ctx, user_data, res, cflags, 0, 0);
811 
812 	return false;
813 }
814 
815 bool io_post_aux_cqe(struct io_ring_ctx *ctx,
816 		     u64 user_data, s32 res, u32 cflags,
817 		     bool allow_overflow)
818 {
819 	bool filled;
820 
821 	io_cq_lock(ctx);
822 	filled = io_fill_cqe_aux(ctx, user_data, res, cflags, allow_overflow);
823 	io_cq_unlock_post(ctx);
824 	return filled;
825 }
826 
827 void io_req_complete_post(struct io_kiocb *req)
828 {
829 	struct io_ring_ctx *ctx = req->ctx;
830 
831 	io_cq_lock(ctx);
832 	if (!(req->flags & REQ_F_CQE_SKIP))
833 		__io_fill_cqe_req(ctx, req);
834 
835 	/*
836 	 * If we're the last reference to this request, add to our locked
837 	 * free_list cache.
838 	 */
839 	if (req_ref_put_and_test(req)) {
840 		if (req->flags & IO_REQ_LINK_FLAGS) {
841 			if (req->flags & IO_DISARM_MASK)
842 				io_disarm_next(req);
843 			if (req->link) {
844 				io_req_task_queue(req->link);
845 				req->link = NULL;
846 			}
847 		}
848 		io_req_put_rsrc(req);
849 		/*
850 		 * Selected buffer deallocation in io_clean_op() assumes that
851 		 * we don't hold ->completion_lock. Clean them here to avoid
852 		 * deadlocks.
853 		 */
854 		io_put_kbuf_comp(req);
855 		io_dismantle_req(req);
856 		io_put_task(req->task, 1);
857 		wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
858 		ctx->locked_free_nr++;
859 	}
860 	io_cq_unlock_post(ctx);
861 }
862 
863 inline void __io_req_complete(struct io_kiocb *req, unsigned issue_flags)
864 {
865 	io_req_complete_post(req);
866 }
867 
868 void io_req_complete_failed(struct io_kiocb *req, s32 res)
869 	__must_hold(&ctx->uring_lock)
870 {
871 	const struct io_op_def *def = &io_op_defs[req->opcode];
872 
873 	lockdep_assert_held(&req->ctx->uring_lock);
874 
875 	req_set_fail(req);
876 	io_req_set_res(req, res, io_put_kbuf(req, IO_URING_F_UNLOCKED));
877 	if (def->fail)
878 		def->fail(req);
879 	io_req_complete_post(req);
880 }
881 
882 /*
883  * Don't initialise the fields below on every allocation, but do that in
884  * advance and keep them valid across allocations.
885  */
886 static void io_preinit_req(struct io_kiocb *req, struct io_ring_ctx *ctx)
887 {
888 	req->ctx = ctx;
889 	req->link = NULL;
890 	req->async_data = NULL;
891 	/* not necessary, but safer to zero */
892 	req->cqe.res = 0;
893 }
894 
895 static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
896 					struct io_submit_state *state)
897 {
898 	spin_lock(&ctx->completion_lock);
899 	wq_list_splice(&ctx->locked_free_list, &state->free_list);
900 	ctx->locked_free_nr = 0;
901 	spin_unlock(&ctx->completion_lock);
902 }
903 
904 /*
905  * A request might get retired back into the request caches even before opcode
906  * handlers and io_issue_sqe() are done with it, e.g. inline completion path.
907  * Because of that, io_alloc_req() should be called only under ->uring_lock
908  * and with extra caution not to get a request that is still being worked on.
909  */
910 __cold bool __io_alloc_req_refill(struct io_ring_ctx *ctx)
911 	__must_hold(&ctx->uring_lock)
912 {
913 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
914 	void *reqs[IO_REQ_ALLOC_BATCH];
915 	int ret, i;
916 
917 	/*
918 	 * If we have more than a batch's worth of requests in our IRQ side
919 	 * locked cache, grab the lock and move them over to our submission
920 	 * side cache.
921 	 */
922 	if (data_race(ctx->locked_free_nr) > IO_COMPL_BATCH) {
923 		io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
924 		if (!io_req_cache_empty(ctx))
925 			return true;
926 	}
927 
928 	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
929 
930 	/*
931 	 * Bulk alloc is all-or-nothing. If we fail to get a batch,
932 	 * retry single alloc to be on the safe side.
933 	 */
934 	if (unlikely(ret <= 0)) {
935 		reqs[0] = kmem_cache_alloc(req_cachep, gfp);
936 		if (!reqs[0])
937 			return false;
938 		ret = 1;
939 	}
940 
941 	percpu_ref_get_many(&ctx->refs, ret);
942 	for (i = 0; i < ret; i++) {
943 		struct io_kiocb *req = reqs[i];
944 
945 		io_preinit_req(req, ctx);
946 		io_req_add_to_cache(req, ctx);
947 	}
948 	return true;
949 }
950 
951 static inline void io_dismantle_req(struct io_kiocb *req)
952 {
953 	unsigned int flags = req->flags;
954 
955 	if (unlikely(flags & IO_REQ_CLEAN_FLAGS))
956 		io_clean_op(req);
957 	if (!(flags & REQ_F_FIXED_FILE))
958 		io_put_file(req->file);
959 }
960 
961 __cold void io_free_req(struct io_kiocb *req)
962 {
963 	struct io_ring_ctx *ctx = req->ctx;
964 
965 	io_req_put_rsrc(req);
966 	io_dismantle_req(req);
967 	io_put_task(req->task, 1);
968 
969 	spin_lock(&ctx->completion_lock);
970 	wq_list_add_head(&req->comp_list, &ctx->locked_free_list);
971 	ctx->locked_free_nr++;
972 	spin_unlock(&ctx->completion_lock);
973 }
974 
975 static void __io_req_find_next_prep(struct io_kiocb *req)
976 {
977 	struct io_ring_ctx *ctx = req->ctx;
978 
979 	io_cq_lock(ctx);
980 	io_disarm_next(req);
981 	io_cq_unlock_post(ctx);
982 }
983 
984 static inline struct io_kiocb *io_req_find_next(struct io_kiocb *req)
985 {
986 	struct io_kiocb *nxt;
987 
988 	/*
989 	 * If LINK is set, we have dependent requests in this chain. If we
990 	 * didn't fail this request, queue the first one up, moving any other
991 	 * dependencies to the next request. In case of failure, fail the rest
992 	 * of the chain.
993 	 */
994 	if (unlikely(req->flags & IO_DISARM_MASK))
995 		__io_req_find_next_prep(req);
996 	nxt = req->link;
997 	req->link = NULL;
998 	return nxt;
999 }
1000 
1001 static void ctx_flush_and_put(struct io_ring_ctx *ctx, bool *locked)
1002 {
1003 	if (!ctx)
1004 		return;
1005 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1006 		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1007 	if (*locked) {
1008 		io_submit_flush_completions(ctx);
1009 		mutex_unlock(&ctx->uring_lock);
1010 		*locked = false;
1011 	}
1012 	percpu_ref_put(&ctx->refs);
1013 }
1014 
1015 static unsigned int handle_tw_list(struct llist_node *node,
1016 				   struct io_ring_ctx **ctx, bool *locked,
1017 				   struct llist_node *last)
1018 {
1019 	unsigned int count = 0;
1020 
1021 	while (node != last) {
1022 		struct llist_node *next = node->next;
1023 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1024 						    io_task_work.node);
1025 
1026 		prefetch(container_of(next, struct io_kiocb, io_task_work.node));
1027 
1028 		if (req->ctx != *ctx) {
1029 			ctx_flush_and_put(*ctx, locked);
1030 			*ctx = req->ctx;
1031 			/* if not contended, grab and improve batching */
1032 			*locked = mutex_trylock(&(*ctx)->uring_lock);
1033 			percpu_ref_get(&(*ctx)->refs);
1034 		} else if (!*locked)
1035 			*locked = mutex_trylock(&(*ctx)->uring_lock);
1036 		req->io_task_work.func(req, locked);
1037 		node = next;
1038 		count++;
1039 		if (unlikely(need_resched())) {
1040 			ctx_flush_and_put(*ctx, locked);
1041 			*ctx = NULL;
1042 			cond_resched();
1043 		}
1044 	}
1045 
1046 	return count;
1047 }
1048 
1049 /**
1050  * io_llist_xchg - swap all entries in a lock-less list
1051  * @head:	the head of lock-less list to delete all entries
1052  * @new:	new entry as the head of the list
1053  *
1054  * If the list is empty, return NULL; otherwise, return the pointer to the first entry.
1055  * The order of entries returned is from the newest to the oldest added one.
1056  */
1057 static inline struct llist_node *io_llist_xchg(struct llist_head *head,
1058 					       struct llist_node *new)
1059 {
1060 	return xchg(&head->first, new);
1061 }
1062 
1063 /**
1064  * io_llist_cmpxchg - possibly swap all entries in a lock-less list
1065  * @head:	the head of lock-less list to delete all entries
1066  * @old:	expected old value of the first entry of the list
1067  * @new:	new entry as the head of the list
1068  *
1069  * Perform a cmpxchg on the first entry of the list.
1070  */
1071 
1072 static inline struct llist_node *io_llist_cmpxchg(struct llist_head *head,
1073 						  struct llist_node *old,
1074 						  struct llist_node *new)
1075 {
1076 	return cmpxchg(&head->first, old, new);
1077 }
1078 
1079 void tctx_task_work(struct callback_head *cb)
1080 {
1081 	bool uring_locked = false;
1082 	struct io_ring_ctx *ctx = NULL;
1083 	struct io_uring_task *tctx = container_of(cb, struct io_uring_task,
1084 						  task_work);
1085 	struct llist_node fake = {};
1086 	struct llist_node *node = io_llist_xchg(&tctx->task_list, &fake);
1087 	unsigned int loops = 1;
1088 	unsigned int count = handle_tw_list(node, &ctx, &uring_locked, NULL);
1089 
1090 	node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
1091 	while (node != &fake) {
1092 		loops++;
1093 		node = io_llist_xchg(&tctx->task_list, &fake);
1094 		count += handle_tw_list(node, &ctx, &uring_locked, &fake);
1095 		node = io_llist_cmpxchg(&tctx->task_list, &fake, NULL);
1096 	}
1097 
1098 	ctx_flush_and_put(ctx, &uring_locked);
1099 
1100 	/* relaxed read is enough as only the task itself sets ->in_idle */
1101 	if (unlikely(atomic_read(&tctx->in_idle)))
1102 		io_uring_drop_tctx_refs(current);
1103 
1104 	trace_io_uring_task_work_run(tctx, count, loops);
1105 }
1106 
1107 static void io_req_local_work_add(struct io_kiocb *req)
1108 {
1109 	struct io_ring_ctx *ctx = req->ctx;
1110 
1111 	percpu_ref_get(&ctx->refs);
1112 
1113 	if (!llist_add(&req->io_task_work.node, &ctx->work_llist)) {
1114 		percpu_ref_put(&ctx->refs);
1115 		return;
1116 	}
1117 	/* need it for the following io_cqring_wake() */
1118 	smp_mb__after_atomic();
1119 
1120 	if (unlikely(atomic_read(&req->task->io_uring->in_idle))) {
1121 		io_move_task_work_from_local(ctx);
1122 		percpu_ref_put(&ctx->refs);
1123 		return;
1124 	}
1125 
1126 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1127 		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1128 
1129 	if (ctx->has_evfd)
1130 		io_eventfd_signal(ctx);
1131 	__io_cqring_wake(ctx);
1132 	percpu_ref_put(&ctx->refs);
1133 }
1134 
1135 void __io_req_task_work_add(struct io_kiocb *req, bool allow_local)
1136 {
1137 	struct io_uring_task *tctx = req->task->io_uring;
1138 	struct io_ring_ctx *ctx = req->ctx;
1139 	struct llist_node *node;
1140 
1141 	if (allow_local && ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
1142 		io_req_local_work_add(req);
1143 		return;
1144 	}
1145 
1146 	/* task_work already pending, we're done */
1147 	if (!llist_add(&req->io_task_work.node, &tctx->task_list))
1148 		return;
1149 
1150 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1151 		atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1152 
1153 	if (likely(!task_work_add(req->task, &tctx->task_work, ctx->notify_method)))
1154 		return;
1155 
1156 	node = llist_del_all(&tctx->task_list);
1157 
1158 	while (node) {
1159 		req = container_of(node, struct io_kiocb, io_task_work.node);
1160 		node = node->next;
1161 		if (llist_add(&req->io_task_work.node,
1162 			      &req->ctx->fallback_llist))
1163 			schedule_delayed_work(&req->ctx->fallback_work, 1);
1164 	}
1165 }
1166 
1167 static void __cold io_move_task_work_from_local(struct io_ring_ctx *ctx)
1168 {
1169 	struct llist_node *node;
1170 
1171 	node = llist_del_all(&ctx->work_llist);
1172 	while (node) {
1173 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1174 						    io_task_work.node);
1175 
1176 		node = node->next;
1177 		__io_req_task_work_add(req, false);
1178 	}
1179 }
1180 
1181 int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked)
1182 {
1183 	struct llist_node *node;
1184 	struct llist_node fake;
1185 	struct llist_node *current_final = NULL;
1186 	int ret;
1187 	unsigned int loops = 1;
1188 
1189 	if (unlikely(ctx->submitter_task != current))
1190 		return -EEXIST;
1191 
1192 	node = io_llist_xchg(&ctx->work_llist, &fake);
1193 	ret = 0;
1194 again:
1195 	while (node != current_final) {
1196 		struct llist_node *next = node->next;
1197 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1198 						    io_task_work.node);
1199 		prefetch(container_of(next, struct io_kiocb, io_task_work.node));
1200 		req->io_task_work.func(req, locked);
1201 		ret++;
1202 		node = next;
1203 	}
1204 
1205 	if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
1206 		atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
1207 
1208 	node = io_llist_cmpxchg(&ctx->work_llist, &fake, NULL);
1209 	if (node != &fake) {
1210 		loops++;
1211 		current_final = &fake;
1212 		node = io_llist_xchg(&ctx->work_llist, &fake);
1213 		goto again;
1214 	}
1215 
1216 	if (*locked)
1217 		io_submit_flush_completions(ctx);
1218 	trace_io_uring_local_work_run(ctx, ret, loops);
1219 	return ret;
1220 
1221 }
1222 
1223 int io_run_local_work(struct io_ring_ctx *ctx)
1224 {
1225 	bool locked;
1226 	int ret;
1227 
1228 	if (llist_empty(&ctx->work_llist))
1229 		return 0;
1230 
1231 	__set_current_state(TASK_RUNNING);
1232 	locked = mutex_trylock(&ctx->uring_lock);
1233 	ret = __io_run_local_work(ctx, &locked);
1234 	if (locked)
1235 		mutex_unlock(&ctx->uring_lock);
1236 
1237 	return ret;
1238 }
1239 
1240 static void io_req_task_cancel(struct io_kiocb *req, bool *locked)
1241 {
1242 	/* not needed for normal modes, but SQPOLL depends on it */
1243 	io_tw_lock(req->ctx, locked);
1244 	io_req_complete_failed(req, req->cqe.res);
1245 }
1246 
1247 void io_req_task_submit(struct io_kiocb *req, bool *locked)
1248 {
1249 	io_tw_lock(req->ctx, locked);
1250 	/* req->task == current here, checking PF_EXITING is safe */
1251 	if (likely(!(req->task->flags & PF_EXITING)))
1252 		io_queue_sqe(req);
1253 	else
1254 		io_req_complete_failed(req, -EFAULT);
1255 }
1256 
1257 void io_req_task_queue_fail(struct io_kiocb *req, int ret)
1258 {
1259 	io_req_set_res(req, ret, 0);
1260 	req->io_task_work.func = io_req_task_cancel;
1261 	io_req_task_work_add(req);
1262 }
1263 
1264 void io_req_task_queue(struct io_kiocb *req)
1265 {
1266 	req->io_task_work.func = io_req_task_submit;
1267 	io_req_task_work_add(req);
1268 }
1269 
1270 void io_queue_next(struct io_kiocb *req)
1271 {
1272 	struct io_kiocb *nxt = io_req_find_next(req);
1273 
1274 	if (nxt)
1275 		io_req_task_queue(nxt);
1276 }
1277 
1278 void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node)
1279 	__must_hold(&ctx->uring_lock)
1280 {
1281 	struct task_struct *task = NULL;
1282 	int task_refs = 0;
1283 
1284 	do {
1285 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1286 						    comp_list);
1287 
1288 		if (unlikely(req->flags & IO_REQ_CLEAN_SLOW_FLAGS)) {
1289 			if (req->flags & REQ_F_REFCOUNT) {
1290 				node = req->comp_list.next;
1291 				if (!req_ref_put_and_test(req))
1292 					continue;
1293 			}
1294 			if ((req->flags & REQ_F_POLLED) && req->apoll) {
1295 				struct async_poll *apoll = req->apoll;
1296 
1297 				if (apoll->double_poll)
1298 					kfree(apoll->double_poll);
1299 				if (!io_alloc_cache_put(&ctx->apoll_cache, &apoll->cache))
1300 					kfree(apoll);
1301 				req->flags &= ~REQ_F_POLLED;
1302 			}
1303 			if (req->flags & IO_REQ_LINK_FLAGS)
1304 				io_queue_next(req);
1305 			if (unlikely(req->flags & IO_REQ_CLEAN_FLAGS))
1306 				io_clean_op(req);
1307 		}
1308 		if (!(req->flags & REQ_F_FIXED_FILE))
1309 			io_put_file(req->file);
1310 
1311 		io_req_put_rsrc_locked(req, ctx);
1312 
1313 		if (req->task != task) {
1314 			if (task)
1315 				io_put_task(task, task_refs);
1316 			task = req->task;
1317 			task_refs = 0;
1318 		}
1319 		task_refs++;
1320 		node = req->comp_list.next;
1321 		io_req_add_to_cache(req, ctx);
1322 	} while (node);
1323 
1324 	if (task)
1325 		io_put_task(task, task_refs);
1326 }
1327 
1328 static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
1329 	__must_hold(&ctx->uring_lock)
1330 {
1331 	struct io_wq_work_node *node, *prev;
1332 	struct io_submit_state *state = &ctx->submit_state;
1333 
1334 	io_cq_lock(ctx);
1335 	wq_list_for_each(node, prev, &state->compl_reqs) {
1336 		struct io_kiocb *req = container_of(node, struct io_kiocb,
1337 					    comp_list);
1338 
1339 		if (!(req->flags & REQ_F_CQE_SKIP))
1340 			__io_fill_cqe_req(ctx, req);
1341 	}
1342 	__io_cq_unlock_post(ctx);
1343 
1344 	io_free_batch_list(ctx, state->compl_reqs.first);
1345 	INIT_WQ_LIST(&state->compl_reqs);
1346 }
1347 
1348 /*
1349  * Drop reference to request, return next in chain (if there is one) if this
1350  * was the last reference to this request.
1351  */
1352 static inline struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
1353 {
1354 	struct io_kiocb *nxt = NULL;
1355 
1356 	if (req_ref_put_and_test(req)) {
1357 		if (unlikely(req->flags & IO_REQ_LINK_FLAGS))
1358 			nxt = io_req_find_next(req);
1359 		io_free_req(req);
1360 	}
1361 	return nxt;
1362 }
1363 
1364 static unsigned io_cqring_events(struct io_ring_ctx *ctx)
1365 {
1366 	/* See comment at the top of this file */
1367 	smp_rmb();
1368 	return __io_cqring_events(ctx);
1369 }
1370 
1371 /*
1372  * We can't just wait for polled events to come to us, we have to actively
1373  * find and complete them.
1374  */
1375 static __cold void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
1376 {
1377 	if (!(ctx->flags & IORING_SETUP_IOPOLL))
1378 		return;
1379 
1380 	percpu_ref_get(&ctx->refs);
1381 	mutex_lock(&ctx->uring_lock);
1382 	while (!wq_list_empty(&ctx->iopoll_list)) {
1383 		/* let it sleep and repeat later if can't complete a request */
1384 		if (io_do_iopoll(ctx, true) == 0)
1385 			break;
1386 		/*
1387 		 * Ensure we allow local-to-the-cpu processing to take place;
1388 		 * in this case we need to ensure that we reap all events.
1389 		 * Also let task_work, etc. progress by releasing the mutex.
1390 		 */
1391 		if (need_resched()) {
1392 			mutex_unlock(&ctx->uring_lock);
1393 			cond_resched();
1394 			mutex_lock(&ctx->uring_lock);
1395 		}
1396 	}
1397 	mutex_unlock(&ctx->uring_lock);
1398 	percpu_ref_put(&ctx->refs);
1399 }
1400 
1401 static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
1402 {
1403 	unsigned int nr_events = 0;
1404 	int ret = 0;
1405 	unsigned long check_cq;
1406 
1407 	if (!io_allowed_run_tw(ctx))
1408 		return -EEXIST;
1409 
1410 	check_cq = READ_ONCE(ctx->check_cq);
1411 	if (unlikely(check_cq)) {
1412 		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
1413 			__io_cqring_overflow_flush(ctx, false);
1414 		/*
1415 		 * Similarly do not spin if we have not informed the user of any
1416 		 * dropped CQE.
1417 		 */
1418 		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
1419 			return -EBADR;
1420 	}
1421 	/*
1422 	 * Don't enter poll loop if we already have events pending.
1423 	 * If we do, we can potentially be spinning for commands that
1424 	 * already triggered a CQE (eg in error).
1425 	 */
1426 	if (io_cqring_events(ctx))
1427 		return 0;
1428 
1429 	do {
1430 		/*
1431 		 * If a submit got punted to a workqueue, we can have the
1432 		 * application entering polling for a command before it gets
1433 		 * issued. That app will hold the uring_lock for the duration
1434 		 * of the poll right here, so we need to take a breather every
1435 		 * now and then to ensure that the issue has a chance to add
1436 		 * the poll to the issued list. Otherwise we can spin here
1437 		 * forever, while the workqueue is stuck trying to acquire the
1438 		 * very same mutex.
1439 		 */
1440 		if (wq_list_empty(&ctx->iopoll_list) ||
1441 		    io_task_work_pending(ctx)) {
1442 			u32 tail = ctx->cached_cq_tail;
1443 
1444 			(void) io_run_local_work_locked(ctx);
1445 
1446 			if (task_work_pending(current) ||
1447 			    wq_list_empty(&ctx->iopoll_list)) {
1448 				mutex_unlock(&ctx->uring_lock);
1449 				io_run_task_work();
1450 				mutex_lock(&ctx->uring_lock);
1451 			}
1452 			/* some requests don't go through iopoll_list */
1453 			if (tail != ctx->cached_cq_tail ||
1454 			    wq_list_empty(&ctx->iopoll_list))
1455 				break;
1456 		}
1457 		ret = io_do_iopoll(ctx, !min);
1458 		if (ret < 0)
1459 			break;
1460 		nr_events += ret;
1461 		ret = 0;
1462 
1463 		if (task_sigpending(current))
1464 			return -EINTR;
1465 	} while (nr_events < min && !need_resched());
1466 
1467 	return ret;
1468 }
1469 
1470 void io_req_task_complete(struct io_kiocb *req, bool *locked)
1471 {
1472 	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
1473 		unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
1474 
1475 		req->cqe.flags |= io_put_kbuf(req, issue_flags);
1476 	}
1477 
1478 	if (*locked)
1479 		io_req_complete_defer(req);
1480 	else
1481 		io_req_complete_post(req);
1482 }
1483 
1484 /*
1485  * After the iocb has been issued, it's safe to be found on the poll list.
1486  * Adding the kiocb to the list AFTER submission ensures that we don't
1487  * find it from an io_do_iopoll() thread before the issuer is done
1488  * accessing the kiocb cookie.
1489  */
1490 static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags)
1491 {
1492 	struct io_ring_ctx *ctx = req->ctx;
1493 	const bool needs_lock = issue_flags & IO_URING_F_UNLOCKED;
1494 
1495 	/* workqueue context doesn't hold uring_lock, grab it now */
1496 	if (unlikely(needs_lock))
1497 		mutex_lock(&ctx->uring_lock);
1498 
1499 	/*
1500 	 * Track whether we have multiple files in our lists. This will impact
1501 	 * how we do polling eventually, not spinning if we're on potentially
1502 	 * different devices.
1503 	 */
1504 	if (wq_list_empty(&ctx->iopoll_list)) {
1505 		ctx->poll_multi_queue = false;
1506 	} else if (!ctx->poll_multi_queue) {
1507 		struct io_kiocb *list_req;
1508 
1509 		list_req = container_of(ctx->iopoll_list.first, struct io_kiocb,
1510 					comp_list);
1511 		if (list_req->file != req->file)
1512 			ctx->poll_multi_queue = true;
1513 	}
1514 
1515 	/*
1516 	 * For fast devices, IO may have already completed. If it has, add
1517 	 * it to the front so we find it first.
1518 	 */
1519 	if (READ_ONCE(req->iopoll_completed))
1520 		wq_list_add_head(&req->comp_list, &ctx->iopoll_list);
1521 	else
1522 		wq_list_add_tail(&req->comp_list, &ctx->iopoll_list);
1523 
1524 	if (unlikely(needs_lock)) {
1525 		/*
1526 		 * If IORING_SETUP_SQPOLL is enabled, sqes are either handled
1527 		 * in sq thread task context or in io worker task context. If
1528 		 * the current task context is the sq thread, we don't need to
1529 		 * check whether we should wake up the sq thread.
1530 		 */
1531 		if ((ctx->flags & IORING_SETUP_SQPOLL) &&
1532 		    wq_has_sleeper(&ctx->sq_data->wait))
1533 			wake_up(&ctx->sq_data->wait);
1534 
1535 		mutex_unlock(&ctx->uring_lock);
1536 	}
1537 }
1538 
1539 static bool io_bdev_nowait(struct block_device *bdev)
1540 {
1541 	return !bdev || bdev_nowait(bdev);
1542 }
1543 
1544 /*
1545  * If we tracked the file through the SCM inflight mechanism, we could support
1546  * any file. For now, just ensure that anything potentially problematic is done
1547  * inline.
1548  */
1549 static bool __io_file_supports_nowait(struct file *file, umode_t mode)
1550 {
1551 	if (S_ISBLK(mode)) {
1552 		if (IS_ENABLED(CONFIG_BLOCK) &&
1553 		    io_bdev_nowait(I_BDEV(file->f_mapping->host)))
1554 			return true;
1555 		return false;
1556 	}
1557 	if (S_ISSOCK(mode))
1558 		return true;
1559 	if (S_ISREG(mode)) {
1560 		if (IS_ENABLED(CONFIG_BLOCK) &&
1561 		    io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
1562 		    !io_is_uring_fops(file))
1563 			return true;
1564 		return false;
1565 	}
1566 
1567 	/* any ->read/write should understand O_NONBLOCK */
1568 	if (file->f_flags & O_NONBLOCK)
1569 		return true;
1570 	return file->f_mode & FMODE_NOWAIT;
1571 }
1572 
1573 /*
1574  * If we tracked the file through the SCM inflight mechanism, we could support
1575  * any file. For now, just ensure that anything potentially problematic is done
1576  * inline.
1577  */
1578 unsigned int io_file_get_flags(struct file *file)
1579 {
1580 	umode_t mode = file_inode(file)->i_mode;
1581 	unsigned int res = 0;
1582 
1583 	if (S_ISREG(mode))
1584 		res |= FFS_ISREG;
1585 	if (__io_file_supports_nowait(file, mode))
1586 		res |= FFS_NOWAIT;
1587 	return res;
1588 }
1589 
1590 bool io_alloc_async_data(struct io_kiocb *req)
1591 {
1592 	WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
1593 	req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
1594 	if (req->async_data) {
1595 		req->flags |= REQ_F_ASYNC_DATA;
1596 		return false;
1597 	}
1598 	return true;
1599 }
1600 
1601 int io_req_prep_async(struct io_kiocb *req)
1602 {
1603 	const struct io_op_def *def = &io_op_defs[req->opcode];
1604 
1605 	/* assign early for deferred execution for non-fixed file */
1606 	if (def->needs_file && !(req->flags & REQ_F_FIXED_FILE) && !req->file)
1607 		req->file = io_file_get_normal(req, req->cqe.fd);
1608 	if (!def->prep_async)
1609 		return 0;
1610 	if (WARN_ON_ONCE(req_has_async_data(req)))
1611 		return -EFAULT;
1612 	if (!io_op_defs[req->opcode].manual_alloc) {
1613 		if (io_alloc_async_data(req))
1614 			return -EAGAIN;
1615 	}
1616 	return def->prep_async(req);
1617 }
1618 
1619 static u32 io_get_sequence(struct io_kiocb *req)
1620 {
1621 	u32 seq = req->ctx->cached_sq_head;
1622 	struct io_kiocb *cur;
1623 
1624 	/* need original cached_sq_head, but it was increased for each req */
1625 	io_for_each_link(cur, req)
1626 		seq--;
1627 	return seq;
1628 }
1629 
1630 static __cold void io_drain_req(struct io_kiocb *req)
1631 	__must_hold(&ctx->uring_lock)
1632 {
1633 	struct io_ring_ctx *ctx = req->ctx;
1634 	struct io_defer_entry *de;
1635 	int ret;
1636 	u32 seq = io_get_sequence(req);
1637 
1638 	/* Still need defer if there is pending req in defer list. */
1639 	spin_lock(&ctx->completion_lock);
1640 	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) {
1641 		spin_unlock(&ctx->completion_lock);
1642 queue:
1643 		ctx->drain_active = false;
1644 		io_req_task_queue(req);
1645 		return;
1646 	}
1647 	spin_unlock(&ctx->completion_lock);
1648 
1649 	io_prep_async_link(req);
1650 	de = kmalloc(sizeof(*de), GFP_KERNEL);
1651 	if (!de) {
1652 		ret = -ENOMEM;
1653 		io_req_complete_failed(req, ret);
1654 		return;
1655 	}
1656 
1657 	spin_lock(&ctx->completion_lock);
1658 	if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
1659 		spin_unlock(&ctx->completion_lock);
1660 		kfree(de);
1661 		goto queue;
1662 	}
1663 
1664 	trace_io_uring_defer(req);
1665 	de->req = req;
1666 	de->seq = seq;
1667 	list_add_tail(&de->list, &ctx->defer_list);
1668 	spin_unlock(&ctx->completion_lock);
1669 }
1670 
1671 static void io_clean_op(struct io_kiocb *req)
1672 {
1673 	if (req->flags & REQ_F_BUFFER_SELECTED) {
1674 		spin_lock(&req->ctx->completion_lock);
1675 		io_put_kbuf_comp(req);
1676 		spin_unlock(&req->ctx->completion_lock);
1677 	}
1678 
1679 	if (req->flags & REQ_F_NEED_CLEANUP) {
1680 		const struct io_op_def *def = &io_op_defs[req->opcode];
1681 
1682 		if (def->cleanup)
1683 			def->cleanup(req);
1684 	}
1685 	if ((req->flags & REQ_F_POLLED) && req->apoll) {
1686 		kfree(req->apoll->double_poll);
1687 		kfree(req->apoll);
1688 		req->apoll = NULL;
1689 	}
1690 	if (req->flags & REQ_F_INFLIGHT) {
1691 		struct io_uring_task *tctx = req->task->io_uring;
1692 
1693 		atomic_dec(&tctx->inflight_tracked);
1694 	}
1695 	if (req->flags & REQ_F_CREDS)
1696 		put_cred(req->creds);
1697 	if (req->flags & REQ_F_ASYNC_DATA) {
1698 		kfree(req->async_data);
1699 		req->async_data = NULL;
1700 	}
1701 	req->flags &= ~IO_REQ_CLEAN_FLAGS;
1702 }
1703 
1704 static bool io_assign_file(struct io_kiocb *req, unsigned int issue_flags)
1705 {
1706 	if (req->file || !io_op_defs[req->opcode].needs_file)
1707 		return true;
1708 
1709 	if (req->flags & REQ_F_FIXED_FILE)
1710 		req->file = io_file_get_fixed(req, req->cqe.fd, issue_flags);
1711 	else
1712 		req->file = io_file_get_normal(req, req->cqe.fd);
1713 
1714 	return !!req->file;
1715 }
1716 
1717 static int io_issue_sqe(struct io_kiocb *req, unsigned int issue_flags)
1718 {
1719 	const struct io_op_def *def = &io_op_defs[req->opcode];
1720 	const struct cred *creds = NULL;
1721 	int ret;
1722 
1723 	if (unlikely(!io_assign_file(req, issue_flags)))
1724 		return -EBADF;
1725 
1726 	if (unlikely((req->flags & REQ_F_CREDS) && req->creds != current_cred()))
1727 		creds = override_creds(req->creds);
1728 
1729 	if (!def->audit_skip)
1730 		audit_uring_entry(req->opcode);
1731 
1732 	ret = def->issue(req, issue_flags);
1733 
1734 	if (!def->audit_skip)
1735 		audit_uring_exit(!ret, ret);
1736 
1737 	if (creds)
1738 		revert_creds(creds);
1739 
1740 	if (ret == IOU_OK) {
1741 		if (issue_flags & IO_URING_F_COMPLETE_DEFER)
1742 			io_req_complete_defer(req);
1743 		else
1744 			io_req_complete_post(req);
1745 	} else if (ret != IOU_ISSUE_SKIP_COMPLETE)
1746 		return ret;
1747 
1748 	/* If the op doesn't have a file, we're not polling for it */
1749 	if ((req->ctx->flags & IORING_SETUP_IOPOLL) && def->iopoll_queue)
1750 		io_iopoll_req_issued(req, issue_flags);
1751 
1752 	return 0;
1753 }
1754 
1755 int io_poll_issue(struct io_kiocb *req, bool *locked)
1756 {
1757 	io_tw_lock(req->ctx, locked);
1758 	if (unlikely(req->task->flags & PF_EXITING))
1759 		return -EFAULT;
1760 	return io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_MULTISHOT);
1761 }
1762 
1763 struct io_wq_work *io_wq_free_work(struct io_wq_work *work)
1764 {
1765 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1766 
1767 	req = io_put_req_find_next(req);
1768 	return req ? &req->work : NULL;
1769 }
1770 
1771 void io_wq_submit_work(struct io_wq_work *work)
1772 {
1773 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1774 	const struct io_op_def *def = &io_op_defs[req->opcode];
1775 	unsigned int issue_flags = IO_URING_F_UNLOCKED;
1776 	bool needs_poll = false;
1777 	int ret = 0, err = -ECANCELED;
1778 
1779 	/* one will be dropped by ->io_free_work() after returning to io-wq */
1780 	if (!(req->flags & REQ_F_REFCOUNT))
1781 		__io_req_set_refcount(req, 2);
1782 	else
1783 		req_ref_get(req);
1784 
1785 	io_arm_ltimeout(req);
1786 
1787 	/* either cancelled or io-wq is dying, so don't touch tctx->iowq */
1788 	if (work->flags & IO_WQ_WORK_CANCEL) {
1789 fail:
1790 		io_req_task_queue_fail(req, err);
1791 		return;
1792 	}
1793 	if (!io_assign_file(req, issue_flags)) {
1794 		err = -EBADF;
1795 		work->flags |= IO_WQ_WORK_CANCEL;
1796 		goto fail;
1797 	}
1798 
1799 	if (req->flags & REQ_F_FORCE_ASYNC) {
1800 		bool opcode_poll = def->pollin || def->pollout;
1801 
1802 		if (opcode_poll && file_can_poll(req->file)) {
1803 			needs_poll = true;
1804 			issue_flags |= IO_URING_F_NONBLOCK;
1805 		}
1806 	}
1807 
1808 	do {
1809 		ret = io_issue_sqe(req, issue_flags);
1810 		if (ret != -EAGAIN)
1811 			break;
1812 
1813 		/*
1814 		 * If REQ_F_NOWAIT is set, then don't wait or retry with
1815 		 * poll. -EAGAIN is final for that case.
1816 		 */
1817 		if (req->flags & REQ_F_NOWAIT)
1818 			break;
1819 
1820 		/*
1821 		 * We can get EAGAIN for iopolled IO even though we're
1822 		 * forcing a sync submission from here, since we can't
1823 		 * wait for request slots on the block side.
1824 		 */
1825 		if (!needs_poll) {
1826 			if (!(req->ctx->flags & IORING_SETUP_IOPOLL))
1827 				break;
1828 			if (io_wq_worker_stopped())
1829 				break;
1830 			cond_resched();
1831 			continue;
1832 		}
1833 
1834 		if (io_arm_poll_handler(req, issue_flags) == IO_APOLL_OK)
1835 			return;
1836 		/* aborted or ready, in either case retry blocking */
1837 		needs_poll = false;
1838 		issue_flags &= ~IO_URING_F_NONBLOCK;
1839 	} while (1);
1840 
1841 	/* avoid locking problems by failing it from a clean context */
1842 	if (ret < 0)
1843 		io_req_task_queue_fail(req, ret);
1844 }
1845 
1846 inline struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
1847 				      unsigned int issue_flags)
1848 {
1849 	struct io_ring_ctx *ctx = req->ctx;
1850 	struct file *file = NULL;
1851 	unsigned long file_ptr;
1852 
1853 	io_ring_submit_lock(ctx, issue_flags);
1854 
1855 	if (unlikely((unsigned int)fd >= ctx->nr_user_files))
1856 		goto out;
1857 	fd = array_index_nospec(fd, ctx->nr_user_files);
1858 	file_ptr = io_fixed_file_slot(&ctx->file_table, fd)->file_ptr;
1859 	file = (struct file *) (file_ptr & FFS_MASK);
1860 	file_ptr &= ~FFS_MASK;
1861 	/* mask in overlapping REQ_F and FFS bits */
1862 	req->flags |= (file_ptr << REQ_F_SUPPORT_NOWAIT_BIT);
1863 	io_req_set_rsrc_node(req, ctx, 0);
1864 out:
1865 	io_ring_submit_unlock(ctx, issue_flags);
1866 	return file;
1867 }
1868 
1869 struct file *io_file_get_normal(struct io_kiocb *req, int fd)
1870 {
1871 	struct file *file = fget(fd);
1872 
1873 	trace_io_uring_file_get(req, fd);
1874 
1875 	/* we don't allow fixed io_uring files */
1876 	if (file && io_is_uring_fops(file))
1877 		io_req_track_inflight(req);
1878 	return file;
1879 }
1880 
1881 static void io_queue_async(struct io_kiocb *req, int ret)
1882 	__must_hold(&req->ctx->uring_lock)
1883 {
1884 	struct io_kiocb *linked_timeout;
1885 
1886 	if (ret != -EAGAIN || (req->flags & REQ_F_NOWAIT)) {
1887 		io_req_complete_failed(req, ret);
1888 		return;
1889 	}
1890 
1891 	linked_timeout = io_prep_linked_timeout(req);
1892 
1893 	switch (io_arm_poll_handler(req, 0)) {
1894 	case IO_APOLL_READY:
1895 		io_kbuf_recycle(req, 0);
1896 		io_req_task_queue(req);
1897 		break;
1898 	case IO_APOLL_ABORTED:
1899 		io_kbuf_recycle(req, 0);
1900 		io_queue_iowq(req, NULL);
1901 		break;
1902 	case IO_APOLL_OK:
1903 		break;
1904 	}
1905 
1906 	if (linked_timeout)
1907 		io_queue_linked_timeout(linked_timeout);
1908 }
1909 
1910 static inline void io_queue_sqe(struct io_kiocb *req)
1911 	__must_hold(&req->ctx->uring_lock)
1912 {
1913 	int ret;
1914 
1915 	ret = io_issue_sqe(req, IO_URING_F_NONBLOCK|IO_URING_F_COMPLETE_DEFER);
1916 
1917 	/*
1918 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
1919 	 * doesn't support non-blocking read/write attempts
1920 	 */
1921 	if (likely(!ret))
1922 		io_arm_ltimeout(req);
1923 	else
1924 		io_queue_async(req, ret);
1925 }
1926 
1927 static void io_queue_sqe_fallback(struct io_kiocb *req)
1928 	__must_hold(&req->ctx->uring_lock)
1929 {
1930 	if (unlikely(req->flags & REQ_F_FAIL)) {
1931 		/*
1932 	 * We don't submit; fail them all. To do that, replace hardlinks
1933 	 * with normal links - an extra REQ_F_LINK is tolerated.
1934 		 */
1935 		req->flags &= ~REQ_F_HARDLINK;
1936 		req->flags |= REQ_F_LINK;
1937 		io_req_complete_failed(req, req->cqe.res);
1938 	} else {
1939 		int ret = io_req_prep_async(req);
1940 
1941 		if (unlikely(ret)) {
1942 			io_req_complete_failed(req, ret);
1943 			return;
1944 		}
1945 
1946 		if (unlikely(req->ctx->drain_active))
1947 			io_drain_req(req);
1948 		else
1949 			io_queue_iowq(req, NULL);
1950 	}
1951 }
1952 
1953 /*
1954  * Check SQE restrictions (opcode and flags).
1955  *
1956  * Returns 'true' if SQE is allowed, 'false' otherwise.
1957  */
1958 static inline bool io_check_restriction(struct io_ring_ctx *ctx,
1959 					struct io_kiocb *req,
1960 					unsigned int sqe_flags)
1961 {
1962 	if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
1963 		return false;
1964 
1965 	if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
1966 	    ctx->restrictions.sqe_flags_required)
1967 		return false;
1968 
1969 	if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
1970 			  ctx->restrictions.sqe_flags_required))
1971 		return false;
1972 
1973 	return true;
1974 }
1975 
1976 static void io_init_req_drain(struct io_kiocb *req)
1977 {
1978 	struct io_ring_ctx *ctx = req->ctx;
1979 	struct io_kiocb *head = ctx->submit_state.link.head;
1980 
1981 	ctx->drain_active = true;
1982 	if (head) {
1983 		/*
1984 		 * If we need to drain a request in the middle of a link, drain
1985 		 * the head request and the next request/link after the current
1986 		 * link. Considering sequential execution of links,
1987 		 * REQ_F_IO_DRAIN will be maintained for every request of our
1988 		 * link.
1989 		 */
1990 		head->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
1991 		ctx->drain_next = true;
1992 	}
1993 }
1994 
1995 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
1996 		       const struct io_uring_sqe *sqe)
1997 	__must_hold(&ctx->uring_lock)
1998 {
1999 	const struct io_op_def *def;
2000 	unsigned int sqe_flags;
2001 	int personality;
2002 	u8 opcode;
2003 
2004 	/* req is partially pre-initialised, see io_preinit_req() */
2005 	req->opcode = opcode = READ_ONCE(sqe->opcode);
2006 	/* same numerical values as the corresponding REQ_F_*, safe to copy */
2007 	req->flags = sqe_flags = READ_ONCE(sqe->flags);
2008 	req->cqe.user_data = READ_ONCE(sqe->user_data);
2009 	req->file = NULL;
2010 	req->rsrc_node = NULL;
2011 	req->task = current;
2012 
2013 	if (unlikely(opcode >= IORING_OP_LAST)) {
2014 		req->opcode = 0;
2015 		return -EINVAL;
2016 	}
2017 	def = &io_op_defs[opcode];
2018 	if (unlikely(sqe_flags & ~SQE_COMMON_FLAGS)) {
2019 		/* enforce forwards compatibility on users */
2020 		if (sqe_flags & ~SQE_VALID_FLAGS)
2021 			return -EINVAL;
2022 		if (sqe_flags & IOSQE_BUFFER_SELECT) {
2023 			if (!def->buffer_select)
2024 				return -EOPNOTSUPP;
2025 			req->buf_index = READ_ONCE(sqe->buf_group);
2026 		}
2027 		if (sqe_flags & IOSQE_CQE_SKIP_SUCCESS)
2028 			ctx->drain_disabled = true;
2029 		if (sqe_flags & IOSQE_IO_DRAIN) {
2030 			if (ctx->drain_disabled)
2031 				return -EOPNOTSUPP;
2032 			io_init_req_drain(req);
2033 		}
2034 	}
2035 	if (unlikely(ctx->restricted || ctx->drain_active || ctx->drain_next)) {
2036 		if (ctx->restricted && !io_check_restriction(ctx, req, sqe_flags))
2037 			return -EACCES;
2038 		/* knock it to the slow queue path, will be drained there */
2039 		if (ctx->drain_active)
2040 			req->flags |= REQ_F_FORCE_ASYNC;
2041 		/* if there is no link, we're at "next" request and need to drain */
2042 		if (unlikely(ctx->drain_next) && !ctx->submit_state.link.head) {
2043 			ctx->drain_next = false;
2044 			ctx->drain_active = true;
2045 			req->flags |= REQ_F_IO_DRAIN | REQ_F_FORCE_ASYNC;
2046 		}
2047 	}
2048 
2049 	if (!def->ioprio && sqe->ioprio)
2050 		return -EINVAL;
2051 	if (!def->iopoll && (ctx->flags & IORING_SETUP_IOPOLL))
2052 		return -EINVAL;
2053 
2054 	if (def->needs_file) {
2055 		struct io_submit_state *state = &ctx->submit_state;
2056 
2057 		req->cqe.fd = READ_ONCE(sqe->fd);
2058 
2059 		/*
2060 		 * Plug now if we have more than 2 IOs left after this, and the
2061 		 * target is potentially a read/write to block-based storage.
2062 		 */
2063 		if (state->need_plug && def->plug) {
2064 			state->plug_started = true;
2065 			state->need_plug = false;
2066 			blk_start_plug_nr_ios(&state->plug, state->submit_nr);
2067 		}
2068 	}
2069 
2070 	personality = READ_ONCE(sqe->personality);
2071 	if (personality) {
2072 		int ret;
2073 
2074 		req->creds = xa_load(&ctx->personalities, personality);
2075 		if (!req->creds)
2076 			return -EINVAL;
2077 		get_cred(req->creds);
2078 		ret = security_uring_override_creds(req->creds);
2079 		if (ret) {
2080 			put_cred(req->creds);
2081 			return ret;
2082 		}
2083 		req->flags |= REQ_F_CREDS;
2084 	}
2085 
2086 	return def->prep(req, sqe);
2087 }
2088 
2089 static __cold int io_submit_fail_init(const struct io_uring_sqe *sqe,
2090 				      struct io_kiocb *req, int ret)
2091 {
2092 	struct io_ring_ctx *ctx = req->ctx;
2093 	struct io_submit_link *link = &ctx->submit_state.link;
2094 	struct io_kiocb *head = link->head;
2095 
2096 	trace_io_uring_req_failed(sqe, req, ret);
2097 
2098 	/*
2099 	 * Avoid breaking links in the middle as it renders links with SQPOLL
2100 	 * unusable. Instead of failing eagerly, continue assembling the link if
2101 	 * applicable and mark the head with REQ_F_FAIL. The link flushing code
2102 	 * should find the flag and handle the rest.
2103 	 */
2104 	req_fail_link_node(req, ret);
2105 	if (head && !(head->flags & REQ_F_FAIL))
2106 		req_fail_link_node(head, -ECANCELED);
2107 
2108 	if (!(req->flags & IO_REQ_LINK_FLAGS)) {
2109 		if (head) {
2110 			link->last->link = req;
2111 			link->head = NULL;
2112 			req = head;
2113 		}
2114 		io_queue_sqe_fallback(req);
2115 		return ret;
2116 	}
2117 
2118 	if (head)
2119 		link->last->link = req;
2120 	else
2121 		link->head = req;
2122 	link->last = req;
2123 	return 0;
2124 }
2125 
2126 static inline int io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2127 			 const struct io_uring_sqe *sqe)
2128 	__must_hold(&ctx->uring_lock)
2129 {
2130 	struct io_submit_link *link = &ctx->submit_state.link;
2131 	int ret;
2132 
2133 	ret = io_init_req(ctx, req, sqe);
2134 	if (unlikely(ret))
2135 		return io_submit_fail_init(sqe, req, ret);
2136 
2137 	/* don't need @sqe from now on */
2138 	trace_io_uring_submit_sqe(req, true);
2139 
2140 	/*
2141 	 * If we already have a head request, queue this one for async
2142 	 * submittal once the head completes. If we don't have a head but
2143 	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2144 	 * submitted sync once the chain is complete. If none of those
2145 	 * conditions are true (normal request), then just queue it.
2146 	 */
2147 	if (unlikely(link->head)) {
2148 		ret = io_req_prep_async(req);
2149 		if (unlikely(ret))
2150 			return io_submit_fail_init(sqe, req, ret);
2151 
2152 		trace_io_uring_link(req, link->head);
2153 		link->last->link = req;
2154 		link->last = req;
2155 
2156 		if (req->flags & IO_REQ_LINK_FLAGS)
2157 			return 0;
2158 		/* last request of the link, flush it */
2159 		req = link->head;
2160 		link->head = NULL;
2161 		if (req->flags & (REQ_F_FORCE_ASYNC | REQ_F_FAIL))
2162 			goto fallback;
2163 
2164 	} else if (unlikely(req->flags & (IO_REQ_LINK_FLAGS |
2165 					  REQ_F_FORCE_ASYNC | REQ_F_FAIL))) {
2166 		if (req->flags & IO_REQ_LINK_FLAGS) {
2167 			link->head = req;
2168 			link->last = req;
2169 		} else {
2170 fallback:
2171 			io_queue_sqe_fallback(req);
2172 		}
2173 		return 0;
2174 	}
2175 
2176 	io_queue_sqe(req);
2177 	return 0;
2178 }
2179 
2180 /*
2181  * Batched submission is done, ensure local IO is flushed out.
2182  */
2183 static void io_submit_state_end(struct io_ring_ctx *ctx)
2184 {
2185 	struct io_submit_state *state = &ctx->submit_state;
2186 
2187 	if (unlikely(state->link.head))
2188 		io_queue_sqe_fallback(state->link.head);
2189 	/* flush only after queuing links as they can generate completions */
2190 	io_submit_flush_completions(ctx);
2191 	if (state->plug_started)
2192 		blk_finish_plug(&state->plug);
2193 }
2194 
2195 /*
2196  * Start submission side cache.
2197  */
2198 static void io_submit_state_start(struct io_submit_state *state,
2199 				  unsigned int max_ios)
2200 {
2201 	state->plug_started = false;
2202 	state->need_plug = max_ios > 2;
2203 	state->submit_nr = max_ios;
2204 	/* set only head, no need to init link_last in advance */
2205 	state->link.head = NULL;
2206 }
2207 
2208 static void io_commit_sqring(struct io_ring_ctx *ctx)
2209 {
2210 	struct io_rings *rings = ctx->rings;
2211 
2212 	/*
2213 	 * Ensure any loads from the SQEs are done at this point,
2214 	 * since once we write the new head, the application could
2215 	 * write new data to them.
2216 	 */
2217 	smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2218 }
2219 
2220 /*
2221  * Fetch an sqe, if one is available. Note this returns a pointer to memory
2222  * that is mapped by userspace. This means that care needs to be taken to
2223  * ensure that reads are stable, as we cannot rely on userspace always
2224  * being a good citizen. If members of the sqe are validated and then later
2225  * used, it's important that those reads are done through READ_ONCE() to
2226  * prevent a re-load down the line.
2227  */
2228 static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
2229 {
2230 	unsigned head, mask = ctx->sq_entries - 1;
2231 	unsigned sq_idx = ctx->cached_sq_head++ & mask;
2232 
2233 	/*
2234 	 * The cached sq head (or cq tail) serves two purposes:
2235 	 *
2236 	 * 1) allows us to batch the cost of updating the user visible
2237 	 *    head.
2238 	 * 2) allows the kernel side to track the head on its own, even
2239 	 *    though the application is the one updating it.
2240 	 */
2241 	head = READ_ONCE(ctx->sq_array[sq_idx]);
2242 	if (likely(head < ctx->sq_entries)) {
2243 		/* double index for 128-byte SQEs, twice as long */
2244 		if (ctx->flags & IORING_SETUP_SQE128)
2245 			head <<= 1;
2246 		return &ctx->sq_sqes[head];
2247 	}
2248 
2249 	/* drop invalid entries */
2250 	spin_lock(&ctx->completion_lock);
2251 	ctx->cq_extra--;
2252 	spin_unlock(&ctx->completion_lock);
2253 	WRITE_ONCE(ctx->rings->sq_dropped,
2254 		   READ_ONCE(ctx->rings->sq_dropped) + 1);
2255 	return NULL;
2256 }
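/*
 * Worked example (added for illustration): with sq_entries == 8 and
 * cached_sq_head == 13, sq_idx is 13 & 7 == 5. If the application stored the
 * value 3 in sq_array[5], the SQE at sq_sqes[3] is returned - or sq_sqes[6]
 * on an IORING_SETUP_SQE128 ring, where the index is doubled because each
 * big SQE occupies two slots.
 */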
2257 
2258 int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
2259 	__must_hold(&ctx->uring_lock)
2260 {
2261 	unsigned int entries = io_sqring_entries(ctx);
2262 	unsigned int left;
2263 	int ret;
2264 
2265 	if (unlikely(!entries))
2266 		return 0;
2267 	/* make sure SQ entry isn't read before tail */
2268 	ret = left = min3(nr, ctx->sq_entries, entries);
2269 	io_get_task_refs(left);
2270 	io_submit_state_start(&ctx->submit_state, left);
2271 
2272 	do {
2273 		const struct io_uring_sqe *sqe;
2274 		struct io_kiocb *req;
2275 
2276 		if (unlikely(!io_alloc_req_refill(ctx)))
2277 			break;
2278 		req = io_alloc_req(ctx);
2279 		sqe = io_get_sqe(ctx);
2280 		if (unlikely(!sqe)) {
2281 			io_req_add_to_cache(req, ctx);
2282 			break;
2283 		}
2284 
2285 		/*
2286 		 * Continue submitting even for sqe failure if the
2287 		 * ring was setup with IORING_SETUP_SUBMIT_ALL
2288 		 */
2289 		if (unlikely(io_submit_sqe(ctx, req, sqe)) &&
2290 		    !(ctx->flags & IORING_SETUP_SUBMIT_ALL)) {
2291 			left--;
2292 			break;
2293 		}
2294 	} while (--left);
2295 
2296 	if (unlikely(left)) {
2297 		ret -= left;
2298 		/* try again if it submitted nothing and can't allocate a req */
2299 		if (!ret && io_req_cache_empty(ctx))
2300 			ret = -EAGAIN;
2301 		current->io_uring->cached_refs += left;
2302 	}
2303 
2304 	io_submit_state_end(ctx);
2305 	 /* Commit SQ ring head once we've consumed and submitted all SQEs */
2306 	io_commit_sqring(ctx);
2307 	return ret;
2308 }
2309 
2310 struct io_wait_queue {
2311 	struct wait_queue_entry wq;
2312 	struct io_ring_ctx *ctx;
2313 	unsigned cq_tail;
2314 	unsigned nr_timeouts;
2315 };
2316 
2317 static inline bool io_has_work(struct io_ring_ctx *ctx)
2318 {
2319 	return test_bit(IO_CHECK_CQ_OVERFLOW_BIT, &ctx->check_cq) ||
2320 	       ((ctx->flags & IORING_SETUP_DEFER_TASKRUN) &&
2321 		!llist_empty(&ctx->work_llist));
2322 }
2323 
2324 static inline bool io_should_wake(struct io_wait_queue *iowq)
2325 {
2326 	struct io_ring_ctx *ctx = iowq->ctx;
2327 	int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
2328 
2329 	/*
2330 	 * Wake up if we have enough events, or if a timeout occurred since we
2331 	 * started waiting. For timeouts, we always want to return to userspace,
2332 	 * regardless of event count.
2333 	 */
2334 	return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
2335 }
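/*
 * Added note: iowq->cq_tail is the tail value at which min_events completions
 * become reapable (CQ head at wait start + min_events, set up in
 * io_cqring_wait() below). The signed subtraction copes with u32 wraparound:
 * e.g. a target of 0xfffffffe and a current tail that wrapped around to 2
 * gives dist == 4, so enough CQEs have been posted and the waiter is woken.
 */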
2336 
2337 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
2338 			    int wake_flags, void *key)
2339 {
2340 	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
2341 							wq);
2342 	struct io_ring_ctx *ctx = iowq->ctx;
2343 
2344 	/*
2345 	 * Cannot safely flush overflowed CQEs from here, ensure we wake up
2346 	 * the task, and the next invocation will do it.
2347 	 */
2348 	if (io_should_wake(iowq) || io_has_work(ctx))
2349 		return autoremove_wake_function(curr, mode, wake_flags, key);
2350 	return -1;
2351 }
2352 
2353 int io_run_task_work_sig(struct io_ring_ctx *ctx)
2354 {
2355 	if (io_run_task_work_ctx(ctx) > 0)
2356 		return 1;
2357 	if (task_sigpending(current))
2358 		return -EINTR;
2359 	return 0;
2360 }
2361 
2362 static bool current_pending_io(void)
2363 {
2364 	struct io_uring_task *tctx = current->io_uring;
2365 
2366 	if (!tctx)
2367 		return false;
2368 	return percpu_counter_read_positive(&tctx->inflight);
2369 }
2370 
2371 /* when this returns >0, the caller should retry */
2372 static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx,
2373 					  struct io_wait_queue *iowq,
2374 					  ktime_t *timeout)
2375 {
2376 	int io_wait, ret;
2377 	unsigned long check_cq;
2378 
2379 	/* make sure we run task_work before checking for signals */
2380 	ret = io_run_task_work_sig(ctx);
2381 	if (ret || io_should_wake(iowq))
2382 		return ret;
2383 
2384 	check_cq = READ_ONCE(ctx->check_cq);
2385 	if (unlikely(check_cq)) {
2386 		/* let the caller flush overflows, retry */
2387 		if (check_cq & BIT(IO_CHECK_CQ_OVERFLOW_BIT))
2388 			return 1;
2389 		if (check_cq & BIT(IO_CHECK_CQ_DROPPED_BIT))
2390 			return -EBADR;
2391 	}
2392 
2393 	/*
2394 	 * Mark us as being in io_wait if we have pending requests, so cpufreq
2395 	 * can take into account that the task is waiting for IO - turns out
2396 	 * to be important for low QD IO.
2397 	 */
2398 	io_wait = current->in_iowait;
2399 	if (current_pending_io())
2400 		current->in_iowait = 1;
2401 	ret = 1;
2402 	if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS))
2403 		ret = -ETIME;
2404 	current->in_iowait = io_wait;
2405 	return ret;
2406 }
2407 
2408 /*
2409  * Wait until events become available, if we don't already have some. The
2410  * application must reap them itself, as they reside on the shared cq ring.
2411  */
2412 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
2413 			  const sigset_t __user *sig, size_t sigsz,
2414 			  struct __kernel_timespec __user *uts)
2415 {
2416 	struct io_wait_queue iowq;
2417 	struct io_rings *rings = ctx->rings;
2418 	ktime_t timeout = KTIME_MAX;
2419 	int ret;
2420 
2421 	if (!io_allowed_run_tw(ctx))
2422 		return -EEXIST;
2423 
2424 	do {
2425 		/* always run at least 1 task work to process local work */
2426 		ret = io_run_task_work_ctx(ctx);
2427 		if (ret < 0)
2428 			return ret;
2429 		io_cqring_overflow_flush(ctx);
2430 
2431 		/* if user messes with these they will just get an early return */
2432 		if (__io_cqring_events_user(ctx) >= min_events)
2433 			return 0;
2434 	} while (ret > 0);
2435 
2436 	if (sig) {
2437 #ifdef CONFIG_COMPAT
2438 		if (in_compat_syscall())
2439 			ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
2440 						      sigsz);
2441 		else
2442 #endif
2443 			ret = set_user_sigmask(sig, sigsz);
2444 
2445 		if (ret)
2446 			return ret;
2447 	}
2448 
2449 	if (uts) {
2450 		struct timespec64 ts;
2451 
2452 		if (get_timespec64(&ts, uts))
2453 			return -EFAULT;
2454 		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
2455 	}
2456 
2457 	init_waitqueue_func_entry(&iowq.wq, io_wake_function);
2458 	iowq.wq.private = current;
2459 	INIT_LIST_HEAD(&iowq.wq.entry);
2460 	iowq.ctx = ctx;
2461 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
2462 	iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
2463 
2464 	trace_io_uring_cqring_wait(ctx, min_events);
2465 	do {
2466 		/* if we can't even flush overflow, don't wait for more */
2467 		if (!io_cqring_overflow_flush(ctx)) {
2468 			ret = -EBUSY;
2469 			break;
2470 		}
2471 		prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
2472 						TASK_INTERRUPTIBLE);
2473 		ret = io_cqring_wait_schedule(ctx, &iowq, &timeout);
2474 		cond_resched();
2475 	} while (ret > 0);
2476 
2477 	finish_wait(&ctx->cq_wait, &iowq.wq);
2478 	restore_saved_sigmask_unless(ret == -EINTR);
2479 
2480 	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2481 }
2482 
2483 static void io_mem_free(void *ptr)
2484 {
2485 	struct page *page;
2486 
2487 	if (!ptr)
2488 		return;
2489 
2490 	page = virt_to_head_page(ptr);
2491 	if (put_page_testzero(page))
2492 		free_compound_page(page);
2493 }
2494 
2495 static void *io_mem_alloc(size_t size)
2496 {
2497 	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP;
2498 
2499 	return (void *) __get_free_pages(gfp, get_order(size));
2500 }
2501 
2502 static unsigned long rings_size(struct io_ring_ctx *ctx, unsigned int sq_entries,
2503 				unsigned int cq_entries, size_t *sq_offset)
2504 {
2505 	struct io_rings *rings;
2506 	size_t off, sq_array_size;
2507 
2508 	off = struct_size(rings, cqes, cq_entries);
2509 	if (off == SIZE_MAX)
2510 		return SIZE_MAX;
2511 	if (ctx->flags & IORING_SETUP_CQE32) {
2512 		if (check_shl_overflow(off, 1, &off))
2513 			return SIZE_MAX;
2514 	}
2515 
2516 #ifdef CONFIG_SMP
2517 	off = ALIGN(off, SMP_CACHE_BYTES);
2518 	if (off == 0)
2519 		return SIZE_MAX;
2520 #endif
2521 
2522 	if (sq_offset)
2523 		*sq_offset = off;
2524 
2525 	sq_array_size = array_size(sizeof(u32), sq_entries);
2526 	if (sq_array_size == SIZE_MAX)
2527 		return SIZE_MAX;
2528 
2529 	if (check_add_overflow(off, sq_array_size, &off))
2530 		return SIZE_MAX;
2531 
2532 	return off;
2533 }
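/*
 * Added summary of the layout computed above: the io_rings struct and its CQE
 * array come first (the CQE portion doubled for IORING_SETUP_CQE32), the
 * running offset is aligned to SMP_CACHE_BYTES, and the u32 SQ index array of
 * sq_entries entries follows at *sq_offset. Each step is checked for
 * overflow, with SIZE_MAX returned on failure.
 */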
2534 
2535 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg,
2536 			       unsigned int eventfd_async)
2537 {
2538 	struct io_ev_fd *ev_fd;
2539 	__s32 __user *fds = arg;
2540 	int fd;
2541 
2542 	ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
2543 					lockdep_is_held(&ctx->uring_lock));
2544 	if (ev_fd)
2545 		return -EBUSY;
2546 
2547 	if (copy_from_user(&fd, fds, sizeof(*fds)))
2548 		return -EFAULT;
2549 
2550 	ev_fd = kmalloc(sizeof(*ev_fd), GFP_KERNEL);
2551 	if (!ev_fd)
2552 		return -ENOMEM;
2553 
2554 	ev_fd->cq_ev_fd = eventfd_ctx_fdget(fd);
2555 	if (IS_ERR(ev_fd->cq_ev_fd)) {
2556 		int ret = PTR_ERR(ev_fd->cq_ev_fd);
2557 		kfree(ev_fd);
2558 		return ret;
2559 	}
2560 
2561 	spin_lock(&ctx->completion_lock);
2562 	ctx->evfd_last_cq_tail = ctx->cached_cq_tail;
2563 	spin_unlock(&ctx->completion_lock);
2564 
2565 	ev_fd->eventfd_async = eventfd_async;
2566 	ctx->has_evfd = true;
2567 	rcu_assign_pointer(ctx->io_ev_fd, ev_fd);
2568 	atomic_set(&ev_fd->refs, 1);
2569 	atomic_set(&ev_fd->ops, 0);
2570 	return 0;
2571 }
2572 
2573 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
2574 {
2575 	struct io_ev_fd *ev_fd;
2576 
2577 	ev_fd = rcu_dereference_protected(ctx->io_ev_fd,
2578 					lockdep_is_held(&ctx->uring_lock));
2579 	if (ev_fd) {
2580 		ctx->has_evfd = false;
2581 		rcu_assign_pointer(ctx->io_ev_fd, NULL);
2582 		if (!atomic_fetch_or(BIT(IO_EVENTFD_OP_FREE_BIT), &ev_fd->ops))
2583 			call_rcu(&ev_fd->rcu, io_eventfd_ops);
2584 		return 0;
2585 	}
2586 
2587 	return -ENXIO;
2588 }
2589 
2590 static void io_req_caches_free(struct io_ring_ctx *ctx)
2591 {
2592 	int nr = 0;
2593 
2594 	mutex_lock(&ctx->uring_lock);
2595 	io_flush_cached_locked_reqs(ctx, &ctx->submit_state);
2596 
2597 	while (!io_req_cache_empty(ctx)) {
2598 		struct io_kiocb *req = io_alloc_req(ctx);
2599 
2600 		kmem_cache_free(req_cachep, req);
2601 		nr++;
2602 	}
2603 	if (nr)
2604 		percpu_ref_put_many(&ctx->refs, nr);
2605 	mutex_unlock(&ctx->uring_lock);
2606 }
2607 
2608 static __cold void io_ring_ctx_free(struct io_ring_ctx *ctx)
2609 {
2610 	io_sq_thread_finish(ctx);
2611 	io_rsrc_refs_drop(ctx);
2612 	/* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
2613 	io_wait_rsrc_data(ctx->buf_data);
2614 	io_wait_rsrc_data(ctx->file_data);
2615 
2616 	mutex_lock(&ctx->uring_lock);
2617 	if (ctx->buf_data)
2618 		__io_sqe_buffers_unregister(ctx);
2619 	if (ctx->file_data)
2620 		__io_sqe_files_unregister(ctx);
2621 	if (ctx->rings)
2622 		__io_cqring_overflow_flush(ctx, true);
2623 	io_eventfd_unregister(ctx);
2624 	io_alloc_cache_free(&ctx->apoll_cache, io_apoll_cache_free);
2625 	io_alloc_cache_free(&ctx->netmsg_cache, io_netmsg_cache_free);
2626 	io_destroy_buffers(ctx);
2627 	mutex_unlock(&ctx->uring_lock);
2628 	if (ctx->sq_creds)
2629 		put_cred(ctx->sq_creds);
2630 	if (ctx->submitter_task)
2631 		put_task_struct(ctx->submitter_task);
2632 
2633 	/* there are no registered resources left, nobody uses it */
2634 	if (ctx->rsrc_node)
2635 		io_rsrc_node_destroy(ctx->rsrc_node);
2636 	if (ctx->rsrc_backup_node)
2637 		io_rsrc_node_destroy(ctx->rsrc_backup_node);
2638 	flush_delayed_work(&ctx->rsrc_put_work);
2639 	flush_delayed_work(&ctx->fallback_work);
2640 
2641 	WARN_ON_ONCE(!list_empty(&ctx->rsrc_ref_list));
2642 	WARN_ON_ONCE(!llist_empty(&ctx->rsrc_put_llist));
2643 
2644 #if defined(CONFIG_UNIX)
2645 	if (ctx->ring_sock) {
2646 		ctx->ring_sock->file = NULL; /* so that iput() is called */
2647 		sock_release(ctx->ring_sock);
2648 	}
2649 #endif
2650 	WARN_ON_ONCE(!list_empty(&ctx->ltimeout_list));
2651 
2652 	if (ctx->mm_account) {
2653 		mmdrop(ctx->mm_account);
2654 		ctx->mm_account = NULL;
2655 	}
2656 	io_mem_free(ctx->rings);
2657 	io_mem_free(ctx->sq_sqes);
2658 
2659 	percpu_ref_exit(&ctx->refs);
2660 	free_uid(ctx->user);
2661 	io_req_caches_free(ctx);
2662 	if (ctx->hash_map)
2663 		io_wq_put_hash(ctx->hash_map);
2664 	kfree(ctx->cancel_table.hbs);
2665 	kfree(ctx->cancel_table_locked.hbs);
2666 	kfree(ctx->dummy_ubuf);
2667 	kfree(ctx->io_bl);
2668 	xa_destroy(&ctx->io_bl_xa);
2669 	kfree(ctx);
2670 }
2671 
2672 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
2673 {
2674 	struct io_ring_ctx *ctx = file->private_data;
2675 	__poll_t mask = 0;
2676 
2677 	poll_wait(file, &ctx->cq_wait, wait);
2678 	/*
2679 	 * synchronizes with barrier from wq_has_sleeper call in
2680 	 * io_commit_cqring
2681 	 */
2682 	smp_rmb();
2683 	if (!io_sqring_full(ctx))
2684 		mask |= EPOLLOUT | EPOLLWRNORM;
2685 
2686 	/*
2687 	 * Don't flush cqring overflow list here, just do a simple check.
2688 	 * Otherwise there could possibly be an ABBA deadlock:
2689 	 *      CPU0                    CPU1
2690 	 *      ----                    ----
2691 	 * lock(&ctx->uring_lock);
2692 	 *                              lock(&ep->mtx);
2693 	 *                              lock(&ctx->uring_lock);
2694 	 * lock(&ep->mtx);
2695 	 *
2696 	 * Users may get EPOLLIN while seeing nothing in the cqring; this
2697 	 * pushes them to do the flush.
2698 	 */
2699 
2700 	if (__io_cqring_events_user(ctx) || io_has_work(ctx))
2701 		mask |= EPOLLIN | EPOLLRDNORM;
2702 
2703 	return mask;
2704 }
2705 
2706 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
2707 {
2708 	const struct cred *creds;
2709 
2710 	creds = xa_erase(&ctx->personalities, id);
2711 	if (creds) {
2712 		put_cred(creds);
2713 		return 0;
2714 	}
2715 
2716 	return -EINVAL;
2717 }
2718 
2719 struct io_tctx_exit {
2720 	struct callback_head		task_work;
2721 	struct completion		completion;
2722 	struct io_ring_ctx		*ctx;
2723 };
2724 
2725 static __cold void io_tctx_exit_cb(struct callback_head *cb)
2726 {
2727 	struct io_uring_task *tctx = current->io_uring;
2728 	struct io_tctx_exit *work;
2729 
2730 	work = container_of(cb, struct io_tctx_exit, task_work);
2731 	/*
2732 	 * When @in_idle, we're in cancellation and it's racy to remove the
2733 	 * node. It'll be removed by the end of cancellation, just ignore it.
2734 	 * tctx can be NULL if the queueing of this task_work raced with
2735 	 * work cancellation off the exec path.
2736 	 */
2737 	if (tctx && !atomic_read(&tctx->in_idle))
2738 		io_uring_del_tctx_node((unsigned long)work->ctx);
2739 	complete(&work->completion);
2740 }
2741 
2742 static __cold bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
2743 {
2744 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2745 
2746 	return req->ctx == data;
2747 }
2748 
2749 static __cold void io_ring_exit_work(struct work_struct *work)
2750 {
2751 	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, exit_work);
2752 	unsigned long timeout = jiffies + HZ * 60 * 5;
2753 	unsigned long interval = HZ / 20;
2754 	struct io_tctx_exit exit;
2755 	struct io_tctx_node *node;
2756 	int ret;
2757 
2758 	/*
2759 	 * If we're doing polled IO and end up having requests being
2760 	 * submitted async (out-of-line), then completions can come in while
2761 	 * we're waiting for refs to drop. We need to reap these manually,
2762 	 * as nobody else will be looking for them.
2763 	 */
2764 	do {
2765 		if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
2766 			io_move_task_work_from_local(ctx);
2767 
2768 		while (io_uring_try_cancel_requests(ctx, NULL, true))
2769 			cond_resched();
2770 
2771 		if (ctx->sq_data) {
2772 			struct io_sq_data *sqd = ctx->sq_data;
2773 			struct task_struct *tsk;
2774 
2775 			io_sq_thread_park(sqd);
2776 			tsk = sqd->thread;
2777 			if (tsk && tsk->io_uring && tsk->io_uring->io_wq)
2778 				io_wq_cancel_cb(tsk->io_uring->io_wq,
2779 						io_cancel_ctx_cb, ctx, true);
2780 			io_sq_thread_unpark(sqd);
2781 		}
2782 
2783 		io_req_caches_free(ctx);
2784 
2785 		if (WARN_ON_ONCE(time_after(jiffies, timeout))) {
2786 			/* there is little hope left, don't run it too often */
2787 			interval = HZ * 60;
2788 		}
2789 		/*
2790 	 * This is really an uninterruptible wait, as it has to
2791 	 * complete. But it's also run from a kworker, which doesn't
2792 		 * take signals, so it's fine to make it interruptible. This
2793 		 * avoids scenarios where we knowingly can wait much longer
2794 		 * on completions, for example if someone does a SIGSTOP on
2795 		 * a task that needs to finish task_work to make this loop
2796 		 * complete. That's a synthetic situation that should not
2797 		 * cause a stuck task backtrace, and hence a potential panic
2798 		 * on stuck tasks if that is enabled.
2799 		 */
2800 	} while (!wait_for_completion_interruptible_timeout(&ctx->ref_comp, interval));
2801 
2802 	init_completion(&exit.completion);
2803 	init_task_work(&exit.task_work, io_tctx_exit_cb);
2804 	exit.ctx = ctx;
2805 
2806 	mutex_lock(&ctx->uring_lock);
2807 	while (!list_empty(&ctx->tctx_list)) {
2808 		WARN_ON_ONCE(time_after(jiffies, timeout));
2809 
2810 		node = list_first_entry(&ctx->tctx_list, struct io_tctx_node,
2811 					ctx_node);
2812 		/* don't spin on a single task if cancellation failed */
2813 		list_rotate_left(&ctx->tctx_list);
2814 		ret = task_work_add(node->task, &exit.task_work, TWA_SIGNAL);
2815 		if (WARN_ON_ONCE(ret))
2816 			continue;
2817 
2818 		mutex_unlock(&ctx->uring_lock);
2819 		/*
2820 		 * See comment above for
2821 		 * wait_for_completion_interruptible_timeout() on why this
2822 		 * wait is marked as interruptible.
2823 		 */
2824 		wait_for_completion_interruptible(&exit.completion);
2825 		mutex_lock(&ctx->uring_lock);
2826 	}
2827 	mutex_unlock(&ctx->uring_lock);
2828 	spin_lock(&ctx->completion_lock);
2829 	spin_unlock(&ctx->completion_lock);
2830 
2831 	io_ring_ctx_free(ctx);
2832 }
2833 
2834 static __cold void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
2835 {
2836 	unsigned long index;
2837 	struct creds *creds;
2838 
2839 	mutex_lock(&ctx->uring_lock);
2840 	percpu_ref_kill(&ctx->refs);
2841 	if (ctx->rings)
2842 		__io_cqring_overflow_flush(ctx, true);
2843 	xa_for_each(&ctx->personalities, index, creds)
2844 		io_unregister_personality(ctx, index);
2845 	if (ctx->rings)
2846 		io_poll_remove_all(ctx, NULL, true);
2847 	mutex_unlock(&ctx->uring_lock);
2848 
2849 	/*
2850 	 * If we failed setting up the ctx, we might not have any rings
2851 	 * and therefore did not submit any requests
2852 	 */
2853 	if (ctx->rings)
2854 		io_kill_timeouts(ctx, NULL, true);
2855 
2856 	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
2857 	/*
2858 	 * Use system_unbound_wq to avoid spawning tons of event kworkers
2859 	 * if we're exiting a ton of rings at the same time. It just adds
2860 	 * noise and overhead; there's no discernible change in runtime
2861 	 * over using system_wq.
2862 	 */
2863 	queue_work(system_unbound_wq, &ctx->exit_work);
2864 }
2865 
2866 static int io_uring_release(struct inode *inode, struct file *file)
2867 {
2868 	struct io_ring_ctx *ctx = file->private_data;
2869 
2870 	file->private_data = NULL;
2871 	io_ring_ctx_wait_and_kill(ctx);
2872 	return 0;
2873 }
2874 
2875 struct io_task_cancel {
2876 	struct task_struct *task;
2877 	bool all;
2878 };
2879 
2880 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
2881 {
2882 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2883 	struct io_task_cancel *cancel = data;
2884 
2885 	return io_match_task_safe(req, cancel->task, cancel->all);
2886 }
2887 
2888 static __cold bool io_cancel_defer_files(struct io_ring_ctx *ctx,
2889 					 struct task_struct *task,
2890 					 bool cancel_all)
2891 {
2892 	struct io_defer_entry *de;
2893 	LIST_HEAD(list);
2894 
2895 	spin_lock(&ctx->completion_lock);
2896 	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
2897 		if (io_match_task_safe(de->req, task, cancel_all)) {
2898 			list_cut_position(&list, &ctx->defer_list, &de->list);
2899 			break;
2900 		}
2901 	}
2902 	spin_unlock(&ctx->completion_lock);
2903 	if (list_empty(&list))
2904 		return false;
2905 
2906 	while (!list_empty(&list)) {
2907 		de = list_first_entry(&list, struct io_defer_entry, list);
2908 		list_del_init(&de->list);
2909 		io_req_task_queue_fail(de->req, -ECANCELED);
2910 		kfree(de);
2911 	}
2912 	return true;
2913 }
2914 
2915 static __cold bool io_uring_try_cancel_iowq(struct io_ring_ctx *ctx)
2916 {
2917 	struct io_tctx_node *node;
2918 	enum io_wq_cancel cret;
2919 	bool ret = false;
2920 
2921 	mutex_lock(&ctx->uring_lock);
2922 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
2923 		struct io_uring_task *tctx = node->task->io_uring;
2924 
2925 		/*
2926 		 * io_wq will stay alive while we hold uring_lock, because it's
2927 		 * killed after ctx nodes, which requires taking the lock.
2928 		 */
2929 		if (!tctx || !tctx->io_wq)
2930 			continue;
2931 		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_ctx_cb, ctx, true);
2932 		ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
2933 	}
2934 	mutex_unlock(&ctx->uring_lock);
2935 
2936 	return ret;
2937 }
2938 
2939 static __cold bool io_uring_try_cancel_requests(struct io_ring_ctx *ctx,
2940 						struct task_struct *task,
2941 						bool cancel_all)
2942 {
2943 	struct io_task_cancel cancel = { .task = task, .all = cancel_all, };
2944 	struct io_uring_task *tctx = task ? task->io_uring : NULL;
2945 	enum io_wq_cancel cret;
2946 	bool ret = false;
2947 
2948 	/* failed during ring init, it couldn't have issued any requests */
2949 	if (!ctx->rings)
2950 		return false;
2951 
2952 	if (!task) {
2953 		ret |= io_uring_try_cancel_iowq(ctx);
2954 	} else if (tctx && tctx->io_wq) {
2955 		/*
2956 		 * Cancels requests of all rings, not only @ctx, but
2957 		 * it's fine as the task is in exit/exec.
2958 		 */
2959 		cret = io_wq_cancel_cb(tctx->io_wq, io_cancel_task_cb,
2960 				       &cancel, true);
2961 		ret |= (cret != IO_WQ_CANCEL_NOTFOUND);
2962 	}
2963 
2964 	/* SQPOLL thread does its own polling */
2965 	if ((!(ctx->flags & IORING_SETUP_SQPOLL) && cancel_all) ||
2966 	    (ctx->sq_data && ctx->sq_data->thread == current)) {
2967 		while (!wq_list_empty(&ctx->iopoll_list)) {
2968 			io_iopoll_try_reap_events(ctx);
2969 			ret = true;
2970 			cond_resched();
2971 		}
2972 	}
2973 
2974 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
2975 		ret |= io_run_local_work(ctx) > 0;
2976 	ret |= io_cancel_defer_files(ctx, task, cancel_all);
2977 	mutex_lock(&ctx->uring_lock);
2978 	ret |= io_poll_remove_all(ctx, task, cancel_all);
2979 	mutex_unlock(&ctx->uring_lock);
2980 	ret |= io_kill_timeouts(ctx, task, cancel_all);
2981 	if (task)
2982 		ret |= io_run_task_work() > 0;
2983 	return ret;
2984 }
2985 
2986 static s64 tctx_inflight(struct io_uring_task *tctx, bool tracked)
2987 {
2988 	if (tracked)
2989 		return atomic_read(&tctx->inflight_tracked);
2990 	return percpu_counter_sum(&tctx->inflight);
2991 }
2992 
2993 /*
2994  * Find any io_uring ctx that this task has registered or done IO on, and cancel
2995  * requests. @sqd should be not-null IFF it's an SQPOLL thread cancellation.
2996  */
2997 __cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd)
2998 {
2999 	struct io_uring_task *tctx = current->io_uring;
3000 	struct io_ring_ctx *ctx;
3001 	s64 inflight;
3002 	DEFINE_WAIT(wait);
3003 
3004 	WARN_ON_ONCE(sqd && sqd->thread != current);
3005 
3006 	if (!current->io_uring)
3007 		return;
3008 	if (tctx->io_wq)
3009 		io_wq_exit_start(tctx->io_wq);
3010 
3011 	atomic_inc(&tctx->in_idle);
3012 	do {
3013 		bool loop = false;
3014 
3015 		io_uring_drop_tctx_refs(current);
3016 		/* read completions before cancelations */
3017 		inflight = tctx_inflight(tctx, !cancel_all);
3018 		if (!inflight)
3019 			break;
3020 
3021 		if (!sqd) {
3022 			struct io_tctx_node *node;
3023 			unsigned long index;
3024 
3025 			xa_for_each(&tctx->xa, index, node) {
3026 				/* sqpoll task will cancel all its requests */
3027 				if (node->ctx->sq_data)
3028 					continue;
3029 				loop |= io_uring_try_cancel_requests(node->ctx,
3030 							current, cancel_all);
3031 			}
3032 		} else {
3033 			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
3034 				loop |= io_uring_try_cancel_requests(ctx,
3035 								     current,
3036 								     cancel_all);
3037 		}
3038 
3039 		if (loop) {
3040 			cond_resched();
3041 			continue;
3042 		}
3043 
3044 		prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
3045 		io_run_task_work();
3046 		io_uring_drop_tctx_refs(current);
3047 
3048 		/*
3049 		 * If we've seen completions, retry without waiting. This
3050 		 * avoids a race where a completion comes in before we did
3051 		 * prepare_to_wait().
3052 		 */
3053 		if (inflight == tctx_inflight(tctx, !cancel_all))
3054 			schedule();
3055 		finish_wait(&tctx->wait, &wait);
3056 	} while (1);
3057 
3058 	io_uring_clean_tctx(tctx);
3059 	if (cancel_all) {
3060 		/*
3061 		 * We shouldn't run task_works after cancel, so just leave
3062 		 * ->in_idle set for normal exit.
3063 		 */
3064 		atomic_dec(&tctx->in_idle);
3065 		/* for exec all current's requests should be gone, kill tctx */
3066 		__io_uring_free(current);
3067 	}
3068 }
3069 
3070 void __io_uring_cancel(bool cancel_all)
3071 {
3072 	io_uring_cancel_generic(cancel_all, NULL);
3073 }
3074 
3075 static void *io_uring_validate_mmap_request(struct file *file,
3076 					    loff_t pgoff, size_t sz)
3077 {
3078 	struct io_ring_ctx *ctx = file->private_data;
3079 	loff_t offset = pgoff << PAGE_SHIFT;
3080 	struct page *page;
3081 	void *ptr;
3082 
3083 	switch (offset) {
3084 	case IORING_OFF_SQ_RING:
3085 	case IORING_OFF_CQ_RING:
3086 		ptr = ctx->rings;
3087 		break;
3088 	case IORING_OFF_SQES:
3089 		ptr = ctx->sq_sqes;
3090 		break;
3091 	default:
3092 		return ERR_PTR(-EINVAL);
3093 	}
3094 
3095 	page = virt_to_head_page(ptr);
3096 	if (sz > page_size(page))
3097 		return ERR_PTR(-EINVAL);
3098 
3099 	return ptr;
3100 }
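/*
 * Userspace-side sketch (added for illustration, not part of this file):
 * these are the offsets an application passes to mmap(2) after
 * io_uring_setup(2), e.g.:
 *
 *	sq_ring = mmap(NULL, sq_ring_sz, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
 *	sqes    = mmap(NULL, sqes_sz, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQES);
 *
 * sq_ring_sz and sqes_sz are derived from the io_uring_params offsets
 * returned by io_uring_setup(); the variable names are illustrative only.
 */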
3101 
3102 #ifdef CONFIG_MMU
3103 
3104 static __cold int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3105 {
3106 	size_t sz = vma->vm_end - vma->vm_start;
3107 	unsigned long pfn;
3108 	void *ptr;
3109 
3110 	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
3111 	if (IS_ERR(ptr))
3112 		return PTR_ERR(ptr);
3113 
3114 	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
3115 	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
3116 }
3117 
3118 static unsigned long io_uring_mmu_get_unmapped_area(struct file *filp,
3119 			unsigned long addr, unsigned long len,
3120 			unsigned long pgoff, unsigned long flags)
3121 {
3122 	void *ptr;
3123 
3124 	/*
3125 	 * Do not allow mapping to a user-provided address, to avoid breaking
3126 	 * the aliasing rules. Userspace is not able to guess the offset
3127 	 * address of the kernel's kmalloc()ed memory area.
3128 	 */
3129 	if (addr)
3130 		return -EINVAL;
3131 
3132 	ptr = io_uring_validate_mmap_request(filp, pgoff, len);
3133 	if (IS_ERR(ptr))
3134 		return -ENOMEM;
3135 
3136 	/*
3137 	 * Some architectures have strong cache aliasing requirements.
3138 	 * For such architectures we need a coherent mapping which aliases
3139 	 * kernel memory *and* userspace memory. To achieve that:
3140 	 * - use a NULL file pointer to reference physical memory, and
3141 	 * - use the kernel virtual address of the shared io_uring context
3142 	 *   (instead of the userspace-provided address, which has to be 0UL
3143 	 *   anyway).
3144 	 * - use the same pgoff which the get_unmapped_area() uses to
3145 	 *   calculate the page colouring.
3146 	 * For architectures without such aliasing requirements, the
3147 	 * architecture will return any suitable mapping because addr is 0.
3148 	 */
3149 	filp = NULL;
3150 	flags |= MAP_SHARED;
3151 	pgoff = 0;	/* has been translated to ptr above */
3152 #ifdef SHM_COLOUR
3153 	addr = (uintptr_t) ptr;
3154 	pgoff = addr >> PAGE_SHIFT;
3155 #else
3156 	addr = 0UL;
3157 #endif
3158 	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
3159 }
3160 
3161 #else /* !CONFIG_MMU */
3162 
3163 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3164 {
3165 	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
3166 }
3167 
3168 static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
3169 {
3170 	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
3171 }
3172 
3173 static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
3174 	unsigned long addr, unsigned long len,
3175 	unsigned long pgoff, unsigned long flags)
3176 {
3177 	void *ptr;
3178 
3179 	ptr = io_uring_validate_mmap_request(file, pgoff, len);
3180 	if (IS_ERR(ptr))
3181 		return PTR_ERR(ptr);
3182 
3183 	return (unsigned long) ptr;
3184 }
3185 
3186 #endif /* !CONFIG_MMU */
3187 
3188 static int io_validate_ext_arg(unsigned flags, const void __user *argp, size_t argsz)
3189 {
3190 	if (flags & IORING_ENTER_EXT_ARG) {
3191 		struct io_uring_getevents_arg arg;
3192 
3193 		if (argsz != sizeof(arg))
3194 			return -EINVAL;
3195 		if (copy_from_user(&arg, argp, sizeof(arg)))
3196 			return -EFAULT;
3197 	}
3198 	return 0;
3199 }
3200 
3201 static int io_get_ext_arg(unsigned flags, const void __user *argp, size_t *argsz,
3202 			  struct __kernel_timespec __user **ts,
3203 			  const sigset_t __user **sig)
3204 {
3205 	struct io_uring_getevents_arg arg;
3206 
3207 	/*
3208 	 * If EXT_ARG isn't set, then we have no timespec and the argp pointer
3209 	 * is just a pointer to the sigset_t.
3210 	 */
3211 	if (!(flags & IORING_ENTER_EXT_ARG)) {
3212 		*sig = (const sigset_t __user *) argp;
3213 		*ts = NULL;
3214 		return 0;
3215 	}
3216 
3217 	/*
3218 	 * EXT_ARG is set - ensure we agree on the size of it and copy in our
3219 	 * timespec and sigset_t pointers if good.
3220 	 */
3221 	if (*argsz != sizeof(arg))
3222 		return -EINVAL;
3223 	if (copy_from_user(&arg, argp, sizeof(arg)))
3224 		return -EFAULT;
3225 	if (arg.pad)
3226 		return -EINVAL;
3227 	*sig = u64_to_user_ptr(arg.sigmask);
3228 	*argsz = arg.sigmask_sz;
3229 	*ts = u64_to_user_ptr(arg.ts);
3230 	return 0;
3231 }
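/*
 * Userspace-side sketch (added for illustration, assuming the UAPI layout of
 * struct io_uring_getevents_arg): the two argp conventions handled by the
 * helpers above look roughly like this from the caller's perspective:
 *
 *	// legacy form: argp is the sigmask itself, argsz its size
 *	io_uring_enter(fd, to_submit, min_complete,
 *		       IORING_ENTER_GETEVENTS, &sigmask, _NSIG / 8);
 *
 *	// extended form: argp bundles the sigmask and a timeout
 *	struct io_uring_getevents_arg arg = {
 *		.sigmask	= (__u64)(uintptr_t)&sigmask,
 *		.sigmask_sz	= _NSIG / 8,
 *		.ts		= (__u64)(uintptr_t)&ts,
 *	};
 *	io_uring_enter(fd, to_submit, min_complete,
 *		       IORING_ENTER_GETEVENTS | IORING_ENTER_EXT_ARG,
 *		       &arg, sizeof(arg));
 */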
3232 
3233 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3234 		u32, min_complete, u32, flags, const void __user *, argp,
3235 		size_t, argsz)
3236 {
3237 	struct io_ring_ctx *ctx;
3238 	struct fd f;
3239 	long ret;
3240 
3241 	if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
3242 			       IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG |
3243 			       IORING_ENTER_REGISTERED_RING)))
3244 		return -EINVAL;
3245 
3246 	/*
3247 	 * Ring fd has been registered via IORING_REGISTER_RING_FDS, we
3248 	 * need only dereference our task private array to find it.
3249 	 */
3250 	if (flags & IORING_ENTER_REGISTERED_RING) {
3251 		struct io_uring_task *tctx = current->io_uring;
3252 
3253 		if (unlikely(!tctx || fd >= IO_RINGFD_REG_MAX))
3254 			return -EINVAL;
3255 		fd = array_index_nospec(fd, IO_RINGFD_REG_MAX);
3256 		f.file = tctx->registered_rings[fd];
3257 		f.flags = 0;
3258 		if (unlikely(!f.file))
3259 			return -EBADF;
3260 	} else {
3261 		f = fdget(fd);
3262 		if (unlikely(!f.file))
3263 			return -EBADF;
3264 		ret = -EOPNOTSUPP;
3265 		if (unlikely(!io_is_uring_fops(f.file)))
3266 			goto out;
3267 	}
3268 
3269 	ctx = f.file->private_data;
3270 	ret = -EBADFD;
3271 	if (unlikely(ctx->flags & IORING_SETUP_R_DISABLED))
3272 		goto out;
3273 
3274 	/*
3275 	 * For SQ polling, the thread will do all submissions and completions.
3276 	 * Just return the requested submit count, and wake the thread if
3277 	 * we were asked to.
3278 	 */
3279 	ret = 0;
3280 	if (ctx->flags & IORING_SETUP_SQPOLL) {
3281 		io_cqring_overflow_flush(ctx);
3282 
3283 		if (unlikely(ctx->sq_data->thread == NULL)) {
3284 			ret = -EOWNERDEAD;
3285 			goto out;
3286 		}
3287 		if (flags & IORING_ENTER_SQ_WAKEUP)
3288 			wake_up(&ctx->sq_data->wait);
3289 		if (flags & IORING_ENTER_SQ_WAIT) {
3290 			ret = io_sqpoll_wait_sq(ctx);
3291 			if (ret)
3292 				goto out;
3293 		}
3294 		ret = to_submit;
3295 	} else if (to_submit) {
3296 		ret = io_uring_add_tctx_node(ctx);
3297 		if (unlikely(ret))
3298 			goto out;
3299 
3300 		mutex_lock(&ctx->uring_lock);
3301 		ret = io_submit_sqes(ctx, to_submit);
3302 		if (ret != to_submit) {
3303 			mutex_unlock(&ctx->uring_lock);
3304 			goto out;
3305 		}
3306 		if (flags & IORING_ENTER_GETEVENTS) {
3307 			if (ctx->syscall_iopoll)
3308 				goto iopoll_locked;
3309 			/*
3310 			 * Ignore errors, we'll soon call io_cqring_wait() and
3311 			 * it should handle ownership problems if any.
3312 			 */
3313 			if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
3314 				(void)io_run_local_work_locked(ctx);
3315 		}
3316 		mutex_unlock(&ctx->uring_lock);
3317 	}
3318 
3319 	if (flags & IORING_ENTER_GETEVENTS) {
3320 		int ret2;
3321 
3322 		if (ctx->syscall_iopoll) {
3323 			/*
3324 			 * We disallow the app entering submit/complete with
3325 			 * polling, but we still need to lock the ring to
3326 			 * prevent racing with polled issue that got punted to
3327 			 * a workqueue.
3328 			 */
3329 			mutex_lock(&ctx->uring_lock);
3330 iopoll_locked:
3331 			ret2 = io_validate_ext_arg(flags, argp, argsz);
3332 			if (likely(!ret2)) {
3333 				min_complete = min(min_complete,
3334 						   ctx->cq_entries);
3335 				ret2 = io_iopoll_check(ctx, min_complete);
3336 			}
3337 			mutex_unlock(&ctx->uring_lock);
3338 		} else {
3339 			const sigset_t __user *sig;
3340 			struct __kernel_timespec __user *ts;
3341 
3342 			ret2 = io_get_ext_arg(flags, argp, &argsz, &ts, &sig);
3343 			if (likely(!ret2)) {
3344 				min_complete = min(min_complete,
3345 						   ctx->cq_entries);
3346 				ret2 = io_cqring_wait(ctx, min_complete, sig,
3347 						      argsz, ts);
3348 			}
3349 		}
3350 
3351 		if (!ret) {
3352 			ret = ret2;
3353 
3354 			/*
3355 			 * EBADR indicates that one or more CQEs were dropped.
3356 			 * Once the user has been informed we can clear the bit
3357 			 * as they are obviously ok with those drops.
3358 			 */
3359 			if (unlikely(ret2 == -EBADR))
3360 				clear_bit(IO_CHECK_CQ_DROPPED_BIT,
3361 					  &ctx->check_cq);
3362 		}
3363 	}
3364 out:
3365 	fdput(f);
3366 	return ret;
3367 }
3368 
3369 static const struct file_operations io_uring_fops = {
3370 	.release	= io_uring_release,
3371 	.mmap		= io_uring_mmap,
3372 #ifndef CONFIG_MMU
3373 	.get_unmapped_area = io_uring_nommu_get_unmapped_area,
3374 	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
3375 #else
3376 	.get_unmapped_area = io_uring_mmu_get_unmapped_area,
3377 #endif
3378 	.poll		= io_uring_poll,
3379 #ifdef CONFIG_PROC_FS
3380 	.show_fdinfo	= io_uring_show_fdinfo,
3381 #endif
3382 };
3383 
3384 bool io_is_uring_fops(struct file *file)
3385 {
3386 	return file->f_op == &io_uring_fops;
3387 }
3388 
3389 static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
3390 					 struct io_uring_params *p)
3391 {
3392 	struct io_rings *rings;
3393 	size_t size, sq_array_offset;
3394 
3395 	/* make sure these are sane, as we already accounted them */
3396 	ctx->sq_entries = p->sq_entries;
3397 	ctx->cq_entries = p->cq_entries;
3398 
3399 	size = rings_size(ctx, p->sq_entries, p->cq_entries, &sq_array_offset);
3400 	if (size == SIZE_MAX)
3401 		return -EOVERFLOW;
3402 
3403 	rings = io_mem_alloc(size);
3404 	if (!rings)
3405 		return -ENOMEM;
3406 
3407 	ctx->rings = rings;
3408 	ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
3409 	rings->sq_ring_mask = p->sq_entries - 1;
3410 	rings->cq_ring_mask = p->cq_entries - 1;
3411 	rings->sq_ring_entries = p->sq_entries;
3412 	rings->cq_ring_entries = p->cq_entries;
3413 
3414 	if (p->flags & IORING_SETUP_SQE128)
3415 		size = array_size(2 * sizeof(struct io_uring_sqe), p->sq_entries);
3416 	else
3417 		size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
3418 	if (size == SIZE_MAX) {
3419 		io_mem_free(ctx->rings);
3420 		ctx->rings = NULL;
3421 		return -EOVERFLOW;
3422 	}
3423 
3424 	ctx->sq_sqes = io_mem_alloc(size);
3425 	if (!ctx->sq_sqes) {
3426 		io_mem_free(ctx->rings);
3427 		ctx->rings = NULL;
3428 		return -ENOMEM;
3429 	}
3430 
3431 	return 0;
3432 }
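/*
 * Sizing example (illustrative): sizeof(struct io_uring_sqe) is 64 bytes
 * (asserted in io_uring_init() below), so 128 sq_entries need an 8 KiB
 * SQE array, or 16 KiB with IORING_SETUP_SQE128. The rings allocation
 * above additionally covers the CQE array and the SQ index array.
 */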
3433 
3434 static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
3435 {
3436 	int ret, fd;
3437 
3438 	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
3439 	if (fd < 0)
3440 		return fd;
3441 
3442 	ret = __io_uring_add_tctx_node(ctx);
3443 	if (ret) {
3444 		put_unused_fd(fd);
3445 		return ret;
3446 	}
3447 	fd_install(fd, file);
3448 	return fd;
3449 }
3450 
3451 /*
3452  * Allocate an anonymous fd; this is what constitutes the application-
3453  * visible backing of an io_uring instance. The application mmaps this
3454  * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
3455  * we have to tie this fd to a socket for file garbage collection purposes.
3456  */
3457 static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
3458 {
3459 	struct file *file;
3460 #if defined(CONFIG_UNIX)
3461 	int ret;
3462 
3463 	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
3464 				&ctx->ring_sock);
3465 	if (ret)
3466 		return ERR_PTR(ret);
3467 #endif
3468 
3469 	file = anon_inode_getfile_secure("[io_uring]", &io_uring_fops, ctx,
3470 					 O_RDWR | O_CLOEXEC, NULL);
3471 #if defined(CONFIG_UNIX)
3472 	if (IS_ERR(file)) {
3473 		sock_release(ctx->ring_sock);
3474 		ctx->ring_sock = NULL;
3475 	} else {
3476 		ctx->ring_sock->file = file;
3477 	}
3478 #endif
3479 	return file;
3480 }
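/*
 * Userspace sketch (illustrative): after the fd is installed, the
 * application maps the rings and the SQE array at the fixed uapi offsets.
 * The lengths (sq_ring_sz and sqes_sz are illustrative names) are
 * computed from the offsets and sizes reported back in io_uring_params:
 *
 *	void *sq_ring = mmap(NULL, sq_ring_sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, ring_fd,
 *			     IORING_OFF_SQ_RING);
 *	void *sqes = mmap(NULL, sqes_sz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED | MAP_POPULATE, ring_fd,
 *			  IORING_OFF_SQES);
 *
 * With IORING_FEAT_SINGLE_MMAP (advertised in io_uring_create() below),
 * the CQ ring lives in the same mapping as the SQ ring, so no separate
 * IORING_OFF_CQ_RING mapping is needed.
 */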
3481 
3482 static __cold int io_uring_create(unsigned entries, struct io_uring_params *p,
3483 				  struct io_uring_params __user *params)
3484 {
3485 	struct io_ring_ctx *ctx;
3486 	struct file *file;
3487 	int ret;
3488 
3489 	if (!entries)
3490 		return -EINVAL;
3491 	if (entries > IORING_MAX_ENTRIES) {
3492 		if (!(p->flags & IORING_SETUP_CLAMP))
3493 			return -EINVAL;
3494 		entries = IORING_MAX_ENTRIES;
3495 	}
3496 
3497 	/*
3498 	 * Use twice as many entries for the CQ ring. It's possible for the
3499 	 * application to drive a higher depth than the size of the SQ ring,
3500 	 * since the sqes are only used at submission time. This allows for
3501 	 * some flexibility in overcommitting a bit. If the application has
3502 	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
3503 	 * of CQ ring entries manually.
3504 	 */
3505 	p->sq_entries = roundup_pow_of_two(entries);
3506 	if (p->flags & IORING_SETUP_CQSIZE) {
3507 		/*
3508 		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
3509 		 * to a power-of-two, if it isn't already. We do NOT impose
3510 		 * any cq vs sq ring sizing.
3511 		 */
3512 		if (!p->cq_entries)
3513 			return -EINVAL;
3514 		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
3515 			if (!(p->flags & IORING_SETUP_CLAMP))
3516 				return -EINVAL;
3517 			p->cq_entries = IORING_MAX_CQ_ENTRIES;
3518 		}
3519 		p->cq_entries = roundup_pow_of_two(p->cq_entries);
3520 		if (p->cq_entries < p->sq_entries)
3521 			return -EINVAL;
3522 	} else {
3523 		p->cq_entries = 2 * p->sq_entries;
3524 	}
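	/*
	 * Example (illustrative, not from the original source): a setup
	 * request of entries == 100 without IORING_SETUP_CQSIZE ends up
	 * with sq_entries == 128 (rounded up to a power of two) and
	 * cq_entries == 256.
	 */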
3525 
3526 	ctx = io_ring_ctx_alloc(p);
3527 	if (!ctx)
3528 		return -ENOMEM;
3529 
3530 	/*
3531 	 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
3532 	 * space applications don't need to poll for I/O completion events
3533 	 * themselves; they can rely on io_sq_thread to do the polling,
3534 	 * which reduces CPU usage and uring_lock contention.
3535 	 */
3536 	if (ctx->flags & IORING_SETUP_IOPOLL &&
3537 	    !(ctx->flags & IORING_SETUP_SQPOLL))
3538 		ctx->syscall_iopoll = 1;
3539 
3540 	ctx->compat = in_compat_syscall();
3541 	if (!ns_capable_noaudit(&init_user_ns, CAP_IPC_LOCK))
3542 		ctx->user = get_uid(current_user());
3543 
3544 	/*
3545 	 * For SQPOLL, we just need a wakeup, always. For !SQPOLL, if
3546 	 * COOP_TASKRUN is set, then IPIs are never needed by the app.
3547 	 */
3548 	ret = -EINVAL;
3549 	if (ctx->flags & IORING_SETUP_SQPOLL) {
3550 		/* IPI related flags don't make sense with SQPOLL */
3551 		if (ctx->flags & (IORING_SETUP_COOP_TASKRUN |
3552 				  IORING_SETUP_TASKRUN_FLAG |
3553 				  IORING_SETUP_DEFER_TASKRUN))
3554 			goto err;
3555 		ctx->notify_method = TWA_SIGNAL_NO_IPI;
3556 	} else if (ctx->flags & IORING_SETUP_COOP_TASKRUN) {
3557 		ctx->notify_method = TWA_SIGNAL_NO_IPI;
3558 	} else {
3559 		if (ctx->flags & IORING_SETUP_TASKRUN_FLAG &&
3560 		    !(ctx->flags & IORING_SETUP_DEFER_TASKRUN))
3561 			goto err;
3562 		ctx->notify_method = TWA_SIGNAL;
3563 	}
3564 
3565 	/*
3566 	 * For DEFER_TASKRUN we require the completion task to be the same as the
3567 	 * submission task. This implies that there is only one submitter, so enforce
3568 	 * that.
3569 	 */
3570 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN &&
3571 	    !(ctx->flags & IORING_SETUP_SINGLE_ISSUER)) {
3572 		goto err;
3573 	}
3574 
3575 	/*
3576 	 * This is just grabbed for accounting purposes. When a process exits,
3577 	 * the mm is exited and dropped before the files, hence we need to hang
3578 	 * on to this mm purely for the purposes of being able to unaccount
3579 	 * memory (locked/pinned vm). It's not used for anything else.
3580 	 */
3581 	mmgrab(current->mm);
3582 	ctx->mm_account = current->mm;
3583 
3584 	ret = io_allocate_scq_urings(ctx, p);
3585 	if (ret)
3586 		goto err;
3587 
3588 	ret = io_sq_offload_create(ctx, p);
3589 	if (ret)
3590 		goto err;
3591 	/* always set a rsrc node */
3592 	ret = io_rsrc_node_switch_start(ctx);
3593 	if (ret)
3594 		goto err;
3595 	io_rsrc_node_switch(ctx, NULL);
3596 
3597 	memset(&p->sq_off, 0, sizeof(p->sq_off));
3598 	p->sq_off.head = offsetof(struct io_rings, sq.head);
3599 	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
3600 	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
3601 	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
3602 	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
3603 	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
3604 	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
3605 
3606 	memset(&p->cq_off, 0, sizeof(p->cq_off));
3607 	p->cq_off.head = offsetof(struct io_rings, cq.head);
3608 	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
3609 	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
3610 	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
3611 	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
3612 	p->cq_off.cqes = offsetof(struct io_rings, cqes);
3613 	p->cq_off.flags = offsetof(struct io_rings, cq_flags);
3614 
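	/*
	 * Userspace sketch (illustrative): these are byte offsets into the
	 * ring mapping, so an application typically derives its ring
	 * pointers along the lines of
	 *
	 *	unsigned *sq_tail = (unsigned *)((char *)sq_ring + p.sq_off.tail);
	 *	unsigned *cq_head = (unsigned *)((char *)cq_ring + p.cq_off.head);
	 *
	 * where sq_ring/cq_ring are the mmap()ed ring addresses and p is the
	 * io_uring_params copy returned by io_uring_setup(2).
	 */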
3615 	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
3616 			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
3617 			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
3618 			IORING_FEAT_POLL_32BITS | IORING_FEAT_SQPOLL_NONFIXED |
3619 			IORING_FEAT_EXT_ARG | IORING_FEAT_NATIVE_WORKERS |
3620 			IORING_FEAT_RSRC_TAGS | IORING_FEAT_CQE_SKIP |
3621 			IORING_FEAT_LINKED_FILE;
3622 
3623 	if (copy_to_user(params, p, sizeof(*p))) {
3624 		ret = -EFAULT;
3625 		goto err;
3626 	}
3627 
3628 	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER
3629 	    && !(ctx->flags & IORING_SETUP_R_DISABLED))
3630 		ctx->submitter_task = get_task_struct(current);
3631 
3632 	file = io_uring_get_file(ctx);
3633 	if (IS_ERR(file)) {
3634 		ret = PTR_ERR(file);
3635 		goto err;
3636 	}
3637 
3638 	/*
3639 	 * Install ring fd as the very last thing, so we don't risk someone
3640 	 * having closed it before we finish setup.
3641 	 */
3642 	ret = io_uring_install_fd(ctx, file);
3643 	if (ret < 0) {
3644 		/* fput will clean it up */
3645 		fput(file);
3646 		return ret;
3647 	}
3648 
3649 	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
3650 	return ret;
3651 err:
3652 	io_ring_ctx_wait_and_kill(ctx);
3653 	return ret;
3654 }
3655 
3656 /*
3657  * Sets up an io_uring context and returns the fd. The application asks for a
3658  * ring size; we return the actual sq/cq ring sizes (among other things) in the
3659  * params structure passed in.
3660  */
3661 static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
3662 {
3663 	struct io_uring_params p;
3664 	int i;
3665 
3666 	if (copy_from_user(&p, params, sizeof(p)))
3667 		return -EFAULT;
3668 	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
3669 		if (p.resv[i])
3670 			return -EINVAL;
3671 	}
3672 
3673 	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
3674 			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
3675 			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
3676 			IORING_SETUP_R_DISABLED | IORING_SETUP_SUBMIT_ALL |
3677 			IORING_SETUP_COOP_TASKRUN | IORING_SETUP_TASKRUN_FLAG |
3678 			IORING_SETUP_SQE128 | IORING_SETUP_CQE32 |
3679 			IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN))
3680 		return -EINVAL;
3681 
3682 	return io_uring_create(entries, &p, params);
3683 }
3684 
3685 SYSCALL_DEFINE2(io_uring_setup, u32, entries,
3686 		struct io_uring_params __user *, params)
3687 {
3688 	return io_uring_setup(entries, params);
3689 }
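/*
 * Userspace sketch (illustrative, not part of this source): a minimal raw
 * setup call; the kernel fills in the actual ring sizes and offsets on
 * return:
 *
 *	struct io_uring_params p = { 0 };
 *	int ring_fd = syscall(__NR_io_uring_setup, 8, &p);
 *
 * On success, p.sq_entries == 8 and p.cq_entries == 16 here.
 */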
3690 
3691 static __cold int io_probe(struct io_ring_ctx *ctx, void __user *arg,
3692 			   unsigned nr_args)
3693 {
3694 	struct io_uring_probe *p;
3695 	size_t size;
3696 	int i, ret;
3697 
3698 	size = struct_size(p, ops, nr_args);
3699 	if (size == SIZE_MAX)
3700 		return -EOVERFLOW;
3701 	p = kzalloc(size, GFP_KERNEL);
3702 	if (!p)
3703 		return -ENOMEM;
3704 
3705 	ret = -EFAULT;
3706 	if (copy_from_user(p, arg, size))
3707 		goto out;
3708 	ret = -EINVAL;
3709 	if (memchr_inv(p, 0, size))
3710 		goto out;
3711 
3712 	p->last_op = IORING_OP_LAST - 1;
3713 	if (nr_args > IORING_OP_LAST)
3714 		nr_args = IORING_OP_LAST;
3715 
3716 	for (i = 0; i < nr_args; i++) {
3717 		p->ops[i].op = i;
3718 		if (!io_op_defs[i].not_supported)
3719 			p->ops[i].flags = IO_URING_OP_SUPPORTED;
3720 	}
3721 	p->ops_len = i;
3722 
3723 	ret = 0;
3724 	if (copy_to_user(arg, p, size))
3725 		ret = -EFAULT;
3726 out:
3727 	kfree(p);
3728 	return ret;
3729 }
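/*
 * Userspace sketch (illustrative): probing which opcodes this kernel
 * supports. The probe buffer must be zeroed (non-zero input is rejected
 * above) and sized for the requested number of ops; 256 is the maximum
 * nr_args accepted for IORING_REGISTER_PROBE:
 *
 *	struct io_uring_probe *probe = calloc(1, sizeof(*probe) +
 *				256 * sizeof(struct io_uring_probe_op));
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, 256);
 *	if (probe->ops[IORING_OP_READV].flags & IO_URING_OP_SUPPORTED)
 *		...
 */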
3730 
3731 static int io_register_personality(struct io_ring_ctx *ctx)
3732 {
3733 	const struct cred *creds;
3734 	u32 id;
3735 	int ret;
3736 
3737 	creds = get_current_cred();
3738 
3739 	ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)creds,
3740 			XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
3741 	if (ret < 0) {
3742 		put_cred(creds);
3743 		return ret;
3744 	}
3745 	return id;
3746 }
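/*
 * Usage note (illustrative): the id returned here is what an application
 * later stores in sqe->personality to issue a request with the registered
 * credentials; passing the same id as nr_args to
 * IORING_UNREGISTER_PERSONALITY drops them again.
 */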
3747 
3748 static __cold int io_register_restrictions(struct io_ring_ctx *ctx,
3749 					   void __user *arg, unsigned int nr_args)
3750 {
3751 	struct io_uring_restriction *res;
3752 	size_t size;
3753 	int i, ret;
3754 
3755 	/* Restrictions allowed only if rings started disabled */
3756 	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
3757 		return -EBADFD;
3758 
3759 	/* We allow only a single restrictions registration */
3760 	if (ctx->restrictions.registered)
3761 		return -EBUSY;
3762 
3763 	if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
3764 		return -EINVAL;
3765 
3766 	size = array_size(nr_args, sizeof(*res));
3767 	if (size == SIZE_MAX)
3768 		return -EOVERFLOW;
3769 
3770 	res = memdup_user(arg, size);
3771 	if (IS_ERR(res))
3772 		return PTR_ERR(res);
3773 
3774 	ret = 0;
3775 
3776 	for (i = 0; i < nr_args; i++) {
3777 		switch (res[i].opcode) {
3778 		case IORING_RESTRICTION_REGISTER_OP:
3779 			if (res[i].register_op >= IORING_REGISTER_LAST) {
3780 				ret = -EINVAL;
3781 				goto out;
3782 			}
3783 
3784 			__set_bit(res[i].register_op,
3785 				  ctx->restrictions.register_op);
3786 			break;
3787 		case IORING_RESTRICTION_SQE_OP:
3788 			if (res[i].sqe_op >= IORING_OP_LAST) {
3789 				ret = -EINVAL;
3790 				goto out;
3791 			}
3792 
3793 			__set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
3794 			break;
3795 		case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
3796 			ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
3797 			break;
3798 		case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
3799 			ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
3800 			break;
3801 		default:
3802 			ret = -EINVAL;
3803 			goto out;
3804 		}
3805 	}
3806 
3807 out:
3808 	/* Reset all restrictions if an error happened */
3809 	if (ret != 0)
3810 		memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
3811 	else
3812 		ctx->restrictions.registered = true;
3813 
3814 	kfree(res);
3815 	return ret;
3816 }
3817 
3818 static int io_register_enable_rings(struct io_ring_ctx *ctx)
3819 {
3820 	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
3821 		return -EBADFD;
3822 
3823 	if (ctx->flags & IORING_SETUP_SINGLE_ISSUER && !ctx->submitter_task)
3824 		ctx->submitter_task = get_task_struct(current);
3825 
3826 	if (ctx->restrictions.registered)
3827 		ctx->restricted = 1;
3828 
3829 	ctx->flags &= ~IORING_SETUP_R_DISABLED;
3830 	if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
3831 		wake_up(&ctx->sq_data->wait);
3832 	return 0;
3833 }
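/*
 * Userspace sketch (illustrative): a ring created with
 * IORING_SETUP_R_DISABLED can be locked down before it is enabled, e.g.
 * to allow only readv/writev SQEs:
 *
 *	struct io_uring_restriction res[] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP, .sqe_op = IORING_OP_READV },
 *		{ .opcode = IORING_RESTRICTION_SQE_OP, .sqe_op = IORING_OP_WRITEV },
 *	};
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_RESTRICTIONS,
 *		res, 2);
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_ENABLE_RINGS,
 *		NULL, 0);
 */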
3834 
3835 static __cold int __io_register_iowq_aff(struct io_ring_ctx *ctx,
3836 					 cpumask_var_t new_mask)
3837 {
3838 	int ret;
3839 
3840 	if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
3841 		ret = io_wq_cpu_affinity(current->io_uring, new_mask);
3842 	} else {
3843 		mutex_unlock(&ctx->uring_lock);
3844 		ret = io_sqpoll_wq_cpu_affinity(ctx, new_mask);
3845 		mutex_lock(&ctx->uring_lock);
3846 	}
3847 
3848 	return ret;
3849 }
3850 
3851 static __cold int io_register_iowq_aff(struct io_ring_ctx *ctx,
3852 				       void __user *arg, unsigned len)
3853 {
3854 	cpumask_var_t new_mask;
3855 	int ret;
3856 
3857 	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
3858 		return -ENOMEM;
3859 
3860 	cpumask_clear(new_mask);
3861 	if (len > cpumask_size())
3862 		len = cpumask_size();
3863 
3864 	if (in_compat_syscall()) {
3865 		ret = compat_get_bitmap(cpumask_bits(new_mask),
3866 					(const compat_ulong_t __user *)arg,
3867 					len * 8 /* CHAR_BIT */);
3868 	} else {
3869 		ret = copy_from_user(new_mask, arg, len);
3870 	}
3871 
3872 	if (ret) {
3873 		free_cpumask_var(new_mask);
3874 		return -EFAULT;
3875 	}
3876 
3877 	ret = __io_register_iowq_aff(ctx, new_mask);
3878 	free_cpumask_var(new_mask);
3879 	return ret;
3880 }
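/*
 * Userspace sketch (illustrative): pinning io-wq workers to CPUs 0 and 1;
 * nr_args is the length of the mask in bytes:
 *
 *	unsigned long mask = (1UL << 0) | (1UL << 1);
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_IOWQ_AFF,
 *		&mask, sizeof(mask));
 */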
3881 
3882 static __cold int io_unregister_iowq_aff(struct io_ring_ctx *ctx)
3883 {
3884 	return __io_register_iowq_aff(ctx, NULL);
3885 }
3886 
3887 static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx,
3888 					       void __user *arg)
3889 	__must_hold(&ctx->uring_lock)
3890 {
3891 	struct io_tctx_node *node;
3892 	struct io_uring_task *tctx = NULL;
3893 	struct io_sq_data *sqd = NULL;
3894 	__u32 new_count[2];
3895 	int i, ret;
3896 
3897 	if (copy_from_user(new_count, arg, sizeof(new_count)))
3898 		return -EFAULT;
3899 	for (i = 0; i < ARRAY_SIZE(new_count); i++)
3900 		if (new_count[i] > INT_MAX)
3901 			return -EINVAL;
3902 
3903 	if (ctx->flags & IORING_SETUP_SQPOLL) {
3904 		sqd = ctx->sq_data;
3905 		if (sqd) {
3906 			/*
3907 			 * Observe the correct sqd->lock -> ctx->uring_lock
3908 			 * ordering. Fine to drop uring_lock here; we hold
3909 			 * a ref to the ctx.
3910 			 */
3911 			refcount_inc(&sqd->refs);
3912 			mutex_unlock(&ctx->uring_lock);
3913 			mutex_lock(&sqd->lock);
3914 			mutex_lock(&ctx->uring_lock);
3915 			if (sqd->thread)
3916 				tctx = sqd->thread->io_uring;
3917 		}
3918 	} else {
3919 		tctx = current->io_uring;
3920 	}
3921 
3922 	BUILD_BUG_ON(sizeof(new_count) != sizeof(ctx->iowq_limits));
3923 
3924 	for (i = 0; i < ARRAY_SIZE(new_count); i++)
3925 		if (new_count[i])
3926 			ctx->iowq_limits[i] = new_count[i];
3927 	ctx->iowq_limits_set = true;
3928 
3929 	if (tctx && tctx->io_wq) {
3930 		ret = io_wq_max_workers(tctx->io_wq, new_count);
3931 		if (ret)
3932 			goto err;
3933 	} else {
3934 		memset(new_count, 0, sizeof(new_count));
3935 	}
3936 
3937 	if (sqd) {
3938 		mutex_unlock(&sqd->lock);
3939 		io_put_sq_data(sqd);
3940 	}
3941 
3942 	if (copy_to_user(arg, new_count, sizeof(new_count)))
3943 		return -EFAULT;
3944 
3945 	/* that's it for SQPOLL, only the SQPOLL task creates requests */
3946 	if (sqd)
3947 		return 0;
3948 
3949 	/* now propagate the restriction to all registered users */
3950 	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
3951 		struct io_uring_task *tctx = node->task->io_uring;
3952 
3953 		if (WARN_ON_ONCE(!tctx->io_wq))
3954 			continue;
3955 
3956 		for (i = 0; i < ARRAY_SIZE(new_count); i++)
3957 			new_count[i] = ctx->iowq_limits[i];
3958 		/* ignore errors, it always returns zero anyway */
3959 		(void)io_wq_max_workers(tctx->io_wq, new_count);
3960 	}
3961 	return 0;
3962 err:
3963 	if (sqd) {
3964 		mutex_unlock(&sqd->lock);
3965 		io_put_sq_data(sqd);
3966 	}
3967 	return ret;
3968 }
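/*
 * Usage note (illustrative): new_count[0] is the bounded and new_count[1]
 * the unbounded io-wq worker limit. A zero entry leaves that limit
 * untouched, and the previous values are copied back to userspace, so
 * passing {0, 0} simply queries the current limits.
 */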
3969 
3970 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
3971 			       void __user *arg, unsigned nr_args)
3972 	__releases(ctx->uring_lock)
3973 	__acquires(ctx->uring_lock)
3974 {
3975 	int ret;
3976 
3977 	/*
3978 	 * We don't quiesce the refs for register anymore and so it can't be
3979 	 * dying as we're holding a file ref here.
3980 	 */
3981 	if (WARN_ON_ONCE(percpu_ref_is_dying(&ctx->refs)))
3982 		return -ENXIO;
3983 
3984 	if (ctx->submitter_task && ctx->submitter_task != current)
3985 		return -EEXIST;
3986 
3987 	if (ctx->restricted) {
3988 		opcode = array_index_nospec(opcode, IORING_REGISTER_LAST);
3989 		if (!test_bit(opcode, ctx->restrictions.register_op))
3990 			return -EACCES;
3991 	}
3992 
3993 	switch (opcode) {
3994 	case IORING_REGISTER_BUFFERS:
3995 		ret = -EFAULT;
3996 		if (!arg)
3997 			break;
3998 		ret = io_sqe_buffers_register(ctx, arg, nr_args, NULL);
3999 		break;
4000 	case IORING_UNREGISTER_BUFFERS:
4001 		ret = -EINVAL;
4002 		if (arg || nr_args)
4003 			break;
4004 		ret = io_sqe_buffers_unregister(ctx);
4005 		break;
4006 	case IORING_REGISTER_FILES:
4007 		ret = -EFAULT;
4008 		if (!arg)
4009 			break;
4010 		ret = io_sqe_files_register(ctx, arg, nr_args, NULL);
4011 		break;
4012 	case IORING_UNREGISTER_FILES:
4013 		ret = -EINVAL;
4014 		if (arg || nr_args)
4015 			break;
4016 		ret = io_sqe_files_unregister(ctx);
4017 		break;
4018 	case IORING_REGISTER_FILES_UPDATE:
4019 		ret = io_register_files_update(ctx, arg, nr_args);
4020 		break;
4021 	case IORING_REGISTER_EVENTFD:
4022 		ret = -EINVAL;
4023 		if (nr_args != 1)
4024 			break;
4025 		ret = io_eventfd_register(ctx, arg, 0);
4026 		break;
4027 	case IORING_REGISTER_EVENTFD_ASYNC:
4028 		ret = -EINVAL;
4029 		if (nr_args != 1)
4030 			break;
4031 		ret = io_eventfd_register(ctx, arg, 1);
4032 		break;
4033 	case IORING_UNREGISTER_EVENTFD:
4034 		ret = -EINVAL;
4035 		if (arg || nr_args)
4036 			break;
4037 		ret = io_eventfd_unregister(ctx);
4038 		break;
4039 	case IORING_REGISTER_PROBE:
4040 		ret = -EINVAL;
4041 		if (!arg || nr_args > 256)
4042 			break;
4043 		ret = io_probe(ctx, arg, nr_args);
4044 		break;
4045 	case IORING_REGISTER_PERSONALITY:
4046 		ret = -EINVAL;
4047 		if (arg || nr_args)
4048 			break;
4049 		ret = io_register_personality(ctx);
4050 		break;
4051 	case IORING_UNREGISTER_PERSONALITY:
4052 		ret = -EINVAL;
4053 		if (arg)
4054 			break;
4055 		ret = io_unregister_personality(ctx, nr_args);
4056 		break;
4057 	case IORING_REGISTER_ENABLE_RINGS:
4058 		ret = -EINVAL;
4059 		if (arg || nr_args)
4060 			break;
4061 		ret = io_register_enable_rings(ctx);
4062 		break;
4063 	case IORING_REGISTER_RESTRICTIONS:
4064 		ret = io_register_restrictions(ctx, arg, nr_args);
4065 		break;
4066 	case IORING_REGISTER_FILES2:
4067 		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_FILE);
4068 		break;
4069 	case IORING_REGISTER_FILES_UPDATE2:
4070 		ret = io_register_rsrc_update(ctx, arg, nr_args,
4071 					      IORING_RSRC_FILE);
4072 		break;
4073 	case IORING_REGISTER_BUFFERS2:
4074 		ret = io_register_rsrc(ctx, arg, nr_args, IORING_RSRC_BUFFER);
4075 		break;
4076 	case IORING_REGISTER_BUFFERS_UPDATE:
4077 		ret = io_register_rsrc_update(ctx, arg, nr_args,
4078 					      IORING_RSRC_BUFFER);
4079 		break;
4080 	case IORING_REGISTER_IOWQ_AFF:
4081 		ret = -EINVAL;
4082 		if (!arg || !nr_args)
4083 			break;
4084 		ret = io_register_iowq_aff(ctx, arg, nr_args);
4085 		break;
4086 	case IORING_UNREGISTER_IOWQ_AFF:
4087 		ret = -EINVAL;
4088 		if (arg || nr_args)
4089 			break;
4090 		ret = io_unregister_iowq_aff(ctx);
4091 		break;
4092 	case IORING_REGISTER_IOWQ_MAX_WORKERS:
4093 		ret = -EINVAL;
4094 		if (!arg || nr_args != 2)
4095 			break;
4096 		ret = io_register_iowq_max_workers(ctx, arg);
4097 		break;
4098 	case IORING_REGISTER_RING_FDS:
4099 		ret = io_ringfd_register(ctx, arg, nr_args);
4100 		break;
4101 	case IORING_UNREGISTER_RING_FDS:
4102 		ret = io_ringfd_unregister(ctx, arg, nr_args);
4103 		break;
4104 	case IORING_REGISTER_PBUF_RING:
4105 		ret = -EINVAL;
4106 		if (!arg || nr_args != 1)
4107 			break;
4108 		ret = io_register_pbuf_ring(ctx, arg);
4109 		break;
4110 	case IORING_UNREGISTER_PBUF_RING:
4111 		ret = -EINVAL;
4112 		if (!arg || nr_args != 1)
4113 			break;
4114 		ret = io_unregister_pbuf_ring(ctx, arg);
4115 		break;
4116 	case IORING_REGISTER_SYNC_CANCEL:
4117 		ret = -EINVAL;
4118 		if (!arg || nr_args != 1)
4119 			break;
4120 		ret = io_sync_cancel(ctx, arg);
4121 		break;
4122 	case IORING_REGISTER_FILE_ALLOC_RANGE:
4123 		ret = -EINVAL;
4124 		if (!arg || nr_args)
4125 			break;
4126 		ret = io_register_file_alloc_range(ctx, arg);
4127 		break;
4128 	default:
4129 		ret = -EINVAL;
4130 		break;
4131 	}
4132 
4133 	return ret;
4134 }
4135 
4136 SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
4137 		void __user *, arg, unsigned int, nr_args)
4138 {
4139 	struct io_ring_ctx *ctx;
4140 	long ret = -EBADF;
4141 	struct fd f;
4142 
4143 	if (opcode >= IORING_REGISTER_LAST)
4144 		return -EINVAL;
4145 
4146 	f = fdget(fd);
4147 	if (!f.file)
4148 		return -EBADF;
4149 
4150 	ret = -EOPNOTSUPP;
4151 	if (!io_is_uring_fops(f.file))
4152 		goto out_fput;
4153 
4154 	ctx = f.file->private_data;
4155 
4156 	io_run_task_work_ctx(ctx);
4157 
4158 	mutex_lock(&ctx->uring_lock);
4159 	ret = __io_uring_register(ctx, opcode, arg, nr_args);
4160 	mutex_unlock(&ctx->uring_lock);
4161 	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs, ret);
4162 out_fput:
4163 	fdput(f);
4164 	return ret;
4165 }
4166 
4167 static int __init io_uring_init(void)
4168 {
4169 #define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \
4170 	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
4171 	BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \
4172 } while (0)
4173 
4174 #define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
4175 	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, sizeof(etype), ename)
4176 #define BUILD_BUG_SQE_ELEM_SIZE(eoffset, esize, ename) \
4177 	__BUILD_BUG_VERIFY_OFFSET_SIZE(struct io_uring_sqe, eoffset, esize, ename)
4178 	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
4179 	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
4180 	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
4181 	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
4182 	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
4183 	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
4184 	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
4185 	BUILD_BUG_SQE_ELEM(8,  __u32,  cmd_op);
4186 	BUILD_BUG_SQE_ELEM(12, __u32, __pad1);
4187 	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
4188 	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
4189 	BUILD_BUG_SQE_ELEM(24, __u32,  len);
4190 	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
4191 	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
4192 	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
4193 	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
4194 	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
4195 	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
4196 	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
4197 	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
4198 	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
4199 	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
4200 	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
4201 	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
4202 	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
4203 	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
4204 	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
4205 	BUILD_BUG_SQE_ELEM(28, __u32,  rename_flags);
4206 	BUILD_BUG_SQE_ELEM(28, __u32,  unlink_flags);
4207 	BUILD_BUG_SQE_ELEM(28, __u32,  hardlink_flags);
4208 	BUILD_BUG_SQE_ELEM(28, __u32,  xattr_flags);
4209 	BUILD_BUG_SQE_ELEM(28, __u32,  msg_ring_flags);
4210 	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
4211 	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
4212 	BUILD_BUG_SQE_ELEM(40, __u16,  buf_group);
4213 	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
4214 	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
4215 	BUILD_BUG_SQE_ELEM(44, __u32,  file_index);
4216 	BUILD_BUG_SQE_ELEM(44, __u16,  addr_len);
4217 	BUILD_BUG_SQE_ELEM(46, __u16,  __pad3[0]);
4218 	BUILD_BUG_SQE_ELEM(48, __u64,  addr3);
4219 	BUILD_BUG_SQE_ELEM_SIZE(48, 0, cmd);
4220 	BUILD_BUG_SQE_ELEM(56, __u64,  __pad2);
4221 
4222 	BUILD_BUG_ON(sizeof(struct io_uring_files_update) !=
4223 		     sizeof(struct io_uring_rsrc_update));
4224 	BUILD_BUG_ON(sizeof(struct io_uring_rsrc_update) >
4225 		     sizeof(struct io_uring_rsrc_update2));
4226 
4227 	/* ->buf_index is u16 */
4228 	BUILD_BUG_ON(offsetof(struct io_uring_buf_ring, bufs) != 0);
4229 	BUILD_BUG_ON(offsetof(struct io_uring_buf, resv) !=
4230 		     offsetof(struct io_uring_buf_ring, tail));
4231 
4232 	/* should fit into one byte */
4233 	BUILD_BUG_ON(SQE_VALID_FLAGS >= (1 << 8));
4234 	BUILD_BUG_ON(SQE_COMMON_FLAGS >= (1 << 8));
4235 	BUILD_BUG_ON((SQE_VALID_FLAGS | SQE_COMMON_FLAGS) != SQE_VALID_FLAGS);
4236 
4237 	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(int));
4238 
4239 	BUILD_BUG_ON(sizeof(atomic_t) != sizeof(u32));
4240 
4241 	io_uring_optable_init();
4242 
4243 	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC |
4244 				SLAB_ACCOUNT);
4245 	return 0;
4246 };
4247 __initcall(io_uring_init);
4248