1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Shared application/kernel submission and completion ring pairs, for
4  * supporting fast/efficient IO.
5  *
6  * A note on the read/write ordering memory barriers that are matched between
7  * the application and kernel side.
8  *
9  * After the application reads the CQ ring tail, it must use an
10  * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11  * before writing the tail (using smp_load_acquire to read the tail will
12  * do). It also needs a smp_mb() before updating CQ head (ordering the
13  * entry load(s) with the head store), pairing with an implicit barrier
14  * through a control-dependency in io_get_cqring (smp_store_release to
15  * store head will do). Failure to do so could lead to reading invalid
16  * CQ entries.
17  *
18  * Likewise, the application must use an appropriate smp_wmb() before
19  * writing the SQ tail (ordering SQ entry stores with the tail store),
20  * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21  * to store the tail will do). And it needs a barrier ordering the SQ
22  * head load before writing new SQ entries (smp_load_acquire to read
23  * head will do).
24  *
25  * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26  * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27  * updating the SQ tail; a full memory barrier smp_mb() is needed
28  * between.
29  *
30  * Also see the examples in the liburing library:
31  *
32  *	git://git.kernel.dk/liburing
33  *
34  * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35  * from data shared between the kernel and application. This is done not only
36  * for ordering purposes, but also to ensure that once a value is loaded from
37  * data that the application could potentially modify, it remains stable.
38  *
39  * Copyright (C) 2018-2019 Jens Axboe
40  * Copyright (c) 2018-2019 Christoph Hellwig
41  */
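/*
 * Illustrative userspace sketch (not part of the kernel build) of the CQ
 * side of the barrier protocol described above: load the CQ tail with
 * acquire semantics (pairing with the kernel's release store in
 * io_commit_cqring()), consume entries, then publish the new head with a
 * release store so the entry loads are ordered before the head update.
 * The helper name and pointer parameters are hypothetical; they are
 * assumed to have been derived from the CQ ring mmap via
 * struct io_cqring_offsets.
 */
#if 0
#include <linux/io_uring.h>
#include <stdatomic.h>
#include <stdint.h>

static unsigned reap_cqes(_Atomic uint32_t *cq_head, _Atomic uint32_t *cq_tail,
			  uint32_t cq_mask, struct io_uring_cqe *cqes)
{
	/* acquire pairs with the kernel's release store on cq.tail */
	uint32_t tail = atomic_load_explicit(cq_tail, memory_order_acquire);
	uint32_t head = atomic_load_explicit(cq_head, memory_order_relaxed);
	unsigned seen = 0;

	while (head != tail) {
		struct io_uring_cqe *cqe = &cqes[head & cq_mask];

		/* cqe->user_data and cqe->res are stable once tail is seen */
		(void)cqe;
		head++;
		seen++;
	}
	/* release orders the cqe loads above before the head update */
	atomic_store_explicit(cq_head, head, memory_order_release);
	return seen;
}
#endif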
42 #include <linux/kernel.h>
43 #include <linux/init.h>
44 #include <linux/errno.h>
45 #include <linux/syscalls.h>
46 #include <linux/compat.h>
47 #include <linux/refcount.h>
48 #include <linux/uio.h>
49 
50 #include <linux/sched/signal.h>
51 #include <linux/fs.h>
52 #include <linux/file.h>
53 #include <linux/fdtable.h>
54 #include <linux/mm.h>
55 #include <linux/mman.h>
56 #include <linux/mmu_context.h>
57 #include <linux/percpu.h>
58 #include <linux/slab.h>
59 #include <linux/workqueue.h>
60 #include <linux/kthread.h>
61 #include <linux/blkdev.h>
62 #include <linux/bvec.h>
63 #include <linux/net.h>
64 #include <net/sock.h>
65 #include <net/af_unix.h>
66 #include <net/scm.h>
67 #include <linux/anon_inodes.h>
68 #include <linux/sched/mm.h>
69 #include <linux/uaccess.h>
70 #include <linux/nospec.h>
71 #include <linux/sizes.h>
72 #include <linux/hugetlb.h>
73 #include <linux/highmem.h>
74 
75 #include <uapi/linux/io_uring.h>
76 
77 #include "internal.h"
78 
79 #define IORING_MAX_ENTRIES	32768
80 #define IORING_MAX_FIXED_FILES	1024
81 
82 struct io_uring {
83 	u32 head ____cacheline_aligned_in_smp;
84 	u32 tail ____cacheline_aligned_in_smp;
85 };
86 
87 /*
88  * This data is shared with the application through the mmap at offsets
89  * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
90  *
91  * The offsets to the member fields are published through struct
92  * io_sqring_offsets when calling io_uring_setup.
93  */
94 struct io_rings {
95 	/*
96 	 * Head and tail offsets into the ring; the offsets need to be
97 	 * masked to get valid indices.
98 	 *
99 	 * The kernel controls the head of the sq ring and the tail of the cq ring,
100 	 * and the application controls the tail of the sq ring and the head of the
101 	 * cq ring.
102 	 */
103 	struct io_uring		sq, cq;
104 	/*
105 	 * Bitmasks to apply to head and tail offsets (constant, equals
106 	 * ring_entries - 1)
107 	 */
108 	u32			sq_ring_mask, cq_ring_mask;
109 	/* Ring sizes (constant, power of 2) */
110 	u32			sq_ring_entries, cq_ring_entries;
111 	/*
112 	 * Number of invalid entries dropped by the kernel due to
113 	 * an invalid index stored in the array
114 	 *
115 	 * Written by the kernel, shouldn't be modified by the
116 	 * application (i.e. get number of "new events" by comparing to
117 	 * cached value).
118 	 *
119 	 * Once a new SQ head value has been read by the application, this
120 	 * counter includes all submissions that were dropped reaching
121 	 * the new SQ head (and possibly more).
122 	 */
123 	u32			sq_dropped;
124 	/*
125 	 * Runtime flags
126 	 *
127 	 * Written by the kernel, shouldn't be modified by the
128 	 * application.
129 	 *
130 	 * The application needs a full memory barrier before checking
131 	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
132 	 */
133 	u32			sq_flags;
134 	/*
135 	 * Number of completion events lost because the queue was full;
136 	 * this should be avoided by the application by making sure
137 	 * there are not more requests pending than there is space in
138 	 * the completion queue.
139 	 *
140 	 * Written by the kernel, shouldn't be modified by the
141 	 * application (i.e. get number of "new events" by comparing to
142 	 * cached value).
143 	 *
144 	 * As completion events come in out of order this counter is not
145 	 * ordered with any other data.
146 	 */
147 	u32			cq_overflow;
148 	/*
149 	 * Ring buffer of completion events.
150 	 *
151 	 * The kernel writes completion events fresh every time they are
152 	 * produced, so the application is allowed to modify pending
153 	 * entries.
154 	 */
155 	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
156 };
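/*
 * Illustrative userspace sketch (not compiled here) of how the layout
 * above is reached from the other side: io_uring_setup() returns the ring
 * fd and fills struct io_uring_params, whose sq_off/cq_off members give
 * the byte offsets of the fields of this structure within the
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING mmaps. The helper name is
 * hypothetical and error handling is minimal.
 */
#if 0
#include <linux/io_uring.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static int map_rings(unsigned entries)
{
	struct io_uring_params p = { 0 };
	int ring_fd = syscall(__NR_io_uring_setup, entries, &p);
	size_t sq_sz, cq_sz;
	void *sq_ptr, *cq_ptr;
	unsigned *sq_tail, *sq_array, *cq_head;

	if (ring_fd < 0)
		return -1;

	/* mapping sizes follow from the published offsets and entry counts */
	sq_sz = p.sq_off.array + p.sq_entries * sizeof(unsigned);
	cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);

	sq_ptr = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
		      ring_fd, IORING_OFF_SQ_RING);
	cq_ptr = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
		      ring_fd, IORING_OFF_CQ_RING);
	if (sq_ptr == MAP_FAILED || cq_ptr == MAP_FAILED)
		return -1;

	/* individual fields live at the published offsets */
	sq_tail  = (unsigned *)((char *)sq_ptr + p.sq_off.tail);
	sq_array = (unsigned *)((char *)sq_ptr + p.sq_off.array);
	cq_head  = (unsigned *)((char *)cq_ptr + p.cq_off.head);
	(void)sq_tail; (void)sq_array; (void)cq_head;

	return ring_fd;
}
#endif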
157 
158 struct io_mapped_ubuf {
159 	u64		ubuf;
160 	size_t		len;
161 	struct		bio_vec *bvec;
162 	unsigned int	nr_bvecs;
163 };
164 
165 struct async_list {
166 	spinlock_t		lock;
167 	atomic_t		cnt;
168 	struct list_head	list;
169 
170 	struct file		*file;
171 	off_t			io_start;
172 	size_t			io_len;
173 };
174 
175 struct io_ring_ctx {
176 	struct {
177 		struct percpu_ref	refs;
178 	} ____cacheline_aligned_in_smp;
179 
180 	struct {
181 		unsigned int		flags;
182 		bool			compat;
183 		bool			account_mem;
184 
185 		/*
186 		 * Ring buffer of indices into array of io_uring_sqe, which is
187 		 * mmapped by the application using the IORING_OFF_SQES offset.
188 		 *
189 		 * This indirection could e.g. be used to assign fixed
190 		 * io_uring_sqe entries to operations and only submit them to
191 		 * the queue when needed.
192 		 *
193 		 * The kernel modifies neither the indices array nor the entries
194 		 * array.
195 		 */
196 		u32			*sq_array;
197 		unsigned		cached_sq_head;
198 		unsigned		sq_entries;
199 		unsigned		sq_mask;
200 		unsigned		sq_thread_idle;
201 		unsigned		cached_sq_dropped;
202 		struct io_uring_sqe	*sq_sqes;
203 
204 		struct list_head	defer_list;
205 		struct list_head	timeout_list;
206 	} ____cacheline_aligned_in_smp;
207 
208 	/* IO offload */
209 	struct workqueue_struct	*sqo_wq[2];
210 	struct task_struct	*sqo_thread;	/* if using sq thread polling */
211 	struct mm_struct	*sqo_mm;
212 	wait_queue_head_t	sqo_wait;
213 	struct completion	sqo_thread_started;
214 
215 	struct {
216 		unsigned		cached_cq_tail;
217 		atomic_t		cached_cq_overflow;
218 		unsigned		cq_entries;
219 		unsigned		cq_mask;
220 		struct wait_queue_head	cq_wait;
221 		struct fasync_struct	*cq_fasync;
222 		struct eventfd_ctx	*cq_ev_fd;
223 		atomic_t		cq_timeouts;
224 	} ____cacheline_aligned_in_smp;
225 
226 	struct io_rings	*rings;
227 
228 	/*
229 	 * If used, fixed file set. Writers must ensure that ->refs is dead,
230 	 * readers must ensure that ->refs is alive as long as the file* is
231 	 * used. Only updated through io_uring_register(2).
232 	 */
233 	struct file		**user_files;
234 	unsigned		nr_user_files;
235 
236 	/* if used, fixed mapped user buffers */
237 	unsigned		nr_user_bufs;
238 	struct io_mapped_ubuf	*user_bufs;
239 
240 	struct user_struct	*user;
241 
242 	const struct cred	*creds;
243 
244 	struct completion	ctx_done;
245 
246 	struct {
247 		struct mutex		uring_lock;
248 		wait_queue_head_t	wait;
249 	} ____cacheline_aligned_in_smp;
250 
251 	struct {
252 		spinlock_t		completion_lock;
253 		bool			poll_multi_file;
254 		/*
255 		 * ->poll_list is protected by the ctx->uring_lock for
256 		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
257 		 * For SQPOLL, only the single threaded io_sq_thread() will
258 		 * manipulate the list, hence no extra locking is needed there.
259 		 */
260 		struct list_head	poll_list;
261 		struct list_head	cancel_list;
262 	} ____cacheline_aligned_in_smp;
263 
264 	struct async_list	pending_async[2];
265 
266 #if defined(CONFIG_UNIX)
267 	struct socket		*ring_sock;
268 #endif
269 };
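/*
 * Illustrative userspace sketch (not compiled here) of submitting one SQE
 * through the sq_array indirection documented above: fill an io_uring_sqe
 * slot, publish its index in the array, then make the new tail visible
 * with a release store (the smp_store_release the header comment asks
 * for). With IORING_SETUP_SQPOLL, a full barrier is needed before reading
 * IORING_SQ_NEED_WAKEUP. All names and pointers are hypothetical and are
 * assumed to come from the ring mmaps shown earlier.
 */
#if 0
#include <linux/io_uring.h>
#include <stdatomic.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static void submit_one_nop(struct io_uring_sqe *sqes, unsigned *sq_array,
			   _Atomic unsigned *sq_tail, unsigned sq_mask,
			   unsigned *sq_flags, int ring_fd)
{
	unsigned tail = atomic_load_explicit(sq_tail, memory_order_relaxed);
	unsigned index = tail & sq_mask;
	struct io_uring_sqe *sqe = &sqes[index];

	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_NOP;
	sqe->user_data = 0x42;

	/* the indirection: the ring slot holds an index into the sqe array */
	sq_array[index] = index;

	/* release orders the sqe/array stores before the tail store */
	atomic_store_explicit(sq_tail, tail + 1, memory_order_release);

	/* SQPOLL only: full barrier before checking the wakeup flag */
	atomic_thread_fence(memory_order_seq_cst);
	if (*(volatile unsigned *)sq_flags & IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}
#endif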
270 
271 struct sqe_submit {
272 	const struct io_uring_sqe	*sqe;
273 	unsigned short			index;
274 	u32				sequence;
275 	bool				has_user;
276 	bool				needs_lock;
277 	bool				needs_fixed_file;
278 };
279 
280 /*
281  * First field must be the file pointer in all the
282  * iocb unions! See also 'struct kiocb' in <linux/fs.h>
283  */
284 struct io_poll_iocb {
285 	struct file			*file;
286 	struct wait_queue_head		*head;
287 	__poll_t			events;
288 	bool				done;
289 	bool				canceled;
290 	struct wait_queue_entry		wait;
291 };
292 
293 struct io_timeout {
294 	struct file			*file;
295 	struct hrtimer			timer;
296 };
297 
298 /*
299  * NOTE! Each of the iocb union members has the file pointer
300  * as the first entry in their struct definition. So you can
301  * access the file pointer through any of the sub-structs,
302  * or directly as just 'ki_filp' in this struct.
303  */
304 struct io_kiocb {
305 	union {
306 		struct file		*file;
307 		struct kiocb		rw;
308 		struct io_poll_iocb	poll;
309 		struct io_timeout	timeout;
310 	};
311 
312 	struct sqe_submit	submit;
313 
314 	struct io_ring_ctx	*ctx;
315 	struct list_head	list;
316 	struct list_head	link_list;
317 	unsigned int		flags;
318 	refcount_t		refs;
319 #define REQ_F_NOWAIT		1	/* must not punt to workers */
320 #define REQ_F_IOPOLL_COMPLETED	2	/* polled IO has completed */
321 #define REQ_F_FIXED_FILE	4	/* ctx owns file */
322 #define REQ_F_SEQ_PREV		8	/* sequential with previous */
323 #define REQ_F_IO_DRAIN		16	/* drain existing IO first */
324 #define REQ_F_IO_DRAINED	32	/* drain done */
325 #define REQ_F_LINK		64	/* linked sqes */
326 #define REQ_F_LINK_DONE		128	/* linked sqes done */
327 #define REQ_F_FAIL_LINK		256	/* fail rest of links */
328 #define REQ_F_SHADOW_DRAIN	512	/* link-drain shadow req */
329 #define REQ_F_TIMEOUT		1024	/* timeout request */
330 #define REQ_F_ISREG		2048	/* regular file */
331 #define REQ_F_MUST_PUNT		4096	/* must be punted even for NONBLOCK */
332 #define REQ_F_TIMEOUT_NOSEQ	8192	/* no timeout sequence */
333 	u64			user_data;
334 	u32			result;
335 	u32			sequence;
336 
337 	struct work_struct	work;
338 };
339 
340 #define IO_PLUG_THRESHOLD		2
341 #define IO_IOPOLL_BATCH			8
342 
343 struct io_submit_state {
344 	struct blk_plug		plug;
345 
346 	/*
347 	 * io_kiocb alloc cache
348 	 */
349 	void			*reqs[IO_IOPOLL_BATCH];
350 	unsigned		int free_reqs;
351 	unsigned		int cur_req;
352 
353 	/*
354 	 * File reference cache
355 	 */
356 	struct file		*file;
357 	unsigned int		fd;
358 	unsigned int		has_refs;
359 	unsigned int		used_refs;
360 	unsigned int		ios_left;
361 };
362 
363 static void io_sq_wq_submit_work(struct work_struct *work);
364 static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
365 				 long res);
366 static void __io_free_req(struct io_kiocb *req);
367 
368 static struct kmem_cache *req_cachep;
369 
370 static const struct file_operations io_uring_fops;
371 
372 struct sock *io_uring_get_socket(struct file *file)
373 {
374 #if defined(CONFIG_UNIX)
375 	if (file->f_op == &io_uring_fops) {
376 		struct io_ring_ctx *ctx = file->private_data;
377 
378 		return ctx->ring_sock->sk;
379 	}
380 #endif
381 	return NULL;
382 }
383 EXPORT_SYMBOL(io_uring_get_socket);
384 
385 static void io_ring_ctx_ref_free(struct percpu_ref *ref)
386 {
387 	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
388 
389 	complete(&ctx->ctx_done);
390 }
391 
392 static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
393 {
394 	struct io_ring_ctx *ctx;
395 	int i;
396 
397 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
398 	if (!ctx)
399 		return NULL;
400 
401 	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
402 			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
403 		kfree(ctx);
404 		return NULL;
405 	}
406 
407 	ctx->flags = p->flags;
408 	init_waitqueue_head(&ctx->cq_wait);
409 	init_completion(&ctx->ctx_done);
410 	init_completion(&ctx->sqo_thread_started);
411 	mutex_init(&ctx->uring_lock);
412 	init_waitqueue_head(&ctx->wait);
413 	for (i = 0; i < ARRAY_SIZE(ctx->pending_async); i++) {
414 		spin_lock_init(&ctx->pending_async[i].lock);
415 		INIT_LIST_HEAD(&ctx->pending_async[i].list);
416 		atomic_set(&ctx->pending_async[i].cnt, 0);
417 	}
418 	spin_lock_init(&ctx->completion_lock);
419 	INIT_LIST_HEAD(&ctx->poll_list);
420 	INIT_LIST_HEAD(&ctx->cancel_list);
421 	INIT_LIST_HEAD(&ctx->defer_list);
422 	INIT_LIST_HEAD(&ctx->timeout_list);
423 	return ctx;
424 }
425 
426 static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
427 				       struct io_kiocb *req)
428 {
429 	return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
430 					+ atomic_read(&ctx->cached_cq_overflow);
431 }
432 
433 static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
434 				     struct io_kiocb *req)
435 {
436 	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
437 		return false;
438 
439 	return __io_sequence_defer(ctx, req);
440 }
441 
442 static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
443 {
444 	struct io_kiocb *req;
445 
446 	req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
447 	if (req && !io_sequence_defer(ctx, req)) {
448 		list_del_init(&req->list);
449 		return req;
450 	}
451 
452 	return NULL;
453 }
454 
455 static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
456 {
457 	struct io_kiocb *req;
458 
459 	req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
460 	if (req) {
461 		if (req->flags & REQ_F_TIMEOUT_NOSEQ)
462 			return NULL;
463 		if (!__io_sequence_defer(ctx, req)) {
464 			list_del_init(&req->list);
465 			return req;
466 		}
467 	}
468 
469 	return NULL;
470 }
471 
472 static void __io_commit_cqring(struct io_ring_ctx *ctx)
473 {
474 	struct io_rings *rings = ctx->rings;
475 
476 	if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
477 		/* order cqe stores with ring update */
478 		smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
479 
480 		if (wq_has_sleeper(&ctx->cq_wait)) {
481 			wake_up_interruptible(&ctx->cq_wait);
482 			kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
483 		}
484 	}
485 }
486 
487 static inline void io_queue_async_work(struct io_ring_ctx *ctx,
488 				       struct io_kiocb *req)
489 {
490 	int rw = 0;
491 
492 	if (req->submit.sqe) {
493 		switch (req->submit.sqe->opcode) {
494 		case IORING_OP_WRITEV:
495 		case IORING_OP_WRITE_FIXED:
496 			rw = !(req->rw.ki_flags & IOCB_DIRECT);
497 			break;
498 		}
499 	}
500 
501 	queue_work(ctx->sqo_wq[rw], &req->work);
502 }
503 
504 static void io_kill_timeout(struct io_kiocb *req)
505 {
506 	int ret;
507 
508 	ret = hrtimer_try_to_cancel(&req->timeout.timer);
509 	if (ret != -1) {
510 		atomic_inc(&req->ctx->cq_timeouts);
511 		list_del(&req->list);
512 		io_cqring_fill_event(req->ctx, req->user_data, 0);
513 		__io_free_req(req);
514 	}
515 }
516 
517 static void io_kill_timeouts(struct io_ring_ctx *ctx)
518 {
519 	struct io_kiocb *req, *tmp;
520 
521 	spin_lock_irq(&ctx->completion_lock);
522 	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
523 		io_kill_timeout(req);
524 	spin_unlock_irq(&ctx->completion_lock);
525 }
526 
527 static void io_commit_cqring(struct io_ring_ctx *ctx)
528 {
529 	struct io_kiocb *req;
530 
531 	while ((req = io_get_timeout_req(ctx)) != NULL)
532 		io_kill_timeout(req);
533 
534 	__io_commit_cqring(ctx);
535 
536 	while ((req = io_get_deferred_req(ctx)) != NULL) {
537 		if (req->flags & REQ_F_SHADOW_DRAIN) {
538 			/* Just for drain, free it. */
539 			__io_free_req(req);
540 			continue;
541 		}
542 		req->flags |= REQ_F_IO_DRAINED;
543 		io_queue_async_work(ctx, req);
544 	}
545 }
546 
547 static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
548 {
549 	struct io_rings *rings = ctx->rings;
550 	unsigned tail;
551 
552 	tail = ctx->cached_cq_tail;
553 	/*
554 	 * writes to the cq entry need to come after reading head; the
555 	 * control dependency is enough as we're using WRITE_ONCE to
556 	 * fill the cq entry
557 	 */
558 	if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
559 		return NULL;
560 
561 	ctx->cached_cq_tail++;
562 	return &rings->cqes[tail & ctx->cq_mask];
563 }
564 
565 static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
566 				 long res)
567 {
568 	struct io_uring_cqe *cqe;
569 
570 	/*
571 	 * If we can't get a cq entry, userspace overflowed the
572 	 * submission (by quite a lot). Increment the overflow count in
573 	 * the ring.
574 	 */
575 	cqe = io_get_cqring(ctx);
576 	if (cqe) {
577 		WRITE_ONCE(cqe->user_data, ki_user_data);
578 		WRITE_ONCE(cqe->res, res);
579 		WRITE_ONCE(cqe->flags, 0);
580 	} else {
581 		WRITE_ONCE(ctx->rings->cq_overflow,
582 				atomic_inc_return(&ctx->cached_cq_overflow));
583 	}
584 }
585 
586 static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
587 {
588 	if (waitqueue_active(&ctx->wait))
589 		wake_up(&ctx->wait);
590 	if (waitqueue_active(&ctx->sqo_wait))
591 		wake_up(&ctx->sqo_wait);
592 	if (ctx->cq_ev_fd)
593 		eventfd_signal(ctx->cq_ev_fd, 1);
594 }
595 
596 static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
597 				long res)
598 {
599 	unsigned long flags;
600 
601 	spin_lock_irqsave(&ctx->completion_lock, flags);
602 	io_cqring_fill_event(ctx, user_data, res);
603 	io_commit_cqring(ctx);
604 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
605 
606 	io_cqring_ev_posted(ctx);
607 }
608 
609 static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
610 				   struct io_submit_state *state)
611 {
612 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
613 	struct io_kiocb *req;
614 
615 	if (!percpu_ref_tryget(&ctx->refs))
616 		return NULL;
617 
618 	if (!state) {
619 		req = kmem_cache_alloc(req_cachep, gfp);
620 		if (unlikely(!req))
621 			goto out;
622 	} else if (!state->free_reqs) {
623 		size_t sz;
624 		int ret;
625 
626 		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
627 		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
628 
629 		/*
630 		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
631 		 * retry single alloc to be on the safe side.
632 		 */
633 		if (unlikely(ret <= 0)) {
634 			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
635 			if (!state->reqs[0])
636 				goto out;
637 			ret = 1;
638 		}
639 		state->free_reqs = ret - 1;
640 		state->cur_req = 1;
641 		req = state->reqs[0];
642 	} else {
643 		req = state->reqs[state->cur_req];
644 		state->free_reqs--;
645 		state->cur_req++;
646 	}
647 
648 	req->file = NULL;
649 	req->ctx = ctx;
650 	req->flags = 0;
651 	/* one is dropped after submission, the other at completion */
652 	refcount_set(&req->refs, 2);
653 	req->result = 0;
654 	return req;
655 out:
656 	percpu_ref_put(&ctx->refs);
657 	return NULL;
658 }
659 
660 static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
661 {
662 	if (*nr) {
663 		kmem_cache_free_bulk(req_cachep, *nr, reqs);
664 		percpu_ref_put_many(&ctx->refs, *nr);
665 		*nr = 0;
666 	}
667 }
668 
669 static void __io_free_req(struct io_kiocb *req)
670 {
671 	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
672 		fput(req->file);
673 	percpu_ref_put(&req->ctx->refs);
674 	kmem_cache_free(req_cachep, req);
675 }
676 
677 static void io_req_link_next(struct io_kiocb *req)
678 {
679 	struct io_kiocb *nxt;
680 
681 	/*
682 	 * The list should never be empty when we are called here. But it could
683 	 * potentially happen if the chain is messed up, so check to be on the
684 	 * safe side.
685 	 */
686 	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
687 	if (nxt) {
688 		list_del(&nxt->list);
689 		if (!list_empty(&req->link_list)) {
690 			INIT_LIST_HEAD(&nxt->link_list);
691 			list_splice(&req->link_list, &nxt->link_list);
692 			nxt->flags |= REQ_F_LINK;
693 		}
694 
695 		nxt->flags |= REQ_F_LINK_DONE;
696 		INIT_WORK(&nxt->work, io_sq_wq_submit_work);
697 		io_queue_async_work(req->ctx, nxt);
698 	}
699 }
700 
701 /*
702  * Called if REQ_F_LINK is set, and we fail the head request
703  */
704 static void io_fail_links(struct io_kiocb *req)
705 {
706 	struct io_kiocb *link;
707 
708 	while (!list_empty(&req->link_list)) {
709 		link = list_first_entry(&req->link_list, struct io_kiocb, list);
710 		list_del(&link->list);
711 
712 		io_cqring_add_event(req->ctx, link->user_data, -ECANCELED);
713 		__io_free_req(link);
714 	}
715 }
716 
717 static void io_free_req(struct io_kiocb *req)
718 {
719 	/*
720 	 * If LINK is set, we have dependent requests in this chain. If we
721 	 * didn't fail this request, queue the first one up, moving any other
722 	 * dependencies to the next request. In case of failure, fail the rest
723 	 * of the chain.
724 	 */
725 	if (req->flags & REQ_F_LINK) {
726 		if (req->flags & REQ_F_FAIL_LINK)
727 			io_fail_links(req);
728 		else
729 			io_req_link_next(req);
730 	}
731 
732 	__io_free_req(req);
733 }
734 
735 static void io_put_req(struct io_kiocb *req)
736 {
737 	if (refcount_dec_and_test(&req->refs))
738 		io_free_req(req);
739 }
740 
741 static unsigned io_cqring_events(struct io_rings *rings)
742 {
743 	/* See comment at the top of this file */
744 	smp_rmb();
745 	return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
746 }
747 
748 static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
749 {
750 	struct io_rings *rings = ctx->rings;
751 
752 	/* make sure SQ entry isn't read before tail */
753 	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
754 }
755 
756 /*
757  * Find and free completed poll iocbs
758  */
759 static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
760 			       struct list_head *done)
761 {
762 	void *reqs[IO_IOPOLL_BATCH];
763 	struct io_kiocb *req;
764 	int to_free;
765 
766 	to_free = 0;
767 	while (!list_empty(done)) {
768 		req = list_first_entry(done, struct io_kiocb, list);
769 		list_del(&req->list);
770 
771 		io_cqring_fill_event(ctx, req->user_data, req->result);
772 		(*nr_events)++;
773 
774 		if (refcount_dec_and_test(&req->refs)) {
775 			/* If we're not using fixed files, we have to pair the
776 			 * completion part with the file put. Use regular
777 			 * completions for those, only batch free for fixed
778 			 * file and non-linked commands.
779 			 */
780 			if ((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
781 			    REQ_F_FIXED_FILE) {
782 				reqs[to_free++] = req;
783 				if (to_free == ARRAY_SIZE(reqs))
784 					io_free_req_many(ctx, reqs, &to_free);
785 			} else {
786 				io_free_req(req);
787 			}
788 		}
789 	}
790 
791 	io_commit_cqring(ctx);
792 	io_free_req_many(ctx, reqs, &to_free);
793 }
794 
795 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
796 			long min)
797 {
798 	struct io_kiocb *req, *tmp;
799 	LIST_HEAD(done);
800 	bool spin;
801 	int ret;
802 
803 	/*
804 	 * Only spin for completions if we don't have multiple devices hanging
805 	 * off our complete list, and we're under the requested amount.
806 	 */
807 	spin = !ctx->poll_multi_file && *nr_events < min;
808 
809 	ret = 0;
810 	list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
811 		struct kiocb *kiocb = &req->rw;
812 
813 		/*
814 		 * Move completed entries to our local list. If we find a
815 		 * request that requires polling, break out and complete
816 		 * the done list first, if we have entries there.
817 		 */
818 		if (req->flags & REQ_F_IOPOLL_COMPLETED) {
819 			list_move_tail(&req->list, &done);
820 			continue;
821 		}
822 		if (!list_empty(&done))
823 			break;
824 
825 		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
826 		if (ret < 0)
827 			break;
828 
829 		if (ret && spin)
830 			spin = false;
831 		ret = 0;
832 	}
833 
834 	if (!list_empty(&done))
835 		io_iopoll_complete(ctx, nr_events, &done);
836 
837 	return ret;
838 }
839 
840 /*
841  * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
842  * non-spinning poll check - we'll still enter the driver poll loop, but only
843  * as a non-spinning completion check.
844  */
845 static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
846 				long min)
847 {
848 	while (!list_empty(&ctx->poll_list) && !need_resched()) {
849 		int ret;
850 
851 		ret = io_do_iopoll(ctx, nr_events, min);
852 		if (ret < 0)
853 			return ret;
854 		if (!min || *nr_events >= min)
855 			return 0;
856 	}
857 
858 	return 1;
859 }
860 
861 /*
862  * We can't just wait for polled events to come to us, we have to actively
863  * find and complete them.
864  */
865 static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
866 {
867 	if (!(ctx->flags & IORING_SETUP_IOPOLL))
868 		return;
869 
870 	mutex_lock(&ctx->uring_lock);
871 	while (!list_empty(&ctx->poll_list)) {
872 		unsigned int nr_events = 0;
873 
874 		io_iopoll_getevents(ctx, &nr_events, 1);
875 
876 		/*
877 		 * Ensure we allow local-to-the-cpu processing to take place;
878 		 * in this case we need to ensure that we reap all events.
879 		 */
880 		cond_resched();
881 	}
882 	mutex_unlock(&ctx->uring_lock);
883 }
884 
885 static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
886 			    long min)
887 {
888 	int iters = 0, ret = 0;
889 
890 	do {
891 		int tmin = 0;
892 
893 		/*
894 		 * Don't enter poll loop if we already have events pending.
895 		 * If we do, we can potentially be spinning for commands that
896 		 * already triggered a CQE (eg in error).
897 		 */
898 		if (io_cqring_events(ctx->rings))
899 			break;
900 
901 		/*
902 		 * If a submit got punted to a workqueue, we can have the
903 		 * application entering polling for a command before it gets
904 		 * issued. That app will hold the uring_lock for the duration
905 		 * of the poll right here, so we need to take a breather every
906 		 * now and then to ensure that the issue has a chance to add
907 		 * the poll to the issued list. Otherwise we can spin here
908 		 * forever, while the workqueue is stuck trying to acquire the
909 		 * very same mutex.
910 		 */
911 		if (!(++iters & 7)) {
912 			mutex_unlock(&ctx->uring_lock);
913 			mutex_lock(&ctx->uring_lock);
914 		}
915 
916 		if (*nr_events < min)
917 			tmin = min - *nr_events;
918 
919 		ret = io_iopoll_getevents(ctx, nr_events, tmin);
920 		if (ret <= 0)
921 			break;
922 		ret = 0;
923 	} while (min && !*nr_events && !need_resched());
924 
925 	return ret;
926 }
927 
928 static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
929 			   long min)
930 {
931 	int ret;
932 
933 	/*
934 	 * We disallow the app entering submit/complete with polling, but we
935 	 * still need to lock the ring to prevent racing with polled issue
936 	 * that got punted to a workqueue.
937 	 */
938 	mutex_lock(&ctx->uring_lock);
939 	ret = __io_iopoll_check(ctx, nr_events, min);
940 	mutex_unlock(&ctx->uring_lock);
941 	return ret;
942 }
943 
944 static void kiocb_end_write(struct io_kiocb *req)
945 {
946 	/*
947 	 * Tell lockdep we inherited freeze protection from submission
948 	 * thread.
949 	 */
950 	if (req->flags & REQ_F_ISREG) {
951 		struct inode *inode = file_inode(req->file);
952 
953 		__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
954 	}
955 	file_end_write(req->file);
956 }
957 
958 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
959 {
960 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
961 
962 	if (kiocb->ki_flags & IOCB_WRITE)
963 		kiocb_end_write(req);
964 
965 	if ((req->flags & REQ_F_LINK) && res != req->result)
966 		req->flags |= REQ_F_FAIL_LINK;
967 	io_cqring_add_event(req->ctx, req->user_data, res);
968 	io_put_req(req);
969 }
970 
971 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
972 {
973 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);
974 
975 	if (kiocb->ki_flags & IOCB_WRITE)
976 		kiocb_end_write(req);
977 
978 	if ((req->flags & REQ_F_LINK) && res != req->result)
979 		req->flags |= REQ_F_FAIL_LINK;
980 	req->result = res;
981 	if (res != -EAGAIN)
982 		req->flags |= REQ_F_IOPOLL_COMPLETED;
983 }
984 
985 /*
986  * After the iocb has been issued, it's safe to be found on the poll list.
987  * Adding the kiocb to the list AFTER submission ensures that we don't
988  * find it from an io_iopoll_getevents() thread before the issuer is done
989  * accessing the kiocb cookie.
990  */
991 static void io_iopoll_req_issued(struct io_kiocb *req)
992 {
993 	struct io_ring_ctx *ctx = req->ctx;
994 
995 	/*
996 	 * Track whether we have multiple files in our lists. This will impact
997 	 * how we do polling eventually, not spinning if we're on potentially
998 	 * different devices.
999 	 */
1000 	if (list_empty(&ctx->poll_list)) {
1001 		ctx->poll_multi_file = false;
1002 	} else if (!ctx->poll_multi_file) {
1003 		struct io_kiocb *list_req;
1004 
1005 		list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
1006 						list);
1007 		if (list_req->rw.ki_filp != req->rw.ki_filp)
1008 			ctx->poll_multi_file = true;
1009 	}
1010 
1011 	/*
1012 	 * For fast devices, IO may have already completed. If it has, add
1013 	 * it to the front so we find it first.
1014 	 */
1015 	if (req->flags & REQ_F_IOPOLL_COMPLETED)
1016 		list_add(&req->list, &ctx->poll_list);
1017 	else
1018 		list_add_tail(&req->list, &ctx->poll_list);
1019 }
1020 
1021 static void io_file_put(struct io_submit_state *state)
1022 {
1023 	if (state->file) {
1024 		int diff = state->has_refs - state->used_refs;
1025 
1026 		if (diff)
1027 			fput_many(state->file, diff);
1028 		state->file = NULL;
1029 	}
1030 }
1031 
1032 /*
1033  * Get as many references to a file as we have IOs left in this submission,
1034  * assuming most submissions are for one file, or at least that each file
1035  * has more than one submission.
1036  */
1037 static struct file *io_file_get(struct io_submit_state *state, int fd)
1038 {
1039 	if (!state)
1040 		return fget(fd);
1041 
1042 	if (state->file) {
1043 		if (state->fd == fd) {
1044 			state->used_refs++;
1045 			state->ios_left--;
1046 			return state->file;
1047 		}
1048 		io_file_put(state);
1049 	}
1050 	state->file = fget_many(fd, state->ios_left);
1051 	if (!state->file)
1052 		return NULL;
1053 
1054 	state->fd = fd;
1055 	state->has_refs = state->ios_left;
1056 	state->used_refs = 1;
1057 	state->ios_left--;
1058 	return state->file;
1059 }
1060 
1061 /*
1062  * If we tracked the file through the SCM inflight mechanism, we could support
1063  * any file. For now, just ensure that anything potentially problematic is done
1064  * inline.
1065  */
1066 static bool io_file_supports_async(struct file *file)
1067 {
1068 	umode_t mode = file_inode(file)->i_mode;
1069 
1070 	if (S_ISBLK(mode) || S_ISCHR(mode))
1071 		return true;
1072 	if (S_ISREG(mode) && file->f_op != &io_uring_fops)
1073 		return true;
1074 
1075 	return false;
1076 }
1077 
1078 static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
1079 		      bool force_nonblock)
1080 {
1081 	const struct io_uring_sqe *sqe = s->sqe;
1082 	struct io_ring_ctx *ctx = req->ctx;
1083 	struct kiocb *kiocb = &req->rw;
1084 	unsigned ioprio;
1085 	int ret;
1086 
1087 	if (!req->file)
1088 		return -EBADF;
1089 
1090 	if (S_ISREG(file_inode(req->file)->i_mode))
1091 		req->flags |= REQ_F_ISREG;
1092 
1093 	/*
1094 	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
1095 	 * we know to async punt it even if it was opened O_NONBLOCK
1096 	 */
1097 	if (force_nonblock && !io_file_supports_async(req->file)) {
1098 		req->flags |= REQ_F_MUST_PUNT;
1099 		return -EAGAIN;
1100 	}
1101 
1102 	kiocb->ki_pos = READ_ONCE(sqe->off);
1103 	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
1104 	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
1105 
1106 	ioprio = READ_ONCE(sqe->ioprio);
1107 	if (ioprio) {
1108 		ret = ioprio_check_cap(ioprio);
1109 		if (ret)
1110 			return ret;
1111 
1112 		kiocb->ki_ioprio = ioprio;
1113 	} else
1114 		kiocb->ki_ioprio = get_current_ioprio();
1115 
1116 	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
1117 	if (unlikely(ret))
1118 		return ret;
1119 
1120 	/* don't allow async punt if RWF_NOWAIT was requested */
1121 	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
1122 	    (req->file->f_flags & O_NONBLOCK))
1123 		req->flags |= REQ_F_NOWAIT;
1124 
1125 	if (force_nonblock)
1126 		kiocb->ki_flags |= IOCB_NOWAIT;
1127 
1128 	if (ctx->flags & IORING_SETUP_IOPOLL) {
1129 		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
1130 		    !kiocb->ki_filp->f_op->iopoll)
1131 			return -EOPNOTSUPP;
1132 
1133 		kiocb->ki_flags |= IOCB_HIPRI;
1134 		kiocb->ki_complete = io_complete_rw_iopoll;
1135 		req->result = 0;
1136 	} else {
1137 		if (kiocb->ki_flags & IOCB_HIPRI)
1138 			return -EINVAL;
1139 		kiocb->ki_complete = io_complete_rw;
1140 	}
1141 	return 0;
1142 }
1143 
1144 static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
1145 {
1146 	switch (ret) {
1147 	case -EIOCBQUEUED:
1148 		break;
1149 	case -ERESTARTSYS:
1150 	case -ERESTARTNOINTR:
1151 	case -ERESTARTNOHAND:
1152 	case -ERESTART_RESTARTBLOCK:
1153 		/*
1154 		 * We can't just restart the syscall, since previously
1155 		 * submitted sqes may already be in progress. Just fail this
1156 		 * IO with EINTR.
1157 		 */
1158 		ret = -EINTR;
1159 		/* fall through */
1160 	default:
1161 		kiocb->ki_complete(kiocb, ret, 0);
1162 	}
1163 }
1164 
1165 static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
1166 			   const struct io_uring_sqe *sqe,
1167 			   struct iov_iter *iter)
1168 {
1169 	size_t len = READ_ONCE(sqe->len);
1170 	struct io_mapped_ubuf *imu;
1171 	unsigned index, buf_index;
1172 	size_t offset;
1173 	u64 buf_addr;
1174 
1175 	/* attempt to use fixed buffers without having provided iovecs */
1176 	if (unlikely(!ctx->user_bufs))
1177 		return -EFAULT;
1178 
1179 	buf_index = READ_ONCE(sqe->buf_index);
1180 	if (unlikely(buf_index >= ctx->nr_user_bufs))
1181 		return -EFAULT;
1182 
1183 	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
1184 	imu = &ctx->user_bufs[index];
1185 	buf_addr = READ_ONCE(sqe->addr);
1186 
1187 	/* overflow */
1188 	if (buf_addr + len < buf_addr)
1189 		return -EFAULT;
1190 	/* not inside the mapped region */
1191 	if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
1192 		return -EFAULT;
1193 
1194 	/*
1195 	 * May not be a start of buffer, set size appropriately
1196 	 * and advance us to the beginning.
1197 	 */
1198 	offset = buf_addr - imu->ubuf;
1199 	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
1200 
1201 	if (offset) {
1202 		/*
1203 		 * Don't use iov_iter_advance() here, as it's really slow for
1204 		 * using the latter parts of a big fixed buffer - it iterates
1205 		 * over each segment manually. We can cheat a bit here, because
1206 		 * we know that:
1207 		 *
1208 		 * 1) it's a BVEC iter, we set it up
1209 		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
1210 		 *    first and last bvec
1211 		 *
1212 		 * So just find our index, and adjust the iterator afterwards.
1213 		 * If the offset is within the first bvec (or the whole first
1214 		 * bvec, just use iov_iter_advance(). This makes it easier
1215 		 * bvec), just use iov_iter_advance(). This makes it easier
1216 		 * be PAGE_SIZE aligned.
1217 		 */
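		/*
		 * Worked example (illustrative numbers, assuming 4 KiB pages
		 * and a page-aligned first bvec with bv_len 4096): for
		 * offset 10000 the else branch below leaves offset at 5904,
		 * seg_skip = 1 + (5904 >> 12) = 2, and iov_offset =
		 * 5904 & 4095 = 1808; 2 * 4096 + 1808 == 10000, as expected.
		 */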
1218 		const struct bio_vec *bvec = imu->bvec;
1219 
1220 		if (offset <= bvec->bv_len) {
1221 			iov_iter_advance(iter, offset);
1222 		} else {
1223 			unsigned long seg_skip;
1224 
1225 			/* skip first vec */
1226 			offset -= bvec->bv_len;
1227 			seg_skip = 1 + (offset >> PAGE_SHIFT);
1228 
1229 			iter->bvec = bvec + seg_skip;
1230 			iter->nr_segs -= seg_skip;
1231 			iter->count -= bvec->bv_len + offset;
1232 			iter->iov_offset = offset & ~PAGE_MASK;
1233 		}
1234 	}
1235 
1236 	return len;
1237 }
1238 
1239 static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
1240 			       const struct sqe_submit *s, struct iovec **iovec,
1241 			       struct iov_iter *iter)
1242 {
1243 	const struct io_uring_sqe *sqe = s->sqe;
1244 	void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1245 	size_t sqe_len = READ_ONCE(sqe->len);
1246 	u8 opcode;
1247 
1248 	/*
1249 	 * We're reading ->opcode for the second time, but the first read
1250 	 * doesn't care whether it's _FIXED or not, so it doesn't matter
1251 	 * whether ->opcode changes concurrently. The first read does care
1252 	 * about whether it is a READ or a WRITE, so we don't trust this read
1253 	 * for that purpose and instead let the caller pass in the read/write
1254 	 * flag.
1255 	 */
1256 	opcode = READ_ONCE(sqe->opcode);
1257 	if (opcode == IORING_OP_READ_FIXED ||
1258 	    opcode == IORING_OP_WRITE_FIXED) {
1259 		ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
1260 		*iovec = NULL;
1261 		return ret;
1262 	}
1263 
1264 	if (!s->has_user)
1265 		return -EFAULT;
1266 
1267 #ifdef CONFIG_COMPAT
1268 	if (ctx->compat)
1269 		return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
1270 						iovec, iter);
1271 #endif
1272 
1273 	return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
1274 }
1275 
1276 static inline bool io_should_merge(struct async_list *al, struct kiocb *kiocb)
1277 {
1278 	if (al->file == kiocb->ki_filp) {
1279 		off_t start, end;
1280 
1281 		/*
1282 		 * Allow merging if we're anywhere in the range of the same
1283 		 * page. Generally this happens for sub-page reads or writes,
1284 		 * and it's beneficial to allow the first worker to bring the
1285 		 * page in and the piggy-backed work can then work on the
1286 		 * cached page.
1287 		 */
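		/*
		 * For example (illustrative numbers, 4 KiB pages): io_start
		 * 5000 and io_len 200 give start 4096 and end 8192, so a new
		 * request with ki_pos 6000 is considered mergeable.
		 */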
1288 		start = al->io_start & PAGE_MASK;
1289 		end = (al->io_start + al->io_len + PAGE_SIZE - 1) & PAGE_MASK;
1290 		if (kiocb->ki_pos >= start && kiocb->ki_pos <= end)
1291 			return true;
1292 	}
1293 
1294 	al->file = NULL;
1295 	return false;
1296 }
1297 
1298 /*
1299  * Make a note of the last file/offset/direction we punted to async
1300  * context. We'll use this information to see if we can piggyback a
1301  * sequential request onto the previous one, if it still hasn't been
1302  * completed by the async worker.
1303  */
1304 static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
1305 {
1306 	struct async_list *async_list = &req->ctx->pending_async[rw];
1307 	struct kiocb *kiocb = &req->rw;
1308 	struct file *filp = kiocb->ki_filp;
1309 
1310 	if (io_should_merge(async_list, kiocb)) {
1311 		unsigned long max_bytes;
1312 
1313 		/* Use 8x RA size as a decent limiter for both reads/writes */
1314 		max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3);
1315 		if (!max_bytes)
1316 			max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3);
1317 
1318 		/* If the max length is exceeded, reset the state */
1319 		if (async_list->io_len + len <= max_bytes) {
1320 			req->flags |= REQ_F_SEQ_PREV;
1321 			async_list->io_len += len;
1322 		} else {
1323 			async_list->file = NULL;
1324 		}
1325 	}
1326 
1327 	/* New file? Reset state. */
1328 	if (async_list->file != filp) {
1329 		async_list->io_start = kiocb->ki_pos;
1330 		async_list->io_len = len;
1331 		async_list->file = filp;
1332 	}
1333 }
1334 
1335 /*
1336  * For files that don't have ->read_iter() and ->write_iter(), handle them
1337  * by looping over ->read() or ->write() manually.
1338  */
1339 static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
1340 			   struct iov_iter *iter)
1341 {
1342 	ssize_t ret = 0;
1343 
1344 	/*
1345 	 * Don't support polled IO through this interface, and we can't
1346 	 * support non-blocking either. For the latter, this just causes
1347 	 * the kiocb to be handled from an async context.
1348 	 */
1349 	if (kiocb->ki_flags & IOCB_HIPRI)
1350 		return -EOPNOTSUPP;
1351 	if (kiocb->ki_flags & IOCB_NOWAIT)
1352 		return -EAGAIN;
1353 
1354 	while (iov_iter_count(iter)) {
1355 		struct iovec iovec;
1356 		ssize_t nr;
1357 
1358 		if (!iov_iter_is_bvec(iter)) {
1359 			iovec = iov_iter_iovec(iter);
1360 		} else {
1361 			/* fixed buffers import bvec */
1362 			iovec.iov_base = kmap(iter->bvec->bv_page)
1363 						+ iter->iov_offset;
1364 			iovec.iov_len = min(iter->count,
1365 					iter->bvec->bv_len - iter->iov_offset);
1366 		}
1367 
1368 		if (rw == READ) {
1369 			nr = file->f_op->read(file, iovec.iov_base,
1370 					      iovec.iov_len, &kiocb->ki_pos);
1371 		} else {
1372 			nr = file->f_op->write(file, iovec.iov_base,
1373 					       iovec.iov_len, &kiocb->ki_pos);
1374 		}
1375 
1376 		if (iov_iter_is_bvec(iter))
1377 			kunmap(iter->bvec->bv_page);
1378 
1379 		if (nr < 0) {
1380 			if (!ret)
1381 				ret = nr;
1382 			break;
1383 		}
1384 		ret += nr;
1385 		if (nr != iovec.iov_len)
1386 			break;
1387 		iov_iter_advance(iter, nr);
1388 	}
1389 
1390 	return ret;
1391 }
1392 
1393 static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
1394 		   bool force_nonblock)
1395 {
1396 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1397 	struct kiocb *kiocb = &req->rw;
1398 	struct iov_iter iter;
1399 	struct file *file;
1400 	size_t iov_count;
1401 	ssize_t read_size, ret;
1402 
1403 	ret = io_prep_rw(req, s, force_nonblock);
1404 	if (ret)
1405 		return ret;
1406 	file = kiocb->ki_filp;
1407 
1408 	if (unlikely(!(file->f_mode & FMODE_READ)))
1409 		return -EBADF;
1410 
1411 	ret = io_import_iovec(req->ctx, READ, s, &iovec, &iter);
1412 	if (ret < 0)
1413 		return ret;
1414 
1415 	read_size = ret;
1416 	if (req->flags & REQ_F_LINK)
1417 		req->result = read_size;
1418 
1419 	iov_count = iov_iter_count(&iter);
1420 	ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
1421 	if (!ret) {
1422 		ssize_t ret2;
1423 
1424 		if (file->f_op->read_iter)
1425 			ret2 = call_read_iter(file, kiocb, &iter);
1426 		else
1427 			ret2 = loop_rw_iter(READ, file, kiocb, &iter);
1428 
1429 		/*
1430 		 * In case of a short read, punt to async. This can happen
1431 		 * if we have data partially cached. Alternatively we can
1432 		 * return the short read, in which case the application will
1433 		 * need to issue another SQE and wait for it. That SQE will
1434 		 * need async punt anyway, so it's more efficient to do it
1435 		 * here.
1436 		 */
1437 		if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
1438 		    (req->flags & REQ_F_ISREG) &&
1439 		    ret2 > 0 && ret2 < read_size)
1440 			ret2 = -EAGAIN;
1441 		/* Catch -EAGAIN return for forced non-blocking submission */
1442 		if (!force_nonblock || ret2 != -EAGAIN) {
1443 			io_rw_done(kiocb, ret2);
1444 		} else {
1445 			/*
1446 			 * If ->needs_lock is true, we're already in async
1447 			 * context.
1448 			 */
1449 			if (!s->needs_lock)
1450 				io_async_list_note(READ, req, iov_count);
1451 			ret = -EAGAIN;
1452 		}
1453 	}
1454 	kfree(iovec);
1455 	return ret;
1456 }
1457 
1458 static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
1459 		    bool force_nonblock)
1460 {
1461 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1462 	struct kiocb *kiocb = &req->rw;
1463 	struct iov_iter iter;
1464 	struct file *file;
1465 	size_t iov_count;
1466 	ssize_t ret;
1467 
1468 	ret = io_prep_rw(req, s, force_nonblock);
1469 	if (ret)
1470 		return ret;
1471 
1472 	file = kiocb->ki_filp;
1473 	if (unlikely(!(file->f_mode & FMODE_WRITE)))
1474 		return -EBADF;
1475 
1476 	ret = io_import_iovec(req->ctx, WRITE, s, &iovec, &iter);
1477 	if (ret < 0)
1478 		return ret;
1479 
1480 	if (req->flags & REQ_F_LINK)
1481 		req->result = ret;
1482 
1483 	iov_count = iov_iter_count(&iter);
1484 
1485 	ret = -EAGAIN;
1486 	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) {
1487 		/* If ->needs_lock is true, we're already in async context. */
1488 		if (!s->needs_lock)
1489 			io_async_list_note(WRITE, req, iov_count);
1490 		goto out_free;
1491 	}
1492 
1493 	ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
1494 	if (!ret) {
1495 		ssize_t ret2;
1496 
1497 		/*
1498 		 * Open-code file_start_write here to grab freeze protection,
1499 		 * which will be released by another thread in
1500 		 * io_complete_rw().  Fool lockdep by telling it the lock got
1501 		 * released so that it doesn't complain about the held lock when
1502 		 * we return to userspace.
1503 		 */
1504 		if (req->flags & REQ_F_ISREG) {
1505 			__sb_start_write(file_inode(file)->i_sb,
1506 						SB_FREEZE_WRITE, true);
1507 			__sb_writers_release(file_inode(file)->i_sb,
1508 						SB_FREEZE_WRITE);
1509 		}
1510 		kiocb->ki_flags |= IOCB_WRITE;
1511 
1512 		if (file->f_op->write_iter)
1513 			ret2 = call_write_iter(file, kiocb, &iter);
1514 		else
1515 			ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
1516 		if (!force_nonblock || ret2 != -EAGAIN) {
1517 			io_rw_done(kiocb, ret2);
1518 		} else {
1519 			/*
1520 			 * If ->needs_lock is true, we're already in async
1521 			 * context.
1522 			 */
1523 			if (!s->needs_lock)
1524 				io_async_list_note(WRITE, req, iov_count);
1525 			ret = -EAGAIN;
1526 		}
1527 	}
1528 out_free:
1529 	kfree(iovec);
1530 	return ret;
1531 }
1532 
1533 /*
1534  * IORING_OP_NOP just posts a completion event, nothing else.
1535  */
1536 static int io_nop(struct io_kiocb *req, u64 user_data)
1537 {
1538 	struct io_ring_ctx *ctx = req->ctx;
1539 	long err = 0;
1540 
1541 	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1542 		return -EINVAL;
1543 
1544 	io_cqring_add_event(ctx, user_data, err);
1545 	io_put_req(req);
1546 	return 0;
1547 }
1548 
1549 static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1550 {
1551 	struct io_ring_ctx *ctx = req->ctx;
1552 
1553 	if (!req->file)
1554 		return -EBADF;
1555 
1556 	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1557 		return -EINVAL;
1558 	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
1559 		return -EINVAL;
1560 
1561 	return 0;
1562 }
1563 
1564 static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1565 		    bool force_nonblock)
1566 {
1567 	loff_t sqe_off = READ_ONCE(sqe->off);
1568 	loff_t sqe_len = READ_ONCE(sqe->len);
1569 	loff_t end = sqe_off + sqe_len;
1570 	unsigned fsync_flags;
1571 	int ret;
1572 
1573 	fsync_flags = READ_ONCE(sqe->fsync_flags);
1574 	if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
1575 		return -EINVAL;
1576 
1577 	ret = io_prep_fsync(req, sqe);
1578 	if (ret)
1579 		return ret;
1580 
1581 	/* fsync always requires a blocking context */
1582 	if (force_nonblock)
1583 		return -EAGAIN;
1584 
1585 	ret = vfs_fsync_range(req->rw.ki_filp, sqe_off,
1586 				end > 0 ? end : LLONG_MAX,
1587 				fsync_flags & IORING_FSYNC_DATASYNC);
1588 
1589 	if (ret < 0 && (req->flags & REQ_F_LINK))
1590 		req->flags |= REQ_F_FAIL_LINK;
1591 	io_cqring_add_event(req->ctx, sqe->user_data, ret);
1592 	io_put_req(req);
1593 	return 0;
1594 }
1595 
1596 static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1597 {
1598 	struct io_ring_ctx *ctx = req->ctx;
1599 	int ret = 0;
1600 
1601 	if (!req->file)
1602 		return -EBADF;
1603 
1604 	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1605 		return -EINVAL;
1606 	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
1607 		return -EINVAL;
1608 
1609 	return ret;
1610 }
1611 
1612 static int io_sync_file_range(struct io_kiocb *req,
1613 			      const struct io_uring_sqe *sqe,
1614 			      bool force_nonblock)
1615 {
1616 	loff_t sqe_off;
1617 	loff_t sqe_len;
1618 	unsigned flags;
1619 	int ret;
1620 
1621 	ret = io_prep_sfr(req, sqe);
1622 	if (ret)
1623 		return ret;
1624 
1625 	/* sync_file_range always requires a blocking context */
1626 	if (force_nonblock)
1627 		return -EAGAIN;
1628 
1629 	sqe_off = READ_ONCE(sqe->off);
1630 	sqe_len = READ_ONCE(sqe->len);
1631 	flags = READ_ONCE(sqe->sync_range_flags);
1632 
1633 	ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);
1634 
1635 	if (ret < 0 && (req->flags & REQ_F_LINK))
1636 		req->flags |= REQ_F_FAIL_LINK;
1637 	io_cqring_add_event(req->ctx, sqe->user_data, ret);
1638 	io_put_req(req);
1639 	return 0;
1640 }
1641 
1642 #if defined(CONFIG_NET)
1643 static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1644 			   bool force_nonblock,
1645 		   long (*fn)(struct socket *, struct user_msghdr __user *,
1646 				unsigned int))
1647 {
1648 	struct socket *sock;
1649 	int ret;
1650 
1651 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1652 		return -EINVAL;
1653 
1654 	sock = sock_from_file(req->file, &ret);
1655 	if (sock) {
1656 		struct user_msghdr __user *msg;
1657 		unsigned flags;
1658 
1659 		flags = READ_ONCE(sqe->msg_flags);
1660 		if (flags & MSG_DONTWAIT)
1661 			req->flags |= REQ_F_NOWAIT;
1662 		else if (force_nonblock)
1663 			flags |= MSG_DONTWAIT;
1664 
1665 		msg = (struct user_msghdr __user *) (unsigned long)
1666 			READ_ONCE(sqe->addr);
1667 
1668 		ret = fn(sock, msg, flags);
1669 		if (force_nonblock && ret == -EAGAIN)
1670 			return ret;
1671 		if (ret == -ERESTARTSYS)
1672 			ret = -EINTR;
1673 	}
1674 
1675 	io_cqring_add_event(req->ctx, sqe->user_data, ret);
1676 	io_put_req(req);
1677 	return 0;
1678 }
1679 #endif
1680 
1681 static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1682 		      bool force_nonblock)
1683 {
1684 #if defined(CONFIG_NET)
1685 	return io_send_recvmsg(req, sqe, force_nonblock, __sys_sendmsg_sock);
1686 #else
1687 	return -EOPNOTSUPP;
1688 #endif
1689 }
1690 
1691 static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
1692 		      bool force_nonblock)
1693 {
1694 #if defined(CONFIG_NET)
1695 	return io_send_recvmsg(req, sqe, force_nonblock, __sys_recvmsg_sock);
1696 #else
1697 	return -EOPNOTSUPP;
1698 #endif
1699 }
1700 
1701 static void io_poll_remove_one(struct io_kiocb *req)
1702 {
1703 	struct io_poll_iocb *poll = &req->poll;
1704 
1705 	spin_lock(&poll->head->lock);
1706 	WRITE_ONCE(poll->canceled, true);
1707 	if (!list_empty(&poll->wait.entry)) {
1708 		list_del_init(&poll->wait.entry);
1709 		io_queue_async_work(req->ctx, req);
1710 	}
1711 	spin_unlock(&poll->head->lock);
1712 
1713 	list_del_init(&req->list);
1714 }
1715 
1716 static void io_poll_remove_all(struct io_ring_ctx *ctx)
1717 {
1718 	struct io_kiocb *req;
1719 
1720 	spin_lock_irq(&ctx->completion_lock);
1721 	while (!list_empty(&ctx->cancel_list)) {
1722 		req = list_first_entry(&ctx->cancel_list, struct io_kiocb,list);
1723 		io_poll_remove_one(req);
1724 	}
1725 	spin_unlock_irq(&ctx->completion_lock);
1726 }
1727 
1728 /*
1729  * Find a running poll command that matches one specified in sqe->addr,
1730  * and remove it if found.
1731  */
1732 static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1733 {
1734 	struct io_ring_ctx *ctx = req->ctx;
1735 	struct io_kiocb *poll_req, *next;
1736 	int ret = -ENOENT;
1737 
1738 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1739 		return -EINVAL;
1740 	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
1741 	    sqe->poll_events)
1742 		return -EINVAL;
1743 
1744 	spin_lock_irq(&ctx->completion_lock);
1745 	list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) {
1746 		if (READ_ONCE(sqe->addr) == poll_req->user_data) {
1747 			io_poll_remove_one(poll_req);
1748 			ret = 0;
1749 			break;
1750 		}
1751 	}
1752 	spin_unlock_irq(&ctx->completion_lock);
1753 
1754 	io_cqring_add_event(req->ctx, sqe->user_data, ret);
1755 	io_put_req(req);
1756 	return 0;
1757 }
1758 
1759 static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
1760 			     __poll_t mask)
1761 {
1762 	req->poll.done = true;
1763 	io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask));
1764 	io_commit_cqring(ctx);
1765 }
1766 
1767 static void io_poll_complete_work(struct work_struct *work)
1768 {
1769 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
1770 	struct io_poll_iocb *poll = &req->poll;
1771 	struct poll_table_struct pt = { ._key = poll->events };
1772 	struct io_ring_ctx *ctx = req->ctx;
1773 	const struct cred *old_cred;
1774 	__poll_t mask = 0;
1775 
1776 	old_cred = override_creds(ctx->creds);
1777 
1778 	if (!READ_ONCE(poll->canceled))
1779 		mask = vfs_poll(poll->file, &pt) & poll->events;
1780 
1781 	/*
1782 	 * Note that a cancelling caller (io_poll_remove_one()) also deletes the
1783 	 * request from the cancel_list after marking it cancelled.  We need the
1784 	 * completion_lock roundtrip here to synchronize with it.  In the
1785 	 * cancellation case the list_del_init itself is not needed, but it is
1786 	 * harmless, so we keep it in to avoid further branches in the fast path.
1787 	 */
1788 	spin_lock_irq(&ctx->completion_lock);
1789 	if (!mask && !READ_ONCE(poll->canceled)) {
1790 		add_wait_queue(poll->head, &poll->wait);
1791 		spin_unlock_irq(&ctx->completion_lock);
1792 		goto out;
1793 	}
1794 	list_del_init(&req->list);
1795 	io_poll_complete(ctx, req, mask);
1796 	spin_unlock_irq(&ctx->completion_lock);
1797 
1798 	io_cqring_ev_posted(ctx);
1799 	io_put_req(req);
1800 out:
1801 	revert_creds(old_cred);
1802 }
1803 
1804 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1805 			void *key)
1806 {
1807 	struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
1808 							wait);
1809 	struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
1810 	struct io_ring_ctx *ctx = req->ctx;
1811 	__poll_t mask = key_to_poll(key);
1812 	unsigned long flags;
1813 
1814 	/* for instances that support it, check for an event match first: */
1815 	if (mask && !(mask & poll->events))
1816 		return 0;
1817 
1818 	list_del_init(&poll->wait.entry);
1819 
1820 	if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
1821 		list_del(&req->list);
1822 		io_poll_complete(ctx, req, mask);
1823 		spin_unlock_irqrestore(&ctx->completion_lock, flags);
1824 
1825 		io_cqring_ev_posted(ctx);
1826 		io_put_req(req);
1827 	} else {
1828 		io_queue_async_work(ctx, req);
1829 	}
1830 
1831 	return 1;
1832 }
1833 
1834 struct io_poll_table {
1835 	struct poll_table_struct pt;
1836 	struct io_kiocb *req;
1837 	int error;
1838 };
1839 
1840 static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1841 			       struct poll_table_struct *p)
1842 {
1843 	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
1844 
1845 	if (unlikely(pt->req->poll.head)) {
1846 		pt->error = -EINVAL;
1847 		return;
1848 	}
1849 
1850 	pt->error = 0;
1851 	pt->req->poll.head = head;
1852 	add_wait_queue(head, &pt->req->poll.wait);
1853 }
1854 
1855 static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1856 {
1857 	struct io_poll_iocb *poll = &req->poll;
1858 	struct io_ring_ctx *ctx = req->ctx;
1859 	struct io_poll_table ipt;
1860 	bool cancel = false;
1861 	__poll_t mask;
1862 	u16 events;
1863 
1864 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1865 		return -EINVAL;
1866 	if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
1867 		return -EINVAL;
1868 	if (!poll->file)
1869 		return -EBADF;
1870 
1871 	req->submit.sqe = NULL;
1872 	INIT_WORK(&req->work, io_poll_complete_work);
1873 	events = READ_ONCE(sqe->poll_events);
1874 	poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
1875 
1876 	poll->head = NULL;
1877 	poll->done = false;
1878 	poll->canceled = false;
1879 
1880 	ipt.pt._qproc = io_poll_queue_proc;
1881 	ipt.pt._key = poll->events;
1882 	ipt.req = req;
1883 	ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1884 
1885 	/* initialize the list so that we can do list_empty checks */
1886 	INIT_LIST_HEAD(&poll->wait.entry);
1887 	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
1888 
1889 	INIT_LIST_HEAD(&req->list);
1890 
1891 	mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
1892 
1893 	spin_lock_irq(&ctx->completion_lock);
1894 	if (likely(poll->head)) {
1895 		spin_lock(&poll->head->lock);
1896 		if (unlikely(list_empty(&poll->wait.entry))) {
1897 			if (ipt.error)
1898 				cancel = true;
1899 			ipt.error = 0;
1900 			mask = 0;
1901 		}
1902 		if (mask || ipt.error)
1903 			list_del_init(&poll->wait.entry);
1904 		else if (cancel)
1905 			WRITE_ONCE(poll->canceled, true);
1906 		else if (!poll->done) /* actually waiting for an event */
1907 			list_add_tail(&req->list, &ctx->cancel_list);
1908 		spin_unlock(&poll->head->lock);
1909 	}
1910 	if (mask) { /* no async, we'd stolen it */
1911 		ipt.error = 0;
1912 		io_poll_complete(ctx, req, mask);
1913 	}
1914 	spin_unlock_irq(&ctx->completion_lock);
1915 
1916 	if (mask) {
1917 		io_cqring_ev_posted(ctx);
1918 		io_put_req(req);
1919 	}
1920 	return ipt.error;
1921 }
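
/*
 * A minimal userspace sketch of driving the two poll opcodes above.  The
 * sqe/sqe2 pointers, 'fd' and the surrounding SQ-ring handling are assumed
 * to come from the application's own setup code:
 *
 *	// arm a single-shot poll for readability on 'fd'
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode      = IORING_OP_POLL_ADD;
 *	sqe->fd          = fd;
 *	sqe->poll_events = POLLIN;
 *	sqe->user_data   = 0x1234;	// token echoed back in the CQE
 *
 *	// later: cancel it by matching the original user_data
 *	memset(sqe2, 0, sizeof(*sqe2));
 *	sqe2->opcode     = IORING_OP_POLL_REMOVE;
 *	sqe2->addr       = 0x1234;
 *
 * The POLL_REMOVE completion carries 0 on success and -ENOENT if no poll
 * request with that user_data was found; a matched poll request then
 * completes through its normal completion path.
 */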
1922 
1923 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
1924 {
1925 	struct io_ring_ctx *ctx;
1926 	struct io_kiocb *req, *prev;
1927 	unsigned long flags;
1928 
1929 	req = container_of(timer, struct io_kiocb, timeout.timer);
1930 	ctx = req->ctx;
1931 	atomic_inc(&ctx->cq_timeouts);
1932 
1933 	spin_lock_irqsave(&ctx->completion_lock, flags);
1934 	/*
1935 	 * Adjust the sequence of the reqs before the current one, because this
1936 	 * req will consume a slot in the cq_ring and the cq_tail pointer will
1937 	 * be increased; otherwise other timeout reqs may complete early,
1938 	 * without waiting for enough wait_nr.
1939 	 */
1940 	prev = req;
1941 	list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
1942 		prev->sequence++;
1943 	list_del(&req->list);
1944 
1945 	io_cqring_fill_event(ctx, req->user_data, -ETIME);
1946 	io_commit_cqring(ctx);
1947 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
1948 
1949 	io_cqring_ev_posted(ctx);
1950 
1951 	io_put_req(req);
1952 	return HRTIMER_NORESTART;
1953 }
1954 
1955 static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1956 {
1957 	unsigned count;
1958 	struct io_ring_ctx *ctx = req->ctx;
1959 	struct list_head *entry;
1960 	struct timespec64 ts;
1961 	unsigned span = 0;
1962 
1963 	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1964 		return -EINVAL;
1965 	if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags ||
1966 	    sqe->len != 1)
1967 		return -EINVAL;
1968 
1969 	if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
1970 		return -EFAULT;
1971 
1972 	req->flags |= REQ_F_TIMEOUT;
1973 
1974 	/*
1975 	 * sqe->off holds how many events need to occur for this
1976 	 * timeout event to be satisfied. If it isn't set, then this is
1977 	 * a pure timeout request, sequence isn't used.
1978 	 */
1979 	count = READ_ONCE(sqe->off);
1980 	if (!count) {
1981 		req->flags |= REQ_F_TIMEOUT_NOSEQ;
1982 		spin_lock_irq(&ctx->completion_lock);
1983 		entry = ctx->timeout_list.prev;
1984 		goto add;
1985 	}
1986 
1987 	req->sequence = ctx->cached_sq_head + count - 1;
1988 	/* reuse it to store the count */
1989 	req->submit.sequence = count;
1990 
1991 	/*
1992 	 * Insertion sort, ensuring the first entry in the list is always
1993 	 * the one we need first.
1994 	 */
1995 	spin_lock_irq(&ctx->completion_lock);
1996 	list_for_each_prev(entry, &ctx->timeout_list) {
1997 		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
1998 		unsigned nxt_sq_head;
1999 		long long tmp, tmp_nxt;
2000 
2001 		if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
2002 			continue;
2003 
2004 		/*
2005 		 * Since cached_sq_head + count - 1 can overflow, use type long
2006 		 * long to store it.
2007 		 */
2008 		tmp = (long long)ctx->cached_sq_head + count - 1;
2009 		nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1;
2010 		tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1;
2011 
2012 		/*
2013 		 * cached_sq_head may overflow, but it cannot overflow twice
2014 		 * while any timeout req is still valid.
2015 		 */
2016 		if (ctx->cached_sq_head < nxt_sq_head)
2017 			tmp += UINT_MAX;
2018 
2019 		if (tmp > tmp_nxt)
2020 			break;
2021 
2022 		/*
2023 		 * The reqs after the insertion point, and the inserted req itself,
2024 		 * must have their sequence adjusted, as each timeout consumes a slot.
2025 		 */
2026 		span++;
2027 		nxt->sequence++;
2028 	}
2029 	req->sequence -= span;
2030 add:
2031 	list_add(&req->list, entry);
2032 	spin_unlock_irq(&ctx->completion_lock);
2033 
2034 	hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2035 	req->timeout.timer.function = io_timeout_fn;
2036 	hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts),
2037 			HRTIMER_MODE_REL);
2038 	return 0;
2039 }
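
/*
 * A minimal userspace sketch of arming the timeout parsed above.  The sqe
 * pointer comes from the application's SQ-ring handling, and the timespec
 * must stay valid until the request completes:
 *
 *	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	memset(sqe, 0, sizeof(*sqe));
 *	sqe->opcode = IORING_OP_TIMEOUT;
 *	sqe->addr   = (unsigned long) &ts;	// read via get_timespec64()
 *	sqe->len    = 1;			// exactly one timespec
 *	sqe->off    = 8;			// or 0 for a pure timeout
 *
 * With off == 8 the request completes with -ETIME only if the timer fires
 * before eight further completions have been posted; if the completions
 * arrive first, the timeout is completed early instead.
 */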
2040 
2041 static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
2042 			struct sqe_submit *s)
2043 {
2044 	struct io_uring_sqe *sqe_copy;
2045 
2046 	if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
2047 		return 0;
2048 
2049 	sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
2050 	if (!sqe_copy)
2051 		return -EAGAIN;
2052 
2053 	spin_lock_irq(&ctx->completion_lock);
2054 	if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) {
2055 		spin_unlock_irq(&ctx->completion_lock);
2056 		kfree(sqe_copy);
2057 		return 0;
2058 	}
2059 
2060 	memcpy(&req->submit, s, sizeof(*s));
2061 	memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy));
2062 	req->submit.sqe = sqe_copy;
2063 
2064 	INIT_WORK(&req->work, io_sq_wq_submit_work);
2065 	list_add_tail(&req->list, &ctx->defer_list);
2066 	spin_unlock_irq(&ctx->completion_lock);
2067 	return -EIOCBQUEUED;
2068 }
2069 
2070 static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2071 			   const struct sqe_submit *s, bool force_nonblock)
2072 {
2073 	int ret, opcode;
2074 
2075 	req->user_data = READ_ONCE(s->sqe->user_data);
2076 
2077 	if (unlikely(s->index >= ctx->sq_entries))
2078 		return -EINVAL;
2079 
2080 	opcode = READ_ONCE(s->sqe->opcode);
2081 	switch (opcode) {
2082 	case IORING_OP_NOP:
2083 		ret = io_nop(req, req->user_data);
2084 		break;
2085 	case IORING_OP_READV:
2086 		if (unlikely(s->sqe->buf_index))
2087 			return -EINVAL;
2088 		ret = io_read(req, s, force_nonblock);
2089 		break;
2090 	case IORING_OP_WRITEV:
2091 		if (unlikely(s->sqe->buf_index))
2092 			return -EINVAL;
2093 		ret = io_write(req, s, force_nonblock);
2094 		break;
2095 	case IORING_OP_READ_FIXED:
2096 		ret = io_read(req, s, force_nonblock);
2097 		break;
2098 	case IORING_OP_WRITE_FIXED:
2099 		ret = io_write(req, s, force_nonblock);
2100 		break;
2101 	case IORING_OP_FSYNC:
2102 		ret = io_fsync(req, s->sqe, force_nonblock);
2103 		break;
2104 	case IORING_OP_POLL_ADD:
2105 		ret = io_poll_add(req, s->sqe);
2106 		break;
2107 	case IORING_OP_POLL_REMOVE:
2108 		ret = io_poll_remove(req, s->sqe);
2109 		break;
2110 	case IORING_OP_SYNC_FILE_RANGE:
2111 		ret = io_sync_file_range(req, s->sqe, force_nonblock);
2112 		break;
2113 	case IORING_OP_SENDMSG:
2114 		ret = io_sendmsg(req, s->sqe, force_nonblock);
2115 		break;
2116 	case IORING_OP_RECVMSG:
2117 		ret = io_recvmsg(req, s->sqe, force_nonblock);
2118 		break;
2119 	case IORING_OP_TIMEOUT:
2120 		ret = io_timeout(req, s->sqe);
2121 		break;
2122 	default:
2123 		ret = -EINVAL;
2124 		break;
2125 	}
2126 
2127 	if (ret)
2128 		return ret;
2129 
2130 	if (ctx->flags & IORING_SETUP_IOPOLL) {
2131 		if (req->result == -EAGAIN)
2132 			return -EAGAIN;
2133 
2134 		/* workqueue context doesn't hold uring_lock, grab it now */
2135 		if (s->needs_lock)
2136 			mutex_lock(&ctx->uring_lock);
2137 		io_iopoll_req_issued(req);
2138 		if (s->needs_lock)
2139 			mutex_unlock(&ctx->uring_lock);
2140 	}
2141 
2142 	return 0;
2143 }
2144 
2145 static struct async_list *io_async_list_from_sqe(struct io_ring_ctx *ctx,
2146 						 const struct io_uring_sqe *sqe)
2147 {
2148 	switch (sqe->opcode) {
2149 	case IORING_OP_READV:
2150 	case IORING_OP_READ_FIXED:
2151 		return &ctx->pending_async[READ];
2152 	case IORING_OP_WRITEV:
2153 	case IORING_OP_WRITE_FIXED:
2154 		return &ctx->pending_async[WRITE];
2155 	default:
2156 		return NULL;
2157 	}
2158 }
2159 
2160 static inline bool io_sqe_needs_user(const struct io_uring_sqe *sqe)
2161 {
2162 	u8 opcode = READ_ONCE(sqe->opcode);
2163 
2164 	return !(opcode == IORING_OP_READ_FIXED ||
2165 		 opcode == IORING_OP_WRITE_FIXED);
2166 }
2167 
2168 static void io_sq_wq_submit_work(struct work_struct *work)
2169 {
2170 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2171 	struct io_ring_ctx *ctx = req->ctx;
2172 	struct mm_struct *cur_mm = NULL;
2173 	struct async_list *async_list;
2174 	const struct cred *old_cred;
2175 	LIST_HEAD(req_list);
2176 	mm_segment_t old_fs;
2177 	int ret;
2178 
2179 	old_cred = override_creds(ctx->creds);
2180 	async_list = io_async_list_from_sqe(ctx, req->submit.sqe);
2181 restart:
2182 	do {
2183 		struct sqe_submit *s = &req->submit;
2184 		const struct io_uring_sqe *sqe = s->sqe;
2185 		unsigned int flags = req->flags;
2186 
2187 		/* Ensure we clear previously set non-block flag */
2188 		req->rw.ki_flags &= ~IOCB_NOWAIT;
2189 
2190 		ret = 0;
2191 		if (io_sqe_needs_user(sqe) && !cur_mm) {
2192 			if (!mmget_not_zero(ctx->sqo_mm)) {
2193 				ret = -EFAULT;
2194 			} else {
2195 				cur_mm = ctx->sqo_mm;
2196 				use_mm(cur_mm);
2197 				old_fs = get_fs();
2198 				set_fs(USER_DS);
2199 			}
2200 		}
2201 
2202 		if (!ret) {
2203 			s->has_user = cur_mm != NULL;
2204 			s->needs_lock = true;
2205 			do {
2206 				ret = __io_submit_sqe(ctx, req, s, false);
2207 				/*
2208 				 * We can get EAGAIN for polled IO even though
2209 				 * we're forcing a sync submission from here,
2210 				 * since we can't wait for request slots on the
2211 				 * block side.
2212 				 */
2213 				if (ret != -EAGAIN)
2214 					break;
2215 				cond_resched();
2216 			} while (1);
2217 		}
2218 
2219 		/* drop submission reference */
2220 		io_put_req(req);
2221 
2222 		if (ret) {
2223 			io_cqring_add_event(ctx, sqe->user_data, ret);
2224 			io_put_req(req);
2225 		}
2226 
2227 		/* async context always uses a copy of the sqe */
2228 		kfree(sqe);
2229 
2230 		/* reqs from the defer and link lists need not decrease the async cnt */
2231 		if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
2232 			goto out;
2233 
2234 		if (!async_list)
2235 			break;
2236 		if (!list_empty(&req_list)) {
2237 			req = list_first_entry(&req_list, struct io_kiocb,
2238 						list);
2239 			list_del(&req->list);
2240 			continue;
2241 		}
2242 		if (list_empty(&async_list->list))
2243 			break;
2244 
2245 		req = NULL;
2246 		spin_lock(&async_list->lock);
2247 		if (list_empty(&async_list->list)) {
2248 			spin_unlock(&async_list->lock);
2249 			break;
2250 		}
2251 		list_splice_init(&async_list->list, &req_list);
2252 		spin_unlock(&async_list->lock);
2253 
2254 		req = list_first_entry(&req_list, struct io_kiocb, list);
2255 		list_del(&req->list);
2256 	} while (req);
2257 
2258 	/*
2259 	 * Rare case of racing with a submitter. If we find the count has
2260 	 * dropped to zero AND we have pending work items, then restart
2261 	 * the processing. This is a tiny race window.
2262 	 */
2263 	if (async_list) {
2264 		ret = atomic_dec_return(&async_list->cnt);
2265 		while (!ret && !list_empty(&async_list->list)) {
2266 			spin_lock(&async_list->lock);
2267 			atomic_inc(&async_list->cnt);
2268 			list_splice_init(&async_list->list, &req_list);
2269 			spin_unlock(&async_list->lock);
2270 
2271 			if (!list_empty(&req_list)) {
2272 				req = list_first_entry(&req_list,
2273 							struct io_kiocb, list);
2274 				list_del(&req->list);
2275 				goto restart;
2276 			}
2277 			ret = atomic_dec_return(&async_list->cnt);
2278 		}
2279 	}
2280 
2281 out:
2282 	if (cur_mm) {
2283 		set_fs(old_fs);
2284 		unuse_mm(cur_mm);
2285 		mmput(cur_mm);
2286 	}
2287 	revert_creds(old_cred);
2288 }
2289 
2290 /*
2291  * See if we can piggyback onto previously submitted work that is still
2292  * running. We currently only allow this if the new request is sequential
2293  * to the previous one we punted.
2294  */
2295 static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
2296 {
2297 	bool ret;
2298 
2299 	if (!list)
2300 		return false;
2301 	if (!(req->flags & REQ_F_SEQ_PREV))
2302 		return false;
2303 	if (!atomic_read(&list->cnt))
2304 		return false;
2305 
2306 	ret = true;
2307 	spin_lock(&list->lock);
2308 	list_add_tail(&req->list, &list->list);
2309 	/*
2310 	 * Ensure we see a simultaneous modification from io_sq_wq_submit_work()
2311 	 */
2312 	smp_mb();
2313 	if (!atomic_read(&list->cnt)) {
2314 		list_del_init(&req->list);
2315 		ret = false;
2316 	}
2317 	spin_unlock(&list->lock);
2318 	return ret;
2319 }
2320 
2321 static bool io_op_needs_file(const struct io_uring_sqe *sqe)
2322 {
2323 	int op = READ_ONCE(sqe->opcode);
2324 
2325 	switch (op) {
2326 	case IORING_OP_NOP:
2327 	case IORING_OP_POLL_REMOVE:
2328 	case IORING_OP_TIMEOUT:
2329 		return false;
2330 	default:
2331 		return true;
2332 	}
2333 }
2334 
2335 static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
2336 			   struct io_submit_state *state, struct io_kiocb *req)
2337 {
2338 	unsigned flags;
2339 	int fd;
2340 
2341 	flags = READ_ONCE(s->sqe->flags);
2342 	fd = READ_ONCE(s->sqe->fd);
2343 
2344 	if (flags & IOSQE_IO_DRAIN)
2345 		req->flags |= REQ_F_IO_DRAIN;
2346 	/*
2347 	 * All io needs to record the previous position so that, when LINK
2348 	 * and DRAIN are combined, it can be used to mark the position of
2349 	 * the first IO in the link list.
2350 	 */
2351 	req->sequence = s->sequence;
2352 
2353 	if (!io_op_needs_file(s->sqe))
2354 		return 0;
2355 
2356 	if (flags & IOSQE_FIXED_FILE) {
2357 		if (unlikely(!ctx->user_files ||
2358 		    (unsigned) fd >= ctx->nr_user_files))
2359 			return -EBADF;
2360 		req->file = ctx->user_files[fd];
2361 		req->flags |= REQ_F_FIXED_FILE;
2362 	} else {
2363 		if (s->needs_fixed_file)
2364 			return -EBADF;
2365 		req->file = io_file_get(state, fd);
2366 		if (unlikely(!req->file))
2367 			return -EBADF;
2368 	}
2369 
2370 	return 0;
2371 }
2372 
2373 static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2374 			struct sqe_submit *s)
2375 {
2376 	int ret;
2377 
2378 	ret = __io_submit_sqe(ctx, req, s, true);
2379 
2380 	/*
2381 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
2382 	 * doesn't support non-blocking read/write attempts
2383 	 */
2384 	if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
2385 	    (req->flags & REQ_F_MUST_PUNT))) {
2386 		struct io_uring_sqe *sqe_copy;
2387 
2388 		sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
2389 		if (sqe_copy) {
2390 			struct async_list *list;
2391 
2392 			s->sqe = sqe_copy;
2393 			memcpy(&req->submit, s, sizeof(*s));
2394 			list = io_async_list_from_sqe(ctx, s->sqe);
2395 			if (!io_add_to_prev_work(list, req)) {
2396 				if (list)
2397 					atomic_inc(&list->cnt);
2398 				INIT_WORK(&req->work, io_sq_wq_submit_work);
2399 				io_queue_async_work(ctx, req);
2400 			}
2401 
2402 			/*
2403 			 * Queued up for async execution, worker will release
2404 			 * submit reference when the iocb is actually submitted.
2405 			 */
2406 			return 0;
2407 		}
2408 	}
2409 
2410 	/* drop submission reference */
2411 	io_put_req(req);
2412 
2413 	/* and drop final reference, if we failed */
2414 	if (ret) {
2415 		io_cqring_add_event(ctx, req->user_data, ret);
2416 		if (req->flags & REQ_F_LINK)
2417 			req->flags |= REQ_F_FAIL_LINK;
2418 		io_put_req(req);
2419 	}
2420 
2421 	return ret;
2422 }
2423 
2424 static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2425 			struct sqe_submit *s)
2426 {
2427 	int ret;
2428 
2429 	ret = io_req_defer(ctx, req, s);
2430 	if (ret) {
2431 		if (ret != -EIOCBQUEUED) {
2432 			io_free_req(req);
2433 			io_cqring_add_event(ctx, s->sqe->user_data, ret);
2434 		}
2435 		return 0;
2436 	}
2437 
2438 	return __io_queue_sqe(ctx, req, s);
2439 }
2440 
2441 static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
2442 			      struct sqe_submit *s, struct io_kiocb *shadow)
2443 {
2444 	int ret;
2445 	int need_submit = false;
2446 
2447 	if (!shadow)
2448 		return io_queue_sqe(ctx, req, s);
2449 
2450 	/*
2451 	 * Mark the first IO in the link list as DRAIN so that all following
2452 	 * IOs enter the defer list; all earlier IO must complete before the
2453 	 * link list is run.
2454 	 */
2455 	req->flags |= REQ_F_IO_DRAIN;
2456 	ret = io_req_defer(ctx, req, s);
2457 	if (ret) {
2458 		if (ret != -EIOCBQUEUED) {
2459 			io_free_req(req);
2460 			__io_free_req(shadow);
2461 			io_cqring_add_event(ctx, s->sqe->user_data, ret);
2462 			return 0;
2463 		}
2464 	} else {
2465 		/*
2466 		 * ret == 0 means that all IOs ahead of the link io have already
2467 		 * completed, so queue the link head now.
2468 		 */
2469 		need_submit = true;
2470 	}
2471 
2472 	/* Insert shadow req to defer_list, blocking next IOs */
2473 	spin_lock_irq(&ctx->completion_lock);
2474 	list_add_tail(&shadow->list, &ctx->defer_list);
2475 	spin_unlock_irq(&ctx->completion_lock);
2476 
2477 	if (need_submit)
2478 		return __io_queue_sqe(ctx, req, s);
2479 
2480 	return 0;
2481 }
2482 
2483 #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
2484 
2485 static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
2486 			  struct io_submit_state *state, struct io_kiocb **link)
2487 {
2488 	struct io_uring_sqe *sqe_copy;
2489 	struct io_kiocb *req;
2490 	int ret;
2491 
2492 	/* enforce forwards compatibility on users */
2493 	if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
2494 		ret = -EINVAL;
2495 		goto err;
2496 	}
2497 
2498 	req = io_get_req(ctx, state);
2499 	if (unlikely(!req)) {
2500 		ret = -EAGAIN;
2501 		goto err;
2502 	}
2503 
2504 	ret = io_req_set_file(ctx, s, state, req);
2505 	if (unlikely(ret)) {
2506 err_req:
2507 		io_free_req(req);
2508 err:
2509 		io_cqring_add_event(ctx, s->sqe->user_data, ret);
2510 		return;
2511 	}
2512 
2513 	req->user_data = s->sqe->user_data;
2514 
2515 	/*
2516 	 * If we already have a head request, queue this one for async
2517 	 * submittal once the head completes. If we don't have a head but
2518 	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2519 	 * submitted sync once the chain is complete. If none of those
2520 	 * conditions are true (normal request), then just queue it.
2521 	 */
2522 	if (*link) {
2523 		struct io_kiocb *prev = *link;
2524 
2525 		sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
2526 		if (!sqe_copy) {
2527 			ret = -EAGAIN;
2528 			goto err_req;
2529 		}
2530 
2531 		s->sqe = sqe_copy;
2532 		memcpy(&req->submit, s, sizeof(*s));
2533 		list_add_tail(&req->list, &prev->link_list);
2534 	} else if (s->sqe->flags & IOSQE_IO_LINK) {
2535 		req->flags |= REQ_F_LINK;
2536 
2537 		memcpy(&req->submit, s, sizeof(*s));
2538 		INIT_LIST_HEAD(&req->link_list);
2539 		*link = req;
2540 	} else {
2541 		io_queue_sqe(ctx, req, s);
2542 	}
2543 }
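
/*
 * A minimal userspace sketch of building the chain described above: set
 * IOSQE_IO_LINK on every member except the last, and each request is only
 * started once the previous one in the chain has completed.  The sqe/sqe2
 * pointers are assumed to be two consecutive free SQ entries, and 'iov'
 * an iovec describing the data to write:
 *
 *	sqe->opcode   = IORING_OP_WRITEV;
 *	sqe->flags   |= IOSQE_IO_LINK;
 *	sqe->fd       = fd;
 *	sqe->addr     = (unsigned long) &iov;
 *	sqe->len      = 1;
 *
 *	sqe2->opcode  = IORING_OP_FSYNC;	// runs only after the write completes
 *	sqe2->fd      = fd;
 */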
2544 
2545 /*
2546  * Batched submission is done, ensure local IO is flushed out.
2547  */
2548 static void io_submit_state_end(struct io_submit_state *state)
2549 {
2550 	blk_finish_plug(&state->plug);
2551 	io_file_put(state);
2552 	if (state->free_reqs)
2553 		kmem_cache_free_bulk(req_cachep, state->free_reqs,
2554 					&state->reqs[state->cur_req]);
2555 }
2556 
2557 /*
2558  * Start submission side cache.
2559  */
2560 static void io_submit_state_start(struct io_submit_state *state,
2561 				  struct io_ring_ctx *ctx, unsigned max_ios)
2562 {
2563 	blk_start_plug(&state->plug);
2564 	state->free_reqs = 0;
2565 	state->file = NULL;
2566 	state->ios_left = max_ios;
2567 }
2568 
2569 static void io_commit_sqring(struct io_ring_ctx *ctx)
2570 {
2571 	struct io_rings *rings = ctx->rings;
2572 
2573 	if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) {
2574 		/*
2575 		 * Ensure any loads from the SQEs are done at this point,
2576 		 * since once we write the new head, the application could
2577 		 * write new data to them.
2578 		 */
2579 		smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2580 	}
2581 }
2582 
2583 /*
2584  * Fetch an sqe, if one is available. Note that s->sqe will point to memory
2585  * that is mapped by userspace. This means that care needs to be taken to
2586  * ensure that reads are stable, as we cannot rely on userspace always
2587  * being a good citizen. If members of the sqe are validated and then later
2588  * used, it's important that those reads are done through READ_ONCE() to
2589  * prevent a re-load down the line.
2590  */
2591 static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
2592 {
2593 	struct io_rings *rings = ctx->rings;
2594 	u32 *sq_array = ctx->sq_array;
2595 	unsigned head;
2596 
2597 	/*
2598 	 * The cached sq head (or cq tail) serves two purposes:
2599 	 *
2600 	 * 1) allows us to batch the cost of updating the user visible
2601 	 *    head.
2602 	 * 2) allows the kernel side to track the head on its own, even
2603 	 *    though the application is the one updating it.
2604 	 */
2605 	head = ctx->cached_sq_head;
2606 	/* make sure SQ entry isn't read before tail */
2607 	if (head == smp_load_acquire(&rings->sq.tail))
2608 		return false;
2609 
2610 	head = READ_ONCE(sq_array[head & ctx->sq_mask]);
2611 	if (head < ctx->sq_entries) {
2612 		s->index = head;
2613 		s->sqe = &ctx->sq_sqes[head];
2614 		s->sequence = ctx->cached_sq_head;
2615 		ctx->cached_sq_head++;
2616 		return true;
2617 	}
2618 
2619 	/* drop invalid entries */
2620 	ctx->cached_sq_head++;
2621 	ctx->cached_sq_dropped++;
2622 	WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
2623 	return false;
2624 }
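
/*
 * Concretely, the hazard the comment above io_get_sqring() warns about is a
 * double fetch.  A hypothetical sketch (MAX_LEN and use() are placeholders):
 *
 *	if (sqe->len > MAX_LEN)		// first load passes the check
 *		return -EINVAL;
 *	use(sqe->len);			// second load: userspace may have
 *					// changed the value in between
 *
 * Loading once through READ_ONCE() and only using the local copy avoids
 * the re-load:
 *
 *	len = READ_ONCE(sqe->len);
 *	if (len > MAX_LEN)
 *		return -EINVAL;
 *	use(len);
 */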
2625 
2626 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
2627 			  bool has_user, bool mm_fault)
2628 {
2629 	struct io_submit_state state, *statep = NULL;
2630 	struct io_kiocb *link = NULL;
2631 	struct io_kiocb *shadow_req = NULL;
2632 	bool prev_was_link = false;
2633 	int i, submitted = 0;
2634 
2635 	if (nr > IO_PLUG_THRESHOLD) {
2636 		io_submit_state_start(&state, ctx, nr);
2637 		statep = &state;
2638 	}
2639 
2640 	for (i = 0; i < nr; i++) {
2641 		struct sqe_submit s;
2642 
2643 		if (!io_get_sqring(ctx, &s))
2644 			break;
2645 
2646 		/*
2647 		 * If previous wasn't linked and we have a linked command,
2648 		 * that's the end of the chain. Submit the previous link.
2649 		 */
2650 		if (!prev_was_link && link) {
2651 			io_queue_link_head(ctx, link, &link->submit, shadow_req);
2652 			link = NULL;
2653 			shadow_req = NULL;
2654 		}
2655 		prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
2656 
2657 		if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
2658 			if (!shadow_req) {
2659 				shadow_req = io_get_req(ctx, NULL);
2660 				if (unlikely(!shadow_req))
2661 					goto out;
2662 				shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
2663 				refcount_dec(&shadow_req->refs);
2664 			}
2665 			shadow_req->sequence = s.sequence;
2666 		}
2667 
2668 out:
2669 		if (unlikely(mm_fault)) {
2670 			io_cqring_add_event(ctx, s.sqe->user_data,
2671 						-EFAULT);
2672 		} else {
2673 			s.has_user = has_user;
2674 			s.needs_lock = true;
2675 			s.needs_fixed_file = true;
2676 			io_submit_sqe(ctx, &s, statep, &link);
2677 			submitted++;
2678 		}
2679 	}
2680 
2681 	if (link)
2682 		io_queue_link_head(ctx, link, &link->submit, shadow_req);
2683 	if (statep)
2684 		io_submit_state_end(&state);
2685 
2686 	return submitted;
2687 }
2688 
2689 static int io_sq_thread(void *data)
2690 {
2691 	struct io_ring_ctx *ctx = data;
2692 	struct mm_struct *cur_mm = NULL;
2693 	const struct cred *old_cred;
2694 	mm_segment_t old_fs;
2695 	DEFINE_WAIT(wait);
2696 	unsigned inflight;
2697 	unsigned long timeout;
2698 
2699 	complete(&ctx->sqo_thread_started);
2700 
2701 	old_fs = get_fs();
2702 	set_fs(USER_DS);
2703 	old_cred = override_creds(ctx->creds);
2704 
2705 	timeout = inflight = 0;
2706 	while (!kthread_should_park()) {
2707 		bool mm_fault = false;
2708 		unsigned int to_submit;
2709 
2710 		if (inflight) {
2711 			unsigned nr_events = 0;
2712 
2713 			if (ctx->flags & IORING_SETUP_IOPOLL) {
2714 				/*
2715 				 * inflight is the count of the maximum possible
2716 				 * entries we submitted, but it can be smaller
2717 				 * if we dropped some of them. If we don't have
2718 				 * poll entries available, then we know that we
2719 				 * have nothing left to poll for. Reset the
2720 				 * inflight count to zero in that case.
2721 				 */
2722 				mutex_lock(&ctx->uring_lock);
2723 				if (!list_empty(&ctx->poll_list))
2724 					__io_iopoll_check(ctx, &nr_events, 0);
2725 				else
2726 					inflight = 0;
2727 				mutex_unlock(&ctx->uring_lock);
2728 			} else {
2729 				/*
2730 				 * Normal IO, just pretend everything completed.
2731 				 * We don't have to poll completions for that.
2732 				 */
2733 				nr_events = inflight;
2734 			}
2735 
2736 			inflight -= nr_events;
2737 			if (!inflight)
2738 				timeout = jiffies + ctx->sq_thread_idle;
2739 		}
2740 
2741 		to_submit = io_sqring_entries(ctx);
2742 		if (!to_submit) {
2743 			/*
2744 			 * We're polling. If we're within the defined idle
2745 			 * period, then let us spin without work before going
2746 			 * to sleep.
2747 			 */
2748 			if (inflight || !time_after(jiffies, timeout)) {
2749 				cond_resched();
2750 				continue;
2751 			}
2752 
2753 			/*
2754 			 * Drop cur_mm before scheduling, we can't hold it for
2755 			 * long periods (or over schedule()). Do this before
2756 			 * adding ourselves to the waitqueue, as the unuse/drop
2757 			 * may sleep.
2758 			 */
2759 			if (cur_mm) {
2760 				unuse_mm(cur_mm);
2761 				mmput(cur_mm);
2762 				cur_mm = NULL;
2763 			}
2764 
2765 			prepare_to_wait(&ctx->sqo_wait, &wait,
2766 						TASK_INTERRUPTIBLE);
2767 
2768 			/* Tell userspace we may need a wakeup call */
2769 			ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
2770 			/* make sure to read SQ tail after writing flags */
2771 			smp_mb();
2772 
2773 			to_submit = io_sqring_entries(ctx);
2774 			if (!to_submit) {
2775 				if (kthread_should_park()) {
2776 					finish_wait(&ctx->sqo_wait, &wait);
2777 					break;
2778 				}
2779 				if (signal_pending(current))
2780 					flush_signals(current);
2781 				schedule();
2782 				finish_wait(&ctx->sqo_wait, &wait);
2783 
2784 				ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
2785 				continue;
2786 			}
2787 			finish_wait(&ctx->sqo_wait, &wait);
2788 
2789 			ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
2790 		}
2791 
2792 		/* Unless all new commands are FIXED regions, grab mm */
2793 		if (!cur_mm) {
2794 			mm_fault = !mmget_not_zero(ctx->sqo_mm);
2795 			if (!mm_fault) {
2796 				use_mm(ctx->sqo_mm);
2797 				cur_mm = ctx->sqo_mm;
2798 			}
2799 		}
2800 
2801 		to_submit = min(to_submit, ctx->sq_entries);
2802 		inflight += io_submit_sqes(ctx, to_submit, cur_mm != NULL,
2803 					   mm_fault);
2804 
2805 		/* Commit SQ ring head once we've consumed all SQEs */
2806 		io_commit_sqring(ctx);
2807 	}
2808 
2809 	set_fs(old_fs);
2810 	if (cur_mm) {
2811 		unuse_mm(cur_mm);
2812 		mmput(cur_mm);
2813 	}
2814 	revert_creds(old_cred);
2815 
2816 	kthread_parkme();
2817 
2818 	return 0;
2819 }
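
/*
 * The userspace half of the wakeup protocol above is small.  A sketch,
 * assuming 'sq_flags' points at the mapped SQ ring flags word and 'ring_fd'
 * is the io_uring fd:
 *
 *	if (*sq_flags & IORING_SQ_NEED_WAKEUP)
 *		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *			IORING_ENTER_SQ_WAKEUP, NULL, 0);
 *
 * While the flag is clear, submissions are picked up by this thread without
 * any syscall at all.
 */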
2820 
2821 static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
2822 {
2823 	struct io_submit_state state, *statep = NULL;
2824 	struct io_kiocb *link = NULL;
2825 	struct io_kiocb *shadow_req = NULL;
2826 	bool prev_was_link = false;
2827 	int i, submit = 0;
2828 
2829 	if (to_submit > IO_PLUG_THRESHOLD) {
2830 		io_submit_state_start(&state, ctx, to_submit);
2831 		statep = &state;
2832 	}
2833 
2834 	for (i = 0; i < to_submit; i++) {
2835 		struct sqe_submit s;
2836 
2837 		if (!io_get_sqring(ctx, &s))
2838 			break;
2839 
2840 		/*
2841 		 * If previous wasn't linked and we have a linked command,
2842 		 * that's the end of the chain. Submit the previous link.
2843 		 */
2844 		if (!prev_was_link && link) {
2845 			io_queue_link_head(ctx, link, &link->submit, shadow_req);
2846 			link = NULL;
2847 			shadow_req = NULL;
2848 		}
2849 		prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
2850 
2851 		if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
2852 			if (!shadow_req) {
2853 				shadow_req = io_get_req(ctx, NULL);
2854 				if (unlikely(!shadow_req))
2855 					goto out;
2856 				shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
2857 				refcount_dec(&shadow_req->refs);
2858 			}
2859 			shadow_req->sequence = s.sequence;
2860 		}
2861 
2862 out:
2863 		s.has_user = true;
2864 		s.needs_lock = false;
2865 		s.needs_fixed_file = false;
2866 		submit++;
2867 		io_submit_sqe(ctx, &s, statep, &link);
2868 	}
2869 
2870 	if (link)
2871 		io_queue_link_head(ctx, link, &link->submit, shadow_req);
2872 	if (statep)
2873 		io_submit_state_end(statep);
2874 
2875 	io_commit_sqring(ctx);
2876 
2877 	return submit;
2878 }
2879 
2880 struct io_wait_queue {
2881 	struct wait_queue_entry wq;
2882 	struct io_ring_ctx *ctx;
2883 	unsigned to_wait;
2884 	unsigned nr_timeouts;
2885 };
2886 
2887 static inline bool io_should_wake(struct io_wait_queue *iowq)
2888 {
2889 	struct io_ring_ctx *ctx = iowq->ctx;
2890 
2891 	/*
2892 	 * Wake up if we have enough events, or if a timeout occurred since we
2893 	 * started waiting. For timeouts, we always want to return to userspace,
2894 	 * regardless of event count.
2895 	 */
2896 	return io_cqring_events(ctx->rings) >= iowq->to_wait ||
2897 			atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
2898 }
2899 
2900 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
2901 			    int wake_flags, void *key)
2902 {
2903 	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
2904 							wq);
2905 
2906 	if (!io_should_wake(iowq))
2907 		return -1;
2908 
2909 	return autoremove_wake_function(curr, mode, wake_flags, key);
2910 }
2911 
2912 /*
2913  * Wait until events become available, if we don't already have some. The
2914  * application must reap them itself, as they reside on the shared cq ring.
2915  */
2916 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
2917 			  const sigset_t __user *sig, size_t sigsz)
2918 {
2919 	struct io_wait_queue iowq = {
2920 		.wq = {
2921 			.private	= current,
2922 			.func		= io_wake_function,
2923 			.entry		= LIST_HEAD_INIT(iowq.wq.entry),
2924 		},
2925 		.ctx		= ctx,
2926 		.to_wait	= min_events,
2927 	};
2928 	struct io_rings *rings = ctx->rings;
2929 	int ret;
2930 
2931 	if (io_cqring_events(rings) >= min_events)
2932 		return 0;
2933 
2934 	if (sig) {
2935 #ifdef CONFIG_COMPAT
2936 		if (in_compat_syscall())
2937 			ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
2938 						      sigsz);
2939 		else
2940 #endif
2941 			ret = set_user_sigmask(sig, sigsz);
2942 
2943 		if (ret)
2944 			return ret;
2945 	}
2946 
2947 	ret = 0;
2948 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
2949 	do {
2950 		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
2951 						TASK_INTERRUPTIBLE);
2952 		if (io_should_wake(&iowq))
2953 			break;
2954 		schedule();
2955 		if (signal_pending(current)) {
2956 			ret = -ERESTARTSYS;
2957 			break;
2958 		}
2959 	} while (1);
2960 	finish_wait(&ctx->wait, &iowq.wq);
2961 
2962 	restore_saved_sigmask_unless(ret == -ERESTARTSYS);
2963 	if (ret == -ERESTARTSYS)
2964 		ret = -EINTR;
2965 
2966 	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
2967 }
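
/*
 * Reaping, as the comment above io_cqring_wait() notes, is entirely the
 * application's job.  A sketch, assuming cq_head/cq_tail/cq_mask/cqes point
 * into the mmap'ed CQ ring and handle() is the application's own callback:
 *
 *	unsigned head = *cq_head;
 *
 *	while (head != __atomic_load_n(cq_tail, __ATOMIC_ACQUIRE)) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *
 *		handle(cqe->user_data, cqe->res);
 *		head++;
 *	}
 *	__atomic_store_n(cq_head, head, __ATOMIC_RELEASE);
 */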
2968 
2969 static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
2970 {
2971 #if defined(CONFIG_UNIX)
2972 	if (ctx->ring_sock) {
2973 		struct sock *sock = ctx->ring_sock->sk;
2974 		struct sk_buff *skb;
2975 
2976 		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
2977 			kfree_skb(skb);
2978 	}
2979 #else
2980 	int i;
2981 
2982 	for (i = 0; i < ctx->nr_user_files; i++)
2983 		fput(ctx->user_files[i]);
2984 #endif
2985 }
2986 
2987 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
2988 {
2989 	if (!ctx->user_files)
2990 		return -ENXIO;
2991 
2992 	__io_sqe_files_unregister(ctx);
2993 	kfree(ctx->user_files);
2994 	ctx->user_files = NULL;
2995 	ctx->nr_user_files = 0;
2996 	return 0;
2997 }
2998 
2999 static void io_sq_thread_stop(struct io_ring_ctx *ctx)
3000 {
3001 	if (ctx->sqo_thread) {
3002 		wait_for_completion(&ctx->sqo_thread_started);
3003 		/*
3004 		 * The park is a bit of a work-around, without it we get
3005 		 * warning spews on shutdown with SQPOLL set and affinity
3006 		 * set to a single CPU.
3007 		 */
3008 		kthread_park(ctx->sqo_thread);
3009 		kthread_stop(ctx->sqo_thread);
3010 		ctx->sqo_thread = NULL;
3011 	}
3012 }
3013 
3014 static void io_finish_async(struct io_ring_ctx *ctx)
3015 {
3016 	int i;
3017 
3018 	io_sq_thread_stop(ctx);
3019 
3020 	for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++) {
3021 		if (ctx->sqo_wq[i]) {
3022 			destroy_workqueue(ctx->sqo_wq[i]);
3023 			ctx->sqo_wq[i] = NULL;
3024 		}
3025 	}
3026 }
3027 
3028 #if defined(CONFIG_UNIX)
3029 static void io_destruct_skb(struct sk_buff *skb)
3030 {
3031 	struct io_ring_ctx *ctx = skb->sk->sk_user_data;
3032 	int i;
3033 
3034 	for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++)
3035 		if (ctx->sqo_wq[i])
3036 			flush_workqueue(ctx->sqo_wq[i]);
3037 
3038 	unix_destruct_scm(skb);
3039 }
3040 
3041 /*
3042  * Ensure the UNIX gc is aware of our file set, so we are certain that
3043  * the io_uring can be safely unregistered on process exit, even if we have
3044  * loops in the file referencing.
3045  */
3046 static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
3047 {
3048 	struct sock *sk = ctx->ring_sock->sk;
3049 	struct scm_fp_list *fpl;
3050 	struct sk_buff *skb;
3051 	int i;
3052 
3053 	if (!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
3054 		unsigned long inflight = ctx->user->unix_inflight + nr;
3055 
3056 		if (inflight > task_rlimit(current, RLIMIT_NOFILE))
3057 			return -EMFILE;
3058 	}
3059 
3060 	fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
3061 	if (!fpl)
3062 		return -ENOMEM;
3063 
3064 	skb = alloc_skb(0, GFP_KERNEL);
3065 	if (!skb) {
3066 		kfree(fpl);
3067 		return -ENOMEM;
3068 	}
3069 
3070 	skb->sk = sk;
3071 	skb->destructor = io_destruct_skb;
3072 
3073 	fpl->user = get_uid(ctx->user);
3074 	for (i = 0; i < nr; i++) {
3075 		fpl->fp[i] = get_file(ctx->user_files[i + offset]);
3076 		unix_inflight(fpl->user, fpl->fp[i]);
3077 	}
3078 
3079 	fpl->max = fpl->count = nr;
3080 	UNIXCB(skb).fp = fpl;
3081 	refcount_add(skb->truesize, &sk->sk_wmem_alloc);
3082 	skb_queue_head(&sk->sk_receive_queue, skb);
3083 
3084 	for (i = 0; i < nr; i++)
3085 		fput(fpl->fp[i]);
3086 
3087 	return 0;
3088 }
3089 
3090 /*
3091  * If UNIX sockets are enabled, fd passing can cause a reference cycle which
3092  * causes regular reference counting to break down. We rely on the UNIX
3093  * garbage collection to take care of this problem for us.
3094  */
3095 static int io_sqe_files_scm(struct io_ring_ctx *ctx)
3096 {
3097 	unsigned left, total;
3098 	int ret = 0;
3099 
3100 	total = 0;
3101 	left = ctx->nr_user_files;
3102 	while (left) {
3103 		unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
3104 
3105 		ret = __io_sqe_files_scm(ctx, this_files, total);
3106 		if (ret)
3107 			break;
3108 		left -= this_files;
3109 		total += this_files;
3110 	}
3111 
3112 	if (!ret)
3113 		return 0;
3114 
3115 	while (total < ctx->nr_user_files) {
3116 		fput(ctx->user_files[total]);
3117 		total++;
3118 	}
3119 
3120 	return ret;
3121 }
3122 #else
3123 static int io_sqe_files_scm(struct io_ring_ctx *ctx)
3124 {
3125 	return 0;
3126 }
3127 #endif
3128 
3129 static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
3130 				 unsigned nr_args)
3131 {
3132 	__s32 __user *fds = (__s32 __user *) arg;
3133 	int fd, ret = 0;
3134 	unsigned i;
3135 
3136 	if (ctx->user_files)
3137 		return -EBUSY;
3138 	if (!nr_args)
3139 		return -EINVAL;
3140 	if (nr_args > IORING_MAX_FIXED_FILES)
3141 		return -EMFILE;
3142 
3143 	ctx->user_files = kcalloc(nr_args, sizeof(struct file *), GFP_KERNEL);
3144 	if (!ctx->user_files)
3145 		return -ENOMEM;
3146 
3147 	for (i = 0; i < nr_args; i++) {
3148 		ret = -EFAULT;
3149 		if (copy_from_user(&fd, &fds[i], sizeof(fd)))
3150 			break;
3151 
3152 		ctx->user_files[i] = fget(fd);
3153 
3154 		ret = -EBADF;
3155 		if (!ctx->user_files[i])
3156 			break;
3157 		/*
3158 		 * Don't allow io_uring instances to be registered. If UNIX
3159 		 * isn't enabled, then this causes a reference cycle and this
3160 		 * instance can never get freed. If UNIX is enabled we'll
3161 		 * handle it just fine, but there's still no point in allowing
3162 		 * a ring fd as it doesn't support regular read/write anyway.
3163 		 */
3164 		if (ctx->user_files[i]->f_op == &io_uring_fops) {
3165 			fput(ctx->user_files[i]);
3166 			break;
3167 		}
3168 		ctx->nr_user_files++;
3169 		ret = 0;
3170 	}
3171 
3172 	if (ret) {
3173 		for (i = 0; i < ctx->nr_user_files; i++)
3174 			fput(ctx->user_files[i]);
3175 
3176 		kfree(ctx->user_files);
3177 		ctx->user_files = NULL;
3178 		ctx->nr_user_files = 0;
3179 		return ret;
3180 	}
3181 
3182 	ret = io_sqe_files_scm(ctx);
3183 	if (ret)
3184 		io_sqe_files_unregister(ctx);
3185 
3186 	return ret;
3187 }
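
/*
 * A minimal userspace sketch of the registration parsed above, using the
 * raw register syscall with IORING_REGISTER_FILES; the array holds plain
 * file descriptors:
 *
 *	int fds[2] = { fd_a, fd_b };
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES,
 *		fds, 2);
 *
 * An sqe can then name a table slot instead of a real fd:
 *
 *	sqe->flags |= IOSQE_FIXED_FILE;
 *	sqe->fd     = 1;		// index into the registered table (fd_b)
 */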
3188 
3189 static int io_sq_offload_start(struct io_ring_ctx *ctx,
3190 			       struct io_uring_params *p)
3191 {
3192 	int ret;
3193 
3194 	init_waitqueue_head(&ctx->sqo_wait);
3195 	mmgrab(current->mm);
3196 	ctx->sqo_mm = current->mm;
3197 
3198 	if (ctx->flags & IORING_SETUP_SQPOLL) {
3199 		ret = -EPERM;
3200 		if (!capable(CAP_SYS_ADMIN))
3201 			goto err;
3202 
3203 		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
3204 		if (!ctx->sq_thread_idle)
3205 			ctx->sq_thread_idle = HZ;
3206 
3207 		if (p->flags & IORING_SETUP_SQ_AFF) {
3208 			int cpu = p->sq_thread_cpu;
3209 
3210 			ret = -EINVAL;
3211 			if (cpu >= nr_cpu_ids)
3212 				goto err;
3213 			if (!cpu_online(cpu))
3214 				goto err;
3215 
3216 			ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
3217 							ctx, cpu,
3218 							"io_uring-sq");
3219 		} else {
3220 			ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
3221 							"io_uring-sq");
3222 		}
3223 		if (IS_ERR(ctx->sqo_thread)) {
3224 			ret = PTR_ERR(ctx->sqo_thread);
3225 			ctx->sqo_thread = NULL;
3226 			goto err;
3227 		}
3228 		wake_up_process(ctx->sqo_thread);
3229 	} else if (p->flags & IORING_SETUP_SQ_AFF) {
3230 		/* Can't have SQ_AFF without SQPOLL */
3231 		ret = -EINVAL;
3232 		goto err;
3233 	}
3234 
3235 	/* Do QD, or 2 * CPUS, whatever is smallest */
3236 	ctx->sqo_wq[0] = alloc_workqueue("io_ring-wq",
3237 			WQ_UNBOUND | WQ_FREEZABLE,
3238 			min(ctx->sq_entries - 1, 2 * num_online_cpus()));
3239 	if (!ctx->sqo_wq[0]) {
3240 		ret = -ENOMEM;
3241 		goto err;
3242 	}
3243 
3244 	/*
3245 	 * This is for buffered writes, where we want to limit the parallelism
3246 	 * due to file locking in file systems. As "normal" buffered writes
3247 	 * should parallelize on writeout quite nicely, limit us to having 2
3248 	 * pending. This avoids massive contention on the inode when doing
3249 	 * buffered async writes.
3250 	 */
3251 	ctx->sqo_wq[1] = alloc_workqueue("io_ring-write-wq",
3252 						WQ_UNBOUND | WQ_FREEZABLE, 2);
3253 	if (!ctx->sqo_wq[1]) {
3254 		ret = -ENOMEM;
3255 		goto err;
3256 	}
3257 
3258 	return 0;
3259 err:
3260 	io_finish_async(ctx);
3261 	mmdrop(ctx->sqo_mm);
3262 	ctx->sqo_mm = NULL;
3263 	return ret;
3264 }
3265 
3266 static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
3267 {
3268 	atomic_long_sub(nr_pages, &user->locked_vm);
3269 }
3270 
3271 static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
3272 {
3273 	unsigned long page_limit, cur_pages, new_pages;
3274 
3275 	/* Don't allow more pages than we can safely lock */
3276 	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
3277 
3278 	do {
3279 		cur_pages = atomic_long_read(&user->locked_vm);
3280 		new_pages = cur_pages + nr_pages;
3281 		if (new_pages > page_limit)
3282 			return -ENOMEM;
3283 	} while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
3284 					new_pages) != cur_pages);
3285 
3286 	return 0;
3287 }
3288 
3289 static void io_mem_free(void *ptr)
3290 {
3291 	struct page *page;
3292 
3293 	if (!ptr)
3294 		return;
3295 
3296 	page = virt_to_head_page(ptr);
3297 	if (put_page_testzero(page))
3298 		free_compound_page(page);
3299 }
3300 
3301 static void *io_mem_alloc(size_t size)
3302 {
3303 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
3304 				__GFP_NORETRY;
3305 
3306 	return (void *) __get_free_pages(gfp_flags, get_order(size));
3307 }
3308 
3309 static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
3310 				size_t *sq_offset)
3311 {
3312 	struct io_rings *rings;
3313 	size_t off, sq_array_size;
3314 
3315 	off = struct_size(rings, cqes, cq_entries);
3316 	if (off == SIZE_MAX)
3317 		return SIZE_MAX;
3318 
3319 #ifdef CONFIG_SMP
3320 	off = ALIGN(off, SMP_CACHE_BYTES);
3321 	if (off == 0)
3322 		return SIZE_MAX;
3323 #endif
3324 
3325 	sq_array_size = array_size(sizeof(u32), sq_entries);
3326 	if (sq_array_size == SIZE_MAX)
3327 		return SIZE_MAX;
3328 
3329 	if (check_add_overflow(off, sq_array_size, &off))
3330 		return SIZE_MAX;
3331 
3332 	if (sq_offset)
3333 		*sq_offset = off;
3334 
3335 	return off;
3336 }
3337 
3338 static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
3339 {
3340 	size_t pages;
3341 
3342 	pages = (size_t)1 << get_order(
3343 		rings_size(sq_entries, cq_entries, NULL));
3344 	pages += (size_t)1 << get_order(
3345 		array_size(sizeof(struct io_uring_sqe), sq_entries));
3346 
3347 	return pages;
3348 }
3349 
3350 static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
3351 {
3352 	int i, j;
3353 
3354 	if (!ctx->user_bufs)
3355 		return -ENXIO;
3356 
3357 	for (i = 0; i < ctx->nr_user_bufs; i++) {
3358 		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3359 
3360 		for (j = 0; j < imu->nr_bvecs; j++)
3361 			put_user_page(imu->bvec[j].bv_page);
3362 
3363 		if (ctx->account_mem)
3364 			io_unaccount_mem(ctx->user, imu->nr_bvecs);
3365 		kvfree(imu->bvec);
3366 		imu->nr_bvecs = 0;
3367 	}
3368 
3369 	kfree(ctx->user_bufs);
3370 	ctx->user_bufs = NULL;
3371 	ctx->nr_user_bufs = 0;
3372 	return 0;
3373 }
3374 
3375 static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
3376 		       void __user *arg, unsigned index)
3377 {
3378 	struct iovec __user *src;
3379 
3380 #ifdef CONFIG_COMPAT
3381 	if (ctx->compat) {
3382 		struct compat_iovec __user *ciovs;
3383 		struct compat_iovec ciov;
3384 
3385 		ciovs = (struct compat_iovec __user *) arg;
3386 		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
3387 			return -EFAULT;
3388 
3389 		dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
3390 		dst->iov_len = ciov.iov_len;
3391 		return 0;
3392 	}
3393 #endif
3394 	src = (struct iovec __user *) arg;
3395 	if (copy_from_user(dst, &src[index], sizeof(*dst)))
3396 		return -EFAULT;
3397 	return 0;
3398 }
3399 
3400 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
3401 				  unsigned nr_args)
3402 {
3403 	struct vm_area_struct **vmas = NULL;
3404 	struct page **pages = NULL;
3405 	int i, j, got_pages = 0;
3406 	int ret = -EINVAL;
3407 
3408 	if (ctx->user_bufs)
3409 		return -EBUSY;
3410 	if (!nr_args || nr_args > UIO_MAXIOV)
3411 		return -EINVAL;
3412 
3413 	ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
3414 					GFP_KERNEL);
3415 	if (!ctx->user_bufs)
3416 		return -ENOMEM;
3417 
3418 	for (i = 0; i < nr_args; i++) {
3419 		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3420 		unsigned long off, start, end, ubuf;
3421 		int pret, nr_pages;
3422 		struct iovec iov;
3423 		size_t size;
3424 
3425 		ret = io_copy_iov(ctx, &iov, arg, i);
3426 		if (ret)
3427 			goto err;
3428 
3429 		/*
3430 		 * Don't impose further limits on the size and buffer
3431 		 * constraints here, we'll -EINVAL later when IO is
3432 		 * submitted if they are wrong.
3433 		 */
3434 		ret = -EFAULT;
3435 		if (!iov.iov_base || !iov.iov_len)
3436 			goto err;
3437 
3438 		/* arbitrary limit, but we need something */
3439 		if (iov.iov_len > SZ_1G)
3440 			goto err;
3441 
3442 		ubuf = (unsigned long) iov.iov_base;
3443 		end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3444 		start = ubuf >> PAGE_SHIFT;
3445 		nr_pages = end - start;
3446 
3447 		if (ctx->account_mem) {
3448 			ret = io_account_mem(ctx->user, nr_pages);
3449 			if (ret)
3450 				goto err;
3451 		}
3452 
3453 		ret = 0;
3454 		if (!pages || nr_pages > got_pages) {
3455 			kfree(vmas);
3456 			kfree(pages);
3457 			pages = kvmalloc_array(nr_pages, sizeof(struct page *),
3458 						GFP_KERNEL);
3459 			vmas = kvmalloc_array(nr_pages,
3460 					sizeof(struct vm_area_struct *),
3461 					GFP_KERNEL);
3462 			if (!pages || !vmas) {
3463 				ret = -ENOMEM;
3464 				if (ctx->account_mem)
3465 					io_unaccount_mem(ctx->user, nr_pages);
3466 				goto err;
3467 			}
3468 			got_pages = nr_pages;
3469 		}
3470 
3471 		imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
3472 						GFP_KERNEL);
3473 		ret = -ENOMEM;
3474 		if (!imu->bvec) {
3475 			if (ctx->account_mem)
3476 				io_unaccount_mem(ctx->user, nr_pages);
3477 			goto err;
3478 		}
3479 
3480 		ret = 0;
3481 		down_read(&current->mm->mmap_sem);
3482 		pret = get_user_pages(ubuf, nr_pages,
3483 				      FOLL_WRITE | FOLL_LONGTERM,
3484 				      pages, vmas);
3485 		if (pret == nr_pages) {
3486 			/* don't support file backed memory */
3487 			for (j = 0; j < nr_pages; j++) {
3488 				struct vm_area_struct *vma = vmas[j];
3489 
3490 				if (vma->vm_file &&
3491 				    !is_file_hugepages(vma->vm_file)) {
3492 					ret = -EOPNOTSUPP;
3493 					break;
3494 				}
3495 			}
3496 		} else {
3497 			ret = pret < 0 ? pret : -EFAULT;
3498 		}
3499 		up_read(&current->mm->mmap_sem);
3500 		if (ret) {
3501 			/*
3502 			 * if we did partial map, or found file backed vmas,
3503 			 * release any pages we did get
3504 			 */
3505 			if (pret > 0)
3506 				put_user_pages(pages, pret);
3507 			if (ctx->account_mem)
3508 				io_unaccount_mem(ctx->user, nr_pages);
3509 			kvfree(imu->bvec);
3510 			goto err;
3511 		}
3512 
3513 		off = ubuf & ~PAGE_MASK;
3514 		size = iov.iov_len;
3515 		for (j = 0; j < nr_pages; j++) {
3516 			size_t vec_len;
3517 
3518 			vec_len = min_t(size_t, size, PAGE_SIZE - off);
3519 			imu->bvec[j].bv_page = pages[j];
3520 			imu->bvec[j].bv_len = vec_len;
3521 			imu->bvec[j].bv_offset = off;
3522 			off = 0;
3523 			size -= vec_len;
3524 		}
3525 		/* store original address for later verification */
3526 		imu->ubuf = ubuf;
3527 		imu->len = iov.iov_len;
3528 		imu->nr_bvecs = nr_pages;
3529 
3530 		ctx->nr_user_bufs++;
3531 	}
3532 	kvfree(pages);
3533 	kvfree(vmas);
3534 	return 0;
3535 err:
3536 	kvfree(pages);
3537 	kvfree(vmas);
3538 	io_sqe_buffer_unregister(ctx);
3539 	return ret;
3540 }
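
/*
 * A minimal userspace sketch of registering the buffers pinned above and
 * issuing a fixed read against one of them; 'buf' is assumed to be an
 * anonymous (not file-backed) allocation of at least 4096 bytes:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_BUFFERS,
 *		&iov, 1);
 *
 *	sqe->opcode    = IORING_OP_READ_FIXED;
 *	sqe->fd        = fd;
 *	sqe->addr      = (unsigned long) buf;	// must lie inside the registered iovec
 *	sqe->len       = 4096;
 *	sqe->buf_index = 0;			// which registered buffer to use
 */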
3541 
3542 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
3543 {
3544 	__s32 __user *fds = arg;
3545 	int fd;
3546 
3547 	if (ctx->cq_ev_fd)
3548 		return -EBUSY;
3549 
3550 	if (copy_from_user(&fd, fds, sizeof(*fds)))
3551 		return -EFAULT;
3552 
3553 	ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
3554 	if (IS_ERR(ctx->cq_ev_fd)) {
3555 		int ret = PTR_ERR(ctx->cq_ev_fd);
3556 		ctx->cq_ev_fd = NULL;
3557 		return ret;
3558 	}
3559 
3560 	return 0;
3561 }
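
/*
 * A minimal userspace sketch of wiring up the eventfd consumed above,
 * assuming the usual single-fd argument form of the register syscall:
 *
 *	int efd = eventfd(0, 0);
 *
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_EVENTFD,
 *		&efd, 1);
 *
 * Completions posted to the CQ ring then also signal 'efd', so the ring
 * can be folded into an existing epoll/select loop.
 */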
3562 
3563 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
3564 {
3565 	if (ctx->cq_ev_fd) {
3566 		eventfd_ctx_put(ctx->cq_ev_fd);
3567 		ctx->cq_ev_fd = NULL;
3568 		return 0;
3569 	}
3570 
3571 	return -ENXIO;
3572 }
3573 
3574 static void io_ring_ctx_free(struct io_ring_ctx *ctx)
3575 {
3576 	io_finish_async(ctx);
3577 	if (ctx->sqo_mm)
3578 		mmdrop(ctx->sqo_mm);
3579 
3580 	io_iopoll_reap_events(ctx);
3581 	io_sqe_buffer_unregister(ctx);
3582 	io_sqe_files_unregister(ctx);
3583 	io_eventfd_unregister(ctx);
3584 
3585 #if defined(CONFIG_UNIX)
3586 	if (ctx->ring_sock) {
3587 		ctx->ring_sock->file = NULL; /* so that iput() is called */
3588 		sock_release(ctx->ring_sock);
3589 	}
3590 #endif
3591 
3592 	io_mem_free(ctx->rings);
3593 	io_mem_free(ctx->sq_sqes);
3594 
3595 	percpu_ref_exit(&ctx->refs);
3596 	if (ctx->account_mem)
3597 		io_unaccount_mem(ctx->user,
3598 				ring_pages(ctx->sq_entries, ctx->cq_entries));
3599 	free_uid(ctx->user);
3600 	if (ctx->creds)
3601 		put_cred(ctx->creds);
3602 	kfree(ctx);
3603 }
3604 
3605 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
3606 {
3607 	struct io_ring_ctx *ctx = file->private_data;
3608 	__poll_t mask = 0;
3609 
3610 	poll_wait(file, &ctx->cq_wait, wait);
3611 	/*
3612 	 * synchronizes with barrier from wq_has_sleeper call in
3613 	 * io_commit_cqring
3614 	 */
3615 	smp_rmb();
3616 	if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
3617 	    ctx->rings->sq_ring_entries)
3618 		mask |= EPOLLOUT | EPOLLWRNORM;
3619 	if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
3620 		mask |= EPOLLIN | EPOLLRDNORM;
3621 
3622 	return mask;
3623 }
3624 
3625 static int io_uring_fasync(int fd, struct file *file, int on)
3626 {
3627 	struct io_ring_ctx *ctx = file->private_data;
3628 
3629 	return fasync_helper(fd, file, on, &ctx->cq_fasync);
3630 }
3631 
3632 static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
3633 {
3634 	mutex_lock(&ctx->uring_lock);
3635 	percpu_ref_kill(&ctx->refs);
3636 	mutex_unlock(&ctx->uring_lock);
3637 
3638 	io_kill_timeouts(ctx);
3639 	io_poll_remove_all(ctx);
3640 	io_iopoll_reap_events(ctx);
3641 	wait_for_completion(&ctx->ctx_done);
3642 	io_ring_ctx_free(ctx);
3643 }
3644 
3645 static int io_uring_release(struct inode *inode, struct file *file)
3646 {
3647 	struct io_ring_ctx *ctx = file->private_data;
3648 
3649 	file->private_data = NULL;
3650 	io_ring_ctx_wait_and_kill(ctx);
3651 	return 0;
3652 }
3653 
3654 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3655 {
3656 	loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
3657 	unsigned long sz = vma->vm_end - vma->vm_start;
3658 	struct io_ring_ctx *ctx = file->private_data;
3659 	unsigned long pfn;
3660 	struct page *page;
3661 	void *ptr;
3662 
3663 	switch (offset) {
3664 	case IORING_OFF_SQ_RING:
3665 	case IORING_OFF_CQ_RING:
3666 		ptr = ctx->rings;
3667 		break;
3668 	case IORING_OFF_SQES:
3669 		ptr = ctx->sq_sqes;
3670 		break;
3671 	default:
3672 		return -EINVAL;
3673 	}
3674 
3675 	page = virt_to_head_page(ptr);
3676 	if (sz > page_size(page))
3677 		return -EINVAL;
3678 
3679 	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
3680 	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
3681 }
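
/*
 * Illustrative example (not part of the original file): the IORING_OFF_*
 * values above are the offsets an application passes to mmap(2) on the ring
 * fd. A minimal userspace sketch, assuming sq_ring_sz and sqes_sz were
 * computed from the counts and offsets returned in struct io_uring_params:
 *
 *	void *sq_ring = mmap(NULL, sq_ring_sz, PROT_READ | PROT_WRITE,
 *			     MAP_SHARED | MAP_POPULATE, ring_fd,
 *			     IORING_OFF_SQ_RING);
 *	void *sqes = mmap(NULL, sqes_sz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED | MAP_POPULATE, ring_fd,
 *			  IORING_OFF_SQES);
 *
 * With IORING_FEAT_SINGLE_MMAP (set in io_uring_create() below), the SQ and
 * CQ rings share one mapping, so a separate IORING_OFF_CQ_RING mmap can be
 * skipped.
 */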

SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
		u32, min_complete, u32, flags, const sigset_t __user *, sig,
		size_t, sigsz)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	int submitted = 0;
	struct fd f;

	if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
		return -EINVAL;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ret = -ENXIO;
	ctx = f.file->private_data;
	if (!percpu_ref_tryget(&ctx->refs))
		goto out_fput;

	/*
	 * For SQ polling, the thread will do all submissions and completions.
	 * Just return the requested submit count, and wake the thread if
	 * we were asked to.
	 */
	ret = 0;
	if (ctx->flags & IORING_SETUP_SQPOLL) {
		if (flags & IORING_ENTER_SQ_WAKEUP)
			wake_up(&ctx->sqo_wait);
		submitted = to_submit;
	} else if (to_submit) {
		to_submit = min(to_submit, ctx->sq_entries);

		mutex_lock(&ctx->uring_lock);
		submitted = io_ring_submit(ctx, to_submit);
		mutex_unlock(&ctx->uring_lock);

		if (submitted != to_submit)
			goto out;
	}
	if (flags & IORING_ENTER_GETEVENTS) {
		unsigned nr_events = 0;

		min_complete = min(min_complete, ctx->cq_entries);

		if (ctx->flags & IORING_SETUP_IOPOLL) {
			ret = io_iopoll_check(ctx, &nr_events, min_complete);
		} else {
			ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
		}
	}

out:
	percpu_ref_put(&ctx->refs);
out_fput:
	fdput(f);
	return submitted ? submitted : ret;
}
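
/*
 * Illustrative example (not part of the original file): without
 * IORING_SETUP_SQPOLL, the application both submits and waits through this
 * syscall. A minimal raw-syscall sketch, assuming the libc exposes
 * __NR_io_uring_enter:
 *
 *	int n = syscall(__NR_io_uring_enter, ring_fd, to_submit, 1,
 *			IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * This submits up to to_submit queued SQEs and waits for at least one
 * completion. With SQPOLL the call degenerates to an optional wakeup:
 * pass IORING_ENTER_SQ_WAKEUP when the SQ thread has set
 * IORING_SQ_NEED_WAKEUP in the SQ ring flags.
 */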

static const struct file_operations io_uring_fops = {
	.release	= io_uring_release,
	.mmap		= io_uring_mmap,
	.poll		= io_uring_poll,
	.fasync		= io_uring_fasync,
};

static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
				  struct io_uring_params *p)
{
	struct io_rings *rings;
	size_t size, sq_array_offset;

	size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
	if (size == SIZE_MAX)
		return -EOVERFLOW;

	rings = io_mem_alloc(size);
	if (!rings)
		return -ENOMEM;

	ctx->rings = rings;
	ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
	rings->sq_ring_mask = p->sq_entries - 1;
	rings->cq_ring_mask = p->cq_entries - 1;
	rings->sq_ring_entries = p->sq_entries;
	rings->cq_ring_entries = p->cq_entries;
	ctx->sq_mask = rings->sq_ring_mask;
	ctx->cq_mask = rings->cq_ring_mask;
	ctx->sq_entries = rings->sq_ring_entries;
	ctx->cq_entries = rings->cq_ring_entries;

	size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
	if (size == SIZE_MAX) {
		io_mem_free(ctx->rings);
		ctx->rings = NULL;
		return -EOVERFLOW;
	}

	ctx->sq_sqes = io_mem_alloc(size);
	if (!ctx->sq_sqes) {
		io_mem_free(ctx->rings);
		ctx->rings = NULL;
		return -ENOMEM;
	}

	return 0;
}
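
/*
 * Illustrative example (not part of the original file): because the entry
 * counts are powers of two, ring indices are reduced with the masks set up
 * above, and the SQ ring (ctx->sq_array) holds indices into the separate SQE
 * array. A userspace submission sketch, using hypothetical pointers and a
 * hypothetical helper (sq_tail, sq_ring_mask, sq_array, sqes, fill_sqe)
 * obtained from the mmap'ed rings:
 *
 *	unsigned tail = *sq_tail;
 *	unsigned idx = tail & *sq_ring_mask;
 *
 *	fill_sqe(&sqes[idx]);
 *	sq_array[idx] = idx;
 *	__atomic_store_n(sq_tail, tail + 1, __ATOMIC_RELEASE);
 *
 * The release store publishes both the SQE and the array entry before the
 * kernel can observe the new tail.
 */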

/*
 * Allocate an anonymous fd: this is what constitutes the application-visible
 * backing of an io_uring instance. The application mmaps this fd to gain
 * access to the SQ/CQ ring details. If UNIX sockets are enabled, we have to
 * tie this fd to a socket for file garbage collection purposes.
 */
static int io_uring_get_fd(struct io_ring_ctx *ctx)
{
	struct file *file;
	int ret;

#if defined(CONFIG_UNIX)
	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
				&ctx->ring_sock);
	if (ret)
		return ret;
#endif

	ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
	if (ret < 0)
		goto err;

	file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
					O_RDWR | O_CLOEXEC);
	if (IS_ERR(file)) {
		put_unused_fd(ret);
		ret = PTR_ERR(file);
		goto err;
	}

#if defined(CONFIG_UNIX)
	ctx->ring_sock->file = file;
	ctx->ring_sock->sk->sk_user_data = ctx;
#endif
	fd_install(ret, file);
	return ret;
err:
#if defined(CONFIG_UNIX)
	sock_release(ctx->ring_sock);
	ctx->ring_sock = NULL;
#endif
	return ret;
}

static int io_uring_create(unsigned entries, struct io_uring_params *p)
{
	struct user_struct *user = NULL;
	struct io_ring_ctx *ctx;
	bool account_mem;
	int ret;

	if (!entries || entries > IORING_MAX_ENTRIES)
		return -EINVAL;

	/*
	 * Use twice as many entries for the CQ ring. It's possible for the
	 * application to drive a higher depth than the size of the SQ ring,
	 * since the sqes are only used at submission time. This allows for
	 * some flexibility in overcommitting a bit.
	 */
	p->sq_entries = roundup_pow_of_two(entries);
	p->cq_entries = 2 * p->sq_entries;

	user = get_uid(current_user());
	account_mem = !capable(CAP_IPC_LOCK);

	if (account_mem) {
		ret = io_account_mem(user,
				ring_pages(p->sq_entries, p->cq_entries));
		if (ret) {
			free_uid(user);
			return ret;
		}
	}

	ctx = io_ring_ctx_alloc(p);
	if (!ctx) {
		if (account_mem)
			io_unaccount_mem(user, ring_pages(p->sq_entries,
								p->cq_entries));
		free_uid(user);
		return -ENOMEM;
	}
	ctx->compat = in_compat_syscall();
	ctx->account_mem = account_mem;
	ctx->user = user;

	ctx->creds = get_current_cred();
	if (!ctx->creds) {
		ret = -ENOMEM;
		goto err;
	}

	ret = io_allocate_scq_urings(ctx, p);
	if (ret)
		goto err;

	ret = io_sq_offload_start(ctx, p);
	if (ret)
		goto err;

	memset(&p->sq_off, 0, sizeof(p->sq_off));
	p->sq_off.head = offsetof(struct io_rings, sq.head);
	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;

	memset(&p->cq_off, 0, sizeof(p->cq_off));
	p->cq_off.head = offsetof(struct io_rings, cq.head);
	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
	p->cq_off.cqes = offsetof(struct io_rings, cqes);

	/*
	 * Install ring fd as the very last thing, so we don't risk someone
	 * having closed it before we finish setup
	 */
	ret = io_uring_get_fd(ctx);
	if (ret < 0)
		goto err;

	p->features = IORING_FEAT_SINGLE_MMAP;
	return ret;
err:
	io_ring_ctx_wait_and_kill(ctx);
	return ret;
}
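
/*
 * Illustrative example (not part of the original file): entries are rounded
 * up to a power of two, so asking for 100 entries yields sq_entries == 128
 * and cq_entries == 256. The sq_off/cq_off values published above are byte
 * offsets into the mmap'ed rings; a userspace sketch, assuming sq_ring and
 * cq_ring are the char * mappings returned for IORING_OFF_SQ_RING and
 * IORING_OFF_CQ_RING:
 *
 *	unsigned *sq_head = (unsigned *)(sq_ring + p.sq_off.head);
 *	unsigned *sq_tail = (unsigned *)(sq_ring + p.sq_off.tail);
 *	unsigned *sq_ring_mask = (unsigned *)(sq_ring + p.sq_off.ring_mask);
 *	unsigned *sq_array = (unsigned *)(sq_ring + p.sq_off.array);
 *	struct io_uring_cqe *cqes = (void *)(cq_ring + p.cq_off.cqes);
 */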

/*
 * Sets up an io_uring context, and returns the fd. The application asks for
 * a ring size; we return the actual sq/cq ring sizes (among other things) in
 * the params structure passed in.
 */
static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
{
	struct io_uring_params p;
	long ret;
	int i;

	if (copy_from_user(&p, params, sizeof(p)))
		return -EFAULT;
	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
		if (p.resv[i])
			return -EINVAL;
	}

	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
			IORING_SETUP_SQ_AFF))
		return -EINVAL;

	ret = io_uring_create(entries, &p);
	if (ret < 0)
		return ret;

	if (copy_to_user(params, &p, sizeof(p)))
		return -EFAULT;

	return ret;
}
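
/*
 * Illustrative example (not part of the original file): a minimal raw-syscall
 * invocation, assuming the libc exposes __NR_io_uring_setup. The params
 * struct must be zeroed so the reserved fields pass the check above:
 *
 *	struct io_uring_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	int ring_fd = syscall(__NR_io_uring_setup, 8, &p);
 *
 * On success, p.sq_entries, p.cq_entries and the sq_off/cq_off offsets
 * describe the rings to mmap.
 */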

SYSCALL_DEFINE2(io_uring_setup, u32, entries,
		struct io_uring_params __user *, params)
{
	return io_uring_setup(entries, params);
}

static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
			       void __user *arg, unsigned nr_args)
	__releases(ctx->uring_lock)
	__acquires(ctx->uring_lock)
{
	int ret;

	/*
	 * We're inside the ring mutex; if the ref is already dying, then
	 * someone else killed the ctx or is already going through
	 * io_uring_register().
	 */
	if (percpu_ref_is_dying(&ctx->refs))
		return -ENXIO;

	percpu_ref_kill(&ctx->refs);

	/*
	 * Drop uring mutex before waiting for references to exit. If another
	 * thread is currently inside io_uring_enter() it might need to grab
	 * the uring_lock to make progress. If we hold it here across the drain
	 * wait, then we can deadlock. It's safe to drop the mutex here, since
	 * no new references will come in after we've killed the percpu ref.
	 */
	mutex_unlock(&ctx->uring_lock);
	wait_for_completion(&ctx->ctx_done);
	mutex_lock(&ctx->uring_lock);

	switch (opcode) {
	case IORING_REGISTER_BUFFERS:
		ret = io_sqe_buffer_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_BUFFERS:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_buffer_unregister(ctx);
		break;
	case IORING_REGISTER_FILES:
		ret = io_sqe_files_register(ctx, arg, nr_args);
		break;
	case IORING_UNREGISTER_FILES:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_sqe_files_unregister(ctx);
		break;
	case IORING_REGISTER_EVENTFD:
		ret = -EINVAL;
		if (nr_args != 1)
			break;
		ret = io_eventfd_register(ctx, arg);
		break;
	case IORING_UNREGISTER_EVENTFD:
		ret = -EINVAL;
		if (arg || nr_args)
			break;
		ret = io_eventfd_unregister(ctx);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	/* bring the ctx back to life */
	reinit_completion(&ctx->ctx_done);
	percpu_ref_reinit(&ctx->refs);
	return ret;
}

SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
		void __user *, arg, unsigned int, nr_args)
{
	struct io_ring_ctx *ctx;
	long ret = -EBADF;
	struct fd f;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = -EOPNOTSUPP;
	if (f.file->f_op != &io_uring_fops)
		goto out_fput;

	ctx = f.file->private_data;

	mutex_lock(&ctx->uring_lock);
	ret = __io_uring_register(ctx, opcode, arg, nr_args);
	mutex_unlock(&ctx->uring_lock);
out_fput:
	fdput(f);
	return ret;
}
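
/*
 * Illustrative example (not part of the original file): registering
 * long-lived resources up front avoids per-request fget/fput (registered
 * files) and per-request page pinning (registered buffers). A userspace
 * sketch, assuming a ring created earlier and two open descriptors fd_a
 * and fd_b:
 *
 *	int fds[2] = { fd_a, fd_b };
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_FILES, fds, 2);
 *
 * SQEs can then reference these as fixed files with IOSQE_FIXED_FILE, using
 * the array index in place of the real file descriptor.
 */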

static int __init io_uring_init(void)
{
	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
	return 0;
}
__initcall(io_uring_init);