// SPDX-License-Identifier: GPL-2.0
/*
 * Shared application/kernel submission and completion ring pairs, for
 * supporting fast/efficient IO.
 *
 * A note on the read/write ordering memory barriers that are matched between
 * the application and kernel side.
 *
 * After the application reads the CQ ring tail, it must use an
 * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
 * before writing the tail (using smp_load_acquire to read the tail will
 * do). It also needs a smp_mb() before updating CQ head (ordering the
 * entry load(s) with the head store), pairing with an implicit barrier
 * through a control-dependency in io_get_cqring (smp_store_release to
 * store head will do). Failure to do so could lead to reading invalid
 * CQ entries.
 *
 * Likewise, the application must use an appropriate smp_wmb() before
 * writing the SQ tail (ordering SQ entry stores with the tail store),
 * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
 * to store the tail will do). And it needs a barrier ordering the SQ
 * head load before writing new SQ entries (smp_load_acquire to read
 * head will do).
 *
 * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
 * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
 * updating the SQ tail; a full memory barrier smp_mb() is needed
 * between.
 *
 * Also see the examples in the liburing library:
 *
 *	git://git.kernel.dk/liburing
 *
 * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
 * from data shared between the kernel and application. This is done both
 * for ordering purposes and to ensure that once a value is loaded from
 * data that the application could potentially modify, it remains stable.
 *
 * Copyright (C) 2018-2019 Jens Axboe
 * Copyright (c) 2018-2019 Christoph Hellwig
 */
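
/*
 * Illustrative userspace sketch (not part of this file's build): draining
 * the CQ ring with the barrier pairing described above. The names cq_head,
 * cq_tail, cq_mask, cqes and handle_cqe() are assumed liburing-style
 * application-side conveniences derived from the ring mmap, not kernel
 * symbols:
 *
 *	unsigned head = *cq_head;
 *	unsigned tail = smp_load_acquire(cq_tail); // pairs with kernel release
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *		handle_cqe(cqe);                   // application-defined
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);          // publish consumed entries
 */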
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/refcount.h>
#include <linux/uio.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <net/sock.h>
#include <net/af_unix.h>
#include <linux/anon_inodes.h>
#include <linux/sched/mm.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>
#include <linux/sizes.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/fs_struct.h>

#include <uapi/linux/io_uring.h>

#include "internal.h"

#define IORING_MAX_ENTRIES	32768
#define IORING_MAX_FIXED_FILES	1024

struct io_uring {
	u32 head ____cacheline_aligned_in_smp;
	u32 tail ____cacheline_aligned_in_smp;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls head of the sq ring and the tail of the cq ring,
	 * and the application controls tail of the sq ring and the head of the
	 * cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to
	 * invalid index stored in array
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * After a new SQ head value was read by the application this
	 * counter includes all submissions that were dropped reaching
	 * the new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	u32			sq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are not more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get number of "new events" by comparing to
	 * cached value).
	 *
	 * As completion events come in out of order this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
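
/*
 * Illustrative userspace sketch (not part of this file's build): locating
 * ring fields via struct io_sqring_offsets and performing the
 * IORING_SQ_NEED_WAKEUP check described above for IORING_SETUP_SQPOLL.
 * 'p' is the io_uring_params filled in by io_uring_setup(2), ring_fd the
 * returned fd, and the barrier helpers are assumed liburing-style
 * equivalents of the kernel primitives:
 *
 *	void *sq_ptr = mmap(NULL, p.sq_off.array + p.sq_entries * sizeof(u32),
 *			    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			    ring_fd, IORING_OFF_SQ_RING);
 *	unsigned *sq_tail = sq_ptr + p.sq_off.tail;
 *	unsigned *sq_flags = sq_ptr + p.sq_off.flags;
 *
 *	smp_store_release(sq_tail, local_tail);	// publish new SQEs
 *	smp_mb();				// order tail store vs flags load
 *	if (READ_ONCE(*sq_flags) & IORING_SQ_NEED_WAKEUP)
 *		io_uring_enter(ring_fd, 0, 0, IORING_ENTER_SQ_WAKEUP, NULL);
 */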

struct io_mapped_ubuf {
	u64		ubuf;
	size_t		len;
	struct bio_vec	*bvec;
	unsigned int	nr_bvecs;
};

struct async_list {
	spinlock_t		lock;
	atomic_t		cnt;
	struct list_head	list;

	struct file		*file;
	off_t			io_start;
	size_t			io_len;
};

struct io_ring_ctx {
	struct {
		struct percpu_ref	refs;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned int		flags;
		bool			compat;
		bool			account_mem;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array; a userspace submission sketch follows this struct
		 * definition.
		 */
		u32			*sq_array;
		unsigned		cached_sq_head;
		unsigned		sq_entries;
		unsigned		sq_mask;
		unsigned		sq_thread_idle;
		unsigned		cached_sq_dropped;
		struct io_uring_sqe	*sq_sqes;

		struct list_head	defer_list;
		struct list_head	timeout_list;
	} ____cacheline_aligned_in_smp;

	/* IO offload */
	struct workqueue_struct	*sqo_wq[2];
	struct task_struct	*sqo_thread;	/* if using sq thread polling */
	struct mm_struct	*sqo_mm;
	wait_queue_head_t	sqo_wait;
	struct completion	sqo_thread_started;

	struct {
		unsigned		cached_cq_tail;
		atomic_t		cached_cq_overflow;
		unsigned		cq_entries;
		unsigned		cq_mask;
		struct wait_queue_head	cq_wait;
		struct fasync_struct	*cq_fasync;
		struct eventfd_ctx	*cq_ev_fd;
		atomic_t		cq_timeouts;
	} ____cacheline_aligned_in_smp;

	struct io_rings	*rings;

	/*
	 * If used, fixed file set. Writers must ensure that ->refs is dead,
	 * readers must ensure that ->refs is alive as long as the file* is
	 * used. Only updated through io_uring_register(2).
	 */
	struct file		**user_files;
	unsigned		nr_user_files;

	/* if used, fixed mapped user buffers */
	unsigned		nr_user_bufs;
	struct io_mapped_ubuf	*user_bufs;

	struct user_struct	*user;

	const struct cred	*creds;

	struct completion	ctx_done;

	struct {
		struct mutex		uring_lock;
		wait_queue_head_t	wait;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t		completion_lock;
		bool			poll_multi_file;
		/*
		 * ->poll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		struct list_head	poll_list;
		struct list_head	cancel_list;
	} ____cacheline_aligned_in_smp;

	struct async_list	pending_async[2];

	struct list_head	task_list;
	spinlock_t		task_lock;
};
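
/*
 * Illustrative userspace sketch (not part of this file's build): submitting
 * one request through the sq_array indirection described above. sqes,
 * sq_array, sq_tail and sq_mask are assumed application-side pointers into
 * the SQ mmap, and fill_sqe() is an application-defined helper:
 *
 *	unsigned tail = *sq_tail;
 *	unsigned index = tail & *sq_mask;
 *	fill_sqe(&sqes[index]);			// write the sqe itself
 *	sq_array[index] = index;		// then publish its slot
 *	smp_store_release(sq_tail, tail + 1);	// pairs with io_get_sqring()
 */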

struct sqe_submit {
	const struct io_uring_sqe	*sqe;
	unsigned short			index;
	u32				sequence;
	bool				has_user;
	bool				needs_lock;
	bool				needs_fixed_file;
	u8				opcode;
};

/*
 * First field must be the file pointer in all the
 * iocb unions! See also 'struct kiocb' in <linux/fs.h>
 */
struct io_poll_iocb {
	struct file			*file;
	struct wait_queue_head		*head;
	__poll_t			events;
	bool				done;
	bool				canceled;
	struct wait_queue_entry		wait;
};

struct io_timeout {
	struct file			*file;
	struct hrtimer			timer;
};

/*
 * NOTE! Each of the iocb union members has the file pointer
 * as the first entry in their struct definition. So you can
 * access the file pointer through any of the sub-structs,
 * or directly as just 'ki_filp' in this struct.
 */
struct io_kiocb {
	union {
		struct file		*file;
		struct kiocb		rw;
		struct io_poll_iocb	poll;
		struct io_timeout	timeout;
	};

	struct sqe_submit	submit;

	struct io_ring_ctx	*ctx;
	struct list_head	list;
	struct list_head	link_list;
	unsigned int		flags;
	refcount_t		refs;
#define REQ_F_NOWAIT		1	/* must not punt to workers */
#define REQ_F_IOPOLL_COMPLETED	2	/* polled IO has completed */
#define REQ_F_FIXED_FILE	4	/* ctx owns file */
#define REQ_F_SEQ_PREV		8	/* sequential with previous */
#define REQ_F_IO_DRAIN		16	/* drain existing IO first */
#define REQ_F_IO_DRAINED	32	/* drain done */
#define REQ_F_LINK		64	/* linked sqes */
#define REQ_F_LINK_DONE		128	/* linked sqes done */
#define REQ_F_FAIL_LINK		256	/* fail rest of links */
#define REQ_F_SHADOW_DRAIN	512	/* link-drain shadow req */
#define REQ_F_TIMEOUT		1024	/* timeout request */
#define REQ_F_ISREG		2048	/* regular file */
#define REQ_F_MUST_PUNT		4096	/* must be punted even for NONBLOCK */
#define REQ_F_TIMEOUT_NOSEQ	8192	/* no timeout sequence */
#define REQ_F_CANCEL		16384	/* cancel request */
	unsigned long		fsize;
	u64			user_data;
	u32			result;
	u32			sequence;
	struct files_struct	*files;

	struct fs_struct	*fs;

	struct work_struct	work;
	struct task_struct	*work_task;
	struct list_head	task_list;
};

#define IO_PLUG_THRESHOLD		2
#define IO_IOPOLL_BATCH			8

struct io_submit_state {
	struct blk_plug		plug;

	/*
	 * io_kiocb alloc cache
	 */
	void			*reqs[IO_IOPOLL_BATCH];
	unsigned int		free_reqs;
	unsigned int		cur_req;

	/*
	 * File reference cache
	 */
	struct file		*file;
	unsigned int		fd;
	unsigned int		has_refs;
	unsigned int		used_refs;
	unsigned int		ios_left;
};

static void io_sq_wq_submit_work(struct work_struct *work);
static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
				 long res);
static void __io_free_req(struct io_kiocb *req);

static struct kmem_cache *req_cachep;

static const struct file_operations io_uring_fops;

static void io_ring_ctx_ref_free(struct percpu_ref *ref)
{
	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);

	complete(&ctx->ctx_done);
}

static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
{
	struct io_ring_ctx *ctx;
	int i;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) {
		kfree(ctx);
		return NULL;
	}

	ctx->flags = p->flags;
	init_waitqueue_head(&ctx->sqo_wait);
	init_waitqueue_head(&ctx->cq_wait);
	init_completion(&ctx->ctx_done);
	init_completion(&ctx->sqo_thread_started);
	mutex_init(&ctx->uring_lock);
	init_waitqueue_head(&ctx->wait);
	for (i = 0; i < ARRAY_SIZE(ctx->pending_async); i++) {
		spin_lock_init(&ctx->pending_async[i].lock);
		INIT_LIST_HEAD(&ctx->pending_async[i].list);
		atomic_set(&ctx->pending_async[i].cnt, 0);
	}
	spin_lock_init(&ctx->completion_lock);
	INIT_LIST_HEAD(&ctx->poll_list);
	INIT_LIST_HEAD(&ctx->cancel_list);
	INIT_LIST_HEAD(&ctx->defer_list);
	INIT_LIST_HEAD(&ctx->timeout_list);
	INIT_LIST_HEAD(&ctx->task_list);
	spin_lock_init(&ctx->task_lock);
	return ctx;
}

static void io_req_put_fs(struct io_kiocb *req)
{
	struct fs_struct *fs = req->fs;

	if (!fs)
		return;

	spin_lock(&req->fs->lock);
	if (--fs->users)
		fs = NULL;
	spin_unlock(&req->fs->lock);
	if (fs)
		free_fs_struct(fs);
	req->fs = NULL;
}

static inline bool __io_sequence_defer(struct io_ring_ctx *ctx,
				       struct io_kiocb *req)
{
	return req->sequence != ctx->cached_cq_tail + ctx->cached_sq_dropped
					+ atomic_read(&ctx->cached_cq_overflow);
}

static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
		return false;

	return __io_sequence_defer(ctx, req);
}

static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = list_first_entry_or_null(&ctx->defer_list, struct io_kiocb, list);
	if (req && !io_sequence_defer(ctx, req)) {
		list_del_init(&req->list);
		return req;
	}

	return NULL;
}

static struct io_kiocb *io_get_timeout_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = list_first_entry_or_null(&ctx->timeout_list, struct io_kiocb, list);
	if (req) {
		if (req->flags & REQ_F_TIMEOUT_NOSEQ)
			return NULL;
		if (!__io_sequence_defer(ctx, req)) {
			list_del_init(&req->list);
			return req;
		}
	}

	return NULL;
}

static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
		/* order cqe stores with ring update */
		smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);

		if (wq_has_sleeper(&ctx->cq_wait)) {
			wake_up_interruptible(&ctx->cq_wait);
			kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
		}
	}
}

static inline void io_queue_async_work(struct io_ring_ctx *ctx,
				       struct io_kiocb *req)
{
	unsigned long flags;
	int rw = 0;

	if (req->submit.sqe) {
		switch (req->submit.opcode) {
		case IORING_OP_WRITEV:
		case IORING_OP_WRITE_FIXED:
			rw = !(req->rw.ki_flags & IOCB_DIRECT);
			break;
		}
	}

	if (req->work.func == io_sq_wq_submit_work) {
		req->files = current->files;

		spin_lock_irqsave(&ctx->task_lock, flags);
		list_add(&req->task_list, &ctx->task_list);
		req->work_task = NULL;
		spin_unlock_irqrestore(&ctx->task_lock, flags);
	}

	queue_work(ctx->sqo_wq[rw], &req->work);
}

static void io_kill_timeout(struct io_kiocb *req)
{
	int ret;

	ret = hrtimer_try_to_cancel(&req->timeout.timer);
	if (ret != -1) {
		atomic_inc(&req->ctx->cq_timeouts);
		list_del(&req->list);
		io_cqring_fill_event(req->ctx, req->user_data, 0);
		if (refcount_dec_and_test(&req->refs))
			__io_free_req(req);
	}
}

static void io_kill_timeouts(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req, *tmp;

	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list)
		io_kill_timeout(req);
	spin_unlock_irq(&ctx->completion_lock);
}

static void io_commit_cqring(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	while ((req = io_get_timeout_req(ctx)) != NULL)
		io_kill_timeout(req);

	__io_commit_cqring(ctx);

	while ((req = io_get_deferred_req(ctx)) != NULL) {
		if (req->flags & REQ_F_SHADOW_DRAIN) {
			/* Just for drain, free it. */
			__io_free_req(req);
			continue;
		}
		req->flags |= REQ_F_IO_DRAINED;
		io_queue_async_work(ctx, req);
	}
}

static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned tail;

	tail = ctx->cached_cq_tail;
	/*
	 * writes to the cq entry need to come after reading head; the
	 * control dependency is enough as we're using WRITE_ONCE to
	 * fill the cq entry
	 */
	if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
		return NULL;

	ctx->cached_cq_tail++;
	return &rings->cqes[tail & ctx->cq_mask];
}

static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
				 long res)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqring(ctx);
	if (cqe) {
		WRITE_ONCE(cqe->user_data, ki_user_data);
		WRITE_ONCE(cqe->res, res);
		WRITE_ONCE(cqe->flags, 0);
	} else {
		WRITE_ONCE(ctx->rings->cq_overflow,
				atomic_inc_return(&ctx->cached_cq_overflow));
	}
}

static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
{
	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
	if (waitqueue_active(&ctx->sqo_wait))
		wake_up(&ctx->sqo_wait);
	if (ctx->cq_ev_fd)
		eventfd_signal(ctx->cq_ev_fd, 1);
}

static void io_cqring_add_event(struct io_ring_ctx *ctx, u64 user_data,
				long res)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->completion_lock, flags);
	io_cqring_fill_event(ctx, user_data, res);
	io_commit_cqring(ctx);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	io_cqring_ev_posted(ctx);
}

static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
				   struct io_submit_state *state)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct io_kiocb *req;

	if (!percpu_ref_tryget(&ctx->refs))
		return NULL;

	if (!state) {
		req = kmem_cache_alloc(req_cachep, gfp);
		if (unlikely(!req))
			goto out;
	} else if (!state->free_reqs) {
		size_t sz;
		int ret;

		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);

		/*
		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
		 * retry single alloc to be on the safe side.
		 */
		if (unlikely(ret <= 0)) {
			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
			if (!state->reqs[0])
				goto out;
			ret = 1;
		}
		state->free_reqs = ret - 1;
		state->cur_req = 1;
		req = state->reqs[0];
	} else {
		req = state->reqs[state->cur_req];
		state->free_reqs--;
		state->cur_req++;
	}

	INIT_LIST_HEAD(&req->task_list);
	req->file = NULL;
	req->ctx = ctx;
	req->flags = 0;
	/* one is dropped after submission, the other at completion */
	refcount_set(&req->refs, 2);
	req->result = 0;
	req->fs = NULL;
	return req;
out:
	percpu_ref_put(&ctx->refs);
	return NULL;
}

static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
{
	if (*nr) {
		kmem_cache_free_bulk(req_cachep, *nr, reqs);
		percpu_ref_put_many(&ctx->refs, *nr);
		*nr = 0;
	}
}

static void __io_free_req(struct io_kiocb *req)
{
	io_req_put_fs(req);
	if (req->file && !(req->flags & REQ_F_FIXED_FILE))
		fput(req->file);
	percpu_ref_put(&req->ctx->refs);
	kmem_cache_free(req_cachep, req);
}

static void io_req_link_next(struct io_kiocb *req)
{
	struct io_kiocb *nxt;

	/*
	 * The list should never be empty when we are called here, but it
	 * could potentially happen if the chain is messed up; check to be
	 * on the safe side.
	 */
	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
	if (nxt) {
		list_del(&nxt->list);
		if (!list_empty(&req->link_list)) {
			INIT_LIST_HEAD(&nxt->link_list);
			list_splice(&req->link_list, &nxt->link_list);
			nxt->flags |= REQ_F_LINK;
		}

		nxt->flags |= REQ_F_LINK_DONE;
		INIT_WORK(&nxt->work, io_sq_wq_submit_work);
		io_queue_async_work(req->ctx, nxt);
	}
}

/*
 * Called if REQ_F_LINK is set, and we fail the head request
 */
static void io_fail_links(struct io_kiocb *req)
{
	struct io_kiocb *link;

	while (!list_empty(&req->link_list)) {
		link = list_first_entry(&req->link_list, struct io_kiocb, list);
		list_del(&link->list);

		io_cqring_add_event(req->ctx, link->user_data, -ECANCELED);
		__io_free_req(link);
	}
}

static void io_free_req(struct io_kiocb *req)
{
	/*
	 * If LINK is set, we have dependent requests in this chain. If we
	 * didn't fail this request, queue the first one up, moving any other
	 * dependencies to the next request. In case of failure, fail the rest
	 * of the chain.
	 */
	if (req->flags & REQ_F_LINK) {
		if (req->flags & REQ_F_FAIL_LINK)
			io_fail_links(req);
		else
			io_req_link_next(req);
	}

	__io_free_req(req);
}

static void io_put_req(struct io_kiocb *req)
{
	if (refcount_dec_and_test(&req->refs))
		io_free_req(req);
}

static unsigned io_cqring_events(struct io_rings *rings)
{
	/* See comment at the top of this file */
	smp_rmb();
	return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

/*
 * Find and free completed poll iocbs
 */
static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
			       struct list_head *done)
{
	void *reqs[IO_IOPOLL_BATCH];
	struct io_kiocb *req;
	int to_free;

	to_free = 0;
	while (!list_empty(done)) {
		req = list_first_entry(done, struct io_kiocb, list);
		list_del(&req->list);

		io_cqring_fill_event(ctx, req->user_data, req->result);
		(*nr_events)++;

		if (refcount_dec_and_test(&req->refs)) {
			/*
			 * If we're not using fixed files, we have to pair the
			 * completion part with the file put. Use regular
			 * completions for those, only batch free for fixed
			 * file and non-linked commands.
			 */
			if ((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) ==
			    REQ_F_FIXED_FILE) {
				reqs[to_free++] = req;
				if (to_free == ARRAY_SIZE(reqs))
					io_free_req_many(ctx, reqs, &to_free);
			} else {
				io_free_req(req);
			}
		}
	}

	io_commit_cqring(ctx);
	io_free_req_many(ctx, reqs, &to_free);
}

static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
			long min)
{
	struct io_kiocb *req, *tmp;
	LIST_HEAD(done);
	bool spin;
	int ret;

	/*
	 * Only spin for completions if we don't have multiple devices hanging
	 * off our complete list, and we're under the requested amount.
	 */
	spin = !ctx->poll_multi_file && *nr_events < min;

	ret = 0;
	list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) {
		struct kiocb *kiocb = &req->rw;

		/*
		 * Move completed entries to our local list. If we find a
		 * request that requires polling, break out and complete
		 * the done list first, if we have entries there.
		 */
		if (req->flags & REQ_F_IOPOLL_COMPLETED) {
			list_move_tail(&req->list, &done);
			continue;
		}
		if (!list_empty(&done))
			break;

		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
		if (ret < 0)
			break;

		if (ret && spin)
			spin = false;
		ret = 0;
	}

	if (!list_empty(&done))
		io_iopoll_complete(ctx, nr_events, &done);

	return ret;
}

/*
 * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
 * non-spinning poll check - we'll still enter the driver poll loop, but only
 * as a non-spinning completion check.
 */
static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
				long min)
{
	while (!list_empty(&ctx->poll_list) && !need_resched()) {
		int ret;

		ret = io_do_iopoll(ctx, nr_events, min);
		if (ret < 0)
			return ret;
		if (!min || *nr_events >= min)
			return 0;
	}

	return 1;
}

/*
 * We can't just wait for polled events to come to us, we have to actively
 * find and complete them.
 */
static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
{
	if (!(ctx->flags & IORING_SETUP_IOPOLL))
		return;

	mutex_lock(&ctx->uring_lock);
	while (!list_empty(&ctx->poll_list)) {
		unsigned int nr_events = 0;

		io_iopoll_getevents(ctx, &nr_events, 1);

		/*
		 * Ensure we allow local-to-the-cpu processing to take place,
		 * in this case we need to ensure that we reap all events.
		 */
		cond_resched();
	}
	mutex_unlock(&ctx->uring_lock);
}

static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
			   long min)
{
	int iters = 0, ret = 0;

	/*
	 * We disallow the app entering submit/complete with polling, but we
	 * still need to lock the ring to prevent racing with polled issue
	 * that got punted to a workqueue.
	 */
	mutex_lock(&ctx->uring_lock);
	do {
		int tmin = 0;

		/*
		 * Don't enter poll loop if we already have events pending.
		 * If we do, we can potentially be spinning for commands that
		 * already triggered a CQE (eg in error).
		 */
		if (io_cqring_events(ctx->rings))
			break;

		/*
		 * If a submit got punted to a workqueue, we can have the
		 * application entering polling for a command before it gets
		 * issued. That app will hold the uring_lock for the duration
		 * of the poll right here, so we need to take a breather every
		 * now and then to ensure that the issue has a chance to add
		 * the poll to the issued list. Otherwise we can spin here
		 * forever, while the workqueue is stuck trying to acquire the
		 * very same mutex.
		 */
		if (!(++iters & 7)) {
			mutex_unlock(&ctx->uring_lock);
			mutex_lock(&ctx->uring_lock);
		}

		if (*nr_events < min)
			tmin = min - *nr_events;

		ret = io_iopoll_getevents(ctx, nr_events, tmin);
		if (ret <= 0)
			break;
		ret = 0;
	} while (min && !*nr_events && !need_resched());

	mutex_unlock(&ctx->uring_lock);
	return ret;
}

static void kiocb_end_write(struct io_kiocb *req)
{
	/*
	 * Tell lockdep we inherited freeze protection from submission
	 * thread.
	 */
	if (req->flags & REQ_F_ISREG) {
		struct inode *inode = file_inode(req->file);

		__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
	}
	file_end_write(req->file);
}

static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if ((req->flags & REQ_F_LINK) && res != req->result)
		req->flags |= REQ_F_FAIL_LINK;
	io_cqring_add_event(req->ctx, req->user_data, res);
	io_put_req(req);
}

static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
{
	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw);

	if (kiocb->ki_flags & IOCB_WRITE)
		kiocb_end_write(req);

	if ((req->flags & REQ_F_LINK) && res != req->result)
		req->flags |= REQ_F_FAIL_LINK;
	req->result = res;
	if (res != -EAGAIN)
		req->flags |= REQ_F_IOPOLL_COMPLETED;
}

/*
 * After the iocb has been issued, it's safe to be found on the poll list.
 * Adding the kiocb to the list AFTER submission ensures that we don't
 * find it from an io_iopoll_getevents() thread before the issuer is done
 * accessing the kiocb cookie.
 */
static void io_iopoll_req_issued(struct io_kiocb *req)
{
	struct io_ring_ctx *ctx = req->ctx;

	/*
	 * Track whether we have multiple files in our lists. This will impact
	 * how we do polling eventually, not spinning if we're on potentially
	 * different devices.
	 */
	if (list_empty(&ctx->poll_list)) {
		ctx->poll_multi_file = false;
	} else if (!ctx->poll_multi_file) {
		struct io_kiocb *list_req;

		list_req = list_first_entry(&ctx->poll_list, struct io_kiocb,
						list);
		if (list_req->rw.ki_filp != req->rw.ki_filp)
			ctx->poll_multi_file = true;
	}

	/*
	 * For fast devices, IO may have already completed. If it has, add
	 * it to the front so we find it first.
	 */
	if (req->flags & REQ_F_IOPOLL_COMPLETED)
		list_add(&req->list, &ctx->poll_list);
	else
		list_add_tail(&req->list, &ctx->poll_list);
}

static void io_file_put(struct io_submit_state *state)
{
	if (state->file) {
		int diff = state->has_refs - state->used_refs;

		if (diff)
			fput_many(state->file, diff);
		state->file = NULL;
	}
}

/*
 * Get as many references to a file as we have IOs left in this submission,
 * assuming most submissions are for one file, or at least that each file
 * has more than one submission.
 */
static struct file *io_file_get(struct io_submit_state *state, int fd)
{
	if (!state)
		return fget(fd);

	if (state->file) {
		if (state->fd == fd) {
			state->used_refs++;
			state->ios_left--;
			return state->file;
		}
		io_file_put(state);
	}
	state->file = fget_many(fd, state->ios_left);
	if (!state->file)
		return NULL;

	state->fd = fd;
	state->has_refs = state->ios_left;
	state->used_refs = 1;
	state->ios_left--;
	return state->file;
}

/*
 * If we tracked the file through the SCM inflight mechanism, we could support
 * any file. For now, just ensure that anything potentially problematic is done
 * inline.
 */
static bool io_file_supports_async(struct file *file)
{
	umode_t mode = file_inode(file)->i_mode;

	if (S_ISBLK(mode) || S_ISCHR(mode))
		return true;
	if (S_ISREG(mode) && file->f_op != &io_uring_fops)
		return true;

	return false;
}

static int io_prep_rw(struct io_kiocb *req, const struct sqe_submit *s,
		      bool force_nonblock)
{
	const struct io_uring_sqe *sqe = s->sqe;
	struct io_ring_ctx *ctx = req->ctx;
	struct kiocb *kiocb = &req->rw;
	unsigned ioprio;
	int ret;

	if (!req->file)
		return -EBADF;

	if (S_ISREG(file_inode(req->file)->i_mode))
		req->flags |= REQ_F_ISREG;

	if (force_nonblock)
		req->fsize = rlimit(RLIMIT_FSIZE);

	/*
	 * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so
	 * we know to async punt it even if it was opened O_NONBLOCK
	 */
	if (force_nonblock && !io_file_supports_async(req->file)) {
		req->flags |= REQ_F_MUST_PUNT;
		return -EAGAIN;
	}

	kiocb->ki_pos = READ_ONCE(sqe->off);
	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));

	ioprio = READ_ONCE(sqe->ioprio);
	if (ioprio) {
		ret = ioprio_check_cap(ioprio);
		if (ret)
			return ret;

		kiocb->ki_ioprio = ioprio;
	} else
		kiocb->ki_ioprio = get_current_ioprio();

	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
	if (unlikely(ret))
		return ret;

	/* don't allow async punt if RWF_NOWAIT was requested */
	if ((kiocb->ki_flags & IOCB_NOWAIT) ||
	    (req->file->f_flags & O_NONBLOCK))
		req->flags |= REQ_F_NOWAIT;

	if (force_nonblock)
		kiocb->ki_flags |= IOCB_NOWAIT;

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
		    !kiocb->ki_filp->f_op->iopoll)
			return -EOPNOTSUPP;

		kiocb->ki_flags |= IOCB_HIPRI;
		kiocb->ki_complete = io_complete_rw_iopoll;
		req->result = 0;
	} else {
		if (kiocb->ki_flags & IOCB_HIPRI)
			return -EINVAL;
		kiocb->ki_complete = io_complete_rw;
	}
	return 0;
}

static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
{
	switch (ret) {
	case -EIOCBQUEUED:
		break;
	case -ERESTARTSYS:
	case -ERESTARTNOINTR:
	case -ERESTARTNOHAND:
	case -ERESTART_RESTARTBLOCK:
		/*
		 * We can't just restart the syscall, since previously
		 * submitted sqes may already be in progress. Just fail this
		 * IO with EINTR.
		 */
		ret = -EINTR;
		/* fall through */
	default:
		kiocb->ki_complete(kiocb, ret, 0);
	}
}

static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
			   const struct io_uring_sqe *sqe,
			   struct iov_iter *iter)
{
	size_t len = READ_ONCE(sqe->len);
	struct io_mapped_ubuf *imu;
	unsigned index, buf_index;
	size_t offset;
	u64 buf_addr;

	/* attempt to use fixed buffers without having provided iovecs */
	if (unlikely(!ctx->user_bufs))
		return -EFAULT;

	buf_index = READ_ONCE(sqe->buf_index);
	if (unlikely(buf_index >= ctx->nr_user_bufs))
		return -EFAULT;

	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
	imu = &ctx->user_bufs[index];
	buf_addr = READ_ONCE(sqe->addr);

	/* overflow */
	if (buf_addr + len < buf_addr)
		return -EFAULT;
	/* not inside the mapped region */
	if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
		return -EFAULT;

	/*
	 * May not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset < bvec->bv_len) {
			iov_iter_advance(iter, offset);
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return len;
}

static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
			       struct io_kiocb *req, struct iovec **iovec,
			       struct iov_iter *iter)
{
	const struct io_uring_sqe *sqe = req->submit.sqe;
	void __user *buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
	size_t sqe_len = READ_ONCE(sqe->len);
	u8 opcode;

	opcode = req->submit.opcode;
	if (opcode == IORING_OP_READ_FIXED ||
	    opcode == IORING_OP_WRITE_FIXED) {
		ssize_t ret = io_import_fixed(ctx, rw, sqe, iter);
		*iovec = NULL;
		return ret;
	}

	if (!req->submit.has_user)
		return -EFAULT;

#ifdef CONFIG_COMPAT
	if (ctx->compat)
		return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
						iovec, iter);
#endif

	return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
}

static inline bool io_should_merge(struct async_list *al, struct kiocb *kiocb)
{
	if (al->file == kiocb->ki_filp) {
		off_t start, end;

		/*
		 * Allow merging if we're anywhere in the range of the same
		 * page. Generally this happens for sub-page reads or writes,
		 * and it's beneficial to allow the first worker to bring the
		 * page in and the piggy backed work can then work on the
		 * cached page.
		 */
		start = al->io_start & PAGE_MASK;
		end = (al->io_start + al->io_len + PAGE_SIZE - 1) & PAGE_MASK;
		if (kiocb->ki_pos >= start && kiocb->ki_pos <= end)
			return true;
	}

	al->file = NULL;
	return false;
}
/*
 * Make a note of the last file/offset/direction we punted to async
 * context. We'll use this information to see if we can piggy back a
 * sequential request onto the previous one, if it still hasn't been
 * completed by the async worker.
 */
static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
{
	struct async_list *async_list = &req->ctx->pending_async[rw];
	struct kiocb *kiocb = &req->rw;
	struct file *filp = kiocb->ki_filp;

	if (io_should_merge(async_list, kiocb)) {
		unsigned long max_bytes;

		/* Use 8x RA size as a decent limiter for both reads/writes */
		max_bytes = filp->f_ra.ra_pages << (PAGE_SHIFT + 3);
		if (!max_bytes)
			max_bytes = VM_READAHEAD_PAGES << (PAGE_SHIFT + 3);

		/* If max len is exceeded, reset the state */
		if (async_list->io_len + len <= max_bytes) {
			req->flags |= REQ_F_SEQ_PREV;
			async_list->io_len += len;
		} else {
			async_list->file = NULL;
		}
	}

	/* New file? Reset state. */
	if (async_list->file != filp) {
		async_list->io_start = kiocb->ki_pos;
		async_list->io_len = len;
		async_list->file = filp;
	}
}

/*
 * For files that don't have ->read_iter() and ->write_iter(), handle them
 * by looping over ->read() or ->write() manually.
 */
static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb,
			   struct iov_iter *iter)
{
	ssize_t ret = 0;

	/*
	 * Don't support polled IO through this interface, and we can't
	 * support non-blocking either. For the latter, this just causes
	 * the kiocb to be handled from an async context.
	 */
	if (kiocb->ki_flags & IOCB_HIPRI)
		return -EOPNOTSUPP;
	if (kiocb->ki_flags & IOCB_NOWAIT)
		return -EAGAIN;

	while (iov_iter_count(iter)) {
		struct iovec iovec;
		ssize_t nr;

		if (!iov_iter_is_bvec(iter)) {
			iovec = iov_iter_iovec(iter);
		} else {
			/* fixed buffers import bvec */
			iovec.iov_base = kmap(iter->bvec->bv_page)
						+ iter->iov_offset;
			iovec.iov_len = min(iter->count,
					iter->bvec->bv_len - iter->iov_offset);
		}

		if (rw == READ) {
			nr = file->f_op->read(file, iovec.iov_base,
					      iovec.iov_len, &kiocb->ki_pos);
		} else {
			nr = file->f_op->write(file, iovec.iov_base,
					       iovec.iov_len, &kiocb->ki_pos);
		}

		if (iov_iter_is_bvec(iter))
			kunmap(iter->bvec->bv_page);

		if (nr < 0) {
			if (!ret)
				ret = nr;
			break;
		}
		ret += nr;
		if (nr != iovec.iov_len)
			break;
		iov_iter_advance(iter, nr);
	}

	return ret;
}

static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
		   bool force_nonblock)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw;
	struct iov_iter iter;
	struct file *file;
	size_t iov_count;
	ssize_t read_size, ret;

	ret = io_prep_rw(req, s, force_nonblock);
	if (ret)
		return ret;
	file = kiocb->ki_filp;

	if (unlikely(!(file->f_mode & FMODE_READ)))
		return -EBADF;

	ret = io_import_iovec(req->ctx, READ, req, &iovec, &iter);
	if (ret < 0)
		return ret;

	read_size = ret;
	if (req->flags & REQ_F_LINK)
		req->result = read_size;

	iov_count = iov_iter_count(&iter);
	ret = rw_verify_area(READ, file, &kiocb->ki_pos, iov_count);
	if (!ret) {
		ssize_t ret2;

		if (file->f_op->read_iter)
			ret2 = call_read_iter(file, kiocb, &iter);
		else if (req->file->f_op->read)
			ret2 = loop_rw_iter(READ, file, kiocb, &iter);
		else
			ret2 = -EINVAL;

		/*
		 * In case of a short read, punt to async. This can happen
		 * if we have data partially cached. Alternatively we can
		 * return the short read, in which case the application will
		 * need to issue another SQE and wait for it. That SQE will
		 * need async punt anyway, so it's more efficient to do it
		 * here.
		 */
		if (force_nonblock && !(req->flags & REQ_F_NOWAIT) &&
		    (req->flags & REQ_F_ISREG) &&
		    ret2 > 0 && ret2 < read_size)
			ret2 = -EAGAIN;
		/* Catch -EAGAIN return for forced non-blocking submission */
		if (!force_nonblock || ret2 != -EAGAIN) {
			io_rw_done(kiocb, ret2);
		} else {
			/*
			 * If ->needs_lock is true, we're already in async
			 * context.
			 */
			if (!s->needs_lock)
				io_async_list_note(READ, req, iov_count);
			ret = -EAGAIN;
		}
	}
	kfree(iovec);
	return ret;
}

static int io_write(struct io_kiocb *req, const struct sqe_submit *s,
		    bool force_nonblock)
{
	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
	struct kiocb *kiocb = &req->rw;
	struct iov_iter iter;
	struct file *file;
	size_t iov_count;
	ssize_t ret;

	ret = io_prep_rw(req, s, force_nonblock);
	if (ret)
		return ret;

	file = kiocb->ki_filp;
	if (unlikely(!(file->f_mode & FMODE_WRITE)))
		return -EBADF;

	ret = io_import_iovec(req->ctx, WRITE, req, &iovec, &iter);
	if (ret < 0)
		return ret;

	if (req->flags & REQ_F_LINK)
		req->result = ret;

	iov_count = iov_iter_count(&iter);

	ret = -EAGAIN;
	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT)) {
		/* If ->needs_lock is true, we're already in async context. */
		if (!s->needs_lock)
			io_async_list_note(WRITE, req, iov_count);
		goto out_free;
	}

	ret = rw_verify_area(WRITE, file, &kiocb->ki_pos, iov_count);
	if (!ret) {
		ssize_t ret2;

		/*
		 * Open-code file_start_write here to grab freeze protection,
		 * which will be released by another thread in
		 * io_complete_rw().  Fool lockdep by telling it the lock got
		 * released so that it doesn't complain about the held lock when
		 * we return to userspace.
		 */
		if (req->flags & REQ_F_ISREG) {
			__sb_start_write(file_inode(file)->i_sb,
						SB_FREEZE_WRITE, true);
			__sb_writers_release(file_inode(file)->i_sb,
						SB_FREEZE_WRITE);
		}
		kiocb->ki_flags |= IOCB_WRITE;

		if (!force_nonblock)
			current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize;

		if (file->f_op->write_iter)
			ret2 = call_write_iter(file, kiocb, &iter);
		else if (req->file->f_op->write)
			ret2 = loop_rw_iter(WRITE, file, kiocb, &iter);
		else
			ret2 = -EINVAL;

		if (!force_nonblock)
			current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;

		if (!force_nonblock || ret2 != -EAGAIN) {
			io_rw_done(kiocb, ret2);
		} else {
			/*
			 * If ->needs_lock is true, we're already in async
			 * context.
			 */
			if (!s->needs_lock)
				io_async_list_note(WRITE, req, iov_count);
			ret = -EAGAIN;
		}
	}
out_free:
	kfree(iovec);
	return ret;
}

/*
 * IORING_OP_NOP just posts a completion event, nothing else.
 */
static int io_nop(struct io_kiocb *req, u64 user_data)
{
	struct io_ring_ctx *ctx = req->ctx;
	long err = 0;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	io_cqring_add_event(ctx, user_data, err);
	io_put_req(req);
	return 0;
}

static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	return 0;
}

static int io_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		    bool force_nonblock)
{
	loff_t sqe_off = READ_ONCE(sqe->off);
	loff_t sqe_len = READ_ONCE(sqe->len);
	loff_t end = sqe_off + sqe_len;
	unsigned fsync_flags;
	int ret;

	fsync_flags = READ_ONCE(sqe->fsync_flags);
	if (unlikely(fsync_flags & ~IORING_FSYNC_DATASYNC))
		return -EINVAL;

	ret = io_prep_fsync(req, sqe);
	if (ret)
		return ret;

	/* fsync always requires a blocking context */
	if (force_nonblock)
		return -EAGAIN;

	ret = vfs_fsync_range(req->rw.ki_filp, sqe_off,
				end > 0 ? end : LLONG_MAX,
				fsync_flags & IORING_FSYNC_DATASYNC);

	if (ret < 0 && (req->flags & REQ_F_LINK))
		req->flags |= REQ_F_FAIL_LINK;
	io_cqring_add_event(req->ctx, sqe->user_data, ret);
	io_put_req(req);
	return 0;
}

static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	int ret = 0;

	if (!req->file)
		return -EBADF;

	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index))
		return -EINVAL;

	return ret;
}

static int io_sync_file_range(struct io_kiocb *req,
			      const struct io_uring_sqe *sqe,
			      bool force_nonblock)
{
	loff_t sqe_off;
	loff_t sqe_len;
	unsigned flags;
	int ret;

	ret = io_prep_sfr(req, sqe);
	if (ret)
		return ret;

	/* sync_file_range always requires a blocking context */
	if (force_nonblock)
		return -EAGAIN;

	sqe_off = READ_ONCE(sqe->off);
	sqe_len = READ_ONCE(sqe->len);
	flags = READ_ONCE(sqe->sync_range_flags);

	ret = sync_file_range(req->rw.ki_filp, sqe_off, sqe_len, flags);

	if (ret < 0 && (req->flags & REQ_F_LINK))
		req->flags |= REQ_F_FAIL_LINK;
	io_cqring_add_event(req->ctx, sqe->user_data, ret);
	io_put_req(req);
	return 0;
}

#if defined(CONFIG_NET)
static int io_send_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
			   bool force_nonblock,
		   long (*fn)(struct socket *, struct user_msghdr __user *,
				unsigned int))
{
	struct socket *sock;
	int ret;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;

	sock = sock_from_file(req->file, &ret);
	if (sock) {
		struct user_msghdr __user *msg;
		unsigned flags;

		flags = READ_ONCE(sqe->msg_flags);
		if (flags & MSG_DONTWAIT)
			req->flags |= REQ_F_NOWAIT;
		else if (force_nonblock)
			flags |= MSG_DONTWAIT;

#ifdef CONFIG_COMPAT
		if (req->ctx->compat)
			flags |= MSG_CMSG_COMPAT;
#endif

		msg = (struct user_msghdr __user *) (unsigned long)
			READ_ONCE(sqe->addr);

		ret = fn(sock, msg, flags);
		if (force_nonblock && ret == -EAGAIN)
			return ret;
		if (ret == -ERESTARTSYS)
			ret = -EINTR;
	}

	io_req_put_fs(req);
	io_cqring_add_event(req->ctx, sqe->user_data, ret);
	io_put_req(req);
	return 0;
}
#endif

static int io_sendmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      bool force_nonblock)
{
#if defined(CONFIG_NET)
	return io_send_recvmsg(req, sqe, force_nonblock, __sys_sendmsg_sock);
#else
	return -EOPNOTSUPP;
#endif
}

static int io_recvmsg(struct io_kiocb *req, const struct io_uring_sqe *sqe,
		      bool force_nonblock)
{
#if defined(CONFIG_NET)
	return io_send_recvmsg(req, sqe, force_nonblock, __sys_recvmsg_sock);
#else
	return -EOPNOTSUPP;
#endif
}

static void io_poll_remove_one(struct io_kiocb *req)
{
	struct io_poll_iocb *poll = &req->poll;

	spin_lock(&poll->head->lock);
	WRITE_ONCE(poll->canceled, true);
	if (!list_empty(&poll->wait.entry)) {
		list_del_init(&poll->wait.entry);
		io_queue_async_work(req->ctx, req);
	}
	spin_unlock(&poll->head->lock);

	list_del_init(&req->list);
}

static void io_poll_remove_all(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	spin_lock_irq(&ctx->completion_lock);
	while (!list_empty(&ctx->cancel_list)) {
		req = list_first_entry(&ctx->cancel_list, struct io_kiocb, list);
		io_poll_remove_one(req);
	}
	spin_unlock_irq(&ctx->completion_lock);
}

/*
 * Find a running poll command that matches one specified in sqe->addr,
 * and remove it if found.
 */
static int io_poll_remove(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_ring_ctx *ctx = req->ctx;
	struct io_kiocb *poll_req, *next;
	int ret = -ENOENT;

	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
		return -EINVAL;
	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
	    sqe->poll_events)
		return -EINVAL;

	spin_lock_irq(&ctx->completion_lock);
	list_for_each_entry_safe(poll_req, next, &ctx->cancel_list, list) {
		if (READ_ONCE(sqe->addr) == poll_req->user_data) {
			io_poll_remove_one(poll_req);
			ret = 0;
			break;
		}
	}
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_add_event(req->ctx, sqe->user_data, ret);
	io_put_req(req);
	return 0;
}

static void io_poll_complete(struct io_ring_ctx *ctx, struct io_kiocb *req,
			     __poll_t mask)
{
	req->poll.done = true;
	io_cqring_fill_event(ctx, req->user_data, mangle_poll(mask));
	io_commit_cqring(ctx);
}

static void io_poll_complete_work(struct work_struct *work)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_poll_iocb *poll = &req->poll;
	struct poll_table_struct pt = { ._key = poll->events };
	struct io_ring_ctx *ctx = req->ctx;
	const struct cred *old_cred;
	__poll_t mask = 0;

	old_cred = override_creds(ctx->creds);

	if (!READ_ONCE(poll->canceled))
		mask = vfs_poll(poll->file, &pt) & poll->events;

	/*
	 * Note that ->ki_cancel callers also delete iocb from active_reqs after
	 * calling ->ki_cancel.  We need the ctx_lock roundtrip here to
	 * synchronize with them.  In the cancellation case the list_del_init
	 * itself is not actually needed, but harmless so we keep it in to
	 * avoid further branches in the fast path.
	 */
	spin_lock_irq(&ctx->completion_lock);
	if (!mask && !READ_ONCE(poll->canceled)) {
		add_wait_queue(poll->head, &poll->wait);
		spin_unlock_irq(&ctx->completion_lock);
		goto out;
	}
	list_del_init(&req->list);
	io_poll_complete(ctx, req, mask);
	spin_unlock_irq(&ctx->completion_lock);

	io_cqring_ev_posted(ctx);
	io_put_req(req);
out:
	revert_creds(old_cred);
}
1833 
1834 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1835 			void *key)
1836 {
1837 	struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
1838 							wait);
1839 	struct io_kiocb *req = container_of(poll, struct io_kiocb, poll);
1840 	struct io_ring_ctx *ctx = req->ctx;
1841 	__poll_t mask = key_to_poll(key);
1842 	unsigned long flags;
1843 
1844 	/* For instances that support it, check for an event match first: */
1845 	if (mask && !(mask & poll->events))
1846 		return 0;
1847 
1848 	list_del_init(&poll->wait.entry);
1849 
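	/*
	 * This runs from the waitqueue wakeup path, potentially in hard
	 * interrupt context, so only try-lock the completion lock. If it is
	 * contended (or no event mask was passed in), punt completion to the
	 * async work queue instead of spinning here.
	 */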
1850 	if (mask && spin_trylock_irqsave(&ctx->completion_lock, flags)) {
1851 		list_del(&req->list);
1852 		io_poll_complete(ctx, req, mask);
1853 		spin_unlock_irqrestore(&ctx->completion_lock, flags);
1854 
1855 		io_cqring_ev_posted(ctx);
1856 		io_put_req(req);
1857 	} else {
1858 		io_queue_async_work(ctx, req);
1859 	}
1860 
1861 	return 1;
1862 }
1863 
1864 struct io_poll_table {
1865 	struct poll_table_struct pt;
1866 	struct io_kiocb *req;
1867 	int error;
1868 };
1869 
1870 static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1871 			       struct poll_table_struct *p)
1872 {
1873 	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
1874 
1875 	if (unlikely(pt->req->poll.head)) {
1876 		pt->error = -EINVAL;
1877 		return;
1878 	}
1879 
1880 	pt->error = 0;
1881 	pt->req->poll.head = head;
1882 	add_wait_queue(head, &pt->req->poll.wait);
1883 }
1884 
1885 static int io_poll_add(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1886 {
1887 	struct io_poll_iocb *poll = &req->poll;
1888 	struct io_ring_ctx *ctx = req->ctx;
1889 	struct io_poll_table ipt;
1890 	bool cancel = false;
1891 	__poll_t mask;
1892 	u16 events;
1893 
1894 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
1895 		return -EINVAL;
1896 	if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
1897 		return -EINVAL;
1898 	if (!poll->file)
1899 		return -EBADF;
1900 
1901 	req->submit.sqe = NULL;
1902 	INIT_WORK(&req->work, io_poll_complete_work);
1903 	events = READ_ONCE(sqe->poll_events);
1904 	poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP;
1905 
1906 	poll->head = NULL;
1907 	poll->done = false;
1908 	poll->canceled = false;
1909 
1910 	ipt.pt._qproc = io_poll_queue_proc;
1911 	ipt.pt._key = poll->events;
1912 	ipt.req = req;
1913 	ipt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1914 
1915 	/* initialize the list so that we can do list_empty checks */
1916 	INIT_LIST_HEAD(&poll->wait.entry);
1917 	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
1918 
1919 	INIT_LIST_HEAD(&req->list);
1920 
1921 	mask = vfs_poll(poll->file, &ipt.pt) & poll->events;
1922 
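	/*
	 * vfs_poll() may already have fired io_poll_wake() and removed our
	 * wait entry. Re-check under the completion lock: an empty wait
	 * entry means the wakeup side owns completion, so clear the mask
	 * here (and flag cancellation if queueing the poll failed).
	 */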
1923 	spin_lock_irq(&ctx->completion_lock);
1924 	if (likely(poll->head)) {
1925 		spin_lock(&poll->head->lock);
1926 		if (unlikely(list_empty(&poll->wait.entry))) {
1927 			if (ipt.error)
1928 				cancel = true;
1929 			ipt.error = 0;
1930 			mask = 0;
1931 		}
1932 		if (mask || ipt.error)
1933 			list_del_init(&poll->wait.entry);
1934 		else if (cancel)
1935 			WRITE_ONCE(poll->canceled, true);
1936 		else if (!poll->done) /* actually waiting for an event */
1937 			list_add_tail(&req->list, &ctx->cancel_list);
1938 		spin_unlock(&poll->head->lock);
1939 	}
1940 	if (mask) { /* no async, we'd stolen it */
1941 		ipt.error = 0;
1942 		io_poll_complete(ctx, req, mask);
1943 	}
1944 	spin_unlock_irq(&ctx->completion_lock);
1945 
1946 	if (mask) {
1947 		io_cqring_ev_posted(ctx);
1948 		io_put_req(req);
1949 	}
1950 	return ipt.error;
1951 }
1952 
1953 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
1954 {
1955 	struct io_ring_ctx *ctx;
1956 	struct io_kiocb *req, *prev;
1957 	unsigned long flags;
1958 
1959 	req = container_of(timer, struct io_kiocb, timeout.timer);
1960 	ctx = req->ctx;
1961 	atomic_inc(&ctx->cq_timeouts);
1962 
1963 	spin_lock_irqsave(&ctx->completion_lock, flags);
1964 	/*
1965 	 * Adjust the sequence of requests queued before the current one,
1966 	 * because this request will consume a slot in the cq_ring and the
1967 	 * cq_tail pointer will advance; otherwise other timeout requests
1968 	 * could complete early without waiting for enough wait_nr events.
1969 	 */
1970 	prev = req;
1971 	list_for_each_entry_continue_reverse(prev, &ctx->timeout_list, list)
1972 		prev->sequence++;
1973 	list_del(&req->list);
1974 
1975 	io_cqring_fill_event(ctx, req->user_data, -ETIME);
1976 	io_commit_cqring(ctx);
1977 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
1978 
1979 	io_cqring_ev_posted(ctx);
1980 
1981 	io_put_req(req);
1982 	return HRTIMER_NORESTART;
1983 }
1984 
1985 static int io_timeout(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1986 {
1987 	unsigned count;
1988 	struct io_ring_ctx *ctx = req->ctx;
1989 	struct list_head *entry;
1990 	struct timespec64 ts;
1991 	unsigned span = 0;
1992 
1993 	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
1994 		return -EINVAL;
1995 	if (sqe->flags || sqe->ioprio || sqe->buf_index || sqe->timeout_flags ||
1996 	    sqe->len != 1)
1997 		return -EINVAL;
1998 
1999 	if (get_timespec64(&ts, u64_to_user_ptr(sqe->addr)))
2000 		return -EFAULT;
2001 
2002 	req->flags |= REQ_F_TIMEOUT;
2003 
2004 	/*
2005 	 * sqe->off holds how many events need to occur before this
2006 	 * timeout is satisfied. If it isn't set, then this is a pure
2007 	 * timeout request and the sequence isn't used.
2008 	 */
2009 	count = READ_ONCE(sqe->off);
2010 	if (!count) {
2011 		req->flags |= REQ_F_TIMEOUT_NOSEQ;
2012 		spin_lock_irq(&ctx->completion_lock);
2013 		entry = ctx->timeout_list.prev;
2014 		goto add;
2015 	}
2016 
2017 	req->sequence = ctx->cached_sq_head + count - 1;
2018 	/* reuse it to store the count */
2019 	req->submit.sequence = count;
2020 
2021 	/*
2022 	 * Insertion sort, ensuring the first entry in the list is always
2023 	 * the one we need first.
2024 	 */
2025 	spin_lock_irq(&ctx->completion_lock);
2026 	list_for_each_prev(entry, &ctx->timeout_list) {
2027 		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
2028 		unsigned nxt_sq_head;
2029 		long long tmp, tmp_nxt;
2030 
2031 		if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
2032 			continue;
2033 
2034 		/*
2035 		 * Since cached_sq_head + count - 1 can overflow, use type long
2036 		 * long to store it.
2037 		 */
2038 		tmp = (long long)ctx->cached_sq_head + count - 1;
2039 		nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1;
2040 		tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1;
2041 
2042 		/*
2043 		 * cached_sq_head may overflow, but it can never wrap around
2044 		 * twice while any queued timeout request is still valid.
2045 		 */
2046 		if (ctx->cached_sq_head < nxt_sq_head)
2047 			tmp += UINT_MAX;
2048 
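		/*
		 * Worked example: if nxt was queued at head 0xfffffff0 with
		 * off 0x20, tmp_nxt is 0x10000000f. A timeout queued after
		 * the head wrapped to 0x10 with off 5 gives tmp = 0x14;
		 * since 0x10 < 0xfffffff0, UINT_MAX is added and 0x100000013
		 * > 0x10000000f, placing the new request after nxt as expected.
		 */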
2049 		if (tmp > tmp_nxt)
2050 			break;
2051 
2052 		/*
2053 		 * Adjust the sequence of the requests sorting after this one
2054 		 * (and of this one itself), since each timeout consumes a CQ slot.
2055 		 */
2056 		span++;
2057 		nxt->sequence++;
2058 	}
2059 	req->sequence -= span;
2060 add:
2061 	list_add(&req->list, entry);
2062 
2063 	hrtimer_init(&req->timeout.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
2064 	req->timeout.timer.function = io_timeout_fn;
2065 	hrtimer_start(&req->timeout.timer, timespec64_to_ktime(ts),
2066 			HRTIMER_MODE_REL);
2067 	spin_unlock_irq(&ctx->completion_lock);
2068 	return 0;
2069 }
2070 
2071 static int io_req_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
2072 			struct sqe_submit *s)
2073 {
2074 	struct io_uring_sqe *sqe_copy;
2075 
2076 	if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list))
2077 		return 0;
2078 
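	/*
	 * A GFP_KERNEL allocation can't be done under the completion lock,
	 * so allocate the sqe copy first and re-check the defer condition
	 * with the lock held; if the race resolved in our favour, just free
	 * the copy and run the request inline.
	 */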
2079 	sqe_copy = kmalloc(sizeof(*sqe_copy), GFP_KERNEL);
2080 	if (!sqe_copy)
2081 		return -EAGAIN;
2082 
2083 	spin_lock_irq(&ctx->completion_lock);
2084 	if (!io_sequence_defer(ctx, req) && list_empty(&ctx->defer_list)) {
2085 		spin_unlock_irq(&ctx->completion_lock);
2086 		kfree(sqe_copy);
2087 		return 0;
2088 	}
2089 
2090 	memcpy(&req->submit, s, sizeof(*s));
2091 	memcpy(sqe_copy, s->sqe, sizeof(*sqe_copy));
2092 	req->submit.sqe = sqe_copy;
2093 
2094 	INIT_WORK(&req->work, io_sq_wq_submit_work);
2095 	list_add_tail(&req->list, &ctx->defer_list);
2096 	spin_unlock_irq(&ctx->completion_lock);
2097 	return -EIOCBQUEUED;
2098 }
2099 
2100 static int __io_submit_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2101 			   const struct sqe_submit *s, bool force_nonblock)
2102 {
2103 	int ret;
2104 
2105 	req->user_data = READ_ONCE(s->sqe->user_data);
2106 
2107 	if (unlikely(s->index >= ctx->sq_entries))
2108 		return -EINVAL;
2109 
2110 	switch (req->submit.opcode) {
2111 	case IORING_OP_NOP:
2112 		ret = io_nop(req, req->user_data);
2113 		break;
2114 	case IORING_OP_READV:
2115 		if (unlikely(s->sqe->buf_index))
2116 			return -EINVAL;
2117 		ret = io_read(req, s, force_nonblock);
2118 		break;
2119 	case IORING_OP_WRITEV:
2120 		if (unlikely(s->sqe->buf_index))
2121 			return -EINVAL;
2122 		ret = io_write(req, s, force_nonblock);
2123 		break;
2124 	case IORING_OP_READ_FIXED:
2125 		ret = io_read(req, s, force_nonblock);
2126 		break;
2127 	case IORING_OP_WRITE_FIXED:
2128 		ret = io_write(req, s, force_nonblock);
2129 		break;
2130 	case IORING_OP_FSYNC:
2131 		ret = io_fsync(req, s->sqe, force_nonblock);
2132 		break;
2133 	case IORING_OP_POLL_ADD:
2134 		ret = io_poll_add(req, s->sqe);
2135 		break;
2136 	case IORING_OP_POLL_REMOVE:
2137 		ret = io_poll_remove(req, s->sqe);
2138 		break;
2139 	case IORING_OP_SYNC_FILE_RANGE:
2140 		ret = io_sync_file_range(req, s->sqe, force_nonblock);
2141 		break;
2142 	case IORING_OP_SENDMSG:
2143 		ret = io_sendmsg(req, s->sqe, force_nonblock);
2144 		break;
2145 	case IORING_OP_RECVMSG:
2146 		ret = io_recvmsg(req, s->sqe, force_nonblock);
2147 		break;
2148 	case IORING_OP_TIMEOUT:
2149 		ret = io_timeout(req, s->sqe);
2150 		break;
2151 	default:
2152 		ret = -EINVAL;
2153 		break;
2154 	}
2155 
2156 	if (ret)
2157 		return ret;
2158 
2159 	if (ctx->flags & IORING_SETUP_IOPOLL) {
2160 		if (req->result == -EAGAIN)
2161 			return -EAGAIN;
2162 
2163 		/* workqueue context doesn't hold uring_lock, grab it now */
2164 		if (s->needs_lock)
2165 			mutex_lock(&ctx->uring_lock);
2166 		io_iopoll_req_issued(req);
2167 		if (s->needs_lock)
2168 			mutex_unlock(&ctx->uring_lock);
2169 	}
2170 
2171 	return 0;
2172 }
2173 
2174 static struct async_list *io_async_list_from_req(struct io_ring_ctx *ctx,
2175 						 struct io_kiocb *req)
2176 {
2177 	switch (req->submit.opcode) {
2178 	case IORING_OP_READV:
2179 	case IORING_OP_READ_FIXED:
2180 		return &ctx->pending_async[READ];
2181 	case IORING_OP_WRITEV:
2182 	case IORING_OP_WRITE_FIXED:
2183 		return &ctx->pending_async[WRITE];
2184 	default:
2185 		return NULL;
2186 	}
2187 }
2188 
2189 static inline bool io_req_needs_user(struct io_kiocb *req)
2190 {
2191 	return !(req->submit.opcode == IORING_OP_READ_FIXED ||
2192 		req->submit.opcode == IORING_OP_WRITE_FIXED);
2193 }
2194 
2195 static void io_sq_wq_submit_work(struct work_struct *work)
2196 {
2197 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
2198 	struct fs_struct *old_fs_struct = current->fs;
2199 	struct io_ring_ctx *ctx = req->ctx;
2200 	struct mm_struct *cur_mm = NULL;
2201 	struct async_list *async_list;
2202 	const struct cred *old_cred;
2203 	LIST_HEAD(req_list);
2204 	mm_segment_t old_fs;
2205 	int ret;
2206 
2207 	old_cred = override_creds(ctx->creds);
2208 	async_list = io_async_list_from_req(ctx, req);
2209 
2210 	allow_kernel_signal(SIGINT);
2211 restart:
2212 	do {
2213 		struct sqe_submit *s = &req->submit;
2214 		const struct io_uring_sqe *sqe = s->sqe;
2215 		unsigned int flags = req->flags;
2216 
2217 		/* Ensure we clear previously set non-block flag */
2218 		req->rw.ki_flags &= ~IOCB_NOWAIT;
2219 
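		/*
		 * sendmsg/recvmsg requests capture the submitter's fs_struct
		 * at submission time; switch this worker thread over to it so
		 * path resolution (e.g. unix socket addresses) sees the right
		 * root and cwd, and restore the original fs_struct on exit.
		 */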
2220 		if ((req->fs && req->fs != current->fs) ||
2221 		    (!req->fs && current->fs != old_fs_struct)) {
2222 			task_lock(current);
2223 			if (req->fs)
2224 				current->fs = req->fs;
2225 			else
2226 				current->fs = old_fs_struct;
2227 			task_unlock(current);
2228 		}
2229 
2230 		ret = 0;
2231 		if (io_req_needs_user(req) && !cur_mm) {
2232 			if (!mmget_not_zero(ctx->sqo_mm)) {
2233 				ret = -EFAULT;
2234 				goto end_req;
2235 			} else {
2236 				cur_mm = ctx->sqo_mm;
2237 				use_mm(cur_mm);
2238 				old_fs = get_fs();
2239 				set_fs(USER_DS);
2240 			}
2241 		}
2242 
2243 		if (!ret) {
2244 			req->work_task = current;
2245 
2246 			/*
2247 			 * Pairs with the smp_store_mb() (B) in
2248 			 * io_cancel_async_work().
2249 			 */
2250 			smp_mb(); /* A */
2251 			if (req->flags & REQ_F_CANCEL) {
2252 				ret = -ECANCELED;
2253 				goto end_req;
2254 			}
2255 
2256 			s->has_user = cur_mm != NULL;
2257 			s->needs_lock = true;
2258 			do {
2259 				ret = __io_submit_sqe(ctx, req, s, false);
2260 				/*
2261 				 * We can get EAGAIN for polled IO even though
2262 				 * we're forcing a sync submission from here,
2263 				 * since we can't wait for request slots on the
2264 				 * block side.
2265 				 */
2266 				if (ret != -EAGAIN)
2267 					break;
2268 				cond_resched();
2269 			} while (1);
2270 		}
2271 end_req:
2272 		spin_lock_irq(&ctx->task_lock);
2273 		list_del_init(&req->task_list);
2274 		spin_unlock_irq(&ctx->task_lock);
2275 
2276 		/* drop submission reference */
2277 		io_put_req(req);
2278 
2279 		if (ret) {
2280 			io_cqring_add_event(ctx, sqe->user_data, ret);
2281 			io_put_req(req);
2282 		}
2283 
2284 		/* async context always uses a copy of the sqe */
2285 		kfree(sqe);
2286 
2287 		/* reqs from the defer or link lists needn't decrease the async cnt */
2288 		if (flags & (REQ_F_IO_DRAINED | REQ_F_LINK_DONE))
2289 			goto out;
2290 
2291 		if (!async_list)
2292 			break;
2293 		if (!list_empty(&req_list)) {
2294 			req = list_first_entry(&req_list, struct io_kiocb,
2295 						list);
2296 			list_del(&req->list);
2297 			continue;
2298 		}
2299 		if (list_empty(&async_list->list))
2300 			break;
2301 
2302 		req = NULL;
2303 		spin_lock(&async_list->lock);
2304 		if (list_empty(&async_list->list)) {
2305 			spin_unlock(&async_list->lock);
2306 			break;
2307 		}
2308 		list_splice_init(&async_list->list, &req_list);
2309 		spin_unlock(&async_list->lock);
2310 
2311 		req = list_first_entry(&req_list, struct io_kiocb, list);
2312 		list_del(&req->list);
2313 	} while (req);
2314 
2315 	/*
2316 	 * Rare case of racing with a submitter. If we find the count has
2317 	 * dropped to zero AND we have pending work items, then restart
2318 	 * the processing. This is a tiny race window.
2319 	 */
2320 	if (async_list) {
2321 		ret = atomic_dec_return(&async_list->cnt);
2322 		while (!ret && !list_empty(&async_list->list)) {
2323 			spin_lock(&async_list->lock);
2324 			atomic_inc(&async_list->cnt);
2325 			list_splice_init(&async_list->list, &req_list);
2326 			spin_unlock(&async_list->lock);
2327 
2328 			if (!list_empty(&req_list)) {
2329 				req = list_first_entry(&req_list,
2330 							struct io_kiocb, list);
2331 				list_del(&req->list);
2332 				goto restart;
2333 			}
2334 			ret = atomic_dec_return(&async_list->cnt);
2335 		}
2336 	}
2337 
2338 out:
2339 	disallow_signal(SIGINT);
2340 	if (cur_mm) {
2341 		set_fs(old_fs);
2342 		unuse_mm(cur_mm);
2343 		mmput(cur_mm);
2344 	}
2345 	revert_creds(old_cred);
2346 	if (old_fs_struct != current->fs) {
2347 		task_lock(current);
2348 		current->fs = old_fs_struct;
2349 		task_unlock(current);
2350 	}
2351 }
2352 
2353 /*
2354  * See if we can piggy back onto previously submitted work, that is still
2355  * running. We currently only allow this if the new request is sequential
2356  * to the previous one we punted.
2357  */
2358 static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
2359 {
2360 	bool ret;
2361 
2362 	if (!list)
2363 		return false;
2364 	if (!(req->flags & REQ_F_SEQ_PREV))
2365 		return false;
2366 	if (!atomic_read(&list->cnt))
2367 		return false;
2368 
2369 	ret = true;
2370 	spin_lock(&list->lock);
2371 	list_add_tail(&req->list, &list->list);
2372 	/*
2373 	 * Ensure we see a simultaneous modification from io_sq_wq_submit_work()
2374 	 */
2375 	smp_mb();
2376 	if (!atomic_read(&list->cnt)) {
2377 		list_del_init(&req->list);
2378 		ret = false;
2379 	}
2380 
2381 	if (ret) {
2382 		struct io_ring_ctx *ctx = req->ctx;
2383 
2384 		req->files = current->files;
2385 
2386 		spin_lock_irq(&ctx->task_lock);
2387 		list_add(&req->task_list, &ctx->task_list);
2388 		req->work_task = NULL;
2389 		spin_unlock_irq(&ctx->task_lock);
2390 	}
2391 	spin_unlock(&list->lock);
2392 	return ret;
2393 }
2394 
2395 static bool io_op_needs_file(struct io_kiocb *req)
2396 {
2397 	switch (req->submit.opcode) {
2398 	case IORING_OP_NOP:
2399 	case IORING_OP_POLL_REMOVE:
2400 	case IORING_OP_TIMEOUT:
2401 		return false;
2402 	default:
2403 		return true;
2404 	}
2405 }
2406 
2407 static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
2408 			   struct io_submit_state *state, struct io_kiocb *req)
2409 {
2410 	unsigned flags;
2411 	int fd;
2412 
2413 	flags = READ_ONCE(s->sqe->flags);
2414 	fd = READ_ONCE(s->sqe->fd);
2415 
2416 	if (flags & IOSQE_IO_DRAIN)
2417 		req->flags |= REQ_F_IO_DRAIN;
2418 	/*
2419 	 * All IO records the previous sequence position; when LINK and
2420 	 * DRAIN are combined, it marks the position of the first IO in
2421 	 * the link chain.
2422 	 */
2423 	req->sequence = s->sequence;
2424 
2425 	if (!io_op_needs_file(req))
2426 		return 0;
2427 
2428 	if (flags & IOSQE_FIXED_FILE) {
2429 		if (unlikely(!ctx->user_files ||
2430 		    (unsigned) fd >= ctx->nr_user_files))
2431 			return -EBADF;
2432 		req->file = ctx->user_files[fd];
2433 		req->flags |= REQ_F_FIXED_FILE;
2434 	} else {
2435 		if (s->needs_fixed_file)
2436 			return -EBADF;
2437 		req->file = io_file_get(state, fd);
2438 		if (unlikely(!req->file))
2439 			return -EBADF;
2440 	}
2441 
2442 	return 0;
2443 }
2444 
2445 static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2446 			struct sqe_submit *s)
2447 {
2448 	int ret;
2449 
2450 	ret = __io_submit_sqe(ctx, req, s, true);
2451 
2452 	/*
2453 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
2454 	 * doesn't support non-blocking read/write attempts
2455 	 */
2456 	if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) ||
2457 	    (req->flags & REQ_F_MUST_PUNT))) {
2458 		struct io_uring_sqe *sqe_copy;
2459 
2460 		sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
2461 		if (sqe_copy) {
2462 			struct async_list *list;
2463 
2464 			s->sqe = sqe_copy;
2465 			memcpy(&req->submit, s, sizeof(*s));
2466 			list = io_async_list_from_req(ctx, req);
2467 			if (!io_add_to_prev_work(list, req)) {
2468 				if (list)
2469 					atomic_inc(&list->cnt);
2470 				INIT_WORK(&req->work, io_sq_wq_submit_work);
2471 				io_queue_async_work(ctx, req);
2472 			}
2473 
2474 			/*
2475 			 * Queued up for async execution, worker will release
2476 			 * submit reference when the iocb is actually submitted.
2477 			 */
2478 			return 0;
2479 		}
2480 	}
2481 
2482 	/* drop submission reference */
2483 	io_put_req(req);
2484 
2485 	/* and drop final reference, if we failed */
2486 	if (ret) {
2487 		io_cqring_add_event(ctx, req->user_data, ret);
2488 		if (req->flags & REQ_F_LINK)
2489 			req->flags |= REQ_F_FAIL_LINK;
2490 		io_put_req(req);
2491 	}
2492 
2493 	return ret;
2494 }
2495 
2496 static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
2497 			struct sqe_submit *s)
2498 {
2499 	int ret;
2500 
2501 	ret = io_req_defer(ctx, req, s);
2502 	if (ret) {
2503 		if (ret != -EIOCBQUEUED) {
2504 			io_free_req(req);
2505 			io_cqring_add_event(ctx, s->sqe->user_data, ret);
2506 		}
2507 		return 0;
2508 	}
2509 
2510 	return __io_queue_sqe(ctx, req, s);
2511 }
2512 
2513 static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
2514 			      struct sqe_submit *s, struct io_kiocb *shadow)
2515 {
2516 	int ret;
2517 	int need_submit = false;
2518 
2519 	if (!shadow)
2520 		return io_queue_sqe(ctx, req, s);
2521 
2522 	/*
2523 	 * Mark the first IO in the link chain as DRAIN so that all the
2524 	 * following IOs go onto the defer list; all prior IO must complete
2525 	 * before the link chain runs.
2526 	 */
2527 	req->flags |= REQ_F_IO_DRAIN;
2528 	ret = io_req_defer(ctx, req, s);
2529 	if (ret) {
2530 		if (ret != -EIOCBQUEUED) {
2531 			io_free_req(req);
2532 			__io_free_req(shadow);
2533 			io_cqring_add_event(ctx, s->sqe->user_data, ret);
2534 			return 0;
2535 		}
2536 	} else {
2537 		/*
2538 		 * ret == 0 means that all IOs ahead of the link have already
2539 		 * completed, so queue the link head now.
2540 		 */
2541 		need_submit = true;
2542 	}
2543 
2544 	/* Insert the shadow req into the defer_list, blocking subsequent IOs */
2545 	spin_lock_irq(&ctx->completion_lock);
2546 	list_add_tail(&shadow->list, &ctx->defer_list);
2547 	spin_unlock_irq(&ctx->completion_lock);
2548 
2549 	if (need_submit)
2550 		return __io_queue_sqe(ctx, req, s);
2551 
2552 	return 0;
2553 }
2554 
2555 #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
2556 
2557 static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
2558 			  struct io_submit_state *state, struct io_kiocb **link)
2559 {
2560 	struct io_uring_sqe *sqe_copy;
2561 	struct io_kiocb *req;
2562 	int ret;
2563 
2564 	/* enforce forwards compatibility on users */
2565 	if (unlikely(s->sqe->flags & ~SQE_VALID_FLAGS)) {
2566 		ret = -EINVAL;
2567 		goto err;
2568 	}
2569 
2570 	req = io_get_req(ctx, state);
2571 	if (unlikely(!req)) {
2572 		ret = -EAGAIN;
2573 		goto err;
2574 	}
2575 
2576 	memcpy(&req->submit, s, sizeof(*s));
2577 	ret = io_req_set_file(ctx, s, state, req);
2578 	if (unlikely(ret)) {
2579 err_req:
2580 		io_free_req(req);
2581 err:
2582 		io_cqring_add_event(ctx, s->sqe->user_data, ret);
2583 		return;
2584 	}
2585 
2586 	req->user_data = s->sqe->user_data;
2587 
2588 #if defined(CONFIG_NET)
2589 	switch (req->submit.opcode) {
2590 	case IORING_OP_SENDMSG:
2591 	case IORING_OP_RECVMSG:
2592 		spin_lock(&current->fs->lock);
2593 		if (!current->fs->in_exec) {
2594 			req->fs = current->fs;
2595 			req->fs->users++;
2596 		}
2597 		spin_unlock(&current->fs->lock);
2598 		if (!req->fs) {
2599 			ret = -EAGAIN;
2600 			goto err_req;
2601 		}
2602 	}
2603 #endif
2604 
2605 	/*
2606 	 * If we already have a head request, queue this one for async
2607 	 * submittal once the head completes. If we don't have a head but
2608 	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
2609 	 * submitted sync once the chain is complete. If none of those
2610 	 * conditions are true (normal request), then just queue it.
2611 	 */
2612 	if (*link) {
2613 		struct io_kiocb *prev = *link;
2614 
2615 		sqe_copy = kmemdup(s->sqe, sizeof(*sqe_copy), GFP_KERNEL);
2616 		if (!sqe_copy) {
2617 			ret = -EAGAIN;
2618 			goto err_req;
2619 		}
2620 
2621 		s->sqe = sqe_copy;
2622 		memcpy(&req->submit, s, sizeof(*s));
2623 		list_add_tail(&req->list, &prev->link_list);
2624 	} else if (s->sqe->flags & IOSQE_IO_LINK) {
2625 		req->flags |= REQ_F_LINK;
2626 
2627 		memcpy(&req->submit, s, sizeof(*s));
2628 		INIT_LIST_HEAD(&req->link_list);
2629 		*link = req;
2630 	} else {
2631 		io_queue_sqe(ctx, req, s);
2632 	}
2633 }
2634 
2635 /*
2636  * Batched submission is done, ensure local IO is flushed out.
2637  */
2638 static void io_submit_state_end(struct io_submit_state *state)
2639 {
2640 	blk_finish_plug(&state->plug);
2641 	io_file_put(state);
2642 	if (state->free_reqs)
2643 		kmem_cache_free_bulk(req_cachep, state->free_reqs,
2644 					&state->reqs[state->cur_req]);
2645 }
2646 
2647 /*
2648  * Start submission side cache.
2649  */
2650 static void io_submit_state_start(struct io_submit_state *state,
2651 				  struct io_ring_ctx *ctx, unsigned max_ios)
2652 {
2653 	blk_start_plug(&state->plug);
2654 	state->free_reqs = 0;
2655 	state->file = NULL;
2656 	state->ios_left = max_ios;
2657 }
2658 
2659 static void io_commit_sqring(struct io_ring_ctx *ctx)
2660 {
2661 	struct io_rings *rings = ctx->rings;
2662 
2663 	if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) {
2664 		/*
2665 		 * Ensure any loads from the SQEs are done at this point,
2666 		 * since once we write the new head, the application could
2667 		 * write new data to them.
2668 		 */
2669 		smp_store_release(&rings->sq.head, ctx->cached_sq_head);
2670 	}
2671 }
2672 
2673 /*
2674  * Fetch an sqe, if one is available. Note that s->sqe will point to memory
2675  * that is mapped by userspace. This means that care needs to be taken to
2676  * ensure that reads are stable, as we cannot rely on userspace always
2677  * being a good citizen. If members of the sqe are validated and then later
2678  * used, it's important that those reads are done through READ_ONCE() to
2679  * prevent a re-load down the line.
2680  */
2681 static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
2682 {
2683 	struct io_rings *rings = ctx->rings;
2684 	u32 *sq_array = ctx->sq_array;
2685 	unsigned head;
2686 
2687 	/*
2688 	 * The cached sq head (or cq tail) serves two purposes:
2689 	 *
2690 	 * 1) allows us to batch the cost of the user visible head
2691 	 *    updates.
2692 	 * 2) allows the kernel side to track the head on its own, even
2693 	 *    though the application is the one updating it.
2694 	 */
2695 	head = ctx->cached_sq_head;
2696 	/* make sure SQ entry isn't read before tail */
2697 	if (head == smp_load_acquire(&rings->sq.tail))
2698 		return false;
2699 
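	/*
	 * The SQ ring holds indices into the SQE array rather than the SQEs
	 * themselves, so the application may fill SQE slots in any order.
	 * Out-of-range indices are dropped and accounted in sq_dropped.
	 */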
2700 	head = READ_ONCE(sq_array[head & ctx->sq_mask]);
2701 	if (head < ctx->sq_entries) {
2702 		s->index = head;
2703 		s->sqe = &ctx->sq_sqes[head];
2704 		s->opcode = READ_ONCE(s->sqe->opcode);
2705 		s->sequence = ctx->cached_sq_head;
2706 		ctx->cached_sq_head++;
2707 		return true;
2708 	}
2709 
2710 	/* drop invalid entries */
2711 	ctx->cached_sq_head++;
2712 	ctx->cached_sq_dropped++;
2713 	WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
2714 	return false;
2715 }
2716 
2717 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
2718 			  bool has_user, bool mm_fault)
2719 {
2720 	struct io_submit_state state, *statep = NULL;
2721 	struct io_kiocb *link = NULL;
2722 	struct io_kiocb *shadow_req = NULL;
2723 	bool prev_was_link = false;
2724 	int i, submitted = 0;
2725 
2726 	if (nr > IO_PLUG_THRESHOLD) {
2727 		io_submit_state_start(&state, ctx, nr);
2728 		statep = &state;
2729 	}
2730 
2731 	for (i = 0; i < nr; i++) {
2732 		struct sqe_submit s;
2733 
2734 		if (!io_get_sqring(ctx, &s))
2735 			break;
2736 
2737 		/*
2738 		 * If previous wasn't linked and we have a linked command,
2739 		 * that's the end of the chain. Submit the previous link.
2740 		 */
2741 		if (!prev_was_link && link) {
2742 			io_queue_link_head(ctx, link, &link->submit, shadow_req);
2743 			link = NULL;
2744 			shadow_req = NULL;
2745 		}
2746 		prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
2747 
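		/*
		 * A drain inside a link chain is represented by a "shadow"
		 * request: it is parked on the defer list on behalf of the
		 * chain so that later submissions are held back until the
		 * linked IOs complete (see io_queue_link_head()).
		 */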
2748 		if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
2749 			if (!shadow_req) {
2750 				shadow_req = io_get_req(ctx, NULL);
2751 				if (unlikely(!shadow_req))
2752 					goto out;
2753 				shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
2754 				refcount_dec(&shadow_req->refs);
2755 			}
2756 			shadow_req->sequence = s.sequence;
2757 		}
2758 
2759 out:
2760 		if (unlikely(mm_fault)) {
2761 			io_cqring_add_event(ctx, s.sqe->user_data,
2762 						-EFAULT);
2763 		} else {
2764 			s.has_user = has_user;
2765 			s.needs_lock = true;
2766 			s.needs_fixed_file = true;
2767 			io_submit_sqe(ctx, &s, statep, &link);
2768 			submitted++;
2769 		}
2770 	}
2771 
2772 	if (link)
2773 		io_queue_link_head(ctx, link, &link->submit, shadow_req);
2774 	if (statep)
2775 		io_submit_state_end(&state);
2776 
2777 	return submitted;
2778 }
2779 
2780 static int io_sq_thread(void *data)
2781 {
2782 	struct io_ring_ctx *ctx = data;
2783 	struct mm_struct *cur_mm = NULL;
2784 	const struct cred *old_cred;
2785 	mm_segment_t old_fs;
2786 	DEFINE_WAIT(wait);
2787 	unsigned inflight;
2788 	unsigned long timeout;
2789 
2790 	complete(&ctx->sqo_thread_started);
2791 
2792 	old_fs = get_fs();
2793 	set_fs(USER_DS);
2794 	old_cred = override_creds(ctx->creds);
2795 
2796 	timeout = inflight = 0;
2797 	while (!kthread_should_park()) {
2798 		bool mm_fault = false;
2799 		unsigned int to_submit;
2800 
2801 		if (inflight) {
2802 			unsigned nr_events = 0;
2803 
2804 			if (ctx->flags & IORING_SETUP_IOPOLL) {
2805 				/*
2806 				 * inflight is the count of the maximum possible
2807 				 * entries we submitted, but it can be smaller
2808 				 * if we dropped some of them. If we don't have
2809 				 * poll entries available, then we know that we
2810 				 * have nothing left to poll for. Reset the
2811 				 * inflight count to zero in that case.
2812 				 */
2813 				mutex_lock(&ctx->uring_lock);
2814 				if (!list_empty(&ctx->poll_list))
2815 					io_iopoll_getevents(ctx, &nr_events, 0);
2816 				else
2817 					inflight = 0;
2818 				mutex_unlock(&ctx->uring_lock);
2819 			} else {
2820 				/*
2821 				 * Normal IO, just pretend everything completed.
2822 				 * We don't have to poll completions for that.
2823 				 */
2824 				nr_events = inflight;
2825 			}
2826 
2827 			inflight -= nr_events;
2828 			if (!inflight)
2829 				timeout = jiffies + ctx->sq_thread_idle;
2830 		}
2831 
2832 		to_submit = io_sqring_entries(ctx);
2833 		if (!to_submit) {
2834 			/*
2835 			 * Drop cur_mm before scheduling, we can't hold it for
2836 			 * long periods (or over schedule()). Do this before
2837 			 * adding ourselves to the waitqueue, as the unuse/drop
2838 			 * may sleep.
2839 			 */
2840 			if (cur_mm) {
2841 				unuse_mm(cur_mm);
2842 				mmput(cur_mm);
2843 				cur_mm = NULL;
2844 			}
2845 
2846 			/*
2847 			 * We're polling. If we're within the defined idle
2848 			 * period, then let us spin without work before going
2849 			 * to sleep.
2850 			 */
2851 			if (inflight || !time_after(jiffies, timeout)) {
2852 				cond_resched();
2853 				continue;
2854 			}
2855 
2856 			prepare_to_wait(&ctx->sqo_wait, &wait,
2857 						TASK_INTERRUPTIBLE);
2858 
2859 			/* Tell userspace we may need a wakeup call */
2860 			ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
2861 			/* make sure to read SQ tail after writing flags */
2862 			smp_mb();
2863 
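			/*
			 * Re-check for new SQEs after publishing the wakeup
			 * flag: a racing submitter either sees the flag and
			 * calls io_uring_enter() to wake us, or we see its
			 * tail update here and skip the sleep entirely.
			 */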
2864 			to_submit = io_sqring_entries(ctx);
2865 			if (!to_submit) {
2866 				if (kthread_should_park()) {
2867 					finish_wait(&ctx->sqo_wait, &wait);
2868 					break;
2869 				}
2870 				if (signal_pending(current))
2871 					flush_signals(current);
2872 				schedule();
2873 				finish_wait(&ctx->sqo_wait, &wait);
2874 
2875 				ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
2876 				continue;
2877 			}
2878 			finish_wait(&ctx->sqo_wait, &wait);
2879 
2880 			ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
2881 		}
2882 
2883 		/* Unless all new commands are FIXED regions, grab mm */
2884 		if (!cur_mm) {
2885 			mm_fault = !mmget_not_zero(ctx->sqo_mm);
2886 			if (!mm_fault) {
2887 				use_mm(ctx->sqo_mm);
2888 				cur_mm = ctx->sqo_mm;
2889 			}
2890 		}
2891 
2892 		to_submit = min(to_submit, ctx->sq_entries);
2893 		inflight += io_submit_sqes(ctx, to_submit, cur_mm != NULL,
2894 					   mm_fault);
2895 
2896 		/* Commit SQ ring head once we've consumed all SQEs */
2897 		io_commit_sqring(ctx);
2898 	}
2899 
2900 	set_fs(old_fs);
2901 	if (cur_mm) {
2902 		unuse_mm(cur_mm);
2903 		mmput(cur_mm);
2904 	}
2905 	revert_creds(old_cred);
2906 
2907 	kthread_parkme();
2908 
2909 	return 0;
2910 }
2911 
2912 static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
2913 {
2914 	struct io_submit_state state, *statep = NULL;
2915 	struct io_kiocb *link = NULL;
2916 	struct io_kiocb *shadow_req = NULL;
2917 	bool prev_was_link = false;
2918 	int i, submit = 0;
2919 
2920 	if (to_submit > IO_PLUG_THRESHOLD) {
2921 		io_submit_state_start(&state, ctx, to_submit);
2922 		statep = &state;
2923 	}
2924 
2925 	for (i = 0; i < to_submit; i++) {
2926 		struct sqe_submit s;
2927 
2928 		if (!io_get_sqring(ctx, &s))
2929 			break;
2930 
2931 		/*
2932 		 * If previous wasn't linked and we have a linked command,
2933 		 * that's the end of the chain. Submit the previous link.
2934 		 */
2935 		if (!prev_was_link && link) {
2936 			io_queue_link_head(ctx, link, &link->submit, shadow_req);
2937 			link = NULL;
2938 			shadow_req = NULL;
2939 		}
2940 		prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
2941 
2942 		if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
2943 			if (!shadow_req) {
2944 				shadow_req = io_get_req(ctx, NULL);
2945 				if (unlikely(!shadow_req))
2946 					goto out;
2947 				shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
2948 				refcount_dec(&shadow_req->refs);
2949 			}
2950 			shadow_req->sequence = s.sequence;
2951 		}
2952 
2953 out:
2954 		s.has_user = true;
2955 		s.needs_lock = false;
2956 		s.needs_fixed_file = false;
2957 		submit++;
2958 		io_submit_sqe(ctx, &s, statep, &link);
2959 	}
2960 
2961 	if (link)
2962 		io_queue_link_head(ctx, link, &link->submit, shadow_req);
2963 	if (statep)
2964 		io_submit_state_end(statep);
2965 
2966 	io_commit_sqring(ctx);
2967 
2968 	return submit;
2969 }
2970 
2971 struct io_wait_queue {
2972 	struct wait_queue_entry wq;
2973 	struct io_ring_ctx *ctx;
2974 	unsigned to_wait;
2975 	unsigned nr_timeouts;
2976 };
2977 
2978 static inline bool io_should_wake(struct io_wait_queue *iowq)
2979 {
2980 	struct io_ring_ctx *ctx = iowq->ctx;
2981 
2982 	/*
2983 	 * Wake up if we have enough events, or if a timeout occurred since we
2984 	 * started waiting. For timeouts, we always want to return to userspace,
2985 	 * regardless of event count.
2986 	 */
2987 	return io_cqring_events(ctx->rings) >= iowq->to_wait ||
2988 			atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
2989 }
2990 
2991 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
2992 			    int wake_flags, void *key)
2993 {
2994 	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
2995 							wq);
2996 
2997 	if (!io_should_wake(iowq))
2998 		return -1;
2999 
3000 	return autoremove_wake_function(curr, mode, wake_flags, key);
3001 }
3002 
3003 /*
3004  * Wait until events become available, if we don't already have some. The
3005  * application must reap them itself, as they reside on the shared cq ring.
3006  */
3007 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
3008 			  const sigset_t __user *sig, size_t sigsz)
3009 {
3010 	struct io_wait_queue iowq = {
3011 		.wq = {
3012 			.private	= current,
3013 			.func		= io_wake_function,
3014 			.entry		= LIST_HEAD_INIT(iowq.wq.entry),
3015 		},
3016 		.ctx		= ctx,
3017 		.to_wait	= min_events,
3018 	};
3019 	struct io_rings *rings = ctx->rings;
3020 	int ret;
3021 
3022 	if (io_cqring_events(rings) >= min_events)
3023 		return 0;
3024 
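	/*
	 * As with pselect(2)/ppoll(2), a caller-supplied signal mask is
	 * installed only for the duration of the wait. It is restored on the
	 * way out, unless we were interrupted, in which case restoration is
	 * deferred until the pending signal has been delivered.
	 */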
3025 	if (sig) {
3026 #ifdef CONFIG_COMPAT
3027 		if (in_compat_syscall())
3028 			ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
3029 						      sigsz);
3030 		else
3031 #endif
3032 			ret = set_user_sigmask(sig, sigsz);
3033 
3034 		if (ret)
3035 			return ret;
3036 	}
3037 
3038 	ret = 0;
3039 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
3040 	do {
3041 		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
3042 						TASK_INTERRUPTIBLE);
3043 		if (io_should_wake(&iowq))
3044 			break;
3045 		schedule();
3046 		if (signal_pending(current)) {
3047 			ret = -ERESTARTSYS;
3048 			break;
3049 		}
3050 	} while (1);
3051 	finish_wait(&ctx->wait, &iowq.wq);
3052 
3053 	restore_saved_sigmask_unless(ret == -ERESTARTSYS);
3054 	if (ret == -ERESTARTSYS)
3055 		ret = -EINTR;
3056 
3057 	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
3058 }
3059 
3060 static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
3061 {
3062 	int i;
3063 
3064 	for (i = 0; i < ctx->nr_user_files; i++)
3065 		fput(ctx->user_files[i]);
3066 }
3067 
3068 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
3069 {
3070 	if (!ctx->user_files)
3071 		return -ENXIO;
3072 
3073 	__io_sqe_files_unregister(ctx);
3074 	kfree(ctx->user_files);
3075 	ctx->user_files = NULL;
3076 	ctx->nr_user_files = 0;
3077 	return 0;
3078 }
3079 
3080 static void io_sq_thread_stop(struct io_ring_ctx *ctx)
3081 {
3082 	if (ctx->sqo_thread) {
3083 		wait_for_completion(&ctx->sqo_thread_started);
3084 		/*
3085 		 * The park is a bit of a work-around, without it we get
3086 		 * warning spews on shutdown with SQPOLL set and affinity
3087 		 * set to a single CPU.
3088 		 */
3089 		kthread_park(ctx->sqo_thread);
3090 		kthread_stop(ctx->sqo_thread);
3091 		ctx->sqo_thread = NULL;
3092 	}
3093 }
3094 
3095 static void io_finish_async(struct io_ring_ctx *ctx)
3096 {
3097 	int i;
3098 
3099 	io_sq_thread_stop(ctx);
3100 
3101 	for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++) {
3102 		if (ctx->sqo_wq[i]) {
3103 			destroy_workqueue(ctx->sqo_wq[i]);
3104 			ctx->sqo_wq[i] = NULL;
3105 		}
3106 	}
3107 }
3108 
3109 static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
3110 				 unsigned nr_args)
3111 {
3112 	__s32 __user *fds = (__s32 __user *) arg;
3113 	int fd, ret = 0;
3114 	unsigned i;
3115 
3116 	if (ctx->user_files)
3117 		return -EBUSY;
3118 	if (!nr_args)
3119 		return -EINVAL;
3120 	if (nr_args > IORING_MAX_FIXED_FILES)
3121 		return -EMFILE;
3122 
3123 	ctx->user_files = kcalloc(nr_args, sizeof(struct file *), GFP_KERNEL);
3124 	if (!ctx->user_files)
3125 		return -ENOMEM;
3126 
3127 	for (i = 0; i < nr_args; i++) {
3128 		ret = -EFAULT;
3129 		if (copy_from_user(&fd, &fds[i], sizeof(fd)))
3130 			break;
3131 
3132 		ctx->user_files[i] = fget(fd);
3133 
3134 		ret = -EBADF;
3135 		if (!ctx->user_files[i])
3136 			break;
3137 		/*
3138 		 * Don't allow io_uring instances to be registered. If UNIX
3139 		 * isn't enabled, then this causes a reference cycle and this
3140 		 * instance can never get freed. If UNIX is enabled we'll
3141 		 * handle it just fine, but there's still no point in allowing
3142 		 * a ring fd as it doesn't support regular read/write anyway.
3143 		 */
3144 		if (ctx->user_files[i]->f_op == &io_uring_fops) {
3145 			fput(ctx->user_files[i]);
3146 			break;
3147 		}
3148 		ctx->nr_user_files++;
3149 		ret = 0;
3150 	}
3151 
3152 	if (ret) {
3153 		for (i = 0; i < ctx->nr_user_files; i++)
3154 			fput(ctx->user_files[i]);
3155 
3156 		kfree(ctx->user_files);
3157 		ctx->user_files = NULL;
3158 		ctx->nr_user_files = 0;
3159 		return ret;
3160 	}
3161 
3162 	return 0;
3163 }
3164 
3165 static int io_sq_offload_start(struct io_ring_ctx *ctx,
3166 			       struct io_uring_params *p)
3167 {
3168 	int ret;
3169 
3170 	mmgrab(current->mm);
3171 	ctx->sqo_mm = current->mm;
3172 
3173 	if (ctx->flags & IORING_SETUP_SQPOLL) {
3174 		ret = -EPERM;
3175 		if (!capable(CAP_SYS_ADMIN))
3176 			goto err;
3177 
3178 		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
3179 		if (!ctx->sq_thread_idle)
3180 			ctx->sq_thread_idle = HZ;
3181 
3182 		if (p->flags & IORING_SETUP_SQ_AFF) {
3183 			int cpu = p->sq_thread_cpu;
3184 
3185 			ret = -EINVAL;
3186 			if (cpu >= nr_cpu_ids)
3187 				goto err;
3188 			if (!cpu_online(cpu))
3189 				goto err;
3190 
3191 			ctx->sqo_thread = kthread_create_on_cpu(io_sq_thread,
3192 							ctx, cpu,
3193 							"io_uring-sq");
3194 		} else {
3195 			ctx->sqo_thread = kthread_create(io_sq_thread, ctx,
3196 							"io_uring-sq");
3197 		}
3198 		if (IS_ERR(ctx->sqo_thread)) {
3199 			ret = PTR_ERR(ctx->sqo_thread);
3200 			ctx->sqo_thread = NULL;
3201 			goto err;
3202 		}
3203 		wake_up_process(ctx->sqo_thread);
3204 	} else if (p->flags & IORING_SETUP_SQ_AFF) {
3205 		/* Can't have SQ_AFF without SQPOLL */
3206 		ret = -EINVAL;
3207 		goto err;
3208 	}
3209 
3210 	/* Do QD, or 2 * CPUS, whatever is smallest */
3211 	ctx->sqo_wq[0] = alloc_workqueue("io_ring-wq",
3212 			WQ_UNBOUND | WQ_FREEZABLE,
3213 			min(ctx->sq_entries - 1, 2 * num_online_cpus()));
3214 	if (!ctx->sqo_wq[0]) {
3215 		ret = -ENOMEM;
3216 		goto err;
3217 	}
3218 
3219 	/*
3220 	 * This is for buffered writes, where we want to limit the parallelism
3221 	 * due to file locking in file systems. As "normal" buffered writes
3222 	 * should parallelize on writeout quite nicely, limit us to having 2
3223 	 * pending. This avoids massive contention on the inode when doing
3224 	 * buffered async writes.
3225 	 */
3226 	ctx->sqo_wq[1] = alloc_workqueue("io_ring-write-wq",
3227 						WQ_UNBOUND | WQ_FREEZABLE, 2);
3228 	if (!ctx->sqo_wq[1]) {
3229 		ret = -ENOMEM;
3230 		goto err;
3231 	}
3232 
3233 	return 0;
3234 err:
3235 	io_finish_async(ctx);
3236 	mmdrop(ctx->sqo_mm);
3237 	ctx->sqo_mm = NULL;
3238 	return ret;
3239 }
3240 
3241 static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages)
3242 {
3243 	atomic_long_sub(nr_pages, &user->locked_vm);
3244 }
3245 
3246 static int io_account_mem(struct user_struct *user, unsigned long nr_pages)
3247 {
3248 	unsigned long page_limit, cur_pages, new_pages;
3249 
3250 	/* Don't allow more pages than we can safely lock */
3251 	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
3252 
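	/*
	 * Lock-free accounting against RLIMIT_MEMLOCK: re-read locked_vm and
	 * retry the cmpxchg if another registration raced with us.
	 */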
3253 	do {
3254 		cur_pages = atomic_long_read(&user->locked_vm);
3255 		new_pages = cur_pages + nr_pages;
3256 		if (new_pages > page_limit)
3257 			return -ENOMEM;
3258 	} while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
3259 					new_pages) != cur_pages);
3260 
3261 	return 0;
3262 }
3263 
3264 static void io_mem_free(void *ptr)
3265 {
3266 	struct page *page;
3267 
3268 	if (!ptr)
3269 		return;
3270 
3271 	page = virt_to_head_page(ptr);
3272 	if (put_page_testzero(page))
3273 		free_compound_page(page);
3274 }
3275 
3276 static void *io_mem_alloc(size_t size)
3277 {
3278 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
3279 				__GFP_NORETRY;
3280 
3281 	return (void *) __get_free_pages(gfp_flags, get_order(size));
3282 }
3283 
3284 static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
3285 				size_t *sq_offset)
3286 {
3287 	struct io_rings *rings;
3288 	size_t off, sq_array_size;
3289 
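	/*
	 * Layout: struct io_rings with its trailing CQE array, then (cache
	 * line aligned on SMP) the array of SQ index entries. *sq_offset
	 * reports where that index array starts within the mapping.
	 */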
3290 	off = struct_size(rings, cqes, cq_entries);
3291 	if (off == SIZE_MAX)
3292 		return SIZE_MAX;
3293 
3294 #ifdef CONFIG_SMP
3295 	off = ALIGN(off, SMP_CACHE_BYTES);
3296 	if (off == 0)
3297 		return SIZE_MAX;
3298 #endif
3299 
3300 	if (sq_offset)
3301 		*sq_offset = off;
3302 
3303 	sq_array_size = array_size(sizeof(u32), sq_entries);
3304 	if (sq_array_size == SIZE_MAX)
3305 		return SIZE_MAX;
3306 
3307 	if (check_add_overflow(off, sq_array_size, &off))
3308 		return SIZE_MAX;
3309 
3310 	return off;
3311 }
3312 
3313 static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
3314 {
3315 	size_t pages;
3316 
3317 	pages = (size_t)1 << get_order(
3318 		rings_size(sq_entries, cq_entries, NULL));
3319 	pages += (size_t)1 << get_order(
3320 		array_size(sizeof(struct io_uring_sqe), sq_entries));
3321 
3322 	return pages;
3323 }
3324 
3325 static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
3326 {
3327 	int i, j;
3328 
3329 	if (!ctx->user_bufs)
3330 		return -ENXIO;
3331 
3332 	for (i = 0; i < ctx->nr_user_bufs; i++) {
3333 		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3334 
3335 		for (j = 0; j < imu->nr_bvecs; j++)
3336 			put_user_page(imu->bvec[j].bv_page);
3337 
3338 		if (ctx->account_mem)
3339 			io_unaccount_mem(ctx->user, imu->nr_bvecs);
3340 		kvfree(imu->bvec);
3341 		imu->nr_bvecs = 0;
3342 	}
3343 
3344 	kfree(ctx->user_bufs);
3345 	ctx->user_bufs = NULL;
3346 	ctx->nr_user_bufs = 0;
3347 	return 0;
3348 }
3349 
3350 static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
3351 		       void __user *arg, unsigned index)
3352 {
3353 	struct iovec __user *src;
3354 
3355 #ifdef CONFIG_COMPAT
3356 	if (ctx->compat) {
3357 		struct compat_iovec __user *ciovs;
3358 		struct compat_iovec ciov;
3359 
3360 		ciovs = (struct compat_iovec __user *) arg;
3361 		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
3362 			return -EFAULT;
3363 
3364 		dst->iov_base = (void __user *) (unsigned long) ciov.iov_base;
3365 		dst->iov_len = ciov.iov_len;
3366 		return 0;
3367 	}
3368 #endif
3369 	src = (struct iovec __user *) arg;
3370 	if (copy_from_user(dst, &src[index], sizeof(*dst)))
3371 		return -EFAULT;
3372 	return 0;
3373 }
3374 
3375 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
3376 				  unsigned nr_args)
3377 {
3378 	struct vm_area_struct **vmas = NULL;
3379 	struct page **pages = NULL;
3380 	int i, j, got_pages = 0;
3381 	int ret = -EINVAL;
3382 
3383 	if (ctx->user_bufs)
3384 		return -EBUSY;
3385 	if (!nr_args || nr_args > UIO_MAXIOV)
3386 		return -EINVAL;
3387 
3388 	ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
3389 					GFP_KERNEL);
3390 	if (!ctx->user_bufs)
3391 		return -ENOMEM;
3392 
3393 	for (i = 0; i < nr_args; i++) {
3394 		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
3395 		unsigned long off, start, end, ubuf;
3396 		int pret, nr_pages;
3397 		struct iovec iov;
3398 		size_t size;
3399 
3400 		ret = io_copy_iov(ctx, &iov, arg, i);
3401 		if (ret)
3402 			goto err;
3403 
3404 		/*
3405 		 * Don't impose further limits on the size and buffer
3406 		 * constraints here; we'll return -EINVAL later when IO is
3407 		 * submitted if they are wrong.
3408 		 */
3409 		ret = -EFAULT;
3410 		if (!iov.iov_base || !iov.iov_len)
3411 			goto err;
3412 
3413 		/* arbitrary limit, but we need something */
3414 		if (iov.iov_len > SZ_1G)
3415 			goto err;
3416 
3417 		ubuf = (unsigned long) iov.iov_base;
3418 		end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
3419 		start = ubuf >> PAGE_SHIFT;
3420 		nr_pages = end - start;
3421 
3422 		if (ctx->account_mem) {
3423 			ret = io_account_mem(ctx->user, nr_pages);
3424 			if (ret)
3425 				goto err;
3426 		}
3427 
3428 		ret = 0;
3429 		if (!pages || nr_pages > got_pages) {
3430 			kvfree(vmas);
3431 			kvfree(pages);
3432 			pages = kvmalloc_array(nr_pages, sizeof(struct page *),
3433 						GFP_KERNEL);
3434 			vmas = kvmalloc_array(nr_pages,
3435 					sizeof(struct vm_area_struct *),
3436 					GFP_KERNEL);
3437 			if (!pages || !vmas) {
3438 				ret = -ENOMEM;
3439 				if (ctx->account_mem)
3440 					io_unaccount_mem(ctx->user, nr_pages);
3441 				goto err;
3442 			}
3443 			got_pages = nr_pages;
3444 		}
3445 
3446 		imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
3447 						GFP_KERNEL);
3448 		ret = -ENOMEM;
3449 		if (!imu->bvec) {
3450 			if (ctx->account_mem)
3451 				io_unaccount_mem(ctx->user, nr_pages);
3452 			goto err;
3453 		}
3454 
3455 		ret = 0;
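		/*
		 * Pin the user pages for the lifetime of the registration.
		 * FOLL_LONGTERM marks this as a long-term pin, and (below)
		 * file-backed mappings other than hugetlbfs are rejected,
		 * since long-term pins of pagecache pages are not supported.
		 */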
3456 		down_read(&current->mm->mmap_sem);
3457 		pret = get_user_pages(ubuf, nr_pages,
3458 				      FOLL_WRITE | FOLL_LONGTERM,
3459 				      pages, vmas);
3460 		if (pret == nr_pages) {
3461 			/* don't support file backed memory */
3462 			for (j = 0; j < nr_pages; j++) {
3463 				struct vm_area_struct *vma = vmas[j];
3464 
3465 				if (vma->vm_file &&
3466 				    !is_file_hugepages(vma->vm_file)) {
3467 					ret = -EOPNOTSUPP;
3468 					break;
3469 				}
3470 			}
3471 		} else {
3472 			ret = pret < 0 ? pret : -EFAULT;
3473 		}
3474 		up_read(&current->mm->mmap_sem);
3475 		if (ret) {
3476 			/*
3477 			 * if we did partial map, or found file backed vmas,
3478 			 * release any pages we did get
3479 			 */
3480 			if (pret > 0)
3481 				put_user_pages(pages, pret);
3482 			if (ctx->account_mem)
3483 				io_unaccount_mem(ctx->user, nr_pages);
3484 			kvfree(imu->bvec);
3485 			goto err;
3486 		}
3487 
3488 		off = ubuf & ~PAGE_MASK;
3489 		size = iov.iov_len;
3490 		for (j = 0; j < nr_pages; j++) {
3491 			size_t vec_len;
3492 
3493 			vec_len = min_t(size_t, size, PAGE_SIZE - off);
3494 			imu->bvec[j].bv_page = pages[j];
3495 			imu->bvec[j].bv_len = vec_len;
3496 			imu->bvec[j].bv_offset = off;
3497 			off = 0;
3498 			size -= vec_len;
3499 		}
3500 		/* store original address for later verification */
3501 		imu->ubuf = ubuf;
3502 		imu->len = iov.iov_len;
3503 		imu->nr_bvecs = nr_pages;
3504 
3505 		ctx->nr_user_bufs++;
3506 	}
3507 	kvfree(pages);
3508 	kvfree(vmas);
3509 	return 0;
3510 err:
3511 	kvfree(pages);
3512 	kvfree(vmas);
3513 	io_sqe_buffer_unregister(ctx);
3514 	return ret;
3515 }
3516 
3517 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
3518 {
3519 	__s32 __user *fds = arg;
3520 	int fd;
3521 
3522 	if (ctx->cq_ev_fd)
3523 		return -EBUSY;
3524 
3525 	if (copy_from_user(&fd, fds, sizeof(*fds)))
3526 		return -EFAULT;
3527 
3528 	ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
3529 	if (IS_ERR(ctx->cq_ev_fd)) {
3530 		int ret = PTR_ERR(ctx->cq_ev_fd);
3531 		ctx->cq_ev_fd = NULL;
3532 		return ret;
3533 	}
3534 
3535 	return 0;
3536 }
3537 
3538 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
3539 {
3540 	if (ctx->cq_ev_fd) {
3541 		eventfd_ctx_put(ctx->cq_ev_fd);
3542 		ctx->cq_ev_fd = NULL;
3543 		return 0;
3544 	}
3545 
3546 	return -ENXIO;
3547 }
3548 
3549 static void io_ring_ctx_free(struct io_ring_ctx *ctx)
3550 {
3551 	io_finish_async(ctx);
3552 	if (ctx->sqo_mm)
3553 		mmdrop(ctx->sqo_mm);
3554 
3555 	io_iopoll_reap_events(ctx);
3556 	io_sqe_buffer_unregister(ctx);
3557 	io_sqe_files_unregister(ctx);
3558 	io_eventfd_unregister(ctx);
3559 
3560 	io_mem_free(ctx->rings);
3561 	io_mem_free(ctx->sq_sqes);
3562 
3563 	percpu_ref_exit(&ctx->refs);
3564 	if (ctx->account_mem)
3565 		io_unaccount_mem(ctx->user,
3566 				ring_pages(ctx->sq_entries, ctx->cq_entries));
3567 	free_uid(ctx->user);
3568 	if (ctx->creds)
3569 		put_cred(ctx->creds);
3570 	kfree(ctx);
3571 }
3572 
3573 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
3574 {
3575 	struct io_ring_ctx *ctx = file->private_data;
3576 	__poll_t mask = 0;
3577 
3578 	poll_wait(file, &ctx->cq_wait, wait);
3579 	/*
3580 	 * synchronizes with barrier from wq_has_sleeper call in
3581 	 * io_commit_cqring
3582 	 */
3583 	smp_rmb();
3584 	if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
3585 	    ctx->rings->sq_ring_entries)
3586 		mask |= EPOLLOUT | EPOLLWRNORM;
3587 	if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
3588 		mask |= EPOLLIN | EPOLLRDNORM;
3589 
3590 	return mask;
3591 }
3592 
3593 static int io_uring_fasync(int fd, struct file *file, int on)
3594 {
3595 	struct io_ring_ctx *ctx = file->private_data;
3596 
3597 	return fasync_helper(fd, file, on, &ctx->cq_fasync);
3598 }
3599 
3600 static void io_cancel_async_work(struct io_ring_ctx *ctx,
3601 				 struct files_struct *files)
3602 {
3603 	struct io_kiocb *req;
3604 
3605 	spin_lock_irq(&ctx->task_lock);
3606 
3607 	list_for_each_entry(req, &ctx->task_list, task_list) {
3608 		if (files && req->files != files)
3609 			continue;
3610 
3611 		/*
3612 		 * The below executes an smp_mb(), which matches with the
3613 		 * smp_mb() (A) in io_sq_wq_submit_work() such that either
3614 		 * we store the REQ_F_CANCEL flag to req->flags or we see
3615 		 * req->work_task set in io_sq_wq_submit_work().
3616 		 */
3617 		smp_store_mb(req->flags, req->flags | REQ_F_CANCEL); /* B */
3618 
3619 		if (req->work_task)
3620 			send_sig(SIGINT, req->work_task, 1);
3621 	}
3622 	spin_unlock_irq(&ctx->task_lock);
3623 }
3624 
3625 static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
3626 {
3627 	mutex_lock(&ctx->uring_lock);
3628 	percpu_ref_kill(&ctx->refs);
3629 	mutex_unlock(&ctx->uring_lock);
3630 
3631 	io_cancel_async_work(ctx, NULL);
3632 	io_kill_timeouts(ctx);
3633 	io_poll_remove_all(ctx);
3634 	io_iopoll_reap_events(ctx);
3635 	wait_for_completion(&ctx->ctx_done);
3636 	io_ring_ctx_free(ctx);
3637 }
3638 
3639 static int io_uring_flush(struct file *file, void *data)
3640 {
3641 	struct io_ring_ctx *ctx = file->private_data;
3642 
3643 	if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
3644 		io_cancel_async_work(ctx, data);
3645 
3646 	return 0;
3647 }
3648 
3649 static int io_uring_release(struct inode *inode, struct file *file)
3650 {
3651 	struct io_ring_ctx *ctx = file->private_data;
3652 
3653 	file->private_data = NULL;
3654 	io_ring_ctx_wait_and_kill(ctx);
3655 	return 0;
3656 }
3657 
3658 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
3659 {
3660 	loff_t offset = (loff_t) vma->vm_pgoff << PAGE_SHIFT;
3661 	unsigned long sz = vma->vm_end - vma->vm_start;
3662 	struct io_ring_ctx *ctx = file->private_data;
3663 	unsigned long pfn;
3664 	struct page *page;
3665 	void *ptr;
3666 
3667 	switch (offset) {
3668 	case IORING_OFF_SQ_RING:
3669 	case IORING_OFF_CQ_RING:
3670 		ptr = ctx->rings;
3671 		break;
3672 	case IORING_OFF_SQES:
3673 		ptr = ctx->sq_sqes;
3674 		break;
3675 	default:
3676 		return -EINVAL;
3677 	}
3678 
3679 	page = virt_to_head_page(ptr);
3680 	if (sz > page_size(page))
3681 		return -EINVAL;
3682 
3683 	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
3684 	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
3685 }
3686 
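/*
 * Editorial addition: a sketch of the matching mmap(2) calls from userspace,
 * assuming "p" is the io_uring_params filled in by io_uring_setup(2). Since
 * this kernel backs both ring offsets with the same pages (and advertises
 * IORING_FEAT_SINGLE_MMAP), one mapping at IORING_OFF_SQ_RING covers the SQ
 * and CQ ring metadata; the SQE array is a second, separate mapping.
 *
 *	#include <sys/mman.h>
 *	#include <linux/io_uring.h>
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
 *	size_t ring_sz = sq_sz > cq_sz ? sq_sz : cq_sz;
 *
 *	void *ring = mmap(NULL, ring_sz, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED | MAP_POPULATE, ring_fd,
 *			  IORING_OFF_SQ_RING);
 *	void *sqes = mmap(NULL, p.sq_entries * sizeof(struct io_uring_sqe),
 *			  PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			  ring_fd, IORING_OFF_SQES);
 */
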
3687 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
3688 		u32, min_complete, u32, flags, const sigset_t __user *, sig,
3689 		size_t, sigsz)
3690 {
3691 	struct io_ring_ctx *ctx;
3692 	long ret = -EBADF;
3693 	int submitted = 0;
3694 	struct fd f;
3695 
3696 	if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP))
3697 		return -EINVAL;
3698 
3699 	f = fdget(fd);
3700 	if (!f.file)
3701 		return -EBADF;
3702 
3703 	ret = -EOPNOTSUPP;
3704 	if (f.file->f_op != &io_uring_fops)
3705 		goto out_fput;
3706 
3707 	ret = -ENXIO;
3708 	ctx = f.file->private_data;
3709 	if (!percpu_ref_tryget(&ctx->refs))
3710 		goto out_fput;
3711 
3712 	/*
3713 	 * For SQ polling, the thread will do all submissions and completions.
3714 	 * Just return the requested submit count, and wake the thread if
3715 	 * we were asked to.
3716 	 */
3717 	ret = 0;
3718 	if (ctx->flags & IORING_SETUP_SQPOLL) {
3719 		if (flags & IORING_ENTER_SQ_WAKEUP)
3720 			wake_up(&ctx->sqo_wait);
3721 		submitted = to_submit;
3722 	} else if (to_submit) {
3723 		to_submit = min(to_submit, ctx->sq_entries);
3724 
3725 		mutex_lock(&ctx->uring_lock);
3726 		submitted = io_ring_submit(ctx, to_submit);
3727 		mutex_unlock(&ctx->uring_lock);
3728 
3729 		if (submitted != to_submit)
3730 			goto out;
3731 	}
3732 	if (flags & IORING_ENTER_GETEVENTS) {
3733 		unsigned nr_events = 0;
3734 
3735 		min_complete = min(min_complete, ctx->cq_entries);
3736 
3737 		if (ctx->flags & IORING_SETUP_IOPOLL) {
3738 			ret = io_iopoll_check(ctx, &nr_events, min_complete);
3739 		} else {
3740 			ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
3741 		}
3742 	}
3743 
3744 out:
3745 	percpu_ref_put(&ctx->refs);
3746 out_fput:
3747 	fdput(f);
3748 	return submitted ? submitted : ret;
3749 }
3750 
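/*
 * Editorial addition: a sketch of the raw syscall from userspace (liburing
 * wraps this), submitting everything queued and waiting for one completion:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/io_uring.h>
 *
 *	int ret = syscall(__NR_io_uring_enter, ring_fd, to_submit,
 *			  1, IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * With IORING_SETUP_SQPOLL, the call is only needed to wake the poll thread
 * (IORING_ENTER_SQ_WAKEUP) or to wait for completions, per the comment in
 * the function above.
 */
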
3751 static const struct file_operations io_uring_fops = {
3752 	.release	= io_uring_release,
3753 	.flush		= io_uring_flush,
3754 	.mmap		= io_uring_mmap,
3755 	.poll		= io_uring_poll,
3756 	.fasync		= io_uring_fasync,
3757 };
3758 
3759 bool io_is_uring_fops(struct file *file)
3760 {
3761 	return file->f_op == &io_uring_fops;
3762 }
3763 
3764 static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
3765 				  struct io_uring_params *p)
3766 {
3767 	struct io_rings *rings;
3768 	size_t size, sq_array_offset;
3769 
3770 	/* make sure these are sane, as we already accounted them */
3771 	ctx->sq_entries = p->sq_entries;
3772 	ctx->cq_entries = p->cq_entries;
3773 
3774 	size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
3775 	if (size == SIZE_MAX)
3776 		return -EOVERFLOW;
3777 
3778 	rings = io_mem_alloc(size);
3779 	if (!rings)
3780 		return -ENOMEM;
3781 
3782 	ctx->rings = rings;
3783 	ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
3784 	rings->sq_ring_mask = p->sq_entries - 1;
3785 	rings->cq_ring_mask = p->cq_entries - 1;
3786 	rings->sq_ring_entries = p->sq_entries;
3787 	rings->cq_ring_entries = p->cq_entries;
3788 	ctx->sq_mask = rings->sq_ring_mask;
3789 	ctx->cq_mask = rings->cq_ring_mask;
3790 
3791 	size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
3792 	if (size == SIZE_MAX) {
3793 		io_mem_free(ctx->rings);
3794 		ctx->rings = NULL;
3795 		return -EOVERFLOW;
3796 	}
3797 
3798 	ctx->sq_sqes = io_mem_alloc(size);
3799 	if (!ctx->sq_sqes) {
3800 		io_mem_free(ctx->rings);
3801 		ctx->rings = NULL;
3802 		return -ENOMEM;
3803 	}
3804 
3805 	return 0;
3806 }
3807 
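/*
 * Editorial addition: since both rings are sized to a power of two, head and
 * tail are free-running u32 counters and an index is derived by masking. A
 * userspace submission sketch, with the pointer names assumed to have been
 * derived from the mmap offsets and fill_sqe() a hypothetical helper:
 *
 *	unsigned tail = *sq_tail;		// app is the only tail writer
 *	unsigned idx = tail & *sq_mask;
 *
 *	fill_sqe(&sqes[idx]);
 *	sq_array[idx] = idx;			// point the slot at sqes[idx]
 *	__atomic_store_n(sq_tail, tail + 1, __ATOMIC_RELEASE);
 */
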
3808 /*
3809  * Allocate an anonymous fd; this is what constitutes the application-
3810  * visible backing of an io_uring instance. The application mmaps this
3811  * fd to gain access to the SQ/CQ ring details.
3812  */
3813 static int io_uring_get_fd(struct io_ring_ctx *ctx)
3814 {
3815 	struct file *file;
3816 	int ret;
3817 
3818 	ret = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
3819 	if (ret < 0)
3820 		return ret;
3821 
3822 	file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
3823 					O_RDWR | O_CLOEXEC);
3824 	if (IS_ERR(file)) {
3825 		put_unused_fd(ret);
3826 		return PTR_ERR(file);
3827 	}
3828 
3829 	fd_install(ret, file);
3830 	return ret;
3831 }
3832 
3833 static int io_uring_create(unsigned entries, struct io_uring_params *p)
3834 {
3835 	struct user_struct *user = NULL;
3836 	struct io_ring_ctx *ctx;
3837 	bool account_mem;
3838 	int ret;
3839 
3840 	if (!entries || entries > IORING_MAX_ENTRIES)
3841 		return -EINVAL;
3842 
3843 	/*
3844 	 * Use twice as many entries for the CQ ring. It's possible for the
3845 	 * application to drive a higher depth than the size of the SQ ring,
3846 	 * since the sqes are only used at submission time. This allows some
3847 	 * flexibility to overcommit a bit.
3848 	 */
3849 	p->sq_entries = roundup_pow_of_two(entries);
3850 	p->cq_entries = 2 * p->sq_entries;
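	/*
	 * Editorial example: a request for entries == 100 is rounded up to
	 * sq_entries == 128, giving cq_entries == 256.
	 */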
3851 
3852 	user = get_uid(current_user());
3853 	account_mem = !capable(CAP_IPC_LOCK);
3854 
3855 	if (account_mem) {
3856 		ret = io_account_mem(user,
3857 				ring_pages(p->sq_entries, p->cq_entries));
3858 		if (ret) {
3859 			free_uid(user);
3860 			return ret;
3861 		}
3862 	}
3863 
3864 	ctx = io_ring_ctx_alloc(p);
3865 	if (!ctx) {
3866 		if (account_mem)
3867 			io_unaccount_mem(user, ring_pages(p->sq_entries,
3868 								p->cq_entries));
3869 		free_uid(user);
3870 		return -ENOMEM;
3871 	}
3872 	ctx->compat = in_compat_syscall();
3873 	ctx->account_mem = account_mem;
3874 	ctx->user = user;
3875 
3876 	ctx->creds = get_current_cred();
3877 	if (!ctx->creds) {
3878 		ret = -ENOMEM;
3879 		goto err;
3880 	}
3881 
3882 	ret = io_allocate_scq_urings(ctx, p);
3883 	if (ret)
3884 		goto err;
3885 
3886 	ret = io_sq_offload_start(ctx, p);
3887 	if (ret)
3888 		goto err;
3889 
3890 	memset(&p->sq_off, 0, sizeof(p->sq_off));
3891 	p->sq_off.head = offsetof(struct io_rings, sq.head);
3892 	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
3893 	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
3894 	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
3895 	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
3896 	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
3897 	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
3898 
3899 	memset(&p->cq_off, 0, sizeof(p->cq_off));
3900 	p->cq_off.head = offsetof(struct io_rings, cq.head);
3901 	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
3902 	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
3903 	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
3904 	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
3905 	p->cq_off.cqes = offsetof(struct io_rings, cqes);
3906 
3907 	/*
3908 	 * Install the ring fd as the very last thing, so we don't risk
3909 	 * someone closing it before we finish setup.
3910 	 */
3911 	ret = io_uring_get_fd(ctx);
3912 	if (ret < 0)
3913 		goto err;
3914 
3915 	p->features = IORING_FEAT_SINGLE_MMAP;
3916 	return ret;
3917 err:
3918 	io_ring_ctx_wait_and_kill(ctx);
3919 	return ret;
3920 }
3921 
3922 /*
3923  * Sets up an aio uring context and returns the fd. The application asks for
3924  * a ring size; we return the actual sq/cq ring sizes (among other things) in
3925  * the params structure passed in.
3926  */
3927 static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
3928 {
3929 	struct io_uring_params p;
3930 	long ret;
3931 	int i;
3932 
3933 	if (copy_from_user(&p, params, sizeof(p)))
3934 		return -EFAULT;
3935 	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
3936 		if (p.resv[i])
3937 			return -EINVAL;
3938 	}
3939 
3940 	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
3941 			IORING_SETUP_SQ_AFF))
3942 		return -EINVAL;
3943 
3944 	ret = io_uring_create(entries, &p);
3945 	if (ret < 0)
3946 		return ret;
3947 
3948 	if (copy_to_user(params, &p, sizeof(p)))
3949 		return -EFAULT;
3950 
3951 	return ret;
3952 }
3953 
3954 SYSCALL_DEFINE2(io_uring_setup, u32, entries,
3955 		struct io_uring_params __user *, params)
3956 {
3957 	return io_uring_setup(entries, params);
3958 }
3959 
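/*
 * Editorial addition: a minimal userspace sketch of ring creation, error
 * handling omitted. The resv fields must be zero or setup fails with
 * -EINVAL, and the rounded-up geometry is reported back through p:
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/io_uring.h>
 *
 *	struct io_uring_params p;
 *	int ring_fd;
 *
 *	memset(&p, 0, sizeof(p));
 *	ring_fd = syscall(__NR_io_uring_setup, 100, &p);
 *	// on success: p.sq_entries == 128, p.cq_entries == 256
 */
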
3960 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
3961 			       void __user *arg, unsigned nr_args)
3962 	__releases(ctx->uring_lock)
3963 	__acquires(ctx->uring_lock)
3964 {
3965 	int ret;
3966 
3967 	/*
3968 	 * We're inside the ring mutex, if the ref is already dying, then
3969 	 * someone else killed the ctx or is already going through
3970 	 * io_uring_register().
3971 	 */
3972 	if (percpu_ref_is_dying(&ctx->refs))
3973 		return -ENXIO;
3974 
3975 	percpu_ref_kill(&ctx->refs);
3976 
3977 	/*
3978 	 * Drop uring mutex before waiting for references to exit. If another
3979 	 * thread is currently inside io_uring_enter() it might need to grab
3980 	 * the uring_lock to make progress. If we hold it here across the drain
3981 	 * wait, then we can deadlock. It's safe to drop the mutex here, since
3982 	 * no new references will come in after we've killed the percpu ref.
3983 	 */
3984 	mutex_unlock(&ctx->uring_lock);
3985 	wait_for_completion(&ctx->ctx_done);
3986 	mutex_lock(&ctx->uring_lock);
3987 
3988 	switch (opcode) {
3989 	case IORING_REGISTER_BUFFERS:
3990 		ret = io_sqe_buffer_register(ctx, arg, nr_args);
3991 		break;
3992 	case IORING_UNREGISTER_BUFFERS:
3993 		ret = -EINVAL;
3994 		if (arg || nr_args)
3995 			break;
3996 		ret = io_sqe_buffer_unregister(ctx);
3997 		break;
3998 	case IORING_REGISTER_FILES:
3999 		ret = io_sqe_files_register(ctx, arg, nr_args);
4000 		break;
4001 	case IORING_UNREGISTER_FILES:
4002 		ret = -EINVAL;
4003 		if (arg || nr_args)
4004 			break;
4005 		ret = io_sqe_files_unregister(ctx);
4006 		break;
4007 	case IORING_REGISTER_EVENTFD:
4008 		ret = -EINVAL;
4009 		if (nr_args != 1)
4010 			break;
4011 		ret = io_eventfd_register(ctx, arg);
4012 		break;
4013 	case IORING_UNREGISTER_EVENTFD:
4014 		ret = -EINVAL;
4015 		if (arg || nr_args)
4016 			break;
4017 		ret = io_eventfd_unregister(ctx);
4018 		break;
4019 	default:
4020 		ret = -EINVAL;
4021 		break;
4022 	}
4023 
4024 	/* bring the ctx back to life */
4025 	reinit_completion(&ctx->ctx_done);
4026 	percpu_ref_reinit(&ctx->refs);
4027 	return ret;
4028 }
4029 
4030 SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
4031 		void __user *, arg, unsigned int, nr_args)
4032 {
4033 	struct io_ring_ctx *ctx;
4034 	long ret = -EBADF;
4035 	struct fd f;
4036 
4037 	f = fdget(fd);
4038 	if (!f.file)
4039 		return -EBADF;
4040 
4041 	ret = -EOPNOTSUPP;
4042 	if (f.file->f_op != &io_uring_fops)
4043 		goto out_fput;
4044 
4045 	ctx = f.file->private_data;
4046 
4047 	mutex_lock(&ctx->uring_lock);
4048 	ret = __io_uring_register(ctx, opcode, arg, nr_args);
4049 	mutex_unlock(&ctx->uring_lock);
4050 out_fput:
4051 	fdput(f);
4052 	return ret;
4053 }
4054 
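/*
 * Editorial addition: a userspace sketch of registering one fixed buffer so
 * that fixed reads/writes can skip per-I/O page pinning; "buf" and "buf_len"
 * are assumed to describe an existing writable mapping:
 *
 *	#include <unistd.h>
 *	#include <sys/uio.h>
 *	#include <sys/syscall.h>
 *	#include <linux/io_uring.h>
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = buf_len };
 *	int ret = syscall(__NR_io_uring_register, ring_fd,
 *			  IORING_REGISTER_BUFFERS, &iov, 1);
 */
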
4055 static int __init io_uring_init(void)
4056 {
4057 	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
4058 	return 0;
4059 }
4060 __initcall(io_uring_init);
4061