1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Shared application/kernel submission and completion ring pairs, for
4  * supporting fast/efficient IO.
5  *
6  * A note on the read/write ordering memory barriers that are matched between
7  * the application and kernel side.
8  *
9  * After the application reads the CQ ring tail, it must use an
10  * appropriate smp_rmb() to pair with the smp_wmb() the kernel uses
11  * before writing the tail (using smp_load_acquire to read the tail will
12  * do). It also needs a smp_mb() before updating CQ head (ordering the
13  * entry load(s) with the head store), pairing with an implicit barrier
14  * through a control-dependency in io_get_cqring (smp_store_release to
15  * store head will do). Failure to do so could lead to reading invalid
16  * CQ entries.
17  *
18  * Likewise, the application must use an appropriate smp_wmb() before
19  * writing the SQ tail (ordering SQ entry stores with the tail store),
20  * which pairs with smp_load_acquire in io_get_sqring (smp_store_release
21  * to store the tail will do). And it needs a barrier ordering the SQ
22  * head load before writing new SQ entries (smp_load_acquire to read
23  * head will do).
24  *
25  * When using the SQ poll thread (IORING_SETUP_SQPOLL), the application
26  * needs to check the SQ flags for IORING_SQ_NEED_WAKEUP *after*
27  * updating the SQ tail; a full memory barrier smp_mb() is needed
28  * between.
29  *
30  * Also see the examples in the liburing library:
31  *
32  *	git://git.kernel.dk/liburing
33  *
34  * io_uring also uses READ/WRITE_ONCE() for _any_ store or load that happens
35  * from data shared between the kernel and application. This is done both
36  * for ordering purposes and to ensure that once a value is loaded from
37  * data that the application could potentially modify, it remains stable.
38  *
39  * Copyright (C) 2018-2019 Jens Axboe
40  * Copyright (c) 2018-2019 Christoph Hellwig
41  */
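/*
 * Illustrative sketch (not kernel code): a userspace consumer reaping CQEs
 * with the acquire/release pairing described above. The names cq_head,
 * cq_tail, cq_mask, cqes and handle_cqe() are assumptions for this example
 * only; see liburing for a complete implementation.
 *
 *	unsigned head = *cq_head;                  // only the app writes CQ head
 *	unsigned tail = smp_load_acquire(cq_tail); // pairs with the kernel's tail store
 *
 *	while (head != tail) {
 *		struct io_uring_cqe *cqe = &cqes[head & *cq_mask];
 *		handle_cqe(cqe);
 *		head++;
 *	}
 *	smp_store_release(cq_head, head);          // orders entry loads before the head store
 */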
42 #include <linux/kernel.h>
43 #include <linux/init.h>
44 #include <linux/errno.h>
45 #include <linux/syscalls.h>
46 #include <linux/compat.h>
47 #include <net/compat.h>
48 #include <linux/refcount.h>
49 #include <linux/uio.h>
50 #include <linux/bits.h>
51 
52 #include <linux/sched/signal.h>
53 #include <linux/fs.h>
54 #include <linux/file.h>
55 #include <linux/fdtable.h>
56 #include <linux/mm.h>
57 #include <linux/mman.h>
58 #include <linux/percpu.h>
59 #include <linux/slab.h>
60 #include <linux/kthread.h>
61 #include <linux/blkdev.h>
62 #include <linux/bvec.h>
63 #include <linux/net.h>
64 #include <net/sock.h>
65 #include <net/af_unix.h>
66 #include <net/scm.h>
67 #include <linux/anon_inodes.h>
68 #include <linux/sched/mm.h>
69 #include <linux/uaccess.h>
70 #include <linux/nospec.h>
71 #include <linux/sizes.h>
72 #include <linux/hugetlb.h>
73 #include <linux/highmem.h>
74 #include <linux/namei.h>
75 #include <linux/fsnotify.h>
76 #include <linux/fadvise.h>
77 #include <linux/eventpoll.h>
78 #include <linux/fs_struct.h>
79 #include <linux/splice.h>
80 #include <linux/task_work.h>
81 #include <linux/pagemap.h>
82 #include <linux/io_uring.h>
83 #include <linux/blk-cgroup.h>
84 #include <linux/audit.h>
85 
86 #define CREATE_TRACE_POINTS
87 #include <trace/events/io_uring.h>
88 
89 #include <uapi/linux/io_uring.h>
90 
91 #include "internal.h"
92 #include "io-wq.h"
93 
94 #define IORING_MAX_ENTRIES	32768
95 #define IORING_MAX_CQ_ENTRIES	(2 * IORING_MAX_ENTRIES)
96 
97 /*
98  * Shift of 9 is 512 entries, or exactly one page on 64-bit archs
99  */
100 #define IORING_FILE_TABLE_SHIFT	9
101 #define IORING_MAX_FILES_TABLE	(1U << IORING_FILE_TABLE_SHIFT)
102 #define IORING_FILE_TABLE_MASK	(IORING_MAX_FILES_TABLE - 1)
103 #define IORING_MAX_FIXED_FILES	(64 * IORING_MAX_FILES_TABLE)
104 #define IORING_MAX_RESTRICTIONS	(IORING_RESTRICTION_LAST + \
105 				 IORING_REGISTER_LAST + IORING_OP_LAST)
106 
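/*
 * Illustrative sketch: the two-level lookup implied by the shift/mask
 * constants above. The helper name is hypothetical; the real lookup lives
 * further down in this file.
 *
 *	static struct file *example_fixed_file(struct fixed_file_data *data,
 *					       unsigned int i)
 *	{
 *		struct fixed_file_table *table;
 *
 *		table = &data->table[i >> IORING_FILE_TABLE_SHIFT];
 *		return table->files[i & IORING_FILE_TABLE_MASK];
 *	}
 */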
107 struct io_uring {
108 	u32 head ____cacheline_aligned_in_smp;
109 	u32 tail ____cacheline_aligned_in_smp;
110 };
111 
112 /*
113  * This data is shared with the application through the mmap at offsets
114  * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
115  *
116  * The offsets to the member fields are published through struct
117  * io_sqring_offsets when calling io_uring_setup.
118  */
119 struct io_rings {
120 	/*
121 	 * Head and tail offsets into the ring; the offsets need to be
122 	 * masked to get valid indices.
123 	 *
124 	 * The kernel controls head of the sq ring and the tail of the cq ring,
125 	 * and the application controls tail of the sq ring and the head of the
126 	 * cq ring.
127 	 */
128 	struct io_uring		sq, cq;
129 	/*
130 	 * Bitmasks to apply to head and tail offsets (constant, equals
131 	 * ring_entries - 1)
132 	 */
133 	u32			sq_ring_mask, cq_ring_mask;
134 	/* Ring sizes (constant, power of 2) */
135 	u32			sq_ring_entries, cq_ring_entries;
136 	/*
 137 	 * Number of invalid entries dropped by the kernel due to an
 138 	 * invalid index stored in the array.
139 	 *
140 	 * Written by the kernel, shouldn't be modified by the
141 	 * application (i.e. get number of "new events" by comparing to
142 	 * cached value).
143 	 *
 144 	 * After a new SQ head value was read by the application, this
145 	 * counter includes all submissions that were dropped reaching
146 	 * the new SQ head (and possibly more).
147 	 */
148 	u32			sq_dropped;
149 	/*
150 	 * Runtime SQ flags
151 	 *
152 	 * Written by the kernel, shouldn't be modified by the
153 	 * application.
154 	 *
155 	 * The application needs a full memory barrier before checking
156 	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
157 	 */
158 	u32			sq_flags;
159 	/*
160 	 * Runtime CQ flags
161 	 *
162 	 * Written by the application, shouldn't be modified by the
163 	 * kernel.
164 	 */
165 	u32                     cq_flags;
166 	/*
167 	 * Number of completion events lost because the queue was full;
168 	 * this should be avoided by the application by making sure
169 	 * there are not more requests pending than there is space in
170 	 * the completion queue.
171 	 *
172 	 * Written by the kernel, shouldn't be modified by the
173 	 * application (i.e. get number of "new events" by comparing to
174 	 * cached value).
175 	 *
176 	 * As completion events come in out of order this counter is not
177 	 * ordered with any other data.
178 	 */
179 	u32			cq_overflow;
180 	/*
181 	 * Ring buffer of completion events.
182 	 *
183 	 * The kernel writes completion events fresh every time they are
184 	 * produced, so the application is allowed to modify pending
185 	 * entries.
186 	 */
187 	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
188 };
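/*
 * Illustrative sketch: the masks above turn the free-running head/tail
 * counters into ring indices, e.g.
 *
 *	cqe = &rings->cqes[rings->cq.head & rings->cq_ring_mask];
 *
 * With cq_ring_entries == 8 the mask is 7, so head values 6, 7, 8, 9 map to
 * slots 6, 7, 0, 1.
 */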
189 
190 struct io_mapped_ubuf {
191 	u64		ubuf;
192 	size_t		len;
193 	struct		bio_vec *bvec;
194 	unsigned int	nr_bvecs;
195 	unsigned long	acct_pages;
196 };
197 
198 struct fixed_file_table {
199 	struct file		**files;
200 };
201 
202 struct fixed_file_ref_node {
203 	struct percpu_ref		refs;
204 	struct list_head		node;
205 	struct list_head		file_list;
206 	struct fixed_file_data		*file_data;
207 	struct llist_node		llist;
208 	bool				done;
209 };
210 
211 struct fixed_file_data {
212 	struct fixed_file_table		*table;
213 	struct io_ring_ctx		*ctx;
214 
215 	struct fixed_file_ref_node	*node;
216 	struct percpu_ref		refs;
217 	struct completion		done;
218 	struct list_head		ref_list;
219 	spinlock_t			lock;
220 };
221 
222 struct io_buffer {
223 	struct list_head list;
224 	__u64 addr;
225 	__u32 len;
226 	__u16 bid;
227 };
228 
229 struct io_restriction {
230 	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
231 	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
232 	u8 sqe_flags_allowed;
233 	u8 sqe_flags_required;
234 	bool registered;
235 };
236 
237 struct io_sq_data {
238 	refcount_t		refs;
239 	struct mutex		lock;
240 
241 	/* ctx's that are using this sqd */
242 	struct list_head	ctx_list;
243 	struct list_head	ctx_new_list;
244 	struct mutex		ctx_lock;
245 
246 	struct task_struct	*thread;
247 	struct wait_queue_head	wait;
248 };
249 
250 struct io_ring_ctx {
251 	struct {
252 		struct percpu_ref	refs;
253 	} ____cacheline_aligned_in_smp;
254 
255 	struct {
256 		unsigned int		flags;
257 		unsigned int		compat: 1;
258 		unsigned int		limit_mem: 1;
259 		unsigned int		cq_overflow_flushed: 1;
260 		unsigned int		drain_next: 1;
261 		unsigned int		eventfd_async: 1;
262 		unsigned int		restricted: 1;
263 		unsigned int		sqo_dead: 1;
264 
265 		/*
266 		 * Ring buffer of indices into array of io_uring_sqe, which is
267 		 * mmapped by the application using the IORING_OFF_SQES offset.
268 		 *
269 		 * This indirection could e.g. be used to assign fixed
270 		 * io_uring_sqe entries to operations and only submit them to
271 		 * the queue when needed.
272 		 *
273 		 * The kernel modifies neither the indices array nor the entries
274 		 * array.
275 		 */
276 		u32			*sq_array;
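		/*
		 * Illustrative sketch of the indirection (simplified from the
		 * submission path later in this file):
		 *
		 *	idx = READ_ONCE(sq_array[cached_sq_head & sq_mask]);
		 *	sqe = &sq_sqes[idx];
		 */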
277 		unsigned		cached_sq_head;
278 		unsigned		sq_entries;
279 		unsigned		sq_mask;
280 		unsigned		sq_thread_idle;
281 		unsigned		cached_sq_dropped;
282 		unsigned		cached_cq_overflow;
283 		unsigned long		sq_check_overflow;
284 
285 		struct list_head	defer_list;
286 		struct list_head	timeout_list;
287 		struct list_head	cq_overflow_list;
288 
289 		struct io_uring_sqe	*sq_sqes;
290 	} ____cacheline_aligned_in_smp;
291 
292 	struct io_rings	*rings;
293 
294 	/* IO offload */
295 	struct io_wq		*io_wq;
296 
297 	/*
298 	 * For SQPOLL usage - we hold a reference to the parent task, so we
299 	 * have access to the ->files
300 	 */
301 	struct task_struct	*sqo_task;
302 
303 	/* Only used for accounting purposes */
304 	struct mm_struct	*mm_account;
305 
306 #ifdef CONFIG_BLK_CGROUP
307 	struct cgroup_subsys_state	*sqo_blkcg_css;
308 #endif
309 
310 	struct io_sq_data	*sq_data;	/* if using sq thread polling */
311 
312 	struct wait_queue_head	sqo_sq_wait;
313 	struct wait_queue_entry	sqo_wait_entry;
314 	struct list_head	sqd_list;
315 
316 	/*
317 	 * If used, fixed file set. Writers must ensure that ->refs is dead,
318 	 * readers must ensure that ->refs is alive as long as the file* is
319 	 * used. Only updated through io_uring_register(2).
320 	 */
321 	struct fixed_file_data	*file_data;
322 	unsigned		nr_user_files;
323 
324 	/* if used, fixed mapped user buffers */
325 	unsigned		nr_user_bufs;
326 	struct io_mapped_ubuf	*user_bufs;
327 
328 	struct user_struct	*user;
329 
330 	const struct cred	*creds;
331 
332 #ifdef CONFIG_AUDIT
333 	kuid_t			loginuid;
334 	unsigned int		sessionid;
335 #endif
336 
337 	struct completion	ref_comp;
338 	struct completion	sq_thread_comp;
339 
340 	/* if all else fails... */
341 	struct io_kiocb		*fallback_req;
342 
343 #if defined(CONFIG_UNIX)
344 	struct socket		*ring_sock;
345 #endif
346 
347 	struct xarray		io_buffers;
348 
349 	struct xarray		personalities;
350 	u32			pers_next;
351 
352 	struct {
353 		unsigned		cached_cq_tail;
354 		unsigned		cq_entries;
355 		unsigned		cq_mask;
356 		atomic_t		cq_timeouts;
357 		unsigned		cq_last_tm_flush;
358 		unsigned long		cq_check_overflow;
359 		struct wait_queue_head	cq_wait;
360 		struct fasync_struct	*cq_fasync;
361 		struct eventfd_ctx	*cq_ev_fd;
362 	} ____cacheline_aligned_in_smp;
363 
364 	struct {
365 		struct mutex		uring_lock;
366 		wait_queue_head_t	wait;
367 	} ____cacheline_aligned_in_smp;
368 
369 	struct {
370 		spinlock_t		completion_lock;
371 
372 		/*
373 		 * ->iopoll_list is protected by the ctx->uring_lock for
374 		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
375 		 * For SQPOLL, only the single threaded io_sq_thread() will
376 		 * manipulate the list, hence no extra locking is needed there.
377 		 */
378 		struct list_head	iopoll_list;
379 		struct hlist_head	*cancel_hash;
380 		unsigned		cancel_hash_bits;
381 		bool			poll_multi_file;
382 
383 		spinlock_t		inflight_lock;
384 		struct list_head	inflight_list;
385 	} ____cacheline_aligned_in_smp;
386 
387 	struct delayed_work		file_put_work;
388 	struct llist_head		file_put_llist;
389 
390 	struct work_struct		exit_work;
391 	struct io_restriction		restrictions;
392 };
393 
394 /*
395  * First field must be the file pointer in all the
396  * iocb unions! See also 'struct kiocb' in <linux/fs.h>
397  */
398 struct io_poll_iocb {
399 	struct file			*file;
400 	union {
401 		struct wait_queue_head	*head;
402 		u64			addr;
403 	};
404 	__poll_t			events;
405 	bool				done;
406 	bool				canceled;
407 	struct wait_queue_entry		wait;
408 };
409 
410 struct io_close {
411 	struct file			*file;
412 	struct file			*put_file;
413 	int				fd;
414 };
415 
416 struct io_timeout_data {
417 	struct io_kiocb			*req;
418 	struct hrtimer			timer;
419 	struct timespec64		ts;
420 	enum hrtimer_mode		mode;
421 };
422 
423 struct io_accept {
424 	struct file			*file;
425 	struct sockaddr __user		*addr;
426 	int __user			*addr_len;
427 	int				flags;
428 	unsigned long			nofile;
429 };
430 
431 struct io_sync {
432 	struct file			*file;
433 	loff_t				len;
434 	loff_t				off;
435 	int				flags;
436 	int				mode;
437 };
438 
439 struct io_cancel {
440 	struct file			*file;
441 	u64				addr;
442 };
443 
444 struct io_timeout {
445 	struct file			*file;
446 	u32				off;
447 	u32				target_seq;
448 	struct list_head		list;
449 };
450 
451 struct io_timeout_rem {
452 	struct file			*file;
453 	u64				addr;
454 };
455 
456 struct io_rw {
457 	/* NOTE: kiocb has the file as the first member, so don't do it here */
458 	struct kiocb			kiocb;
459 	u64				addr;
460 	u64				len;
461 };
462 
463 struct io_connect {
464 	struct file			*file;
465 	struct sockaddr __user		*addr;
466 	int				addr_len;
467 };
468 
469 struct io_sr_msg {
470 	struct file			*file;
471 	union {
472 		struct user_msghdr __user *umsg;
473 		void __user		*buf;
474 	};
475 	int				msg_flags;
476 	int				bgid;
477 	size_t				len;
478 	struct io_buffer		*kbuf;
479 };
480 
481 struct io_open {
482 	struct file			*file;
483 	int				dfd;
484 	bool				ignore_nonblock;
485 	struct filename			*filename;
486 	struct open_how			how;
487 	unsigned long			nofile;
488 };
489 
490 struct io_files_update {
491 	struct file			*file;
492 	u64				arg;
493 	u32				nr_args;
494 	u32				offset;
495 };
496 
497 struct io_fadvise {
498 	struct file			*file;
499 	u64				offset;
500 	u32				len;
501 	u32				advice;
502 };
503 
504 struct io_madvise {
505 	struct file			*file;
506 	u64				addr;
507 	u32				len;
508 	u32				advice;
509 };
510 
511 struct io_epoll {
512 	struct file			*file;
513 	int				epfd;
514 	int				op;
515 	int				fd;
516 	struct epoll_event		event;
517 };
518 
519 struct io_splice {
520 	struct file			*file_out;
521 	struct file			*file_in;
522 	loff_t				off_out;
523 	loff_t				off_in;
524 	u64				len;
525 	unsigned int			flags;
526 };
527 
528 struct io_provide_buf {
529 	struct file			*file;
530 	__u64				addr;
531 	__u32				len;
532 	__u32				bgid;
533 	__u16				nbufs;
534 	__u16				bid;
535 };
536 
537 struct io_statx {
538 	struct file			*file;
539 	int				dfd;
540 	unsigned int			mask;
541 	unsigned int			flags;
542 	const char __user		*filename;
543 	struct statx __user		*buffer;
544 };
545 
546 struct io_completion {
547 	struct file			*file;
548 	struct list_head		list;
549 	u32				cflags;
550 };
551 
552 struct io_async_connect {
553 	struct sockaddr_storage		address;
554 };
555 
556 struct io_async_msghdr {
557 	struct iovec			fast_iov[UIO_FASTIOV];
558 	struct iovec			*iov;
559 	struct sockaddr __user		*uaddr;
560 	struct msghdr			msg;
561 	struct sockaddr_storage		addr;
562 };
563 
564 struct io_async_rw {
565 	struct iovec			fast_iov[UIO_FASTIOV];
566 	const struct iovec		*free_iovec;
567 	struct iov_iter			iter;
568 	size_t				bytes_done;
569 	struct wait_page_queue		wpq;
570 };
571 
572 enum {
573 	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
574 	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
575 	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
576 	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
577 	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
578 	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,
579 
580 	REQ_F_LINK_HEAD_BIT,
581 	REQ_F_FAIL_LINK_BIT,
582 	REQ_F_INFLIGHT_BIT,
583 	REQ_F_CUR_POS_BIT,
584 	REQ_F_NOWAIT_BIT,
585 	REQ_F_LINK_TIMEOUT_BIT,
586 	REQ_F_ISREG_BIT,
587 	REQ_F_NEED_CLEANUP_BIT,
588 	REQ_F_POLLED_BIT,
589 	REQ_F_BUFFER_SELECTED_BIT,
590 	REQ_F_NO_FILE_TABLE_BIT,
591 	REQ_F_WORK_INITIALIZED_BIT,
592 	REQ_F_LTIMEOUT_ACTIVE_BIT,
593 
594 	/* not a real bit, just to check we're not overflowing the space */
595 	__REQ_F_LAST_BIT,
596 };
597 
598 enum {
599 	/* ctx owns file */
600 	REQ_F_FIXED_FILE	= BIT(REQ_F_FIXED_FILE_BIT),
601 	/* drain existing IO first */
602 	REQ_F_IO_DRAIN		= BIT(REQ_F_IO_DRAIN_BIT),
603 	/* linked sqes */
604 	REQ_F_LINK		= BIT(REQ_F_LINK_BIT),
605 	/* doesn't sever on completion < 0 */
606 	REQ_F_HARDLINK		= BIT(REQ_F_HARDLINK_BIT),
607 	/* IOSQE_ASYNC */
608 	REQ_F_FORCE_ASYNC	= BIT(REQ_F_FORCE_ASYNC_BIT),
609 	/* IOSQE_BUFFER_SELECT */
610 	REQ_F_BUFFER_SELECT	= BIT(REQ_F_BUFFER_SELECT_BIT),
611 
612 	/* head of a link */
613 	REQ_F_LINK_HEAD		= BIT(REQ_F_LINK_HEAD_BIT),
614 	/* fail rest of links */
615 	REQ_F_FAIL_LINK		= BIT(REQ_F_FAIL_LINK_BIT),
616 	/* on inflight list */
617 	REQ_F_INFLIGHT		= BIT(REQ_F_INFLIGHT_BIT),
618 	/* read/write uses file position */
619 	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
620 	/* must not punt to workers */
621 	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
622 	/* has or had linked timeout */
623 	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
624 	/* regular file */
625 	REQ_F_ISREG		= BIT(REQ_F_ISREG_BIT),
626 	/* needs cleanup */
627 	REQ_F_NEED_CLEANUP	= BIT(REQ_F_NEED_CLEANUP_BIT),
628 	/* already went through poll handler */
629 	REQ_F_POLLED		= BIT(REQ_F_POLLED_BIT),
630 	/* buffer already selected */
631 	REQ_F_BUFFER_SELECTED	= BIT(REQ_F_BUFFER_SELECTED_BIT),
632 	/* doesn't need file table for this request */
633 	REQ_F_NO_FILE_TABLE	= BIT(REQ_F_NO_FILE_TABLE_BIT),
634 	/* io_wq_work is initialized */
635 	REQ_F_WORK_INITIALIZED	= BIT(REQ_F_WORK_INITIALIZED_BIT),
636 	/* linked timeout is active, i.e. prepared by link's head */
637 	REQ_F_LTIMEOUT_ACTIVE	= BIT(REQ_F_LTIMEOUT_ACTIVE_BIT),
638 };
639 
640 struct async_poll {
641 	struct io_poll_iocb	poll;
642 	struct io_poll_iocb	*double_poll;
643 };
644 
645 /*
646  * NOTE! Each of the iocb union members has the file pointer
647  * as the first entry in their struct definition. So you can
648  * access the file pointer through any of the sub-structs,
649  * or directly as just 'file' in this struct.
650  */
651 struct io_kiocb {
652 	union {
653 		struct file		*file;
654 		struct io_rw		rw;
655 		struct io_poll_iocb	poll;
656 		struct io_accept	accept;
657 		struct io_sync		sync;
658 		struct io_cancel	cancel;
659 		struct io_timeout	timeout;
660 		struct io_timeout_rem	timeout_rem;
661 		struct io_connect	connect;
662 		struct io_sr_msg	sr_msg;
663 		struct io_open		open;
664 		struct io_close		close;
665 		struct io_files_update	files_update;
666 		struct io_fadvise	fadvise;
667 		struct io_madvise	madvise;
668 		struct io_epoll		epoll;
669 		struct io_splice	splice;
670 		struct io_provide_buf	pbuf;
671 		struct io_statx		statx;
672 		/* use only after cleaning per-op data, see io_clean_op() */
673 		struct io_completion	compl;
674 	};
675 
676 	/* opcode allocated if it needs to store data for async defer */
677 	void				*async_data;
678 	u8				opcode;
679 	/* polled IO has completed */
680 	u8				iopoll_completed;
681 
682 	u16				buf_index;
683 	u32				result;
684 
685 	struct io_ring_ctx		*ctx;
686 	unsigned int			flags;
687 	refcount_t			refs;
688 	struct task_struct		*task;
689 	u64				user_data;
690 
691 	struct list_head		link_list;
692 
693 	/*
694 	 * 1. used with ctx->iopoll_list with reads/writes
695 	 * 2. to track reqs with ->files (see io_op_def::file_table)
696 	 */
697 	struct list_head		inflight_entry;
698 
699 	struct list_head		iopoll_entry;
700 
701 	struct percpu_ref		*fixed_file_refs;
702 	struct callback_head		task_work;
703 	/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
704 	struct hlist_node		hash_node;
705 	struct async_poll		*apoll;
706 	struct io_wq_work		work;
707 };
708 
709 struct io_defer_entry {
710 	struct list_head	list;
711 	struct io_kiocb		*req;
712 	u32			seq;
713 };
714 
715 #define IO_IOPOLL_BATCH			8
716 
717 struct io_comp_state {
718 	unsigned int		nr;
719 	struct list_head	list;
720 	struct io_ring_ctx	*ctx;
721 };
722 
723 struct io_submit_state {
724 	struct blk_plug		plug;
725 
726 	/*
727 	 * io_kiocb alloc cache
728 	 */
729 	void			*reqs[IO_IOPOLL_BATCH];
730 	unsigned int		free_reqs;
731 
732 	/*
733 	 * Batch completion logic
734 	 */
735 	struct io_comp_state	comp;
736 
737 	/*
738 	 * File reference cache
739 	 */
740 	struct file		*file;
741 	unsigned int		fd;
742 	unsigned int		has_refs;
743 	unsigned int		ios_left;
744 };
745 
746 struct io_op_def {
747 	/* needs req->file assigned */
748 	unsigned		needs_file : 1;
749 	/* don't fail if file grab fails */
750 	unsigned		needs_file_no_error : 1;
751 	/* hash wq insertion if file is a regular file */
752 	unsigned		hash_reg_file : 1;
753 	/* unbound wq insertion if file is a non-regular file */
754 	unsigned		unbound_nonreg_file : 1;
755 	/* opcode is not supported by this kernel */
756 	unsigned		not_supported : 1;
757 	/* set if opcode supports polled "wait" */
758 	unsigned		pollin : 1;
759 	unsigned		pollout : 1;
760 	/* op supports buffer selection */
761 	unsigned		buffer_select : 1;
762 	/* must always have async data allocated */
763 	unsigned		needs_async_data : 1;
764 	/* size of async data needed, if any */
765 	unsigned short		async_size;
766 	unsigned		work_flags;
767 };
768 
769 static const struct io_op_def io_op_defs[] = {
770 	[IORING_OP_NOP] = {},
771 	[IORING_OP_READV] = {
772 		.needs_file		= 1,
773 		.unbound_nonreg_file	= 1,
774 		.pollin			= 1,
775 		.buffer_select		= 1,
776 		.needs_async_data	= 1,
777 		.async_size		= sizeof(struct io_async_rw),
778 		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
779 					  IO_WQ_WORK_FILES,
780 	},
781 	[IORING_OP_WRITEV] = {
782 		.needs_file		= 1,
783 		.hash_reg_file		= 1,
784 		.unbound_nonreg_file	= 1,
785 		.pollout		= 1,
786 		.needs_async_data	= 1,
787 		.async_size		= sizeof(struct io_async_rw),
788 		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
789 					  IO_WQ_WORK_FSIZE | IO_WQ_WORK_FILES,
790 	},
791 	[IORING_OP_FSYNC] = {
792 		.needs_file		= 1,
793 		.work_flags		= IO_WQ_WORK_BLKCG,
794 	},
795 	[IORING_OP_READ_FIXED] = {
796 		.needs_file		= 1,
797 		.unbound_nonreg_file	= 1,
798 		.pollin			= 1,
799 		.async_size		= sizeof(struct io_async_rw),
800 		.work_flags		= IO_WQ_WORK_BLKCG | IO_WQ_WORK_MM |
801 					  IO_WQ_WORK_FILES,
802 	},
803 	[IORING_OP_WRITE_FIXED] = {
804 		.needs_file		= 1,
805 		.hash_reg_file		= 1,
806 		.unbound_nonreg_file	= 1,
807 		.pollout		= 1,
808 		.async_size		= sizeof(struct io_async_rw),
809 		.work_flags		= IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE |
810 					  IO_WQ_WORK_MM | IO_WQ_WORK_FILES,
811 	},
812 	[IORING_OP_POLL_ADD] = {
813 		.needs_file		= 1,
814 		.unbound_nonreg_file	= 1,
815 	},
816 	[IORING_OP_POLL_REMOVE] = {},
817 	[IORING_OP_SYNC_FILE_RANGE] = {
818 		.needs_file		= 1,
819 		.work_flags		= IO_WQ_WORK_BLKCG,
820 	},
821 	[IORING_OP_SENDMSG] = {
822 		.needs_file		= 1,
823 		.unbound_nonreg_file	= 1,
824 		.pollout		= 1,
825 		.needs_async_data	= 1,
826 		.async_size		= sizeof(struct io_async_msghdr),
827 		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
828 						IO_WQ_WORK_FS,
829 	},
830 	[IORING_OP_RECVMSG] = {
831 		.needs_file		= 1,
832 		.unbound_nonreg_file	= 1,
833 		.pollin			= 1,
834 		.buffer_select		= 1,
835 		.needs_async_data	= 1,
836 		.async_size		= sizeof(struct io_async_msghdr),
837 		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
838 						IO_WQ_WORK_FS,
839 	},
840 	[IORING_OP_TIMEOUT] = {
841 		.needs_async_data	= 1,
842 		.async_size		= sizeof(struct io_timeout_data),
843 		.work_flags		= IO_WQ_WORK_MM,
844 	},
845 	[IORING_OP_TIMEOUT_REMOVE] = {},
846 	[IORING_OP_ACCEPT] = {
847 		.needs_file		= 1,
848 		.unbound_nonreg_file	= 1,
849 		.pollin			= 1,
850 		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_FILES,
851 	},
852 	[IORING_OP_ASYNC_CANCEL] = {},
853 	[IORING_OP_LINK_TIMEOUT] = {
854 		.needs_async_data	= 1,
855 		.async_size		= sizeof(struct io_timeout_data),
856 		.work_flags		= IO_WQ_WORK_MM,
857 	},
858 	[IORING_OP_CONNECT] = {
859 		.needs_file		= 1,
860 		.unbound_nonreg_file	= 1,
861 		.pollout		= 1,
862 		.needs_async_data	= 1,
863 		.async_size		= sizeof(struct io_async_connect),
864 		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_FS,
865 	},
866 	[IORING_OP_FALLOCATE] = {
867 		.needs_file		= 1,
868 		.work_flags		= IO_WQ_WORK_BLKCG | IO_WQ_WORK_FSIZE,
869 	},
870 	[IORING_OP_OPENAT] = {
871 		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG |
872 						IO_WQ_WORK_FS,
873 	},
874 	[IORING_OP_CLOSE] = {
875 		.needs_file		= 1,
876 		.needs_file_no_error	= 1,
877 		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_BLKCG,
878 	},
879 	[IORING_OP_FILES_UPDATE] = {
880 		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_MM,
881 	},
882 	[IORING_OP_STATX] = {
883 		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_MM |
884 						IO_WQ_WORK_FS | IO_WQ_WORK_BLKCG,
885 	},
886 	[IORING_OP_READ] = {
887 		.needs_file		= 1,
888 		.unbound_nonreg_file	= 1,
889 		.pollin			= 1,
890 		.buffer_select		= 1,
891 		.async_size		= sizeof(struct io_async_rw),
892 		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
893 					  IO_WQ_WORK_FILES,
894 	},
895 	[IORING_OP_WRITE] = {
896 		.needs_file		= 1,
897 		.hash_reg_file		= 1,
898 		.unbound_nonreg_file	= 1,
899 		.pollout		= 1,
900 		.async_size		= sizeof(struct io_async_rw),
901 		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
902 					  IO_WQ_WORK_FSIZE | IO_WQ_WORK_FILES,
903 	},
904 	[IORING_OP_FADVISE] = {
905 		.needs_file		= 1,
906 		.work_flags		= IO_WQ_WORK_BLKCG,
907 	},
908 	[IORING_OP_MADVISE] = {
909 		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG,
910 	},
911 	[IORING_OP_SEND] = {
912 		.needs_file		= 1,
913 		.unbound_nonreg_file	= 1,
914 		.pollout		= 1,
915 		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
916 					  IO_WQ_WORK_FS,
917 	},
918 	[IORING_OP_RECV] = {
919 		.needs_file		= 1,
920 		.unbound_nonreg_file	= 1,
921 		.pollin			= 1,
922 		.buffer_select		= 1,
923 		.work_flags		= IO_WQ_WORK_MM | IO_WQ_WORK_BLKCG |
924 					  IO_WQ_WORK_FS,
925 	},
926 	[IORING_OP_OPENAT2] = {
927 		.work_flags		= IO_WQ_WORK_FILES | IO_WQ_WORK_FS |
928 						IO_WQ_WORK_BLKCG,
929 	},
930 	[IORING_OP_EPOLL_CTL] = {
931 		.unbound_nonreg_file	= 1,
932 		.work_flags		= IO_WQ_WORK_FILES,
933 	},
934 	[IORING_OP_SPLICE] = {
935 		.needs_file		= 1,
936 		.hash_reg_file		= 1,
937 		.unbound_nonreg_file	= 1,
938 		.work_flags		= IO_WQ_WORK_BLKCG | IO_WQ_WORK_FILES,
939 	},
940 	[IORING_OP_PROVIDE_BUFFERS] = {},
941 	[IORING_OP_REMOVE_BUFFERS] = {},
942 	[IORING_OP_TEE] = {
943 		.needs_file		= 1,
944 		.hash_reg_file		= 1,
945 		.unbound_nonreg_file	= 1,
946 	},
947 };
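/*
 * Illustrative sketch (simplified): the submission and async-prep paths
 * consult this table roughly as
 *
 *	const struct io_op_def *def = &io_op_defs[req->opcode];
 *
 *	if (def->needs_file)
 *		req->file = io_file_get(state, req, fd, fixed);
 *	if (def->needs_async_data && !req->async_data)
 *		req->async_data = kmalloc(def->async_size, GFP_KERNEL);
 *
 * Error handling and the remaining flags are omitted here.
 */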
948 
949 enum io_mem_account {
950 	ACCT_LOCKED,
951 	ACCT_PINNED,
952 };
953 
954 static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node);
955 static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
956 			struct io_ring_ctx *ctx);
957 
958 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
959 			     struct io_comp_state *cs);
960 static void io_cqring_fill_event(struct io_kiocb *req, long res);
961 static void io_put_req(struct io_kiocb *req);
962 static void io_put_req_deferred(struct io_kiocb *req, int nr);
963 static void io_double_put_req(struct io_kiocb *req);
964 static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req);
965 static void __io_queue_linked_timeout(struct io_kiocb *req);
966 static void io_queue_linked_timeout(struct io_kiocb *req);
967 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
968 				 struct io_uring_files_update *ip,
969 				 unsigned nr_args);
970 static void __io_clean_op(struct io_kiocb *req);
971 static struct file *io_file_get(struct io_submit_state *state,
972 				struct io_kiocb *req, int fd, bool fixed);
973 static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs);
974 static void io_file_put_work(struct work_struct *work);
975 
976 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
977 			       struct iovec **iovec, struct iov_iter *iter,
978 			       bool needs_lock);
979 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
980 			     const struct iovec *fast_iov,
981 			     struct iov_iter *iter, bool force);
982 static void io_req_drop_files(struct io_kiocb *req);
983 static void io_req_task_queue(struct io_kiocb *req);
984 
985 static struct kmem_cache *req_cachep;
986 
987 static const struct file_operations io_uring_fops;
988 
989 struct sock *io_uring_get_socket(struct file *file)
990 {
991 #if defined(CONFIG_UNIX)
992 	if (file->f_op == &io_uring_fops) {
993 		struct io_ring_ctx *ctx = file->private_data;
994 
995 		return ctx->ring_sock->sk;
996 	}
997 #endif
998 	return NULL;
999 }
1000 EXPORT_SYMBOL(io_uring_get_socket);
1001 
1002 static inline void io_clean_op(struct io_kiocb *req)
1003 {
1004 	if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED))
1005 		__io_clean_op(req);
1006 }
1007 
1008 static inline bool __io_match_files(struct io_kiocb *req,
1009 				    struct files_struct *files)
1010 {
1011 	if (req->file && req->file->f_op == &io_uring_fops)
1012 		return true;
1013 
1014 	return ((req->flags & REQ_F_WORK_INITIALIZED) &&
1015 	        (req->work.flags & IO_WQ_WORK_FILES)) &&
1016 		req->work.identity->files == files;
1017 }
1018 
1019 static void io_refs_resurrect(struct percpu_ref *ref, struct completion *compl)
1020 {
1021 	bool got = percpu_ref_tryget(ref);
1022 
1023 	/* already at zero, wait for ->release() */
1024 	if (!got)
1025 		wait_for_completion(compl);
1026 	percpu_ref_resurrect(ref);
1027 	if (got)
1028 		percpu_ref_put(ref);
1029 }
1030 
1031 static bool io_match_task(struct io_kiocb *head,
1032 			  struct task_struct *task,
1033 			  struct files_struct *files)
1034 {
1035 	struct io_kiocb *link;
1036 
1037 	if (task && head->task != task) {
1038 		/* in terms of cancelation, always match if req task is dead */
1039 		if (head->task->flags & PF_EXITING)
1040 			return true;
1041 		return false;
1042 	}
1043 	if (!files)
1044 		return true;
1045 	if (__io_match_files(head, files))
1046 		return true;
1047 	if (head->flags & REQ_F_LINK_HEAD) {
1048 		list_for_each_entry(link, &head->link_list, link_list) {
1049 			if (__io_match_files(link, files))
1050 				return true;
1051 		}
1052 	}
1053 	return false;
1054 }
1055 
1056 
1057 static void io_sq_thread_drop_mm(void)
1058 {
1059 	struct mm_struct *mm = current->mm;
1060 
1061 	if (mm) {
1062 		kthread_unuse_mm(mm);
1063 		mmput(mm);
1064 		current->mm = NULL;
1065 	}
1066 }
1067 
1068 static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx)
1069 {
1070 	struct mm_struct *mm;
1071 
1072 	if (current->flags & PF_EXITING)
1073 		return -EFAULT;
1074 	if (current->mm)
1075 		return 0;
1076 
1077 	/* Should never happen */
1078 	if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL)))
1079 		return -EFAULT;
1080 
1081 	task_lock(ctx->sqo_task);
1082 	mm = ctx->sqo_task->mm;
1083 	if (unlikely(!mm || !mmget_not_zero(mm)))
1084 		mm = NULL;
1085 	task_unlock(ctx->sqo_task);
1086 
1087 	if (mm) {
1088 		kthread_use_mm(mm);
1089 		return 0;
1090 	}
1091 
1092 	return -EFAULT;
1093 }
1094 
1095 static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx,
1096 				   struct io_kiocb *req)
1097 {
1098 	if (!(io_op_defs[req->opcode].work_flags & IO_WQ_WORK_MM))
1099 		return 0;
1100 	return __io_sq_thread_acquire_mm(ctx);
1101 }
1102 
1103 static void io_sq_thread_associate_blkcg(struct io_ring_ctx *ctx,
1104 					 struct cgroup_subsys_state **cur_css)
1105 
1106 {
1107 #ifdef CONFIG_BLK_CGROUP
1108 	/* puts the old one when swapping */
1109 	if (*cur_css != ctx->sqo_blkcg_css) {
1110 		kthread_associate_blkcg(ctx->sqo_blkcg_css);
1111 		*cur_css = ctx->sqo_blkcg_css;
1112 	}
1113 #endif
1114 }
1115 
1116 static void io_sq_thread_unassociate_blkcg(void)
1117 {
1118 #ifdef CONFIG_BLK_CGROUP
1119 	kthread_associate_blkcg(NULL);
1120 #endif
1121 }
1122 
1123 static inline void req_set_fail_links(struct io_kiocb *req)
1124 {
1125 	if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK)
1126 		req->flags |= REQ_F_FAIL_LINK;
1127 }
1128 
1129 /*
1130  * None of these are dereferenced, they are simply used to check if any of
1131  * them have changed. If we're under current and check they are still the
1132  * same, we're fine to grab references to them for actual out-of-line use.
1133  */
1134 static void io_init_identity(struct io_identity *id)
1135 {
1136 	id->files = current->files;
1137 	id->mm = current->mm;
1138 #ifdef CONFIG_BLK_CGROUP
1139 	rcu_read_lock();
1140 	id->blkcg_css = blkcg_css();
1141 	rcu_read_unlock();
1142 #endif
1143 	id->creds = current_cred();
1144 	id->nsproxy = current->nsproxy;
1145 	id->fs = current->fs;
1146 	id->fsize = rlimit(RLIMIT_FSIZE);
1147 #ifdef CONFIG_AUDIT
1148 	id->loginuid = current->loginuid;
1149 	id->sessionid = current->sessionid;
1150 #endif
1151 	refcount_set(&id->count, 1);
1152 }
1153 
1154 static inline void __io_req_init_async(struct io_kiocb *req)
1155 {
1156 	memset(&req->work, 0, sizeof(req->work));
1157 	req->flags |= REQ_F_WORK_INITIALIZED;
1158 }
1159 
1160 /*
1161  * Note: must call io_req_init_async() for the first time you
1162  * touch any members of io_wq_work.
1163  */
1164 static inline void io_req_init_async(struct io_kiocb *req)
1165 {
1166 	struct io_uring_task *tctx = req->task->io_uring;
1167 
1168 	if (req->flags & REQ_F_WORK_INITIALIZED)
1169 		return;
1170 
1171 	__io_req_init_async(req);
1172 
1173 	/* Grab a ref if this isn't our static identity */
1174 	req->work.identity = tctx->identity;
1175 	if (tctx->identity != &tctx->__identity)
1176 		refcount_inc(&req->work.identity->count);
1177 }
1178 
1179 static inline bool io_async_submit(struct io_ring_ctx *ctx)
1180 {
1181 	return ctx->flags & IORING_SETUP_SQPOLL;
1182 }
1183 
1184 static void io_ring_ctx_ref_free(struct percpu_ref *ref)
1185 {
1186 	struct io_ring_ctx *ctx = container_of(ref, struct io_ring_ctx, refs);
1187 
1188 	complete(&ctx->ref_comp);
1189 }
1190 
1191 static inline bool io_is_timeout_noseq(struct io_kiocb *req)
1192 {
1193 	return !req->timeout.off;
1194 }
1195 
1196 static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
1197 {
1198 	struct io_ring_ctx *ctx;
1199 	int hash_bits;
1200 
1201 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1202 	if (!ctx)
1203 		return NULL;
1204 
1205 	ctx->fallback_req = kmem_cache_alloc(req_cachep, GFP_KERNEL);
1206 	if (!ctx->fallback_req)
1207 		goto err;
1208 
1209 	/*
 1210 	 * Use 5 bits less than the max cq entries; that should give us around
1211 	 * 32 entries per hash list if totally full and uniformly spread.
1212 	 */
1213 	hash_bits = ilog2(p->cq_entries);
1214 	hash_bits -= 5;
1215 	if (hash_bits <= 0)
1216 		hash_bits = 1;
1217 	ctx->cancel_hash_bits = hash_bits;
1218 	ctx->cancel_hash = kmalloc((1U << hash_bits) * sizeof(struct hlist_head),
1219 					GFP_KERNEL);
1220 	if (!ctx->cancel_hash)
1221 		goto err;
1222 	__hash_init(ctx->cancel_hash, 1U << hash_bits);
1223 
1224 	if (percpu_ref_init(&ctx->refs, io_ring_ctx_ref_free,
1225 			    PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
1226 		goto err;
1227 
1228 	ctx->flags = p->flags;
1229 	init_waitqueue_head(&ctx->sqo_sq_wait);
1230 	INIT_LIST_HEAD(&ctx->sqd_list);
1231 	init_waitqueue_head(&ctx->cq_wait);
1232 	INIT_LIST_HEAD(&ctx->cq_overflow_list);
1233 	init_completion(&ctx->ref_comp);
1234 	init_completion(&ctx->sq_thread_comp);
1235 	xa_init_flags(&ctx->io_buffers, XA_FLAGS_ALLOC1);
1236 	xa_init_flags(&ctx->personalities, XA_FLAGS_ALLOC1);
1237 	mutex_init(&ctx->uring_lock);
1238 	init_waitqueue_head(&ctx->wait);
1239 	spin_lock_init(&ctx->completion_lock);
1240 	INIT_LIST_HEAD(&ctx->iopoll_list);
1241 	INIT_LIST_HEAD(&ctx->defer_list);
1242 	INIT_LIST_HEAD(&ctx->timeout_list);
1243 	spin_lock_init(&ctx->inflight_lock);
1244 	INIT_LIST_HEAD(&ctx->inflight_list);
1245 	INIT_DELAYED_WORK(&ctx->file_put_work, io_file_put_work);
1246 	init_llist_head(&ctx->file_put_llist);
1247 	return ctx;
1248 err:
1249 	if (ctx->fallback_req)
1250 		kmem_cache_free(req_cachep, ctx->fallback_req);
1251 	kfree(ctx->cancel_hash);
1252 	kfree(ctx);
1253 	return NULL;
1254 }
1255 
1256 static bool req_need_defer(struct io_kiocb *req, u32 seq)
1257 {
1258 	if (unlikely(req->flags & REQ_F_IO_DRAIN)) {
1259 		struct io_ring_ctx *ctx = req->ctx;
1260 
1261 		return seq != ctx->cached_cq_tail
1262 				+ READ_ONCE(ctx->cached_cq_overflow);
1263 	}
1264 
1265 	return false;
1266 }
1267 
1268 static void __io_commit_cqring(struct io_ring_ctx *ctx)
1269 {
1270 	struct io_rings *rings = ctx->rings;
1271 
1272 	/* order cqe stores with ring update */
1273 	smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
1274 }
1275 
1276 static void io_put_identity(struct io_uring_task *tctx, struct io_kiocb *req)
1277 {
1278 	if (req->work.identity == &tctx->__identity)
1279 		return;
1280 	if (refcount_dec_and_test(&req->work.identity->count))
1281 		kfree(req->work.identity);
1282 }
1283 
1284 static void io_req_clean_work(struct io_kiocb *req)
1285 {
1286 	if (!(req->flags & REQ_F_WORK_INITIALIZED))
1287 		return;
1288 
1289 	req->flags &= ~REQ_F_WORK_INITIALIZED;
1290 
1291 	if (req->work.flags & IO_WQ_WORK_MM) {
1292 		mmdrop(req->work.identity->mm);
1293 		req->work.flags &= ~IO_WQ_WORK_MM;
1294 	}
1295 #ifdef CONFIG_BLK_CGROUP
1296 	if (req->work.flags & IO_WQ_WORK_BLKCG) {
1297 		css_put(req->work.identity->blkcg_css);
1298 		req->work.flags &= ~IO_WQ_WORK_BLKCG;
1299 	}
1300 #endif
1301 	if (req->work.flags & IO_WQ_WORK_CREDS) {
1302 		put_cred(req->work.identity->creds);
1303 		req->work.flags &= ~IO_WQ_WORK_CREDS;
1304 	}
1305 	if (req->work.flags & IO_WQ_WORK_FS) {
1306 		struct fs_struct *fs = req->work.identity->fs;
1307 
1308 		spin_lock(&req->work.identity->fs->lock);
1309 		if (--fs->users)
1310 			fs = NULL;
1311 		spin_unlock(&req->work.identity->fs->lock);
1312 		if (fs)
1313 			free_fs_struct(fs);
1314 		req->work.flags &= ~IO_WQ_WORK_FS;
1315 	}
1316 	if (req->flags & REQ_F_INFLIGHT)
1317 		io_req_drop_files(req);
1318 
1319 	io_put_identity(req->task->io_uring, req);
1320 }
1321 
1322 /*
1323  * Create a private copy of io_identity, since some fields don't match
1324  * the current context.
1325  */
1326 static bool io_identity_cow(struct io_kiocb *req)
1327 {
1328 	struct io_uring_task *tctx = req->task->io_uring;
1329 	const struct cred *creds = NULL;
1330 	struct io_identity *id;
1331 
1332 	if (req->work.flags & IO_WQ_WORK_CREDS)
1333 		creds = req->work.identity->creds;
1334 
1335 	id = kmemdup(req->work.identity, sizeof(*id), GFP_KERNEL);
1336 	if (unlikely(!id)) {
1337 		req->work.flags |= IO_WQ_WORK_CANCEL;
1338 		return false;
1339 	}
1340 
1341 	/*
 1342 	 * We can safely just re-init the creds we copied. Either the field
1343 	 * matches the current one, or we haven't grabbed it yet. The only
1344 	 * exception is ->creds, through registered personalities, so handle
1345 	 * that one separately.
1346 	 */
1347 	io_init_identity(id);
1348 	if (creds)
1349 		id->creds = creds;
1350 
1351 	/* add one for this request */
1352 	refcount_inc(&id->count);
1353 
1354 	/* drop tctx and req identity references, if needed */
1355 	if (tctx->identity != &tctx->__identity &&
1356 	    refcount_dec_and_test(&tctx->identity->count))
1357 		kfree(tctx->identity);
1358 	if (req->work.identity != &tctx->__identity &&
1359 	    refcount_dec_and_test(&req->work.identity->count))
1360 		kfree(req->work.identity);
1361 
1362 	req->work.identity = id;
1363 	tctx->identity = id;
1364 	return true;
1365 }
1366 
1367 static bool io_grab_identity(struct io_kiocb *req)
1368 {
1369 	const struct io_op_def *def = &io_op_defs[req->opcode];
1370 	struct io_identity *id = req->work.identity;
1371 	struct io_ring_ctx *ctx = req->ctx;
1372 
1373 	if (def->work_flags & IO_WQ_WORK_FSIZE) {
1374 		if (id->fsize != rlimit(RLIMIT_FSIZE))
1375 			return false;
1376 		req->work.flags |= IO_WQ_WORK_FSIZE;
1377 	}
1378 #ifdef CONFIG_BLK_CGROUP
1379 	if (!(req->work.flags & IO_WQ_WORK_BLKCG) &&
1380 	    (def->work_flags & IO_WQ_WORK_BLKCG)) {
1381 		rcu_read_lock();
1382 		if (id->blkcg_css != blkcg_css()) {
1383 			rcu_read_unlock();
1384 			return false;
1385 		}
1386 		/*
 1387 		 * This should be rare: either the cgroup is dying or the task
1388 		 * is moving cgroups. Just punt to root for the handful of ios.
1389 		 */
1390 		if (css_tryget_online(id->blkcg_css))
1391 			req->work.flags |= IO_WQ_WORK_BLKCG;
1392 		rcu_read_unlock();
1393 	}
1394 #endif
1395 	if (!(req->work.flags & IO_WQ_WORK_CREDS)) {
1396 		if (id->creds != current_cred())
1397 			return false;
1398 		get_cred(id->creds);
1399 		req->work.flags |= IO_WQ_WORK_CREDS;
1400 	}
1401 #ifdef CONFIG_AUDIT
1402 	if (!uid_eq(current->loginuid, id->loginuid) ||
1403 	    current->sessionid != id->sessionid)
1404 		return false;
1405 #endif
1406 	if (!(req->work.flags & IO_WQ_WORK_FS) &&
1407 	    (def->work_flags & IO_WQ_WORK_FS)) {
1408 		if (current->fs != id->fs)
1409 			return false;
1410 		spin_lock(&id->fs->lock);
1411 		if (!id->fs->in_exec) {
1412 			id->fs->users++;
1413 			req->work.flags |= IO_WQ_WORK_FS;
1414 		} else {
1415 			req->work.flags |= IO_WQ_WORK_CANCEL;
1416 		}
1417 		spin_unlock(&current->fs->lock);
1418 	}
1419 	if (!(req->work.flags & IO_WQ_WORK_FILES) &&
1420 	    (def->work_flags & IO_WQ_WORK_FILES) &&
1421 	    !(req->flags & REQ_F_NO_FILE_TABLE)) {
1422 		if (id->files != current->files ||
1423 		    id->nsproxy != current->nsproxy)
1424 			return false;
1425 		atomic_inc(&id->files->count);
1426 		get_nsproxy(id->nsproxy);
1427 
1428 		if (!(req->flags & REQ_F_INFLIGHT)) {
1429 			req->flags |= REQ_F_INFLIGHT;
1430 
1431 			spin_lock_irq(&ctx->inflight_lock);
1432 			list_add(&req->inflight_entry, &ctx->inflight_list);
1433 			spin_unlock_irq(&ctx->inflight_lock);
1434 		}
1435 		req->work.flags |= IO_WQ_WORK_FILES;
1436 	}
1437 	if (!(req->work.flags & IO_WQ_WORK_MM) &&
1438 	    (def->work_flags & IO_WQ_WORK_MM)) {
1439 		if (id->mm != current->mm)
1440 			return false;
1441 		mmgrab(id->mm);
1442 		req->work.flags |= IO_WQ_WORK_MM;
1443 	}
1444 
1445 	return true;
1446 }
1447 
1448 static void io_prep_async_work(struct io_kiocb *req)
1449 {
1450 	const struct io_op_def *def = &io_op_defs[req->opcode];
1451 	struct io_ring_ctx *ctx = req->ctx;
1452 	struct io_identity *id;
1453 
1454 	io_req_init_async(req);
1455 	id = req->work.identity;
1456 
1457 	if (req->flags & REQ_F_FORCE_ASYNC)
1458 		req->work.flags |= IO_WQ_WORK_CONCURRENT;
1459 
1460 	if (req->flags & REQ_F_ISREG) {
1461 		if (def->hash_reg_file || (ctx->flags & IORING_SETUP_IOPOLL))
1462 			io_wq_hash_work(&req->work, file_inode(req->file));
1463 	} else if (!req->file || !S_ISBLK(file_inode(req->file)->i_mode)) {
1464 		if (def->unbound_nonreg_file)
1465 			req->work.flags |= IO_WQ_WORK_UNBOUND;
1466 	}
1467 
1468 	/* if we fail grabbing identity, we must COW, regrab, and retry */
1469 	if (io_grab_identity(req))
1470 		return;
1471 
1472 	if (!io_identity_cow(req))
1473 		return;
1474 
1475 	/* can't fail at this point */
1476 	if (!io_grab_identity(req))
1477 		WARN_ON(1);
1478 }
1479 
1480 static void io_prep_async_link(struct io_kiocb *req)
1481 {
1482 	struct io_kiocb *cur;
1483 
1484 	io_prep_async_work(req);
1485 	if (req->flags & REQ_F_LINK_HEAD)
1486 		list_for_each_entry(cur, &req->link_list, link_list)
1487 			io_prep_async_work(cur);
1488 }
1489 
1490 static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req)
1491 {
1492 	struct io_ring_ctx *ctx = req->ctx;
1493 	struct io_kiocb *link = io_prep_linked_timeout(req);
1494 
1495 	trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req,
1496 					&req->work, req->flags);
1497 	io_wq_enqueue(ctx->io_wq, &req->work);
1498 	return link;
1499 }
1500 
1501 static void io_queue_async_work(struct io_kiocb *req)
1502 {
1503 	struct io_kiocb *link;
1504 
1505 	/* init ->work of the whole link before punting */
1506 	io_prep_async_link(req);
1507 	link = __io_queue_async_work(req);
1508 
1509 	if (link)
1510 		io_queue_linked_timeout(link);
1511 }
1512 
1513 static void io_kill_timeout(struct io_kiocb *req, int status)
1514 {
1515 	struct io_timeout_data *io = req->async_data;
1516 	int ret;
1517 
1518 	ret = hrtimer_try_to_cancel(&io->timer);
1519 	if (ret != -1) {
1520 		if (status)
1521 			req_set_fail_links(req);
1522 		atomic_set(&req->ctx->cq_timeouts,
1523 			atomic_read(&req->ctx->cq_timeouts) + 1);
1524 		list_del_init(&req->timeout.list);
1525 		io_cqring_fill_event(req, status);
1526 		io_put_req_deferred(req, 1);
1527 	}
1528 }
1529 
1530 /*
1531  * Returns true if we found and killed one or more timeouts
1532  */
1533 static bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
1534 			     struct files_struct *files)
1535 {
1536 	struct io_kiocb *req, *tmp;
1537 	int canceled = 0;
1538 
1539 	spin_lock_irq(&ctx->completion_lock);
1540 	list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) {
1541 		if (io_match_task(req, tsk, files)) {
1542 			io_kill_timeout(req, -ECANCELED);
1543 			canceled++;
1544 		}
1545 	}
1546 	spin_unlock_irq(&ctx->completion_lock);
1547 	return canceled != 0;
1548 }
1549 
1550 static void __io_queue_deferred(struct io_ring_ctx *ctx)
1551 {
1552 	do {
1553 		struct io_defer_entry *de = list_first_entry(&ctx->defer_list,
1554 						struct io_defer_entry, list);
1555 
1556 		if (req_need_defer(de->req, de->seq))
1557 			break;
1558 		list_del_init(&de->list);
1559 		io_req_task_queue(de->req);
1560 		kfree(de);
1561 	} while (!list_empty(&ctx->defer_list));
1562 }
1563 
1564 static void io_flush_timeouts(struct io_ring_ctx *ctx)
1565 {
1566 	u32 seq;
1567 
1568 	if (list_empty(&ctx->timeout_list))
1569 		return;
1570 
1571 	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
1572 
1573 	do {
1574 		u32 events_needed, events_got;
1575 		struct io_kiocb *req = list_first_entry(&ctx->timeout_list,
1576 						struct io_kiocb, timeout.list);
1577 
1578 		if (io_is_timeout_noseq(req))
1579 			break;
1580 
1581 		/*
1582 		 * Since seq can easily wrap around over time, subtract
1583 		 * the last seq at which timeouts were flushed before comparing.
1584 		 * Assuming not more than 2^31-1 events have happened since,
1585 		 * these subtractions won't have wrapped, so we can check if
1586 		 * target is in [last_seq, current_seq] by comparing the two.
1587 		 */
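		/*
		 * Worked example (illustrative): with cq_last_tm_flush ==
		 * 0xfffffff0, target_seq == 0x00000005 and seq == 0x0000000a,
		 * the unsigned subtractions give events_needed == 0x15 and
		 * events_got == 0x1a, so the timeout fires despite the raw
		 * counters having wrapped.
		 */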
1588 		events_needed = req->timeout.target_seq - ctx->cq_last_tm_flush;
1589 		events_got = seq - ctx->cq_last_tm_flush;
1590 		if (events_got < events_needed)
1591 			break;
1592 
1593 		list_del_init(&req->timeout.list);
1594 		io_kill_timeout(req, 0);
1595 	} while (!list_empty(&ctx->timeout_list));
1596 
1597 	ctx->cq_last_tm_flush = seq;
1598 }
1599 
1600 static void io_commit_cqring(struct io_ring_ctx *ctx)
1601 {
1602 	io_flush_timeouts(ctx);
1603 	__io_commit_cqring(ctx);
1604 
1605 	if (unlikely(!list_empty(&ctx->defer_list)))
1606 		__io_queue_deferred(ctx);
1607 }
1608 
1609 static inline bool io_sqring_full(struct io_ring_ctx *ctx)
1610 {
1611 	struct io_rings *r = ctx->rings;
1612 
1613 	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == r->sq_ring_entries;
1614 }
1615 
1616 static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
1617 {
1618 	struct io_rings *rings = ctx->rings;
1619 	unsigned tail;
1620 
1621 	tail = ctx->cached_cq_tail;
1622 	/*
1623 	 * writes to the cq entry need to come after reading head; the
1624 	 * control dependency is enough as we're using WRITE_ONCE to
1625 	 * fill the cq entry
1626 	 */
1627 	if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
1628 		return NULL;
1629 
1630 	ctx->cached_cq_tail++;
1631 	return &rings->cqes[tail & ctx->cq_mask];
1632 }
1633 
1634 static inline bool io_should_trigger_evfd(struct io_ring_ctx *ctx)
1635 {
1636 	if (!ctx->cq_ev_fd)
1637 		return false;
1638 	if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
1639 		return false;
1640 	if (!ctx->eventfd_async)
1641 		return true;
1642 	return io_wq_current_is_worker();
1643 }
1644 
1645 static void io_cqring_ev_posted(struct io_ring_ctx *ctx)
1646 {
1647 	if (wq_has_sleeper(&ctx->cq_wait)) {
1648 		wake_up_interruptible(&ctx->cq_wait);
1649 		kill_fasync(&ctx->cq_fasync, SIGIO, POLL_IN);
1650 	}
1651 	if (waitqueue_active(&ctx->wait))
1652 		wake_up(&ctx->wait);
1653 	if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1654 		wake_up(&ctx->sq_data->wait);
1655 	if (io_should_trigger_evfd(ctx))
1656 		eventfd_signal(ctx->cq_ev_fd, 1);
1657 }
1658 
1659 static void io_cqring_mark_overflow(struct io_ring_ctx *ctx)
1660 {
1661 	if (list_empty(&ctx->cq_overflow_list)) {
1662 		clear_bit(0, &ctx->sq_check_overflow);
1663 		clear_bit(0, &ctx->cq_check_overflow);
1664 		ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
1665 	}
1666 }
1667 
1668 /* Returns true if there are no backlogged entries after the flush */
1669 static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
1670 				       struct task_struct *tsk,
1671 				       struct files_struct *files)
1672 {
1673 	struct io_rings *rings = ctx->rings;
1674 	struct io_kiocb *req, *tmp;
1675 	struct io_uring_cqe *cqe;
1676 	unsigned long flags;
1677 	LIST_HEAD(list);
1678 
1679 	if (!force) {
1680 		if ((ctx->cached_cq_tail - READ_ONCE(rings->cq.head) ==
1681 		    rings->cq_ring_entries))
1682 			return false;
1683 	}
1684 
1685 	spin_lock_irqsave(&ctx->completion_lock, flags);
1686 
1687 	cqe = NULL;
1688 	list_for_each_entry_safe(req, tmp, &ctx->cq_overflow_list, compl.list) {
1689 		if (!io_match_task(req, tsk, files))
1690 			continue;
1691 
1692 		cqe = io_get_cqring(ctx);
1693 		if (!cqe && !force)
1694 			break;
1695 
1696 		list_move(&req->compl.list, &list);
1697 		if (cqe) {
1698 			WRITE_ONCE(cqe->user_data, req->user_data);
1699 			WRITE_ONCE(cqe->res, req->result);
1700 			WRITE_ONCE(cqe->flags, req->compl.cflags);
1701 		} else {
1702 			ctx->cached_cq_overflow++;
1703 			WRITE_ONCE(ctx->rings->cq_overflow,
1704 				   ctx->cached_cq_overflow);
1705 		}
1706 	}
1707 
1708 	io_commit_cqring(ctx);
1709 	io_cqring_mark_overflow(ctx);
1710 
1711 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
1712 	io_cqring_ev_posted(ctx);
1713 
1714 	while (!list_empty(&list)) {
1715 		req = list_first_entry(&list, struct io_kiocb, compl.list);
1716 		list_del(&req->compl.list);
1717 		io_put_req(req);
1718 	}
1719 
1720 	return cqe != NULL;
1721 }
1722 
1723 static void io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force,
1724 				     struct task_struct *tsk,
1725 				     struct files_struct *files)
1726 {
1727 	if (test_bit(0, &ctx->cq_check_overflow)) {
1728 		/* iopoll syncs against uring_lock, not completion_lock */
1729 		if (ctx->flags & IORING_SETUP_IOPOLL)
1730 			mutex_lock(&ctx->uring_lock);
1731 		__io_cqring_overflow_flush(ctx, force, tsk, files);
1732 		if (ctx->flags & IORING_SETUP_IOPOLL)
1733 			mutex_unlock(&ctx->uring_lock);
1734 	}
1735 }
1736 
1737 static void __io_cqring_fill_event(struct io_kiocb *req, long res,
1738 				   unsigned int cflags)
1739 {
1740 	struct io_ring_ctx *ctx = req->ctx;
1741 	struct io_uring_cqe *cqe;
1742 
1743 	trace_io_uring_complete(ctx, req->user_data, res);
1744 
1745 	/*
1746 	 * If we can't get a cq entry, userspace overflowed the
1747 	 * submission (by quite a lot). Increment the overflow count in
1748 	 * the ring.
1749 	 */
1750 	cqe = io_get_cqring(ctx);
1751 	if (likely(cqe)) {
1752 		WRITE_ONCE(cqe->user_data, req->user_data);
1753 		WRITE_ONCE(cqe->res, res);
1754 		WRITE_ONCE(cqe->flags, cflags);
1755 	} else if (ctx->cq_overflow_flushed ||
1756 		   atomic_read(&req->task->io_uring->in_idle)) {
1757 		/*
1758 		 * If we're in ring overflow flush mode, or in task cancel mode,
1759 		 * then we cannot store the request for later flushing, we need
1760 		 * to drop it on the floor.
1761 		 */
1762 		ctx->cached_cq_overflow++;
1763 		WRITE_ONCE(ctx->rings->cq_overflow, ctx->cached_cq_overflow);
1764 	} else {
1765 		if (list_empty(&ctx->cq_overflow_list)) {
1766 			set_bit(0, &ctx->sq_check_overflow);
1767 			set_bit(0, &ctx->cq_check_overflow);
1768 			ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
1769 		}
1770 		io_clean_op(req);
1771 		req->result = res;
1772 		req->compl.cflags = cflags;
1773 		refcount_inc(&req->refs);
1774 		list_add_tail(&req->compl.list, &ctx->cq_overflow_list);
1775 	}
1776 }
1777 
1778 static void io_cqring_fill_event(struct io_kiocb *req, long res)
1779 {
1780 	__io_cqring_fill_event(req, res, 0);
1781 }
1782 
1783 static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags)
1784 {
1785 	struct io_ring_ctx *ctx = req->ctx;
1786 	unsigned long flags;
1787 
1788 	spin_lock_irqsave(&ctx->completion_lock, flags);
1789 	__io_cqring_fill_event(req, res, cflags);
1790 	io_commit_cqring(ctx);
1791 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
1792 
1793 	io_cqring_ev_posted(ctx);
1794 }
1795 
1796 static void io_submit_flush_completions(struct io_comp_state *cs)
1797 {
1798 	struct io_ring_ctx *ctx = cs->ctx;
1799 
1800 	spin_lock_irq(&ctx->completion_lock);
1801 	while (!list_empty(&cs->list)) {
1802 		struct io_kiocb *req;
1803 
1804 		req = list_first_entry(&cs->list, struct io_kiocb, compl.list);
1805 		list_del(&req->compl.list);
1806 		__io_cqring_fill_event(req, req->result, req->compl.cflags);
1807 
1808 		/*
1809 		 * io_free_req() doesn't care about completion_lock unless one
1810 		 * of these flags is set. REQ_F_WORK_INITIALIZED is in the list
1811 		 * because of a potential deadlock with req->work.fs->lock
1812 		 */
1813 		if (req->flags & (REQ_F_FAIL_LINK|REQ_F_LINK_TIMEOUT
1814 				 |REQ_F_WORK_INITIALIZED)) {
1815 			spin_unlock_irq(&ctx->completion_lock);
1816 			io_put_req(req);
1817 			spin_lock_irq(&ctx->completion_lock);
1818 		} else {
1819 			io_put_req(req);
1820 		}
1821 	}
1822 	io_commit_cqring(ctx);
1823 	spin_unlock_irq(&ctx->completion_lock);
1824 
1825 	io_cqring_ev_posted(ctx);
1826 	cs->nr = 0;
1827 }
1828 
1829 static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags,
1830 			      struct io_comp_state *cs)
1831 {
1832 	if (!cs) {
1833 		io_cqring_add_event(req, res, cflags);
1834 		io_put_req(req);
1835 	} else {
1836 		io_clean_op(req);
1837 		req->result = res;
1838 		req->compl.cflags = cflags;
1839 		list_add_tail(&req->compl.list, &cs->list);
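		/* flush this batch once 32 completions have been collected */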
1840 		if (++cs->nr >= 32)
1841 			io_submit_flush_completions(cs);
1842 	}
1843 }
1844 
1845 static void io_req_complete(struct io_kiocb *req, long res)
1846 {
1847 	__io_req_complete(req, res, 0, NULL);
1848 }
1849 
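/*
 * Bit 0 of ctx->fallback_req doubles as an "in use" flag: io_get_fallback_req()
 * sets it and __io_free_req() clears it again, so mask it off before comparing
 * the pointer.
 */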
1850 static inline bool io_is_fallback_req(struct io_kiocb *req)
1851 {
1852 	return req == (struct io_kiocb *)
1853 			((unsigned long) req->ctx->fallback_req & ~1UL);
1854 }
1855 
1856 static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
1857 {
1858 	struct io_kiocb *req;
1859 
1860 	req = ctx->fallback_req;
1861 	if (!test_and_set_bit_lock(0, (unsigned long *) &ctx->fallback_req))
1862 		return req;
1863 
1864 	return NULL;
1865 }
1866 
1867 static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx,
1868 				     struct io_submit_state *state)
1869 {
1870 	if (!state->free_reqs) {
1871 		gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
1872 		size_t sz;
1873 		int ret;
1874 
1875 		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
1876 		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
1877 
1878 		/*
1879 		 * Bulk alloc is all-or-nothing. If we fail to get a batch,
1880 		 * retry single alloc to be on the safe side.
1881 		 */
1882 		if (unlikely(ret <= 0)) {
1883 			state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
1884 			if (!state->reqs[0])
1885 				goto fallback;
1886 			ret = 1;
1887 		}
1888 		state->free_reqs = ret;
1889 	}
1890 
1891 	state->free_reqs--;
1892 	return state->reqs[state->free_reqs];
1893 fallback:
1894 	return io_get_fallback_req(ctx);
1895 }
1896 
1897 static inline void io_put_file(struct io_kiocb *req, struct file *file,
1898 			  bool fixed)
1899 {
1900 	if (fixed)
1901 		percpu_ref_put(req->fixed_file_refs);
1902 	else
1903 		fput(file);
1904 }
1905 
1906 static void io_dismantle_req(struct io_kiocb *req)
1907 {
1908 	io_clean_op(req);
1909 
1910 	if (req->async_data)
1911 		kfree(req->async_data);
1912 	if (req->file)
1913 		io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE));
1914 
1915 	io_req_clean_work(req);
1916 }
1917 
1918 static void __io_free_req(struct io_kiocb *req)
1919 {
1920 	struct io_uring_task *tctx = req->task->io_uring;
1921 	struct io_ring_ctx *ctx = req->ctx;
1922 
1923 	io_dismantle_req(req);
1924 
1925 	percpu_counter_dec(&tctx->inflight);
1926 	if (atomic_read(&tctx->in_idle))
1927 		wake_up(&tctx->wait);
1928 	put_task_struct(req->task);
1929 
1930 	if (likely(!io_is_fallback_req(req)))
1931 		kmem_cache_free(req_cachep, req);
1932 	else
1933 		clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req);
1934 	percpu_ref_put(&ctx->refs);
1935 }
1936 
1937 static void io_kill_linked_timeout(struct io_kiocb *req)
1938 {
1939 	struct io_ring_ctx *ctx = req->ctx;
1940 	struct io_kiocb *link;
1941 	bool cancelled = false;
1942 	unsigned long flags;
1943 
1944 	spin_lock_irqsave(&ctx->completion_lock, flags);
1945 	link = list_first_entry_or_null(&req->link_list, struct io_kiocb,
1946 					link_list);
1947 	/*
1948 	 * Can happen if a linked timeout fired and the link chain looked like
1949 	 * req -> link t-out -> link t-out [-> ...]
1950 	 */
1951 	if (link && (link->flags & REQ_F_LTIMEOUT_ACTIVE)) {
1952 		struct io_timeout_data *io = link->async_data;
1953 		int ret;
1954 
1955 		list_del_init(&link->link_list);
1956 		ret = hrtimer_try_to_cancel(&io->timer);
1957 		if (ret != -1) {
1958 			io_cqring_fill_event(link, -ECANCELED);
1959 			io_commit_cqring(ctx);
1960 			cancelled = true;
1961 		}
1962 	}
1963 	req->flags &= ~REQ_F_LINK_TIMEOUT;
1964 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
1965 
1966 	if (cancelled) {
1967 		io_cqring_ev_posted(ctx);
1968 		io_put_req(link);
1969 	}
1970 }
1971 
1972 static struct io_kiocb *io_req_link_next(struct io_kiocb *req)
1973 {
1974 	struct io_kiocb *nxt;
1975 
1976 	/*
1977 	 * The list should never be empty when we are called here. But could
1978 	 * potentially happen if the chain is messed up, check to be on the
1979 	 * safe side.
1980 	 */
1981 	if (unlikely(list_empty(&req->link_list)))
1982 		return NULL;
1983 
1984 	nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list);
1985 	list_del_init(&req->link_list);
1986 	if (!list_empty(&nxt->link_list))
1987 		nxt->flags |= REQ_F_LINK_HEAD;
1988 	return nxt;
1989 }
1990 
1991 /*
1992  * Called if REQ_F_LINK_HEAD is set, and we fail the head request
1993  */
1994 static void io_fail_links(struct io_kiocb *req)
1995 {
1996 	struct io_ring_ctx *ctx = req->ctx;
1997 	unsigned long flags;
1998 
1999 	spin_lock_irqsave(&ctx->completion_lock, flags);
2000 	while (!list_empty(&req->link_list)) {
2001 		struct io_kiocb *link = list_first_entry(&req->link_list,
2002 						struct io_kiocb, link_list);
2003 
2004 		list_del_init(&link->link_list);
2005 		trace_io_uring_fail_link(req, link);
2006 
2007 		io_cqring_fill_event(link, -ECANCELED);
2008 
2009 		/*
2010 		 * It's ok to free under spinlock as they're not linked anymore,
2011 		 * but avoid REQ_F_WORK_INITIALIZED because it may deadlock on
2012 		 * work.fs->lock.
2013 		 */
2014 		if (link->flags & REQ_F_WORK_INITIALIZED)
2015 			io_put_req_deferred(link, 2);
2016 		else
2017 			io_double_put_req(link);
2018 	}
2019 
2020 	io_commit_cqring(ctx);
2021 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
2022 
2023 	io_cqring_ev_posted(ctx);
2024 }
2025 
2026 static struct io_kiocb *__io_req_find_next(struct io_kiocb *req)
2027 {
2028 	req->flags &= ~REQ_F_LINK_HEAD;
2029 	if (req->flags & REQ_F_LINK_TIMEOUT)
2030 		io_kill_linked_timeout(req);
2031 
2032 	/*
2033 	 * If LINK is set, we have dependent requests in this chain. If we
2034 	 * didn't fail this request, queue the first one up, moving any other
2035 	 * dependencies to the next request. In case of failure, fail the rest
2036 	 * of the chain.
2037 	 */
2038 	if (likely(!(req->flags & REQ_F_FAIL_LINK)))
2039 		return io_req_link_next(req);
2040 	io_fail_links(req);
2041 	return NULL;
2042 }
2043 
2044 static struct io_kiocb *io_req_find_next(struct io_kiocb *req)
2045 {
2046 	if (likely(!(req->flags & REQ_F_LINK_HEAD)))
2047 		return NULL;
2048 	return __io_req_find_next(req);
2049 }
2050 
2051 static int io_req_task_work_add(struct io_kiocb *req, bool twa_signal_ok)
2052 {
2053 	struct task_struct *tsk = req->task;
2054 	struct io_ring_ctx *ctx = req->ctx;
2055 	enum task_work_notify_mode notify;
2056 	int ret;
2057 
2058 	if (tsk->flags & PF_EXITING)
2059 		return -ESRCH;
2060 
2061 	/*
2062 	 * SQPOLL kernel thread doesn't need notification, just a wakeup. For
2063 	 * all other cases, use TWA_SIGNAL unconditionally to ensure we're
2064 	 * processing task_work. There's no reliable way to tell if TWA_RESUME
2065 	 * will do the job.
2066 	 */
2067 	notify = TWA_NONE;
2068 	if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok)
2069 		notify = TWA_SIGNAL;
2070 
2071 	ret = task_work_add(tsk, &req->task_work, notify);
2072 	if (!ret)
2073 		wake_up_process(tsk);
2074 
2075 	return ret;
2076 }
2077 
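/*
 * Fallback for when io_req_task_work_add() fails: queue the callback on the
 * io-wq task instead, so the request still gets completed or cancelled.
 */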
2078 static void io_req_task_work_add_fallback(struct io_kiocb *req,
2079 					  void (*cb)(struct callback_head *))
2080 {
2081 	struct task_struct *tsk = io_wq_get_task(req->ctx->io_wq);
2082 
2083 	init_task_work(&req->task_work, cb);
2084 	task_work_add(tsk, &req->task_work, TWA_NONE);
2085 	wake_up_process(tsk);
2086 }
2087 
2088 static void __io_req_task_cancel(struct io_kiocb *req, int error)
2089 {
2090 	struct io_ring_ctx *ctx = req->ctx;
2091 
2092 	spin_lock_irq(&ctx->completion_lock);
2093 	io_cqring_fill_event(req, error);
2094 	io_commit_cqring(ctx);
2095 	spin_unlock_irq(&ctx->completion_lock);
2096 
2097 	io_cqring_ev_posted(ctx);
2098 	req_set_fail_links(req);
2099 	io_double_put_req(req);
2100 }
2101 
2102 static void io_req_task_cancel(struct callback_head *cb)
2103 {
2104 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2105 	struct io_ring_ctx *ctx = req->ctx;
2106 
2107 	mutex_lock(&ctx->uring_lock);
2108 	__io_req_task_cancel(req, -ECANCELED);
2109 	mutex_unlock(&ctx->uring_lock);
2110 	percpu_ref_put(&ctx->refs);
2111 }
2112 
2113 static void __io_req_task_submit(struct io_kiocb *req)
2114 {
2115 	struct io_ring_ctx *ctx = req->ctx;
2116 
2117 	mutex_lock(&ctx->uring_lock);
2118 	if (!ctx->sqo_dead && !__io_sq_thread_acquire_mm(ctx))
2119 		__io_queue_sqe(req, NULL);
2120 	else
2121 		__io_req_task_cancel(req, -EFAULT);
2122 	mutex_unlock(&ctx->uring_lock);
2123 
2124 	if (ctx->flags & IORING_SETUP_SQPOLL)
2125 		io_sq_thread_drop_mm();
2126 }
2127 
2128 static void io_req_task_submit(struct callback_head *cb)
2129 {
2130 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2131 	struct io_ring_ctx *ctx = req->ctx;
2132 
2133 	__io_req_task_submit(req);
2134 	percpu_ref_put(&ctx->refs);
2135 }
2136 
2137 static void io_req_task_queue(struct io_kiocb *req)
2138 {
2139 	int ret;
2140 
2141 	init_task_work(&req->task_work, io_req_task_submit);
2142 	percpu_ref_get(&req->ctx->refs);
2143 
2144 	ret = io_req_task_work_add(req, true);
2145 	if (unlikely(ret))
2146 		io_req_task_work_add_fallback(req, io_req_task_cancel);
2147 }
2148 
2149 static void io_queue_next(struct io_kiocb *req)
2150 {
2151 	struct io_kiocb *nxt = io_req_find_next(req);
2152 
2153 	if (nxt)
2154 		io_req_task_queue(nxt);
2155 }
2156 
2157 static void io_free_req(struct io_kiocb *req)
2158 {
2159 	io_queue_next(req);
2160 	__io_free_req(req);
2161 }
2162 
2163 struct req_batch {
2164 	void *reqs[IO_IOPOLL_BATCH];
2165 	int to_free;
2166 
2167 	struct task_struct	*task;
2168 	int			task_refs;
2169 };
2170 
2171 static inline void io_init_req_batch(struct req_batch *rb)
2172 {
2173 	rb->to_free = 0;
2174 	rb->task_refs = 0;
2175 	rb->task = NULL;
2176 }
2177 
2178 static void __io_req_free_batch_flush(struct io_ring_ctx *ctx,
2179 				      struct req_batch *rb)
2180 {
2181 	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
2182 	percpu_ref_put_many(&ctx->refs, rb->to_free);
2183 	rb->to_free = 0;
2184 }
2185 
2186 static void io_req_free_batch_finish(struct io_ring_ctx *ctx,
2187 				     struct req_batch *rb)
2188 {
2189 	if (rb->to_free)
2190 		__io_req_free_batch_flush(ctx, rb);
2191 	if (rb->task) {
2192 		struct io_uring_task *tctx = rb->task->io_uring;
2193 
2194 		percpu_counter_sub(&tctx->inflight, rb->task_refs);
2195 		if (atomic_read(&tctx->in_idle))
2196 			wake_up(&tctx->wait);
2197 		put_task_struct_many(rb->task, rb->task_refs);
2198 		rb->task = NULL;
2199 	}
2200 }
2201 
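/*
 * Batched variant of io_free_req(): requests are collected in rb->reqs and
 * released in bulk via kmem_cache_free_bulk(), and per-task inflight
 * accounting is flushed whenever the owning task changes.
 */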
2202 static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req)
2203 {
2204 	if (unlikely(io_is_fallback_req(req))) {
2205 		io_free_req(req);
2206 		return;
2207 	}
2208 	if (req->flags & REQ_F_LINK_HEAD)
2209 		io_queue_next(req);
2210 
2211 	if (req->task != rb->task) {
2212 		if (rb->task) {
2213 			struct io_uring_task *tctx = rb->task->io_uring;
2214 
2215 			percpu_counter_sub(&tctx->inflight, rb->task_refs);
2216 			if (atomic_read(&tctx->in_idle))
2217 				wake_up(&tctx->wait);
2218 			put_task_struct_many(rb->task, rb->task_refs);
2219 		}
2220 		rb->task = req->task;
2221 		rb->task_refs = 0;
2222 	}
2223 	rb->task_refs++;
2224 
2225 	io_dismantle_req(req);
2226 	rb->reqs[rb->to_free++] = req;
2227 	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
2228 		__io_req_free_batch_flush(req->ctx, rb);
2229 }
2230 
2231 /*
2232  * Drop reference to request, return next in chain (if there is one) if this
2233  * was the last reference to this request.
2234  */
2235 static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req)
2236 {
2237 	struct io_kiocb *nxt = NULL;
2238 
2239 	if (refcount_dec_and_test(&req->refs)) {
2240 		nxt = io_req_find_next(req);
2241 		__io_free_req(req);
2242 	}
2243 	return nxt;
2244 }
2245 
2246 static void io_put_req(struct io_kiocb *req)
2247 {
2248 	if (refcount_dec_and_test(&req->refs))
2249 		io_free_req(req);
2250 }
2251 
2252 static void io_put_req_deferred_cb(struct callback_head *cb)
2253 {
2254 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
2255 
2256 	io_free_req(req);
2257 }
2258 
2259 static void io_free_req_deferred(struct io_kiocb *req)
2260 {
2261 	int ret;
2262 
2263 	init_task_work(&req->task_work, io_put_req_deferred_cb);
2264 	ret = io_req_task_work_add(req, true);
2265 	if (unlikely(ret))
2266 		io_req_task_work_add_fallback(req, io_put_req_deferred_cb);
2267 }
2268 
2269 static inline void io_put_req_deferred(struct io_kiocb *req, int refs)
2270 {
2271 	if (refcount_sub_and_test(refs, &req->refs))
2272 		io_free_req_deferred(req);
2273 }
2274 
2275 static struct io_wq_work *io_steal_work(struct io_kiocb *req)
2276 {
2277 	struct io_kiocb *nxt;
2278 
2279 	/*
2280 	 * We're called from io-wq context, which owns a ref on the request. So
2281 	 * if that's the last one, it's safe to steal the next work. False
2282 	 * negatives are OK, the work will just be re-punted async in io_put_work()
2283 	 */
2284 	if (refcount_read(&req->refs) != 1)
2285 		return NULL;
2286 
2287 	nxt = io_req_find_next(req);
2288 	return nxt ? &nxt->work : NULL;
2289 }
2290 
2291 static void io_double_put_req(struct io_kiocb *req)
2292 {
2293 	/* drop both submit and complete references */
2294 	if (refcount_sub_and_test(2, &req->refs))
2295 		io_free_req(req);
2296 }
2297 
2298 static unsigned io_cqring_events(struct io_ring_ctx *ctx)
2299 {
2300 	struct io_rings *rings = ctx->rings;
2301 
2302 	/* See comment at the top of this file */
2303 	smp_rmb();
2304 	return ctx->cached_cq_tail - READ_ONCE(rings->cq.head);
2305 }
2306 
2307 static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
2308 {
2309 	struct io_rings *rings = ctx->rings;
2310 
2311 	/* make sure SQ entry isn't read before tail */
2312 	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
2313 }
2314 
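/*
 * Return the CQE flags for a selected buffer: the buffer ID is stored in the
 * upper bits (IORING_CQE_BUFFER_SHIFT), and IORING_CQE_F_BUFFER tells the
 * application that a provided buffer was consumed.
 */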
2315 static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf)
2316 {
2317 	unsigned int cflags;
2318 
2319 	cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT;
2320 	cflags |= IORING_CQE_F_BUFFER;
2321 	req->flags &= ~REQ_F_BUFFER_SELECTED;
2322 	kfree(kbuf);
2323 	return cflags;
2324 }
2325 
2326 static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
2327 {
2328 	struct io_buffer *kbuf;
2329 
2330 	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
2331 	return io_put_kbuf(req, kbuf);
2332 }
2333 
2334 static inline bool io_run_task_work(void)
2335 {
2336 	/*
2337 	 * Not safe to run on exiting task, and the task_work handling will
2338 	 * not add work to such a task.
2339 	 */
2340 	if (unlikely(current->flags & PF_EXITING))
2341 		return false;
2342 	if (current->task_works) {
2343 		__set_current_state(TASK_RUNNING);
2344 		task_work_run();
2345 		return true;
2346 	}
2347 
2348 	return false;
2349 }
2350 
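/*
 * Re-drive iopoll requests that saw -EAGAIN; __io_complete_rw() will either
 * reissue them asynchronously or complete them with the error.
 */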
2351 static void io_iopoll_queue(struct list_head *again)
2352 {
2353 	struct io_kiocb *req;
2354 
2355 	do {
2356 		req = list_first_entry(again, struct io_kiocb, iopoll_entry);
2357 		list_del(&req->iopoll_entry);
2358 		__io_complete_rw(req, -EAGAIN, 0, NULL);
2359 	} while (!list_empty(again));
2360 }
2361 
2362 /*
2363  * Find and free completed poll iocbs
2364  */
2365 static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
2366 			       struct list_head *done)
2367 {
2368 	struct req_batch rb;
2369 	struct io_kiocb *req;
2370 	LIST_HEAD(again);
2371 
2372 	/* order with ->result store in io_complete_rw_iopoll() */
2373 	smp_rmb();
2374 
2375 	io_init_req_batch(&rb);
2376 	while (!list_empty(done)) {
2377 		int cflags = 0;
2378 
2379 		req = list_first_entry(done, struct io_kiocb, iopoll_entry);
2380 		if (READ_ONCE(req->result) == -EAGAIN) {
2381 			req->result = 0;
2382 			req->iopoll_completed = 0;
2383 			list_move_tail(&req->iopoll_entry, &again);
2384 			continue;
2385 		}
2386 		list_del(&req->iopoll_entry);
2387 
2388 		if (req->flags & REQ_F_BUFFER_SELECTED)
2389 			cflags = io_put_rw_kbuf(req);
2390 
2391 		__io_cqring_fill_event(req, req->result, cflags);
2392 		(*nr_events)++;
2393 
2394 		if (refcount_dec_and_test(&req->refs))
2395 			io_req_free_batch(&rb, req);
2396 	}
2397 
2398 	io_commit_cqring(ctx);
2399 	if (ctx->flags & IORING_SETUP_SQPOLL)
2400 		io_cqring_ev_posted(ctx);
2401 	io_req_free_batch_finish(ctx, &rb);
2402 
2403 	if (!list_empty(&again))
2404 		io_iopoll_queue(&again);
2405 }
2406 
2407 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
2408 			long min)
2409 {
2410 	struct io_kiocb *req, *tmp;
2411 	LIST_HEAD(done);
2412 	bool spin;
2413 	int ret;
2414 
2415 	/*
2416 	 * Only spin for completions if we don't have multiple devices hanging
2417 	 * off our complete list, and we're under the requested amount.
2418 	 */
2419 	spin = !ctx->poll_multi_file && *nr_events < min;
2420 
2421 	ret = 0;
2422 	list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, iopoll_entry) {
2423 		struct kiocb *kiocb = &req->rw.kiocb;
2424 
2425 		/*
2426 		 * Move completed and retryable entries to our local lists.
2427 		 * If we find a request that requires polling, break out
2428 		 * and complete those lists first, if we have entries there.
2429 		 */
2430 		if (READ_ONCE(req->iopoll_completed)) {
2431 			list_move_tail(&req->iopoll_entry, &done);
2432 			continue;
2433 		}
2434 		if (!list_empty(&done))
2435 			break;
2436 
2437 		ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
2438 		if (ret < 0)
2439 			break;
2440 
2441 		/* iopoll may have completed current req */
2442 		if (READ_ONCE(req->iopoll_completed))
2443 			list_move_tail(&req->iopoll_entry, &done);
2444 
2445 		if (ret && spin)
2446 			spin = false;
2447 		ret = 0;
2448 	}
2449 
2450 	if (!list_empty(&done))
2451 		io_iopoll_complete(ctx, nr_events, &done);
2452 
2453 	return ret;
2454 }
2455 
2456 /*
2457  * Poll for a minimum of 'min' events. Note that if min == 0 we consider that a
2458  * non-spinning poll check - we'll still enter the driver poll loop, but only
2459  * as a non-spinning completion check.
2460  */
2461 static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
2462 				long min)
2463 {
2464 	while (!list_empty(&ctx->iopoll_list) && !need_resched()) {
2465 		int ret;
2466 
2467 		ret = io_do_iopoll(ctx, nr_events, min);
2468 		if (ret < 0)
2469 			return ret;
2470 		if (*nr_events >= min)
2471 			return 0;
2472 	}
2473 
2474 	return 1;
2475 }
2476 
2477 /*
2478  * We can't just wait for polled events to come to us, we have to actively
2479  * find and complete them.
2480  */
2481 static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx)
2482 {
2483 	if (!(ctx->flags & IORING_SETUP_IOPOLL))
2484 		return;
2485 
2486 	mutex_lock(&ctx->uring_lock);
2487 	while (!list_empty(&ctx->iopoll_list)) {
2488 		unsigned int nr_events = 0;
2489 
2490 		io_do_iopoll(ctx, &nr_events, 0);
2491 
2492 		/* let it sleep and repeat later if we can't complete a request */
2493 		if (nr_events == 0)
2494 			break;
2495 		/*
2496 		 * Ensure we allow local-to-the-cpu processing to take place,
2497 		 * in this case we need to ensure that we reap all events.
2498 		 * Also let task_work, etc. make progress by releasing the mutex
2499 		 */
2500 		if (need_resched()) {
2501 			mutex_unlock(&ctx->uring_lock);
2502 			cond_resched();
2503 			mutex_lock(&ctx->uring_lock);
2504 		}
2505 	}
2506 	mutex_unlock(&ctx->uring_lock);
2507 }
2508 
2509 static int io_iopoll_check(struct io_ring_ctx *ctx, long min)
2510 {
2511 	unsigned int nr_events = 0;
2512 	int iters = 0, ret = 0;
2513 
2514 	/*
2515 	 * We disallow the app entering submit/complete with polling, but we
2516 	 * still need to lock the ring to prevent racing with polled issue
2517 	 * that got punted to a workqueue.
2518 	 */
2519 	mutex_lock(&ctx->uring_lock);
2520 	do {
2521 		/*
2522 		 * Don't enter poll loop if we already have events pending.
2523 		 * If we do, we can potentially be spinning for commands that
2524 		 * already triggered a CQE (eg in error).
2525 		 */
2526 		if (test_bit(0, &ctx->cq_check_overflow))
2527 			__io_cqring_overflow_flush(ctx, false, NULL, NULL);
2528 		if (io_cqring_events(ctx))
2529 			break;
2530 
2531 		/*
2532 		 * If a submit got punted to a workqueue, we can have the
2533 		 * application entering polling for a command before it gets
2534 		 * issued. That app will hold the uring_lock for the duration
2535 		 * of the poll right here, so we need to take a breather every
2536 		 * now and then to ensure that the issue has a chance to add
2537 		 * the poll to the issued list. Otherwise we can spin here
2538 		 * forever, while the workqueue is stuck trying to acquire the
2539 		 * very same mutex.
2540 		 */
2541 		if (!(++iters & 7)) {
2542 			mutex_unlock(&ctx->uring_lock);
2543 			io_run_task_work();
2544 			mutex_lock(&ctx->uring_lock);
2545 		}
2546 
2547 		ret = io_iopoll_getevents(ctx, &nr_events, min);
2548 		if (ret <= 0)
2549 			break;
2550 		ret = 0;
2551 	} while (min && !nr_events && !need_resched());
2552 
2553 	mutex_unlock(&ctx->uring_lock);
2554 	return ret;
2555 }
2556 
2557 static void kiocb_end_write(struct io_kiocb *req)
2558 {
2559 	/*
2560 	 * Tell lockdep we inherited freeze protection from submission
2561 	 * thread.
2562 	 */
2563 	if (req->flags & REQ_F_ISREG) {
2564 		struct inode *inode = file_inode(req->file);
2565 
2566 		__sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
2567 	}
2568 	file_end_write(req->file);
2569 }
2570 
2571 static void io_complete_rw_common(struct kiocb *kiocb, long res,
2572 				  struct io_comp_state *cs)
2573 {
2574 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2575 	int cflags = 0;
2576 
2577 	if (kiocb->ki_flags & IOCB_WRITE)
2578 		kiocb_end_write(req);
2579 
2580 	if (res != req->result)
2581 		req_set_fail_links(req);
2582 	if (req->flags & REQ_F_BUFFER_SELECTED)
2583 		cflags = io_put_rw_kbuf(req);
2584 	__io_req_complete(req, res, cflags, cs);
2585 }
2586 
2587 #ifdef CONFIG_BLOCK
2588 static bool io_resubmit_prep(struct io_kiocb *req, int error)
2589 {
2590 	req_set_fail_links(req);
2591 	return false;
2592 }
2593 #endif
2594 
2595 static bool io_rw_reissue(struct io_kiocb *req, long res)
2596 {
2597 #ifdef CONFIG_BLOCK
2598 	umode_t mode = file_inode(req->file)->i_mode;
2599 	int ret;
2600 
2601 	if (!S_ISBLK(mode) && !S_ISREG(mode))
2602 		return false;
2603 	if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
2604 		return false;
2605 	/*
2606 	 * If ref is dying, we might be running poll reap from the exit work.
2607 	 * Don't attempt to reissue from that path, just let it fail with
2608 	 * -EAGAIN.
2609 	 */
2610 	if (percpu_ref_is_dying(&req->ctx->refs))
2611 		return false;
2612 
2613 	ret = io_sq_thread_acquire_mm(req->ctx, req);
2614 
2615 	if (io_resubmit_prep(req, ret)) {
2616 		refcount_inc(&req->refs);
2617 		io_queue_async_work(req);
2618 		return true;
2619 	}
2620 
2621 #endif
2622 	return false;
2623 }
2624 
2625 static void __io_complete_rw(struct io_kiocb *req, long res, long res2,
2626 			     struct io_comp_state *cs)
2627 {
2628 	if (!io_rw_reissue(req, res))
2629 		io_complete_rw_common(&req->rw.kiocb, res, cs);
2630 }
2631 
2632 static void io_complete_rw(struct kiocb *kiocb, long res, long res2)
2633 {
2634 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2635 
2636 	__io_complete_rw(req, res, res2, NULL);
2637 }
2638 
2639 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
2640 {
2641 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2642 
2643 	if (kiocb->ki_flags & IOCB_WRITE)
2644 		kiocb_end_write(req);
2645 
2646 	if (res != -EAGAIN && res != req->result)
2647 		req_set_fail_links(req);
2648 
2649 	WRITE_ONCE(req->result, res);
2650 	/* order with io_poll_complete() checking ->result */
2651 	smp_wmb();
2652 	WRITE_ONCE(req->iopoll_completed, 1);
2653 }
2654 
2655 /*
2656  * After the iocb has been issued, it's safe to be found on the poll list.
2657  * Adding the kiocb to the list AFTER submission ensures that we don't
2658  * find it from an io_iopoll_getevents() thread before the issuer is done
2659  * accessing the kiocb cookie.
2660  */
2661 static void io_iopoll_req_issued(struct io_kiocb *req)
2662 {
2663 	struct io_ring_ctx *ctx = req->ctx;
2664 
2665 	/*
2666 	 * Track whether we have multiple files in our lists. This will impact
2667 	 * how we do polling eventually, not spinning if we're on potentially
2668 	 * different devices.
2669 	 */
2670 	if (list_empty(&ctx->iopoll_list)) {
2671 		ctx->poll_multi_file = false;
2672 	} else if (!ctx->poll_multi_file) {
2673 		struct io_kiocb *list_req;
2674 
2675 		list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb,
2676 						iopoll_entry);
2677 		if (list_req->file != req->file)
2678 			ctx->poll_multi_file = true;
2679 	}
2680 
2681 	/*
2682 	 * For fast devices, IO may have already completed. If it has, add
2683 	 * it to the front so we find it first.
2684 	 */
2685 	if (READ_ONCE(req->iopoll_completed))
2686 		list_add(&req->iopoll_entry, &ctx->iopoll_list);
2687 	else
2688 		list_add_tail(&req->iopoll_entry, &ctx->iopoll_list);
2689 
2690 	if ((ctx->flags & IORING_SETUP_SQPOLL) &&
2691 	    wq_has_sleeper(&ctx->sq_data->wait))
2692 		wake_up(&ctx->sq_data->wait);
2693 }
2694 
2695 static void __io_state_file_put(struct io_submit_state *state)
2696 {
2697 	if (state->has_refs)
2698 		fput_many(state->file, state->has_refs);
2699 	state->file = NULL;
2700 }
2701 
2702 static inline void io_state_file_put(struct io_submit_state *state)
2703 {
2704 	if (state->file)
2705 		__io_state_file_put(state);
2706 }
2707 
2708 /*
2709  * Get as many references to a file as we have IOs left in this submission,
2710  * assuming most submissions are for one file, or at least that each file
2711  * has more than one submission.
2712  */
2713 static struct file *__io_file_get(struct io_submit_state *state, int fd)
2714 {
2715 	if (!state)
2716 		return fget(fd);
2717 
2718 	if (state->file) {
2719 		if (state->fd == fd) {
2720 			state->has_refs--;
2721 			return state->file;
2722 		}
2723 		__io_state_file_put(state);
2724 	}
2725 	state->file = fget_many(fd, state->ios_left);
2726 	if (!state->file)
2727 		return NULL;
2728 
2729 	state->fd = fd;
2730 	state->has_refs = state->ios_left - 1;
2731 	return state->file;
2732 }
2733 
2734 static bool io_bdev_nowait(struct block_device *bdev)
2735 {
2736 #ifdef CONFIG_BLOCK
2737 	return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
2738 #else
2739 	return true;
2740 #endif
2741 }
2742 
2743 /*
2744  * If we tracked the file through the SCM inflight mechanism, we could support
2745  * any file. For now, just ensure that anything potentially problematic is done
2746  * inline.
2747  */
2748 static bool io_file_supports_async(struct file *file, int rw)
2749 {
2750 	umode_t mode = file_inode(file)->i_mode;
2751 
2752 	if (S_ISBLK(mode)) {
2753 		if (io_bdev_nowait(file->f_inode->i_bdev))
2754 			return true;
2755 		return false;
2756 	}
2757 	if (S_ISSOCK(mode))
2758 		return true;
2759 	if (S_ISREG(mode)) {
2760 		if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
2761 		    file->f_op != &io_uring_fops)
2762 			return true;
2763 		return false;
2764 	}
2765 
2766 	/* any ->read/write should understand O_NONBLOCK */
2767 	if (file->f_flags & O_NONBLOCK)
2768 		return true;
2769 
2770 	if (!(file->f_mode & FMODE_NOWAIT))
2771 		return false;
2772 
2773 	if (rw == READ)
2774 		return file->f_op->read_iter != NULL;
2775 
2776 	return file->f_op->write_iter != NULL;
2777 }
2778 
2779 static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
2780 {
2781 	struct io_ring_ctx *ctx = req->ctx;
2782 	struct kiocb *kiocb = &req->rw.kiocb;
2783 	unsigned ioprio;
2784 	int ret;
2785 
2786 	if (S_ISREG(file_inode(req->file)->i_mode))
2787 		req->flags |= REQ_F_ISREG;
2788 
2789 	kiocb->ki_pos = READ_ONCE(sqe->off);
2790 	if (kiocb->ki_pos == -1 && !(req->file->f_mode & FMODE_STREAM)) {
2791 		req->flags |= REQ_F_CUR_POS;
2792 		kiocb->ki_pos = req->file->f_pos;
2793 	}
2794 	kiocb->ki_hint = ki_hint_validate(file_write_hint(kiocb->ki_filp));
2795 	kiocb->ki_flags = iocb_flags(kiocb->ki_filp);
2796 	ret = kiocb_set_rw_flags(kiocb, READ_ONCE(sqe->rw_flags));
2797 	if (unlikely(ret))
2798 		return ret;
2799 
2800 	ioprio = READ_ONCE(sqe->ioprio);
2801 	if (ioprio) {
2802 		ret = ioprio_check_cap(ioprio);
2803 		if (ret)
2804 			return ret;
2805 
2806 		kiocb->ki_ioprio = ioprio;
2807 	} else
2808 		kiocb->ki_ioprio = get_current_ioprio();
2809 
2810 	/* don't allow async punt if RWF_NOWAIT was requested */
2811 	if (kiocb->ki_flags & IOCB_NOWAIT)
2812 		req->flags |= REQ_F_NOWAIT;
2813 
2814 	if (ctx->flags & IORING_SETUP_IOPOLL) {
2815 		if (!(kiocb->ki_flags & IOCB_DIRECT) ||
2816 		    !kiocb->ki_filp->f_op->iopoll)
2817 			return -EOPNOTSUPP;
2818 
2819 		kiocb->ki_flags |= IOCB_HIPRI;
2820 		kiocb->ki_complete = io_complete_rw_iopoll;
2821 		req->iopoll_completed = 0;
2822 	} else {
2823 		if (kiocb->ki_flags & IOCB_HIPRI)
2824 			return -EINVAL;
2825 		kiocb->ki_complete = io_complete_rw;
2826 	}
2827 
2828 	req->rw.addr = READ_ONCE(sqe->addr);
2829 	req->rw.len = READ_ONCE(sqe->len);
2830 	req->buf_index = READ_ONCE(sqe->buf_index);
2831 	return 0;
2832 }
2833 
2834 static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
2835 {
2836 	switch (ret) {
2837 	case -EIOCBQUEUED:
2838 		break;
2839 	case -ERESTARTSYS:
2840 	case -ERESTARTNOINTR:
2841 	case -ERESTARTNOHAND:
2842 	case -ERESTART_RESTARTBLOCK:
2843 		/*
2844 		 * We can't just restart the syscall, since previously
2845 		 * submitted sqes may already be in progress. Just fail this
2846 		 * IO with EINTR.
2847 		 */
2848 		ret = -EINTR;
2849 		fallthrough;
2850 	default:
2851 		kiocb->ki_complete(kiocb, ret, 0);
2852 	}
2853 }
2854 
2855 static void kiocb_done(struct kiocb *kiocb, ssize_t ret,
2856 		       struct io_comp_state *cs)
2857 {
2858 	struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb);
2859 	struct io_async_rw *io = req->async_data;
2860 
2861 	/* add previously done IO, if any */
2862 	if (io && io->bytes_done > 0) {
2863 		if (ret < 0)
2864 			ret = io->bytes_done;
2865 		else
2866 			ret += io->bytes_done;
2867 	}
2868 
2869 	if (req->flags & REQ_F_CUR_POS)
2870 		req->file->f_pos = kiocb->ki_pos;
2871 	if (ret >= 0 && kiocb->ki_complete == io_complete_rw)
2872 		__io_complete_rw(req, ret, 0, cs);
2873 	else
2874 		io_rw_done(kiocb, ret);
2875 }
2876 
2877 static ssize_t io_import_fixed(struct io_kiocb *req, int rw,
2878 			       struct iov_iter *iter)
2879 {
2880 	struct io_ring_ctx *ctx = req->ctx;
2881 	size_t len = req->rw.len;
2882 	struct io_mapped_ubuf *imu;
2883 	u16 index, buf_index = req->buf_index;
2884 	size_t offset;
2885 	u64 buf_addr;
2886 
2887 	if (unlikely(buf_index >= ctx->nr_user_bufs))
2888 		return -EFAULT;
2889 	index = array_index_nospec(buf_index, ctx->nr_user_bufs);
2890 	imu = &ctx->user_bufs[index];
2891 	buf_addr = req->rw.addr;
2892 
2893 	/* overflow */
2894 	if (buf_addr + len < buf_addr)
2895 		return -EFAULT;
2896 	/* not inside the mapped region */
2897 	if (buf_addr < imu->ubuf || buf_addr + len > imu->ubuf + imu->len)
2898 		return -EFAULT;
2899 
2900 	/*
2901 	 * May not be a start of buffer, set size appropriately
2902 	 * and advance us to the beginning.
2903 	 */
2904 	offset = buf_addr - imu->ubuf;
2905 	iov_iter_bvec(iter, rw, imu->bvec, imu->nr_bvecs, offset + len);
2906 
2907 	if (offset) {
2908 		/*
2909 		 * Don't use iov_iter_advance() here, as it's really slow for
2910 		 * using the latter parts of a big fixed buffer - it iterates
2911 		 * over each segment manually. We can cheat a bit here, because
2912 		 * we know that:
2913 		 *
2914 		 * 1) it's a BVEC iter, we set it up
2915 		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
2916 		 *    first and last bvec
2917 		 *
2918 		 * So just find our index, and adjust the iterator afterwards.
2919 		 * If the offset is within the first bvec (or the whole first
2920 		 * bvec), just use iov_iter_advance(). This makes it easier
2921 		 * since we can just skip the first segment, which may not
2922 		 * be PAGE_SIZE aligned.
2923 		 */
2924 		const struct bio_vec *bvec = imu->bvec;
2925 
2926 		if (offset <= bvec->bv_len) {
2927 			iov_iter_advance(iter, offset);
2928 		} else {
2929 			unsigned long seg_skip;
2930 
2931 			/* skip first vec */
2932 			offset -= bvec->bv_len;
2933 			seg_skip = 1 + (offset >> PAGE_SHIFT);
2934 
2935 			iter->bvec = bvec + seg_skip;
2936 			iter->nr_segs -= seg_skip;
2937 			iter->count -= bvec->bv_len + offset;
2938 			iter->iov_offset = offset & ~PAGE_MASK;
2939 		}
2940 	}
2941 
2942 	return len;
2943 }
2944 
2945 static void io_ring_submit_unlock(struct io_ring_ctx *ctx, bool needs_lock)
2946 {
2947 	if (needs_lock)
2948 		mutex_unlock(&ctx->uring_lock);
2949 }
2950 
2951 static void io_ring_submit_lock(struct io_ring_ctx *ctx, bool needs_lock)
2952 {
2953 	/*
2954 	 * "Normal" inline submissions always hold the uring_lock, since we
2955 	 * grab it from the system call. Same is true for the SQPOLL offload.
2956 	 * The only exception is when we've detached the request and issue it
2957 	 * from an async worker thread, grab the lock for that case.
2958 	 */
2959 	if (needs_lock)
2960 		mutex_lock(&ctx->uring_lock);
2961 }
2962 
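/*
 * Pick a buffer from the provided-buffer group 'bgid': take the last entry on
 * the group list, or the group head itself if it's the only one left, in which
 * case the group is erased from the xarray.
 */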
2963 static struct io_buffer *io_buffer_select(struct io_kiocb *req, size_t *len,
2964 					  int bgid, struct io_buffer *kbuf,
2965 					  bool needs_lock)
2966 {
2967 	struct io_buffer *head;
2968 
2969 	if (req->flags & REQ_F_BUFFER_SELECTED)
2970 		return kbuf;
2971 
2972 	io_ring_submit_lock(req->ctx, needs_lock);
2973 
2974 	lockdep_assert_held(&req->ctx->uring_lock);
2975 
2976 	head = xa_load(&req->ctx->io_buffers, bgid);
2977 	if (head) {
2978 		if (!list_empty(&head->list)) {
2979 			kbuf = list_last_entry(&head->list, struct io_buffer,
2980 							list);
2981 			list_del(&kbuf->list);
2982 		} else {
2983 			kbuf = head;
2984 			xa_erase(&req->ctx->io_buffers, bgid);
2985 		}
2986 		if (*len > kbuf->len)
2987 			*len = kbuf->len;
2988 	} else {
2989 		kbuf = ERR_PTR(-ENOBUFS);
2990 	}
2991 
2992 	io_ring_submit_unlock(req->ctx, needs_lock);
2993 
2994 	return kbuf;
2995 }
2996 
2997 static void __user *io_rw_buffer_select(struct io_kiocb *req, size_t *len,
2998 					bool needs_lock)
2999 {
3000 	struct io_buffer *kbuf;
3001 	u16 bgid;
3002 
3003 	kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3004 	bgid = req->buf_index;
3005 	kbuf = io_buffer_select(req, len, bgid, kbuf, needs_lock);
3006 	if (IS_ERR(kbuf))
3007 		return kbuf;
3008 	req->rw.addr = (u64) (unsigned long) kbuf;
3009 	req->flags |= REQ_F_BUFFER_SELECTED;
3010 	return u64_to_user_ptr(kbuf->addr);
3011 }
3012 
3013 #ifdef CONFIG_COMPAT
3014 static ssize_t io_compat_import(struct io_kiocb *req, struct iovec *iov,
3015 				bool needs_lock)
3016 {
3017 	struct compat_iovec __user *uiov;
3018 	compat_ssize_t clen;
3019 	void __user *buf;
3020 	ssize_t len;
3021 
3022 	uiov = u64_to_user_ptr(req->rw.addr);
3023 	if (!access_ok(uiov, sizeof(*uiov)))
3024 		return -EFAULT;
3025 	if (__get_user(clen, &uiov->iov_len))
3026 		return -EFAULT;
3027 	if (clen < 0)
3028 		return -EINVAL;
3029 
3030 	len = clen;
3031 	buf = io_rw_buffer_select(req, &len, needs_lock);
3032 	if (IS_ERR(buf))
3033 		return PTR_ERR(buf);
3034 	iov[0].iov_base = buf;
3035 	iov[0].iov_len = (compat_size_t) len;
3036 	return 0;
3037 }
3038 #endif
3039 
3040 static ssize_t __io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3041 				      bool needs_lock)
3042 {
3043 	struct iovec __user *uiov = u64_to_user_ptr(req->rw.addr);
3044 	void __user *buf;
3045 	ssize_t len;
3046 
3047 	if (copy_from_user(iov, uiov, sizeof(*uiov)))
3048 		return -EFAULT;
3049 
3050 	len = iov[0].iov_len;
3051 	if (len < 0)
3052 		return -EINVAL;
3053 	buf = io_rw_buffer_select(req, &len, needs_lock);
3054 	if (IS_ERR(buf))
3055 		return PTR_ERR(buf);
3056 	iov[0].iov_base = buf;
3057 	iov[0].iov_len = len;
3058 	return 0;
3059 }
3060 
3061 static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov,
3062 				    bool needs_lock)
3063 {
3064 	if (req->flags & REQ_F_BUFFER_SELECTED) {
3065 		struct io_buffer *kbuf;
3066 
3067 		kbuf = (struct io_buffer *) (unsigned long) req->rw.addr;
3068 		iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
3069 		iov[0].iov_len = kbuf->len;
3070 		return 0;
3071 	}
3072 	if (req->rw.len != 1)
3073 		return -EINVAL;
3074 
3075 #ifdef CONFIG_COMPAT
3076 	if (req->ctx->compat)
3077 		return io_compat_import(req, iov, needs_lock);
3078 #endif
3079 
3080 	return __io_iov_buffer_select(req, iov, needs_lock);
3081 }
3082 
3083 static ssize_t __io_import_iovec(int rw, struct io_kiocb *req,
3084 				 struct iovec **iovec, struct iov_iter *iter,
3085 				 bool needs_lock)
3086 {
3087 	void __user *buf = u64_to_user_ptr(req->rw.addr);
3088 	size_t sqe_len = req->rw.len;
3089 	ssize_t ret;
3090 	u8 opcode;
3091 
3092 	opcode = req->opcode;
3093 	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
3094 		*iovec = NULL;
3095 		return io_import_fixed(req, rw, iter);
3096 	}
3097 
3098 	/* buffer index only valid with fixed read/write, or buffer select  */
3099 	if (req->buf_index && !(req->flags & REQ_F_BUFFER_SELECT))
3100 		return -EINVAL;
3101 
3102 	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) {
3103 		if (req->flags & REQ_F_BUFFER_SELECT) {
3104 			buf = io_rw_buffer_select(req, &sqe_len, needs_lock);
3105 			if (IS_ERR(buf))
3106 				return PTR_ERR(buf);
3107 			req->rw.len = sqe_len;
3108 		}
3109 
3110 		ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
3111 		*iovec = NULL;
3112 		return ret;
3113 	}
3114 
3115 	if (req->flags & REQ_F_BUFFER_SELECT) {
3116 		ret = io_iov_buffer_select(req, *iovec, needs_lock);
3117 		if (!ret) {
3118 			ret = (*iovec)->iov_len;
3119 			iov_iter_init(iter, rw, *iovec, 1, ret);
3120 		}
3121 		*iovec = NULL;
3122 		return ret;
3123 	}
3124 
3125 	return __import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter,
3126 			      req->ctx->compat);
3127 }
3128 
3129 static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
3130 			       struct iovec **iovec, struct iov_iter *iter,
3131 			       bool needs_lock)
3132 {
3133 	struct io_async_rw *iorw = req->async_data;
3134 
3135 	if (!iorw)
3136 		return __io_import_iovec(rw, req, iovec, iter, needs_lock);
3137 	*iovec = NULL;
3138 	return 0;
3139 }
3140 
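/* stream-like files have no notion of position, so pass a NULL ppos for them */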
3141 static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
3142 {
3143 	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
3144 }
3145 
3146 /*
3147  * For files that don't have ->read_iter() and ->write_iter(), handle them
3148  * by looping over ->read() or ->write() manually.
3149  */
3150 static ssize_t loop_rw_iter(int rw, struct io_kiocb *req, struct iov_iter *iter)
3151 {
3152 	struct kiocb *kiocb = &req->rw.kiocb;
3153 	struct file *file = req->file;
3154 	ssize_t ret = 0;
3155 
3156 	/*
3157 	 * Don't support polled IO through this interface, and we can't
3158 	 * support non-blocking either. For the latter, this just causes
3159 	 * the kiocb to be handled from an async context.
3160 	 */
3161 	if (kiocb->ki_flags & IOCB_HIPRI)
3162 		return -EOPNOTSUPP;
3163 	if (kiocb->ki_flags & IOCB_NOWAIT)
3164 		return -EAGAIN;
3165 
3166 	while (iov_iter_count(iter)) {
3167 		struct iovec iovec;
3168 		ssize_t nr;
3169 
3170 		if (!iov_iter_is_bvec(iter)) {
3171 			iovec = iov_iter_iovec(iter);
3172 		} else {
3173 			iovec.iov_base = u64_to_user_ptr(req->rw.addr);
3174 			iovec.iov_len = req->rw.len;
3175 		}
3176 
3177 		if (rw == READ) {
3178 			nr = file->f_op->read(file, iovec.iov_base,
3179 					      iovec.iov_len, io_kiocb_ppos(kiocb));
3180 		} else {
3181 			nr = file->f_op->write(file, iovec.iov_base,
3182 					       iovec.iov_len, io_kiocb_ppos(kiocb));
3183 		}
3184 
3185 		if (nr < 0) {
3186 			if (!ret)
3187 				ret = nr;
3188 			break;
3189 		}
3190 		if (!iov_iter_is_bvec(iter)) {
3191 			iov_iter_advance(iter, nr);
3192 		} else {
3193 			req->rw.len -= nr;
3194 			req->rw.addr += nr;
3195 		}
3196 		ret += nr;
3197 		if (nr != iovec.iov_len)
3198 			break;
3199 	}
3200 
3201 	return ret;
3202 }
3203 
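/*
 * Stash the iterator state in the request's async data so the IO can be
 * retried from async context; inline iovecs are copied into rw->fast_iov,
 * and an externally allocated iovec is recorded so it can be freed later.
 */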
3204 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
3205 			  const struct iovec *fast_iov, struct iov_iter *iter)
3206 {
3207 	struct io_async_rw *rw = req->async_data;
3208 
3209 	memcpy(&rw->iter, iter, sizeof(*iter));
3210 	rw->free_iovec = iovec;
3211 	rw->bytes_done = 0;
3212 	/* can only be fixed buffers, no need to do anything */
3213 	if (iov_iter_is_bvec(iter))
3214 		return;
3215 	if (!iovec) {
3216 		unsigned iov_off = 0;
3217 
3218 		rw->iter.iov = rw->fast_iov;
3219 		if (iter->iov != fast_iov) {
3220 			iov_off = iter->iov - fast_iov;
3221 			rw->iter.iov += iov_off;
3222 		}
3223 		if (rw->fast_iov != fast_iov)
3224 			memcpy(rw->fast_iov + iov_off, fast_iov + iov_off,
3225 			       sizeof(struct iovec) * iter->nr_segs);
3226 	} else {
3227 		req->flags |= REQ_F_NEED_CLEANUP;
3228 	}
3229 }
3230 
3231 static inline int __io_alloc_async_data(struct io_kiocb *req)
3232 {
3233 	WARN_ON_ONCE(!io_op_defs[req->opcode].async_size);
3234 	req->async_data = kmalloc(io_op_defs[req->opcode].async_size, GFP_KERNEL);
3235 	return req->async_data == NULL;
3236 }
3237 
3238 static int io_alloc_async_data(struct io_kiocb *req)
3239 {
3240 	if (!io_op_defs[req->opcode].needs_async_data)
3241 		return 0;
3242 
3243 	return  __io_alloc_async_data(req);
3244 }
3245 
3246 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
3247 			     const struct iovec *fast_iov,
3248 			     struct iov_iter *iter, bool force)
3249 {
3250 	if (!force && !io_op_defs[req->opcode].needs_async_data)
3251 		return 0;
3252 	if (!req->async_data) {
3253 		if (__io_alloc_async_data(req))
3254 			return -ENOMEM;
3255 
3256 		io_req_map_rw(req, iovec, fast_iov, iter);
3257 	}
3258 	return 0;
3259 }
3260 
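/*
 * Import the iovec up front for requests that need async data, using the
 * inline fast_iov storage; an externally allocated iovec is remembered in
 * free_iovec and freed during request cleanup.
 */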
3261 static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
3262 {
3263 	struct io_async_rw *iorw = req->async_data;
3264 	struct iovec *iov = iorw->fast_iov;
3265 	ssize_t ret;
3266 
3267 	ret = __io_import_iovec(rw, req, &iov, &iorw->iter, false);
3268 	if (unlikely(ret < 0))
3269 		return ret;
3270 
3271 	iorw->bytes_done = 0;
3272 	iorw->free_iovec = iov;
3273 	if (iov)
3274 		req->flags |= REQ_F_NEED_CLEANUP;
3275 	return 0;
3276 }
3277 
3278 static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3279 {
3280 	ssize_t ret;
3281 
3282 	ret = io_prep_rw(req, sqe);
3283 	if (ret)
3284 		return ret;
3285 
3286 	if (unlikely(!(req->file->f_mode & FMODE_READ)))
3287 		return -EBADF;
3288 
3289 	/* either don't need iovec imported or already have it */
3290 	if (!req->async_data)
3291 		return 0;
3292 	return io_rw_prep_async(req, READ);
3293 }
3294 
3295 /*
3296  * This is our waitqueue callback handler, registered through lock_page_async()
3297  * when we initially tried to do the IO with the iocb and armed our waitqueue.
3298  * This gets called when the page is unlocked, and we generally expect that to
3299  * happen when the page IO is completed and the page is now uptodate. This will
3300  * queue a task_work based retry of the operation, attempting to copy the data
3301  * again. If the latter fails because the page was NOT uptodate, then we will
3302  * do a thread based blocking retry of the operation. That's the unexpected
3303  * slow path.
3304  */
3305 static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3306 			     int sync, void *arg)
3307 {
3308 	struct wait_page_queue *wpq;
3309 	struct io_kiocb *req = wait->private;
3310 	struct wait_page_key *key = arg;
3311 	int ret;
3312 
3313 	wpq = container_of(wait, struct wait_page_queue, wait);
3314 
3315 	if (!wake_page_match(wpq, key))
3316 		return 0;
3317 
3318 	req->rw.kiocb.ki_flags &= ~IOCB_WAITQ;
3319 	list_del_init(&wait->entry);
3320 
3321 	init_task_work(&req->task_work, io_req_task_submit);
3322 	percpu_ref_get(&req->ctx->refs);
3323 
3324 	/* submit ref gets dropped, acquire a new one */
3325 	refcount_inc(&req->refs);
3326 	ret = io_req_task_work_add(req, true);
3327 	if (unlikely(ret))
3328 		io_req_task_work_add_fallback(req, io_req_task_cancel);
3329 	return 1;
3330 }
3331 
3332 /*
3333  * This controls whether a given IO request should be armed for async page
3334  * based retry. If we return false here, the request is handed to the async
3335  * worker threads for retry. If we're doing buffered reads on a regular file,
3336  * we prepare a private wait_page_queue entry and retry the operation. This
3337  * will either succeed because the page is now uptodate and unlocked, or it
3338  * will register a callback when the page is unlocked at IO completion. Through
3339  * that callback, io_uring uses task_work to setup a retry of the operation.
3340  * That retry will attempt the buffered read again. The retry will generally
3341  * succeed, or in rare cases where it fails, we then fall back to using the
3342  * async worker threads for a blocking retry.
3343  */
3344 static bool io_rw_should_retry(struct io_kiocb *req)
3345 {
3346 	struct io_async_rw *rw = req->async_data;
3347 	struct wait_page_queue *wait = &rw->wpq;
3348 	struct kiocb *kiocb = &req->rw.kiocb;
3349 
3350 	/* never retry for NOWAIT, we just complete with -EAGAIN */
3351 	if (req->flags & REQ_F_NOWAIT)
3352 		return false;
3353 
3354 	/* Only for buffered IO */
3355 	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
3356 		return false;
3357 
3358 	/*
3359 	 * just use poll if we can, and don't attempt if the fs doesn't
3360 	 * support callback based unlocks
3361 	 */
3362 	if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC))
3363 		return false;
3364 
3365 	wait->wait.func = io_async_buf_func;
3366 	wait->wait.private = req;
3367 	wait->wait.flags = 0;
3368 	INIT_LIST_HEAD(&wait->wait.entry);
3369 	kiocb->ki_flags |= IOCB_WAITQ;
3370 	kiocb->ki_flags &= ~IOCB_NOWAIT;
3371 	kiocb->ki_waitq = wait;
3372 	return true;
3373 }
3374 
3375 static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter)
3376 {
3377 	if (req->file->f_op->read_iter)
3378 		return call_read_iter(req->file, &req->rw.kiocb, iter);
3379 	else if (req->file->f_op->read)
3380 		return loop_rw_iter(READ, req, iter);
3381 	else
3382 		return -EINVAL;
3383 }
3384 
3385 static int io_read(struct io_kiocb *req, bool force_nonblock,
3386 		   struct io_comp_state *cs)
3387 {
3388 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
3389 	struct kiocb *kiocb = &req->rw.kiocb;
3390 	struct iov_iter __iter, *iter = &__iter;
3391 	struct iov_iter iter_cp;
3392 	struct io_async_rw *rw = req->async_data;
3393 	ssize_t io_size, ret, ret2;
3394 	bool no_async;
3395 
3396 	if (rw)
3397 		iter = &rw->iter;
3398 
3399 	ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock);
3400 	if (ret < 0)
3401 		return ret;
3402 	iter_cp = *iter;
3403 	io_size = iov_iter_count(iter);
3404 	req->result = io_size;
3405 	ret = 0;
3406 
3407 	/* Ensure we clear previously set non-block flag */
3408 	if (!force_nonblock)
3409 		kiocb->ki_flags &= ~IOCB_NOWAIT;
3410 	else
3411 		kiocb->ki_flags |= IOCB_NOWAIT;
3412 
3413 
3414 	/* If the file doesn't support async, just async punt */
3415 	no_async = force_nonblock && !io_file_supports_async(req->file, READ);
3416 	if (no_async)
3417 		goto copy_iov;
3418 
3419 	ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), io_size);
3420 	if (unlikely(ret))
3421 		goto out_free;
3422 
3423 	ret = io_iter_do_read(req, iter);
3424 
3425 	if (!ret) {
3426 		goto done;
3427 	} else if (ret == -EIOCBQUEUED) {
3428 		ret = 0;
3429 		goto out_free;
3430 	} else if (ret == -EAGAIN) {
3431 		/* IOPOLL retry should happen for io-wq threads */
3432 		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
3433 			goto done;
3434 		/* no retry on NONBLOCK marked file */
3435 		if (req->file->f_flags & O_NONBLOCK)
3436 			goto done;
3437 		/* some cases will consume bytes even on error returns */
3438 		*iter = iter_cp;
3439 		ret = 0;
3440 		goto copy_iov;
3441 	} else if (ret < 0) {
3442 		/* make sure -ERESTARTSYS -> -EINTR is done */
3443 		goto done;
3444 	}
3445 
3446 	/* read it all, or we did blocking attempt. no retry. */
3447 	if (!iov_iter_count(iter) || !force_nonblock ||
3448 	    (req->file->f_flags & O_NONBLOCK) || !(req->flags & REQ_F_ISREG))
3449 		goto done;
3450 
3451 	io_size -= ret;
3452 copy_iov:
3453 	ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true);
3454 	if (ret2) {
3455 		ret = ret2;
3456 		goto out_free;
3457 	}
3458 	if (no_async)
3459 		return -EAGAIN;
3460 	rw = req->async_data;
3461 	/* it's copied and will be cleaned with ->io */
3462 	iovec = NULL;
3463 	/* now use our persistent iterator, if we aren't already */
3464 	iter = &rw->iter;
3465 retry:
3466 	rw->bytes_done += ret;
3467 	/* if we can retry, do so with the callbacks armed */
3468 	if (!io_rw_should_retry(req)) {
3469 		kiocb->ki_flags &= ~IOCB_WAITQ;
3470 		return -EAGAIN;
3471 	}
3472 
3473 	/*
3474 	 * Now retry read with the IOCB_WAITQ parts set in the iocb. If we
3475 	 * get -EIOCBQUEUED, then we'll get a notification when the desired
3476 	 * page gets unlocked. We can also get a partial read here, and if we
3477 	 * do, then just retry at the new offset.
3478 	 */
3479 	ret = io_iter_do_read(req, iter);
3480 	if (ret == -EIOCBQUEUED) {
3481 		ret = 0;
3482 		goto out_free;
3483 	} else if (ret > 0 && ret < io_size) {
3484 		/* we got some bytes, but not all. retry. */
3485 		kiocb->ki_flags &= ~IOCB_WAITQ;
3486 		goto retry;
3487 	}
3488 done:
3489 	kiocb_done(kiocb, ret, cs);
3490 	ret = 0;
3491 out_free:
3492 	/* it's reportedly faster than delegating the null check to kfree() */
3493 	if (iovec)
3494 		kfree(iovec);
3495 	return ret;
3496 }
3497 
3498 static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3499 {
3500 	ssize_t ret;
3501 
3502 	ret = io_prep_rw(req, sqe);
3503 	if (ret)
3504 		return ret;
3505 
3506 	if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
3507 		return -EBADF;
3508 
3509 	/* either don't need iovec imported or already have it */
3510 	if (!req->async_data)
3511 		return 0;
3512 	return io_rw_prep_async(req, WRITE);
3513 }
3514 
3515 static int io_write(struct io_kiocb *req, bool force_nonblock,
3516 		    struct io_comp_state *cs)
3517 {
3518 	struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
3519 	struct kiocb *kiocb = &req->rw.kiocb;
3520 	struct iov_iter __iter, *iter = &__iter;
3521 	struct iov_iter iter_cp;
3522 	struct io_async_rw *rw = req->async_data;
3523 	ssize_t ret, ret2, io_size;
3524 
3525 	if (rw)
3526 		iter = &rw->iter;
3527 
3528 	ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock);
3529 	if (ret < 0)
3530 		return ret;
3531 	iter_cp = *iter;
3532 	io_size = iov_iter_count(iter);
3533 	req->result = io_size;
3534 
3535 	/* Ensure we clear previously set non-block flag */
3536 	if (!force_nonblock)
3537 		kiocb->ki_flags &= ~IOCB_NOWAIT;
3538 	else
3539 		kiocb->ki_flags |= IOCB_NOWAIT;
3540 
3541 	/* If the file doesn't support async, just async punt */
3542 	if (force_nonblock && !io_file_supports_async(req->file, WRITE))
3543 		goto copy_iov;
3544 
3545 	/* file path doesn't support NOWAIT for non-direct_IO */
3546 	if (force_nonblock && !(kiocb->ki_flags & IOCB_DIRECT) &&
3547 	    (req->flags & REQ_F_ISREG))
3548 		goto copy_iov;
3549 
3550 	ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), io_size);
3551 	if (unlikely(ret))
3552 		goto out_free;
3553 
3554 	/*
3555 	 * Open-code file_start_write here to grab freeze protection,
3556 	 * which will be released by another thread in
3557 	 * io_complete_rw().  Fool lockdep by telling it the lock got
3558 	 * released so that it doesn't complain about the held lock when
3559 	 * we return to userspace.
3560 	 */
3561 	if (req->flags & REQ_F_ISREG) {
3562 		sb_start_write(file_inode(req->file)->i_sb);
3563 		__sb_writers_release(file_inode(req->file)->i_sb,
3564 					SB_FREEZE_WRITE);
3565 	}
3566 	kiocb->ki_flags |= IOCB_WRITE;
3567 
3568 	if (req->file->f_op->write_iter)
3569 		ret2 = call_write_iter(req->file, kiocb, iter);
3570 	else if (req->file->f_op->write)
3571 		ret2 = loop_rw_iter(WRITE, req, iter);
3572 	else
3573 		ret2 = -EINVAL;
3574 
3575 	/*
3576 	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
3577 	 * retry them without IOCB_NOWAIT.
3578 	 */
3579 	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
3580 		ret2 = -EAGAIN;
3581 	/* no retry on NONBLOCK marked file */
3582 	if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK))
3583 		goto done;
3584 	if (!force_nonblock || ret2 != -EAGAIN) {
3585 		/* IOPOLL retry should happen for io-wq threads */
3586 		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
3587 			goto copy_iov;
3588 done:
3589 		kiocb_done(kiocb, ret2, cs);
3590 	} else {
3591 copy_iov:
3592 		/* some cases will consume bytes even on error returns */
3593 		*iter = iter_cp;
3594 		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
3595 		if (!ret)
3596 			return -EAGAIN;
3597 	}
3598 out_free:
3599 	/* it's reportedly faster than delegating the null check to kfree() */
3600 	if (iovec)
3601 		kfree(iovec);
3602 	return ret;
3603 }
3604 
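/*
 * Illustrative sketch (not part of this file): a userspace write through
 * the liburing helpers. If the target file cannot honour IOCB_NOWAIT, the
 * request is punted to io-wq via the copy_iov path above; the submitter
 * still only sees a single completion. Assumes caller-defined "ring",
 * "fd" and "buf":
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_write(sqe, fd, buf, sizeof(buf), 0);
 *	io_uring_submit(&ring);
 */
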
3605 static int __io_splice_prep(struct io_kiocb *req,
3606 			    const struct io_uring_sqe *sqe)
3607 {
3608 	struct io_splice *sp = &req->splice;
3609 	unsigned int valid_flags = SPLICE_F_FD_IN_FIXED | SPLICE_F_ALL;
3610 
3611 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3612 		return -EINVAL;
3613 
3614 	sp->file_in = NULL;
3615 	sp->len = READ_ONCE(sqe->len);
3616 	sp->flags = READ_ONCE(sqe->splice_flags);
3617 
3618 	if (unlikely(sp->flags & ~valid_flags))
3619 		return -EINVAL;
3620 
3621 	sp->file_in = io_file_get(NULL, req, READ_ONCE(sqe->splice_fd_in),
3622 				  (sp->flags & SPLICE_F_FD_IN_FIXED));
3623 	if (!sp->file_in)
3624 		return -EBADF;
3625 	req->flags |= REQ_F_NEED_CLEANUP;
3626 
3627 	if (!S_ISREG(file_inode(sp->file_in)->i_mode)) {
3628 		/*
3629 		 * Splice operation will be punted async, and we need to modify
3630 		 * io_wq_work.flags here, so initialize io_wq_work first.
3631 		 */
3632 		io_req_init_async(req);
3633 		req->work.flags |= IO_WQ_WORK_UNBOUND;
3634 	}
3635 
3636 	return 0;
3637 }
3638 
3639 static int io_tee_prep(struct io_kiocb *req,
3640 		       const struct io_uring_sqe *sqe)
3641 {
3642 	if (READ_ONCE(sqe->splice_off_in) || READ_ONCE(sqe->off))
3643 		return -EINVAL;
3644 	return __io_splice_prep(req, sqe);
3645 }
3646 
3647 static int io_tee(struct io_kiocb *req, bool force_nonblock)
3648 {
3649 	struct io_splice *sp = &req->splice;
3650 	struct file *in = sp->file_in;
3651 	struct file *out = sp->file_out;
3652 	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3653 	long ret = 0;
3654 
3655 	if (force_nonblock)
3656 		return -EAGAIN;
3657 	if (sp->len)
3658 		ret = do_tee(in, out, sp->len, flags);
3659 
3660 	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3661 	req->flags &= ~REQ_F_NEED_CLEANUP;
3662 
3663 	if (ret != sp->len)
3664 		req_set_fail_links(req);
3665 	io_req_complete(req, ret);
3666 	return 0;
3667 }
3668 
3669 static int io_splice_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3670 {
3671 	struct io_splice *sp = &req->splice;
3672 
3673 	sp->off_in = READ_ONCE(sqe->splice_off_in);
3674 	sp->off_out = READ_ONCE(sqe->off);
3675 	return __io_splice_prep(req, sqe);
3676 }
3677 
3678 static int io_splice(struct io_kiocb *req, bool force_nonblock)
3679 {
3680 	struct io_splice *sp = &req->splice;
3681 	struct file *in = sp->file_in;
3682 	struct file *out = sp->file_out;
3683 	unsigned int flags = sp->flags & ~SPLICE_F_FD_IN_FIXED;
3684 	loff_t *poff_in, *poff_out;
3685 	long ret = 0;
3686 
3687 	if (force_nonblock)
3688 		return -EAGAIN;
3689 
3690 	poff_in = (sp->off_in == -1) ? NULL : &sp->off_in;
3691 	poff_out = (sp->off_out == -1) ? NULL : &sp->off_out;
3692 
3693 	if (sp->len)
3694 		ret = do_splice(in, poff_in, out, poff_out, sp->len, flags);
3695 
3696 	io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED));
3697 	req->flags &= ~REQ_F_NEED_CLEANUP;
3698 
3699 	if (ret != sp->len)
3700 		req_set_fail_links(req);
3701 	io_req_complete(req, ret);
3702 	return 0;
3703 }
3704 
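/*
 * Illustrative sketch (not part of this file): queueing IORING_OP_SPLICE
 * with the liburing helper, assuming caller-defined "pipe_fd" and
 * "file_fd" are open and "ring" is initialised. An offset of -1 means
 * "use the file position", matching the off_in/off_out handling above:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_splice(sqe, pipe_fd, -1, file_fd, 0, 4096, 0);
 *	io_uring_submit(&ring);
 */
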
3705 /*
3706  * IORING_OP_NOP just posts a completion event, nothing else.
3707  */
3708 static int io_nop(struct io_kiocb *req, struct io_comp_state *cs)
3709 {
3710 	struct io_ring_ctx *ctx = req->ctx;
3711 
3712 	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3713 		return -EINVAL;
3714 
3715 	__io_req_complete(req, 0, 0, cs);
3716 	return 0;
3717 }
3718 
3719 static int io_prep_fsync(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3720 {
3721 	struct io_ring_ctx *ctx = req->ctx;
3722 
3723 	if (!req->file)
3724 		return -EBADF;
3725 
3726 	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
3727 		return -EINVAL;
3728 	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
3729 		     sqe->splice_fd_in))
3730 		return -EINVAL;
3731 
3732 	req->sync.flags = READ_ONCE(sqe->fsync_flags);
3733 	if (unlikely(req->sync.flags & ~IORING_FSYNC_DATASYNC))
3734 		return -EINVAL;
3735 
3736 	req->sync.off = READ_ONCE(sqe->off);
3737 	req->sync.len = READ_ONCE(sqe->len);
3738 	return 0;
3739 }
3740 
3741 static int io_fsync(struct io_kiocb *req, bool force_nonblock)
3742 {
3743 	loff_t end = req->sync.off + req->sync.len;
3744 	int ret;
3745 
3746 	/* fsync always requires a blocking context */
3747 	if (force_nonblock)
3748 		return -EAGAIN;
3749 
3750 	ret = vfs_fsync_range(req->file, req->sync.off,
3751 				end > 0 ? end : LLONG_MAX,
3752 				req->sync.flags & IORING_FSYNC_DATASYNC);
3753 	if (ret < 0)
3754 		req_set_fail_links(req);
3755 	io_req_complete(req, ret);
3756 	return 0;
3757 }
3758 
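/*
 * Illustrative sketch (not part of this file): an fsync/fdatasync request
 * from userspace with the liburing helpers, assuming caller-defined
 * "ring" and "fd". IORING_FSYNC_DATASYNC maps to the datasync argument of
 * vfs_fsync_range() above:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_fsync(sqe, fd, IORING_FSYNC_DATASYNC);
 *	io_uring_submit(&ring);
 */
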
3759 static int io_fallocate_prep(struct io_kiocb *req,
3760 			     const struct io_uring_sqe *sqe)
3761 {
3762 	if (sqe->ioprio || sqe->buf_index || sqe->rw_flags ||
3763 	    sqe->splice_fd_in)
3764 		return -EINVAL;
3765 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
3766 		return -EINVAL;
3767 
3768 	req->sync.off = READ_ONCE(sqe->off);
3769 	req->sync.len = READ_ONCE(sqe->addr);
3770 	req->sync.mode = READ_ONCE(sqe->len);
3771 	return 0;
3772 }
3773 
3774 static int io_fallocate(struct io_kiocb *req, bool force_nonblock)
3775 {
3776 	int ret;
3777 
3778 	/* fallocate always requires a blocking context */
3779 	if (force_nonblock)
3780 		return -EAGAIN;
3781 	ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
3782 				req->sync.len);
3783 	if (ret < 0)
3784 		req_set_fail_links(req);
3785 	io_req_complete(req, ret);
3786 	return 0;
3787 }
3788 
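/*
 * Illustrative sketch (not part of this file): the unusual sqe field
 * mapping in io_fallocate_prep() above (length in sqe->addr, mode in
 * sqe->len) is hidden by the liburing helper, which takes mode, offset
 * and length in the natural order. Assuming caller-defined "ring" and
 * "fd", preallocating 1 MiB at offset 0 with mode 0:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_fallocate(sqe, fd, 0, 0, 1 << 20);
 *	io_uring_submit(&ring);
 */
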
3789 static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3790 {
3791 	const char __user *fname;
3792 	int ret;
3793 
3794 	if (unlikely(sqe->ioprio || sqe->buf_index || sqe->splice_fd_in))
3795 		return -EINVAL;
3796 	if (unlikely(req->flags & REQ_F_FIXED_FILE))
3797 		return -EBADF;
3798 
3799 	/* open.how should be already initialised */
3800 	/* open.how should already be initialised */
3801 		req->open.how.flags |= O_LARGEFILE;
3802 
3803 	req->open.dfd = READ_ONCE(sqe->fd);
3804 	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
3805 	req->open.filename = getname(fname);
3806 	if (IS_ERR(req->open.filename)) {
3807 		ret = PTR_ERR(req->open.filename);
3808 		req->open.filename = NULL;
3809 		return ret;
3810 	}
3811 	req->open.nofile = rlimit(RLIMIT_NOFILE);
3812 	req->open.ignore_nonblock = false;
3813 	req->flags |= REQ_F_NEED_CLEANUP;
3814 	return 0;
3815 }
3816 
3817 static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3818 {
3819 	u64 flags, mode;
3820 
3821 	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3822 		return -EINVAL;
3823 	mode = READ_ONCE(sqe->len);
3824 	flags = READ_ONCE(sqe->open_flags);
3825 	req->open.how = build_open_how(flags, mode);
3826 	return __io_openat_prep(req, sqe);
3827 }
3828 
3829 static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
3830 {
3831 	struct open_how __user *how;
3832 	size_t len;
3833 	int ret;
3834 
3835 	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
3836 		return -EINVAL;
3837 	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
3838 	len = READ_ONCE(sqe->len);
3839 	if (len < OPEN_HOW_SIZE_VER0)
3840 		return -EINVAL;
3841 
3842 	ret = copy_struct_from_user(&req->open.how, sizeof(req->open.how), how,
3843 					len);
3844 	if (ret)
3845 		return ret;
3846 
3847 	return __io_openat_prep(req, sqe);
3848 }
3849 
3850 static int io_openat2(struct io_kiocb *req, bool force_nonblock)
3851 {
3852 	struct open_flags op;
3853 	struct file *file;
3854 	int ret;
3855 
3856 	if (force_nonblock && !req->open.ignore_nonblock)
3857 		return -EAGAIN;
3858 
3859 	ret = build_open_flags(&req->open.how, &op);
3860 	if (ret)
3861 		goto err;
3862 
3863 	ret = __get_unused_fd_flags(req->open.how.flags, req->open.nofile);
3864 	if (ret < 0)
3865 		goto err;
3866 
3867 	file = do_filp_open(req->open.dfd, req->open.filename, &op);
3868 	if (IS_ERR(file)) {
3869 		put_unused_fd(ret);
3870 		ret = PTR_ERR(file);
3871 		/*
3872 		 * A work-around to ensure that /proc/self works the way
3873 		 * it should - if we get -EOPNOTSUPP back, then assume
3874 		 * that proc_self_get_link() failed us because we're in async
3875 		 * context. We should be safe to retry this from the task
3876 		 * itself with force_nonblock == false set, as it should not
3877 		 * block on lookup. Would be nice to know this upfront and
3878 		 * avoid the async dance, but doesn't seem feasible.
3879 		 */
3880 		if (ret == -EOPNOTSUPP && io_wq_current_is_worker()) {
3881 			req->open.ignore_nonblock = true;
3882 			refcount_inc(&req->refs);
3883 			io_req_task_queue(req);
3884 			return 0;
3885 		}
3886 	} else {
3887 		fsnotify_open(file);
3888 		fd_install(ret, file);
3889 	}
3890 err:
3891 	putname(req->open.filename);
3892 	req->flags &= ~REQ_F_NEED_CLEANUP;
3893 	if (ret < 0)
3894 		req_set_fail_links(req);
3895 	io_req_complete(req, ret);
3896 	return 0;
3897 }
3898 
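/*
 * Illustrative sketch (not part of this file): IORING_OP_OPENAT2 takes a
 * struct open_how copied in by io_openat2_prep() above. With the liburing
 * helper, assuming a caller-defined "ring" and a hypothetical path:
 *
 *	struct open_how how = { .flags = O_RDONLY };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_openat2(sqe, AT_FDCWD, "/etc/hostname", &how);
 *	io_uring_submit(&ring);
 *
 * The resulting cqe->res is the new file descriptor, or -errno.
 */
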
3899 static int io_openat(struct io_kiocb *req, bool force_nonblock)
3900 {
3901 	return io_openat2(req, force_nonblock);
3902 }
3903 
3904 static int io_remove_buffers_prep(struct io_kiocb *req,
3905 				  const struct io_uring_sqe *sqe)
3906 {
3907 	struct io_provide_buf *p = &req->pbuf;
3908 	u64 tmp;
3909 
3910 	if (sqe->ioprio || sqe->rw_flags || sqe->addr || sqe->len || sqe->off ||
3911 	    sqe->splice_fd_in)
3912 		return -EINVAL;
3913 
3914 	tmp = READ_ONCE(sqe->fd);
3915 	if (!tmp || tmp > USHRT_MAX)
3916 		return -EINVAL;
3917 
3918 	memset(p, 0, sizeof(*p));
3919 	p->nbufs = tmp;
3920 	p->bgid = READ_ONCE(sqe->buf_group);
3921 	return 0;
3922 }
3923 
3924 static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf,
3925 			       int bgid, unsigned nbufs)
3926 {
3927 	unsigned i = 0;
3928 
3929 	/* shouldn't happen */
3930 	if (!nbufs)
3931 		return 0;
3932 
3933 	/* the head kbuf is the list itself */
3934 	while (!list_empty(&buf->list)) {
3935 		struct io_buffer *nxt;
3936 
3937 		nxt = list_first_entry(&buf->list, struct io_buffer, list);
3938 		list_del(&nxt->list);
3939 		kfree(nxt);
3940 		if (++i == nbufs)
3941 			return i;
3942 	}
3943 	i++;
3944 	kfree(buf);
3945 	xa_erase(&ctx->io_buffers, bgid);
3946 
3947 	return i;
3948 }
3949 
3950 static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock,
3951 			     struct io_comp_state *cs)
3952 {
3953 	struct io_provide_buf *p = &req->pbuf;
3954 	struct io_ring_ctx *ctx = req->ctx;
3955 	struct io_buffer *head;
3956 	int ret = 0;
3957 
3958 	io_ring_submit_lock(ctx, !force_nonblock);
3959 
3960 	lockdep_assert_held(&ctx->uring_lock);
3961 
3962 	ret = -ENOENT;
3963 	head = xa_load(&ctx->io_buffers, p->bgid);
3964 	if (head)
3965 		ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs);
3966 	if (ret < 0)
3967 		req_set_fail_links(req);
3968 
3969 	/* need to hold the lock to complete IOPOLL requests */
3970 	if (ctx->flags & IORING_SETUP_IOPOLL) {
3971 		__io_req_complete(req, ret, 0, cs);
3972 		io_ring_submit_unlock(ctx, !force_nonblock);
3973 	} else {
3974 		io_ring_submit_unlock(ctx, !force_nonblock);
3975 		__io_req_complete(req, ret, 0, cs);
3976 	}
3977 	return 0;
3978 }
3979 
3980 static int io_provide_buffers_prep(struct io_kiocb *req,
3981 				   const struct io_uring_sqe *sqe)
3982 {
3983 	unsigned long size, tmp_check;
3984 	struct io_provide_buf *p = &req->pbuf;
3985 	u64 tmp;
3986 
3987 	if (sqe->ioprio || sqe->rw_flags || sqe->splice_fd_in)
3988 		return -EINVAL;
3989 
3990 	tmp = READ_ONCE(sqe->fd);
3991 	if (!tmp || tmp > USHRT_MAX)
3992 		return -E2BIG;
3993 	p->nbufs = tmp;
3994 	p->addr = READ_ONCE(sqe->addr);
3995 	p->len = READ_ONCE(sqe->len);
3996 
3997 	if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
3998 				&size))
3999 		return -EOVERFLOW;
4000 	if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
4001 		return -EOVERFLOW;
4002 
4003 	size = (unsigned long)p->len * p->nbufs;
4004 	if (!access_ok(u64_to_user_ptr(p->addr), size))
4005 		return -EFAULT;
4006 
4007 	p->bgid = READ_ONCE(sqe->buf_group);
4008 	tmp = READ_ONCE(sqe->off);
4009 	if (tmp > USHRT_MAX)
4010 		return -E2BIG;
4011 	p->bid = tmp;
4012 	return 0;
4013 }
4014 
4015 static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head)
4016 {
4017 	struct io_buffer *buf;
4018 	u64 addr = pbuf->addr;
4019 	int i, bid = pbuf->bid;
4020 
4021 	for (i = 0; i < pbuf->nbufs; i++) {
4022 		buf = kmalloc(sizeof(*buf), GFP_KERNEL_ACCOUNT);
4023 		if (!buf)
4024 			break;
4025 
4026 		buf->addr = addr;
4027 		buf->len = min_t(__u32, pbuf->len, MAX_RW_COUNT);
4028 		buf->bid = bid;
4029 		addr += pbuf->len;
4030 		bid++;
4031 		if (!*head) {
4032 			INIT_LIST_HEAD(&buf->list);
4033 			*head = buf;
4034 		} else {
4035 			list_add_tail(&buf->list, &(*head)->list);
4036 		}
4037 	}
4038 
4039 	return i ? i : -ENOMEM;
4040 }
4041 
4042 static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock,
4043 			      struct io_comp_state *cs)
4044 {
4045 	struct io_provide_buf *p = &req->pbuf;
4046 	struct io_ring_ctx *ctx = req->ctx;
4047 	struct io_buffer *head, *list;
4048 	int ret = 0;
4049 
4050 	io_ring_submit_lock(ctx, !force_nonblock);
4051 
4052 	lockdep_assert_held(&ctx->uring_lock);
4053 
4054 	list = head = xa_load(&ctx->io_buffers, p->bgid);
4055 
4056 	ret = io_add_buffers(p, &head);
4057 	if (ret >= 0 && !list) {
4058 		ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
4059 		if (ret < 0)
4060 			__io_remove_buffers(ctx, head, p->bgid, -1U);
4061 	}
4062 	if (ret < 0)
4063 		req_set_fail_links(req);
4064 
4065 	/* need to hold the lock to complete IOPOLL requests */
4066 	if (ctx->flags & IORING_SETUP_IOPOLL) {
4067 		__io_req_complete(req, ret, 0, cs);
4068 		io_ring_submit_unlock(ctx, !force_nonblock);
4069 	} else {
4070 		io_ring_submit_unlock(ctx, !force_nonblock);
4071 		__io_req_complete(req, ret, 0, cs);
4072 	}
4073 	return 0;
4074 }
4075 
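/*
 * Illustrative sketch (not part of this file): registering a group of
 * provided buffers and then consuming one with a buffer-select receive.
 * Assumes the liburing helpers and caller-defined "ring", "sockfd" and a
 * "bufs" area of 8 x 4096 bytes; buffer group id 1, buffer ids from 0:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_provide_buffers(sqe, bufs, 4096, 8, 1, 0);
 *	io_uring_submit(&ring);
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv(sqe, sockfd, NULL, 4096, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = 1;
 *	io_uring_submit(&ring);
 *
 * The chosen buffer id is reported in the upper bits of cqe->flags
 * (IORING_CQE_F_BUFFER / IORING_CQE_BUFFER_SHIFT).
 */
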
4076 static int io_epoll_ctl_prep(struct io_kiocb *req,
4077 			     const struct io_uring_sqe *sqe)
4078 {
4079 #if defined(CONFIG_EPOLL)
4080 	if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
4081 		return -EINVAL;
4082 	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
4083 		return -EINVAL;
4084 
4085 	req->epoll.epfd = READ_ONCE(sqe->fd);
4086 	req->epoll.op = READ_ONCE(sqe->len);
4087 	req->epoll.fd = READ_ONCE(sqe->off);
4088 
4089 	if (ep_op_has_event(req->epoll.op)) {
4090 		struct epoll_event __user *ev;
4091 
4092 		ev = u64_to_user_ptr(READ_ONCE(sqe->addr));
4093 		if (copy_from_user(&req->epoll.event, ev, sizeof(*ev)))
4094 			return -EFAULT;
4095 	}
4096 
4097 	return 0;
4098 #else
4099 	return -EOPNOTSUPP;
4100 #endif
4101 }
4102 
4103 static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock,
4104 			struct io_comp_state *cs)
4105 {
4106 #if defined(CONFIG_EPOLL)
4107 	struct io_epoll *ie = &req->epoll;
4108 	int ret;
4109 
4110 	ret = do_epoll_ctl(ie->epfd, ie->op, ie->fd, &ie->event, force_nonblock);
4111 	if (force_nonblock && ret == -EAGAIN)
4112 		return -EAGAIN;
4113 
4114 	if (ret < 0)
4115 		req_set_fail_links(req);
4116 	__io_req_complete(req, ret, 0, cs);
4117 	return 0;
4118 #else
4119 	return -EOPNOTSUPP;
4120 #endif
4121 }
4122 
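/*
 * Illustrative sketch (not part of this file): driving epoll_ctl()
 * through the ring with the liburing helper, assuming caller-defined
 * "ring", "epfd" and "sockfd":
 *
 *	struct epoll_event ev = { .events = EPOLLIN, .data.fd = sockfd };
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_epoll_ctl(sqe, epfd, sockfd, EPOLL_CTL_ADD, &ev);
 *	io_uring_submit(&ring);
 */
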
4123 static int io_madvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4124 {
4125 #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4126 	if (sqe->ioprio || sqe->buf_index || sqe->off || sqe->splice_fd_in)
4127 		return -EINVAL;
4128 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4129 		return -EINVAL;
4130 
4131 	req->madvise.addr = READ_ONCE(sqe->addr);
4132 	req->madvise.len = READ_ONCE(sqe->len);
4133 	req->madvise.advice = READ_ONCE(sqe->fadvise_advice);
4134 	return 0;
4135 #else
4136 	return -EOPNOTSUPP;
4137 #endif
4138 }
4139 
4140 static int io_madvise(struct io_kiocb *req, bool force_nonblock)
4141 {
4142 #if defined(CONFIG_ADVISE_SYSCALLS) && defined(CONFIG_MMU)
4143 	struct io_madvise *ma = &req->madvise;
4144 	int ret;
4145 
4146 	if (force_nonblock)
4147 		return -EAGAIN;
4148 
4149 	ret = do_madvise(current->mm, ma->addr, ma->len, ma->advice);
4150 	if (ret < 0)
4151 		req_set_fail_links(req);
4152 	io_req_complete(req, ret);
4153 	return 0;
4154 #else
4155 	return -EOPNOTSUPP;
4156 #endif
4157 }
4158 
4159 static int io_fadvise_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4160 {
4161 	if (sqe->ioprio || sqe->buf_index || sqe->addr || sqe->splice_fd_in)
4162 		return -EINVAL;
4163 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4164 		return -EINVAL;
4165 
4166 	req->fadvise.offset = READ_ONCE(sqe->off);
4167 	req->fadvise.len = READ_ONCE(sqe->len);
4168 	req->fadvise.advice = READ_ONCE(sqe->fadvise_advice);
4169 	return 0;
4170 }
4171 
4172 static int io_fadvise(struct io_kiocb *req, bool force_nonblock)
4173 {
4174 	struct io_fadvise *fa = &req->fadvise;
4175 	int ret;
4176 
4177 	if (force_nonblock) {
4178 		switch (fa->advice) {
4179 		case POSIX_FADV_NORMAL:
4180 		case POSIX_FADV_RANDOM:
4181 		case POSIX_FADV_SEQUENTIAL:
4182 			break;
4183 		default:
4184 			return -EAGAIN;
4185 		}
4186 	}
4187 
4188 	ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice);
4189 	if (ret < 0)
4190 		req_set_fail_links(req);
4191 	io_req_complete(req, ret);
4192 	return 0;
4193 }
4194 
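/*
 * Illustrative sketch (not part of this file): as the switch above shows,
 * NORMAL/RANDOM/SEQUENTIAL advice is served inline while other advice
 * values are punted to io-wq. With the liburing helper, assuming
 * caller-defined "ring" and "fd" (offset 0, length 0 = to end of file):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_fadvise(sqe, fd, 0, 0, POSIX_FADV_SEQUENTIAL);
 *	io_uring_submit(&ring);
 */
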
4195 static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4196 {
4197 	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL)))
4198 		return -EINVAL;
4199 	if (sqe->ioprio || sqe->buf_index || sqe->splice_fd_in)
4200 		return -EINVAL;
4201 	if (req->flags & REQ_F_FIXED_FILE)
4202 		return -EBADF;
4203 
4204 	req->statx.dfd = READ_ONCE(sqe->fd);
4205 	req->statx.mask = READ_ONCE(sqe->len);
4206 	req->statx.filename = u64_to_user_ptr(READ_ONCE(sqe->addr));
4207 	req->statx.buffer = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4208 	req->statx.flags = READ_ONCE(sqe->statx_flags);
4209 
4210 	return 0;
4211 }
4212 
4213 static int io_statx(struct io_kiocb *req, bool force_nonblock)
4214 {
4215 	struct io_statx *ctx = &req->statx;
4216 	int ret;
4217 
4218 	if (force_nonblock) {
4219 		/* only need file table for an actual valid fd */
4220 		if (ctx->dfd == -1 || ctx->dfd == AT_FDCWD)
4221 			req->flags |= REQ_F_NO_FILE_TABLE;
4222 		return -EAGAIN;
4223 	}
4224 
4225 	ret = do_statx(ctx->dfd, ctx->filename, ctx->flags, ctx->mask,
4226 		       ctx->buffer);
4227 
4228 	if (ret < 0)
4229 		req_set_fail_links(req);
4230 	io_req_complete(req, ret);
4231 	return 0;
4232 }
4233 
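/*
 * Illustrative sketch (not part of this file): an asynchronous statx(2)
 * through the liburing helper, assuming a caller-defined "ring" and a
 * struct statx "stx" that stays valid until completion; the path here is
 * only an example:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_statx(sqe, AT_FDCWD, "/etc/hostname", 0,
 *			    STATX_SIZE, &stx);
 *	io_uring_submit(&ring);
 */
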
4234 static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4235 {
4236 	/*
4237 	 * If we queue this for async, it must not be cancellable. That would
4238 	 * leave the 'file' in an indeterminate state, and we need to modify
4239 	 * io_wq_work.flags here, so initialize io_wq_work first.
4240 	 */
4241 	io_req_init_async(req);
4242 
4243 	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
4244 		return -EINVAL;
4245 	if (sqe->ioprio || sqe->off || sqe->addr || sqe->len ||
4246 	    sqe->rw_flags || sqe->buf_index || sqe->splice_fd_in)
4247 		return -EINVAL;
4248 	if (req->flags & REQ_F_FIXED_FILE)
4249 		return -EBADF;
4250 
4251 	req->close.fd = READ_ONCE(sqe->fd);
4252 	if ((req->file && req->file->f_op == &io_uring_fops))
4253 		return -EBADF;
4254 
4255 	req->close.put_file = NULL;
4256 	return 0;
4257 }
4258 
4259 static int io_close(struct io_kiocb *req, bool force_nonblock,
4260 		    struct io_comp_state *cs)
4261 {
4262 	struct io_close *close = &req->close;
4263 	int ret;
4264 
4265 	/* might be already done during nonblock submission */
4266 	if (!close->put_file) {
4267 		ret = __close_fd_get_file(close->fd, &close->put_file);
4268 		if (ret < 0)
4269 			return (ret == -ENOENT) ? -EBADF : ret;
4270 	}
4271 
4272 	/* if the file has a flush method, be safe and punt to async */
4273 	if (close->put_file->f_op->flush && force_nonblock) {
4274 		/* not safe to cancel at this point */
4275 		req->work.flags |= IO_WQ_WORK_NO_CANCEL;
4276 		/* was never set, but play safe */
4277 		req->flags &= ~REQ_F_NOWAIT;
4278 		/* avoid grabbing files - we don't need the files */
4279 		req->flags |= REQ_F_NO_FILE_TABLE;
4280 		return -EAGAIN;
4281 	}
4282 
4283 	/* No ->flush() or already async, safely close from here */
4284 	ret = filp_close(close->put_file, req->work.identity->files);
4285 	if (ret < 0)
4286 		req_set_fail_links(req);
4287 	fput(close->put_file);
4288 	close->put_file = NULL;
4289 	__io_req_complete(req, ret, 0, cs);
4290 	return 0;
4291 }
4292 
4293 static int io_prep_sfr(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4294 {
4295 	struct io_ring_ctx *ctx = req->ctx;
4296 
4297 	if (!req->file)
4298 		return -EBADF;
4299 
4300 	if (unlikely(ctx->flags & IORING_SETUP_IOPOLL))
4301 		return -EINVAL;
4302 	if (unlikely(sqe->addr || sqe->ioprio || sqe->buf_index ||
4303 		     sqe->splice_fd_in))
4304 		return -EINVAL;
4305 
4306 	req->sync.off = READ_ONCE(sqe->off);
4307 	req->sync.len = READ_ONCE(sqe->len);
4308 	req->sync.flags = READ_ONCE(sqe->sync_range_flags);
4309 	return 0;
4310 }
4311 
4312 static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock)
4313 {
4314 	int ret;
4315 
4316 	/* sync_file_range always requires a blocking context */
4317 	if (force_nonblock)
4318 		return -EAGAIN;
4319 
4320 	ret = sync_file_range(req->file, req->sync.off, req->sync.len,
4321 				req->sync.flags);
4322 	if (ret < 0)
4323 		req_set_fail_links(req);
4324 	io_req_complete(req, ret);
4325 	return 0;
4326 }
4327 
4328 #if defined(CONFIG_NET)
4329 static int io_setup_async_msg(struct io_kiocb *req,
4330 			      struct io_async_msghdr *kmsg)
4331 {
4332 	struct io_async_msghdr *async_msg = req->async_data;
4333 
4334 	if (async_msg)
4335 		return -EAGAIN;
4336 	if (io_alloc_async_data(req)) {
4337 		if (kmsg->iov != kmsg->fast_iov)
4338 			kfree(kmsg->iov);
4339 		return -ENOMEM;
4340 	}
4341 	async_msg = req->async_data;
4342 	req->flags |= REQ_F_NEED_CLEANUP;
4343 	memcpy(async_msg, kmsg, sizeof(*kmsg));
4344 	return -EAGAIN;
4345 }
4346 
4347 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
4348 			       struct io_async_msghdr *iomsg)
4349 {
4350 	iomsg->iov = iomsg->fast_iov;
4351 	iomsg->msg.msg_name = &iomsg->addr;
4352 	return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg,
4353 				   req->sr_msg.msg_flags, &iomsg->iov);
4354 }
4355 
4356 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4357 {
4358 	struct io_async_msghdr *async_msg = req->async_data;
4359 	struct io_sr_msg *sr = &req->sr_msg;
4360 	int ret;
4361 
4362 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4363 		return -EINVAL;
4364 
4365 	sr->msg_flags = READ_ONCE(sqe->msg_flags);
4366 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
4367 	sr->len = READ_ONCE(sqe->len);
4368 
4369 #ifdef CONFIG_COMPAT
4370 	if (req->ctx->compat)
4371 		sr->msg_flags |= MSG_CMSG_COMPAT;
4372 #endif
4373 
4374 	if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
4375 		return 0;
4376 	ret = io_sendmsg_copy_hdr(req, async_msg);
4377 	if (!ret)
4378 		req->flags |= REQ_F_NEED_CLEANUP;
4379 	return ret;
4380 }
4381 
4382 static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
4383 		      struct io_comp_state *cs)
4384 {
4385 	struct io_async_msghdr iomsg, *kmsg;
4386 	struct socket *sock;
4387 	unsigned flags;
4388 	int min_ret = 0;
4389 	int ret;
4390 
4391 	sock = sock_from_file(req->file, &ret);
4392 	if (unlikely(!sock))
4393 		return ret;
4394 
4395 	if (req->async_data) {
4396 		kmsg = req->async_data;
4397 		kmsg->msg.msg_name = &kmsg->addr;
4398 		/* if iov is set, it's allocated already */
4399 		if (!kmsg->iov)
4400 			kmsg->iov = kmsg->fast_iov;
4401 		kmsg->msg.msg_iter.iov = kmsg->iov;
4402 	} else {
4403 		ret = io_sendmsg_copy_hdr(req, &iomsg);
4404 		if (ret)
4405 			return ret;
4406 		kmsg = &iomsg;
4407 	}
4408 
4409 	flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
4410 	if (flags & MSG_DONTWAIT)
4411 		req->flags |= REQ_F_NOWAIT;
4412 	else if (force_nonblock)
4413 		flags |= MSG_DONTWAIT;
4414 
4415 	if (flags & MSG_WAITALL)
4416 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4417 
4418 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
4419 	if (force_nonblock && ret == -EAGAIN)
4420 		return io_setup_async_msg(req, kmsg);
4421 	if (ret == -ERESTARTSYS)
4422 		ret = -EINTR;
4423 
4424 	if (kmsg->iov != kmsg->fast_iov)
4425 		kfree(kmsg->iov);
4426 	req->flags &= ~REQ_F_NEED_CLEANUP;
4427 	if (ret < min_ret)
4428 		req_set_fail_links(req);
4429 	__io_req_complete(req, ret, 0, cs);
4430 	return 0;
4431 }
4432 
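/*
 * Illustrative sketch (not part of this file): an asynchronous
 * sendmsg(2). As in the MSG_WAITALL handling above, a short send only
 * marks the request failed when MSG_WAITALL was requested. Assumes the
 * liburing helpers and caller-defined "ring", "sockfd" and a "msg" that
 * stays valid until completion:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_sendmsg(sqe, sockfd, &msg, 0);
 *	io_uring_submit(&ring);
 */
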
4433 static int io_send(struct io_kiocb *req, bool force_nonblock,
4434 		   struct io_comp_state *cs)
4435 {
4436 	struct io_sr_msg *sr = &req->sr_msg;
4437 	struct msghdr msg;
4438 	struct iovec iov;
4439 	struct socket *sock;
4440 	unsigned flags;
4441 	int min_ret = 0;
4442 	int ret;
4443 
4444 	sock = sock_from_file(req->file, &ret);
4445 	if (unlikely(!sock))
4446 		return ret;
4447 
4448 	ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter);
4449 	if (unlikely(ret))
4450 		return ret;
4451 
4452 	msg.msg_name = NULL;
4453 	msg.msg_control = NULL;
4454 	msg.msg_controllen = 0;
4455 	msg.msg_namelen = 0;
4456 
4457 	flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
4458 	if (flags & MSG_DONTWAIT)
4459 		req->flags |= REQ_F_NOWAIT;
4460 	else if (force_nonblock)
4461 		flags |= MSG_DONTWAIT;
4462 
4463 	if (flags & MSG_WAITALL)
4464 		min_ret = iov_iter_count(&msg.msg_iter);
4465 
4466 	msg.msg_flags = flags;
4467 	ret = sock_sendmsg(sock, &msg);
4468 	if (force_nonblock && ret == -EAGAIN)
4469 		return -EAGAIN;
4470 	if (ret == -ERESTARTSYS)
4471 		ret = -EINTR;
4472 
4473 	if (ret < min_ret)
4474 		req_set_fail_links(req);
4475 	__io_req_complete(req, ret, 0, cs);
4476 	return 0;
4477 }
4478 
4479 static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
4480 				 struct io_async_msghdr *iomsg)
4481 {
4482 	struct io_sr_msg *sr = &req->sr_msg;
4483 	struct iovec __user *uiov;
4484 	size_t iov_len;
4485 	int ret;
4486 
4487 	ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg,
4488 					&iomsg->uaddr, &uiov, &iov_len);
4489 	if (ret)
4490 		return ret;
4491 
4492 	if (req->flags & REQ_F_BUFFER_SELECT) {
4493 		if (iov_len > 1)
4494 			return -EINVAL;
4495 		if (copy_from_user(iomsg->iov, uiov, sizeof(*uiov)))
4496 			return -EFAULT;
4497 		sr->len = iomsg->iov[0].iov_len;
4498 		iov_iter_init(&iomsg->msg.msg_iter, READ, iomsg->iov, 1,
4499 				sr->len);
4500 		iomsg->iov = NULL;
4501 	} else {
4502 		ret = __import_iovec(READ, uiov, iov_len, UIO_FASTIOV,
4503 				     &iomsg->iov, &iomsg->msg.msg_iter,
4504 				     false);
4505 		if (ret > 0)
4506 			ret = 0;
4507 	}
4508 
4509 	return ret;
4510 }
4511 
4512 #ifdef CONFIG_COMPAT
4513 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
4514 					struct io_async_msghdr *iomsg)
4515 {
4516 	struct compat_msghdr __user *msg_compat;
4517 	struct io_sr_msg *sr = &req->sr_msg;
4518 	struct compat_iovec __user *uiov;
4519 	compat_uptr_t ptr;
4520 	compat_size_t len;
4521 	int ret;
4522 
4523 	msg_compat = (struct compat_msghdr __user *) sr->umsg;
4524 	ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr,
4525 					&ptr, &len);
4526 	if (ret)
4527 		return ret;
4528 
4529 	uiov = compat_ptr(ptr);
4530 	if (req->flags & REQ_F_BUFFER_SELECT) {
4531 		compat_ssize_t clen;
4532 
4533 		if (len > 1)
4534 			return -EINVAL;
4535 		if (!access_ok(uiov, sizeof(*uiov)))
4536 			return -EFAULT;
4537 		if (__get_user(clen, &uiov->iov_len))
4538 			return -EFAULT;
4539 		if (clen < 0)
4540 			return -EINVAL;
4541 		sr->len = clen;
4542 		iomsg->iov[0].iov_len = clen;
4543 		iomsg->iov = NULL;
4544 	} else {
4545 		ret = __import_iovec(READ, (struct iovec __user *)uiov, len,
4546 				   UIO_FASTIOV, &iomsg->iov,
4547 				   &iomsg->msg.msg_iter, true);
4548 		if (ret < 0)
4549 			return ret;
4550 	}
4551 
4552 	return 0;
4553 }
4554 #endif
4555 
4556 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
4557 			       struct io_async_msghdr *iomsg)
4558 {
4559 	iomsg->msg.msg_name = &iomsg->addr;
4560 	iomsg->iov = iomsg->fast_iov;
4561 
4562 #ifdef CONFIG_COMPAT
4563 	if (req->ctx->compat)
4564 		return __io_compat_recvmsg_copy_hdr(req, iomsg);
4565 #endif
4566 
4567 	return __io_recvmsg_copy_hdr(req, iomsg);
4568 }
4569 
4570 static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req,
4571 					       bool needs_lock)
4572 {
4573 	struct io_sr_msg *sr = &req->sr_msg;
4574 	struct io_buffer *kbuf;
4575 
4576 	kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock);
4577 	if (IS_ERR(kbuf))
4578 		return kbuf;
4579 
4580 	sr->kbuf = kbuf;
4581 	req->flags |= REQ_F_BUFFER_SELECTED;
4582 	return kbuf;
4583 }
4584 
4585 static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req)
4586 {
4587 	return io_put_kbuf(req, req->sr_msg.kbuf);
4588 }
4589 
4590 static int io_recvmsg_prep(struct io_kiocb *req,
4591 			   const struct io_uring_sqe *sqe)
4592 {
4593 	struct io_async_msghdr *async_msg = req->async_data;
4594 	struct io_sr_msg *sr = &req->sr_msg;
4595 	int ret;
4596 
4597 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
4598 		return -EINVAL;
4599 
4600 	sr->msg_flags = READ_ONCE(sqe->msg_flags);
4601 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
4602 	sr->len = READ_ONCE(sqe->len);
4603 	sr->bgid = READ_ONCE(sqe->buf_group);
4604 
4605 #ifdef CONFIG_COMPAT
4606 	if (req->ctx->compat)
4607 		sr->msg_flags |= MSG_CMSG_COMPAT;
4608 #endif
4609 
4610 	if (!async_msg || !io_op_defs[req->opcode].needs_async_data)
4611 		return 0;
4612 	ret = io_recvmsg_copy_hdr(req, async_msg);
4613 	if (!ret)
4614 		req->flags |= REQ_F_NEED_CLEANUP;
4615 	return ret;
4616 }
4617 
4618 static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
4619 		      struct io_comp_state *cs)
4620 {
4621 	struct io_async_msghdr iomsg, *kmsg;
4622 	struct socket *sock;
4623 	struct io_buffer *kbuf;
4624 	unsigned flags;
4625 	int min_ret = 0;
4626 	int ret, cflags = 0;
4627 
4628 	sock = sock_from_file(req->file, &ret);
4629 	if (unlikely(!sock))
4630 		return ret;
4631 
4632 	if (req->async_data) {
4633 		kmsg = req->async_data;
4634 		kmsg->msg.msg_name = &kmsg->addr;
4635 		/* if iov is set, it's allocated already */
4636 		if (!kmsg->iov)
4637 			kmsg->iov = kmsg->fast_iov;
4638 		kmsg->msg.msg_iter.iov = kmsg->iov;
4639 	} else {
4640 		ret = io_recvmsg_copy_hdr(req, &iomsg);
4641 		if (ret)
4642 			return ret;
4643 		kmsg = &iomsg;
4644 	}
4645 
4646 	if (req->flags & REQ_F_BUFFER_SELECT) {
4647 		kbuf = io_recv_buffer_select(req, !force_nonblock);
4648 		if (IS_ERR(kbuf))
4649 			return PTR_ERR(kbuf);
4650 		kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr);
4651 		iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov,
4652 				1, req->sr_msg.len);
4653 	}
4654 
4655 	flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
4656 	if (flags & MSG_DONTWAIT)
4657 		req->flags |= REQ_F_NOWAIT;
4658 	else if (force_nonblock)
4659 		flags |= MSG_DONTWAIT;
4660 
4661 	if (flags & MSG_WAITALL)
4662 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
4663 
4664 	ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg,
4665 					kmsg->uaddr, flags);
4666 	if (force_nonblock && ret == -EAGAIN)
4667 		return io_setup_async_msg(req, kmsg);
4668 	if (ret == -ERESTARTSYS)
4669 		ret = -EINTR;
4670 
4671 	if (req->flags & REQ_F_BUFFER_SELECTED)
4672 		cflags = io_put_recv_kbuf(req);
4673 	if (kmsg->iov != kmsg->fast_iov)
4674 		kfree(kmsg->iov);
4675 	req->flags &= ~REQ_F_NEED_CLEANUP;
4676 	if (ret < min_ret || ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
4677 		req_set_fail_links(req);
4678 	__io_req_complete(req, ret, cflags, cs);
4679 	return 0;
4680 }
4681 
4682 static int io_recv(struct io_kiocb *req, bool force_nonblock,
4683 		   struct io_comp_state *cs)
4684 {
4685 	struct io_buffer *kbuf;
4686 	struct io_sr_msg *sr = &req->sr_msg;
4687 	struct msghdr msg;
4688 	void __user *buf = sr->buf;
4689 	struct socket *sock;
4690 	struct iovec iov;
4691 	unsigned flags;
4692 	int min_ret = 0;
4693 	int ret, cflags = 0;
4694 
4695 	sock = sock_from_file(req->file, &ret);
4696 	if (unlikely(!sock))
4697 		return ret;
4698 
4699 	if (req->flags & REQ_F_BUFFER_SELECT) {
4700 		kbuf = io_recv_buffer_select(req, !force_nonblock);
4701 		if (IS_ERR(kbuf))
4702 			return PTR_ERR(kbuf);
4703 		buf = u64_to_user_ptr(kbuf->addr);
4704 	}
4705 
4706 	ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter);
4707 	if (unlikely(ret))
4708 		goto out_free;
4709 
4710 	msg.msg_name = NULL;
4711 	msg.msg_control = NULL;
4712 	msg.msg_controllen = 0;
4713 	msg.msg_namelen = 0;
4714 	msg.msg_iocb = NULL;
4715 	msg.msg_flags = 0;
4716 
4717 	flags = req->sr_msg.msg_flags | MSG_NOSIGNAL;
4718 	if (flags & MSG_DONTWAIT)
4719 		req->flags |= REQ_F_NOWAIT;
4720 	else if (force_nonblock)
4721 		flags |= MSG_DONTWAIT;
4722 
4723 	if (flags & MSG_WAITALL)
4724 		min_ret = iov_iter_count(&msg.msg_iter);
4725 
4726 	ret = sock_recvmsg(sock, &msg, flags);
4727 	if (force_nonblock && ret == -EAGAIN)
4728 		return -EAGAIN;
4729 	if (ret == -ERESTARTSYS)
4730 		ret = -EINTR;
4731 out_free:
4732 	if (req->flags & REQ_F_BUFFER_SELECTED)
4733 		cflags = io_put_recv_kbuf(req);
4734 	if (ret < min_ret || ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))))
4735 		req_set_fail_links(req);
4736 	__io_req_complete(req, ret, cflags, cs);
4737 	return 0;
4738 }
4739 
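/*
 * Illustrative sketch (not part of this file): a plain IORING_OP_RECV
 * into a caller-supplied buffer (no buffer selection), assuming the
 * liburing helpers and caller-defined "ring", "sockfd" and "buf":
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_recv(sqe, sockfd, buf, sizeof(buf), 0);
 *	io_uring_submit(&ring);
 *
 * cqe->res is the number of bytes received, 0 on EOF, or -errno.
 */
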
4740 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4741 {
4742 	struct io_accept *accept = &req->accept;
4743 
4744 	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
4745 		return -EINVAL;
4746 	if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->splice_fd_in)
4747 		return -EINVAL;
4748 
4749 	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4750 	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
4751 	accept->flags = READ_ONCE(sqe->accept_flags);
4752 	accept->nofile = rlimit(RLIMIT_NOFILE);
4753 	return 0;
4754 }
4755 
4756 static int io_accept(struct io_kiocb *req, bool force_nonblock,
4757 		     struct io_comp_state *cs)
4758 {
4759 	struct io_accept *accept = &req->accept;
4760 	unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
4761 	int ret;
4762 
4763 	if (req->file->f_flags & O_NONBLOCK)
4764 		req->flags |= REQ_F_NOWAIT;
4765 
4766 	ret = __sys_accept4_file(req->file, file_flags, accept->addr,
4767 					accept->addr_len, accept->flags,
4768 					accept->nofile);
4769 	if (ret == -EAGAIN && force_nonblock)
4770 		return -EAGAIN;
4771 	if (ret < 0) {
4772 		if (ret == -ERESTARTSYS)
4773 			ret = -EINTR;
4774 		req_set_fail_links(req);
4775 	}
4776 	__io_req_complete(req, ret, 0, cs);
4777 	return 0;
4778 }
4779 
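/*
 * Illustrative sketch (not part of this file): accepting a connection via
 * the ring with the liburing helper, assuming caller-defined "ring" and a
 * listening "listen_fd":
 *
 *	struct sockaddr_storage ss;
 *	socklen_t sslen = sizeof(ss);
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_accept(sqe, listen_fd, (struct sockaddr *)&ss,
 *			     &sslen, SOCK_CLOEXEC);
 *	io_uring_submit(&ring);
 *
 * cqe->res is the accepted file descriptor, or -errno.
 */
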
4780 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4781 {
4782 	struct io_connect *conn = &req->connect;
4783 	struct io_async_connect *io = req->async_data;
4784 
4785 	if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL)))
4786 		return -EINVAL;
4787 	if (sqe->ioprio || sqe->len || sqe->buf_index || sqe->rw_flags ||
4788 	    sqe->splice_fd_in)
4789 		return -EINVAL;
4790 
4791 	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
4792 	conn->addr_len = READ_ONCE(sqe->addr2);
4793 
4794 	if (!io)
4795 		return 0;
4796 
4797 	return move_addr_to_kernel(conn->addr, conn->addr_len,
4798 					&io->address);
4799 }
4800 
4801 static int io_connect(struct io_kiocb *req, bool force_nonblock,
4802 		      struct io_comp_state *cs)
4803 {
4804 	struct io_async_connect __io, *io;
4805 	unsigned file_flags;
4806 	int ret;
4807 
4808 	if (req->async_data) {
4809 		io = req->async_data;
4810 	} else {
4811 		ret = move_addr_to_kernel(req->connect.addr,
4812 						req->connect.addr_len,
4813 						&__io.address);
4814 		if (ret)
4815 			goto out;
4816 		io = &__io;
4817 	}
4818 
4819 	file_flags = force_nonblock ? O_NONBLOCK : 0;
4820 
4821 	ret = __sys_connect_file(req->file, &io->address,
4822 					req->connect.addr_len, file_flags);
4823 	if ((ret == -EAGAIN || ret == -EINPROGRESS) && force_nonblock) {
4824 		if (req->async_data)
4825 			return -EAGAIN;
4826 		if (io_alloc_async_data(req)) {
4827 			ret = -ENOMEM;
4828 			goto out;
4829 		}
4830 		io = req->async_data;
4831 		memcpy(req->async_data, &__io, sizeof(__io));
4832 		return -EAGAIN;
4833 	}
4834 	if (ret == -ERESTARTSYS)
4835 		ret = -EINTR;
4836 out:
4837 	if (ret < 0)
4838 		req_set_fail_links(req);
4839 	__io_req_complete(req, ret, 0, cs);
4840 	return 0;
4841 }
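
/*
 * Illustrative sketch (not part of this file): a non-blocking connect via
 * the ring; the -EAGAIN/-EINPROGRESS handling above means the caller just
 * waits for the CQE. Assumes the liburing helpers and caller-defined
 * "ring", "sockfd" and a filled-in "struct sockaddr_in addr":
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *
 *	io_uring_prep_connect(sqe, sockfd, (struct sockaddr *)&addr,
 *			      sizeof(addr));
 *	io_uring_submit(&ring);
 */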
4842 #else /* !CONFIG_NET */
4843 static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4844 {
4845 	return -EOPNOTSUPP;
4846 }
4847 
4848 static int io_sendmsg(struct io_kiocb *req, bool force_nonblock,
4849 		      struct io_comp_state *cs)
4850 {
4851 	return -EOPNOTSUPP;
4852 }
4853 
4854 static int io_send(struct io_kiocb *req, bool force_nonblock,
4855 		   struct io_comp_state *cs)
4856 {
4857 	return -EOPNOTSUPP;
4858 }
4859 
4860 static int io_recvmsg_prep(struct io_kiocb *req,
4861 			   const struct io_uring_sqe *sqe)
4862 {
4863 	return -EOPNOTSUPP;
4864 }
4865 
4866 static int io_recvmsg(struct io_kiocb *req, bool force_nonblock,
4867 		      struct io_comp_state *cs)
4868 {
4869 	return -EOPNOTSUPP;
4870 }
4871 
4872 static int io_recv(struct io_kiocb *req, bool force_nonblock,
4873 		   struct io_comp_state *cs)
4874 {
4875 	return -EOPNOTSUPP;
4876 }
4877 
4878 static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4879 {
4880 	return -EOPNOTSUPP;
4881 }
4882 
4883 static int io_accept(struct io_kiocb *req, bool force_nonblock,
4884 		     struct io_comp_state *cs)
4885 {
4886 	return -EOPNOTSUPP;
4887 }
4888 
4889 static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
4890 {
4891 	return -EOPNOTSUPP;
4892 }
4893 
4894 static int io_connect(struct io_kiocb *req, bool force_nonblock,
4895 		      struct io_comp_state *cs)
4896 {
4897 	return -EOPNOTSUPP;
4898 }
4899 #endif /* CONFIG_NET */
4900 
4901 struct io_poll_table {
4902 	struct poll_table_struct pt;
4903 	struct io_kiocb *req;
4904 	int nr_entries;
4905 	int error;
4906 };
4907 
4908 static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll,
4909 			   __poll_t mask, task_work_func_t func)
4910 {
4911 	bool twa_signal_ok;
4912 	int ret;
4913 
4914 	/* for instances that support it check for an event match first: */
4915 	if (mask && !(mask & poll->events))
4916 		return 0;
4917 
4918 	trace_io_uring_task_add(req->ctx, req->opcode, req->user_data, mask);
4919 
4920 	list_del_init(&poll->wait.entry);
4921 
4922 	req->result = mask;
4923 	init_task_work(&req->task_work, func);
4924 	percpu_ref_get(&req->ctx->refs);
4925 
4926 	/*
4927 	 * If we are using the signalfd wait_queue_head for this wakeup, then
4928 	 * it's not safe to use TWA_SIGNAL as we could be recursing on the
4929 	 * tsk->sighand->siglock on doing the wakeup. Should not be needed
4930 	 * either, as the normal wakeup will suffice.
4931 	 */
4932 	twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh);
4933 
4934 	/*
4935 	 * If this fails, then the task is exiting. When a task exits, the
4936 	 * work gets canceled, so just cancel this request as well instead
4937 	 * of executing it. We can't safely execute it anyway, as we may not
4938 	 * have the state needed for it.
4939 	 */
4940 	ret = io_req_task_work_add(req, twa_signal_ok);
4941 	if (unlikely(ret)) {
4942 		WRITE_ONCE(poll->canceled, true);
4943 		io_req_task_work_add_fallback(req, func);
4944 	}
4945 	return 1;
4946 }
4947 
4948 static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll)
4949 	__acquires(&req->ctx->completion_lock)
4950 {
4951 	struct io_ring_ctx *ctx = req->ctx;
4952 
4953 	if (!req->result && !READ_ONCE(poll->canceled)) {
4954 		struct poll_table_struct pt = { ._key = poll->events };
4955 
4956 		req->result = vfs_poll(req->file, &pt) & poll->events;
4957 	}
4958 
4959 	spin_lock_irq(&ctx->completion_lock);
4960 	if (!req->result && !READ_ONCE(poll->canceled)) {
4961 		add_wait_queue(poll->head, &poll->wait);
4962 		return true;
4963 	}
4964 
4965 	return false;
4966 }
4967 
4968 static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req)
4969 {
4970 	/* pure poll stashes this in ->async_data, poll driven retry elsewhere */
4971 	if (req->opcode == IORING_OP_POLL_ADD)
4972 		return req->async_data;
4973 	return req->apoll->double_poll;
4974 }
4975 
4976 static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req)
4977 {
4978 	if (req->opcode == IORING_OP_POLL_ADD)
4979 		return &req->poll;
4980 	return &req->apoll->poll;
4981 }
4982 
4983 static void io_poll_remove_double(struct io_kiocb *req)
4984 {
4985 	struct io_poll_iocb *poll = io_poll_get_double(req);
4986 
4987 	lockdep_assert_held(&req->ctx->completion_lock);
4988 
4989 	if (poll && poll->head) {
4990 		struct wait_queue_head *head = poll->head;
4991 
4992 		spin_lock(&head->lock);
4993 		list_del_init(&poll->wait.entry);
4994 		if (poll->wait.private)
4995 			refcount_dec(&req->refs);
4996 		poll->head = NULL;
4997 		spin_unlock(&head->lock);
4998 	}
4999 }
5000 
5001 static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error)
5002 {
5003 	struct io_ring_ctx *ctx = req->ctx;
5004 
5005 	io_poll_remove_double(req);
5006 	req->poll.done = true;
5007 	io_cqring_fill_event(req, error ? error : mangle_poll(mask));
5008 	io_commit_cqring(ctx);
5009 }
5010 
5011 static void io_poll_task_func(struct callback_head *cb)
5012 {
5013 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
5014 	struct io_ring_ctx *ctx = req->ctx;
5015 	struct io_kiocb *nxt;
5016 
5017 	if (io_poll_rewait(req, &req->poll)) {
5018 		spin_unlock_irq(&ctx->completion_lock);
5019 	} else {
5020 		hash_del(&req->hash_node);
5021 		io_poll_complete(req, req->result, 0);
5022 		spin_unlock_irq(&ctx->completion_lock);
5023 
5024 		nxt = io_put_req_find_next(req);
5025 		io_cqring_ev_posted(ctx);
5026 		if (nxt)
5027 			__io_req_task_submit(nxt);
5028 	}
5029 
5030 	percpu_ref_put(&ctx->refs);
5031 }
5032 
5033 static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode,
5034 			       int sync, void *key)
5035 {
5036 	struct io_kiocb *req = wait->private;
5037 	struct io_poll_iocb *poll = io_poll_get_single(req);
5038 	__poll_t mask = key_to_poll(key);
5039 
5040 	/* for instances that support it check for an event match first: */
5041 	if (mask && !(mask & poll->events))
5042 		return 0;
5043 
5044 	list_del_init(&wait->entry);
5045 
5046 	if (poll && poll->head) {
5047 		bool done;
5048 
5049 		spin_lock(&poll->head->lock);
5050 		done = list_empty(&poll->wait.entry);
5051 		if (!done)
5052 			list_del_init(&poll->wait.entry);
5053 		/* make sure double remove sees this as being gone */
5054 		wait->private = NULL;
5055 		spin_unlock(&poll->head->lock);
5056 		if (!done) {
5057 			/* use wait func handler, so it matches the rq type */
5058 			poll->wait.func(&poll->wait, mode, sync, key);
5059 		}
5060 	}
5061 	refcount_dec(&req->refs);
5062 	return 1;
5063 }
5064 
5065 static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5066 			      wait_queue_func_t wake_func)
5067 {
5068 	poll->head = NULL;
5069 	poll->done = false;
5070 	poll->canceled = false;
5071 	poll->events = events;
5072 	INIT_LIST_HEAD(&poll->wait.entry);
5073 	init_waitqueue_func_entry(&poll->wait, wake_func);
5074 }
5075 
5076 static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt,
5077 			    struct wait_queue_head *head,
5078 			    struct io_poll_iocb **poll_ptr)
5079 {
5080 	struct io_kiocb *req = pt->req;
5081 
5082 	/*
5083 	 * The file being polled uses multiple waitqueues for poll handling
5084 	 * (e.g. one for read, one for write). Setup a separate io_poll_iocb
5085 	 * if this happens.
5086 	 */
5087 	if (unlikely(pt->nr_entries)) {
5088 		struct io_poll_iocb *poll_one = poll;
5089 
5090 		/* already have a 2nd entry, fail a third attempt */
5091 		if (*poll_ptr) {
5092 			pt->error = -EINVAL;
5093 			return;
5094 		}
5095 		/* double add on the same waitqueue head, ignore */
5096 		if (poll->head == head)
5097 			return;
5098 		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
5099 		if (!poll) {
5100 			pt->error = -ENOMEM;
5101 			return;
5102 		}
5103 		io_init_poll_iocb(poll, poll_one->events, io_poll_double_wake);
5104 		refcount_inc(&req->refs);
5105 		poll->wait.private = req;
5106 		*poll_ptr = poll;
5107 	}
5108 
5109 	pt->nr_entries++;
5110 	poll->head = head;
5111 
5112 	if (poll->events & EPOLLEXCLUSIVE)
5113 		add_wait_queue_exclusive(head, &poll->wait);
5114 	else
5115 		add_wait_queue(head, &poll->wait);
5116 }
5117 
5118 static void io_async_queue_proc(struct file *file, struct wait_queue_head *head,
5119 			       struct poll_table_struct *p)
5120 {
5121 	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5122 	struct async_poll *apoll = pt->req->apoll;
5123 
5124 	__io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
5125 }
5126 
5127 static void io_async_task_func(struct callback_head *cb)
5128 {
5129 	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
5130 	struct async_poll *apoll = req->apoll;
5131 	struct io_ring_ctx *ctx = req->ctx;
5132 
5133 	trace_io_uring_task_run(req->ctx, req->opcode, req->user_data);
5134 
5135 	if (io_poll_rewait(req, &apoll->poll)) {
5136 		spin_unlock_irq(&ctx->completion_lock);
5137 		percpu_ref_put(&ctx->refs);
5138 		return;
5139 	}
5140 
5141 	/* If req is still hashed, it cannot have been canceled. Don't check. */
5142 	if (hash_hashed(&req->hash_node))
5143 		hash_del(&req->hash_node);
5144 
5145 	io_poll_remove_double(req);
5146 	spin_unlock_irq(&ctx->completion_lock);
5147 
5148 	if (!READ_ONCE(apoll->poll.canceled))
5149 		__io_req_task_submit(req);
5150 	else
5151 		__io_req_task_cancel(req, -ECANCELED);
5152 
5153 	percpu_ref_put(&ctx->refs);
5154 	kfree(apoll->double_poll);
5155 	kfree(apoll);
5156 }
5157 
5158 static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5159 			void *key)
5160 {
5161 	struct io_kiocb *req = wait->private;
5162 	struct io_poll_iocb *poll = &req->apoll->poll;
5163 
5164 	trace_io_uring_poll_wake(req->ctx, req->opcode, req->user_data,
5165 					key_to_poll(key));
5166 
5167 	return __io_async_wake(req, poll, key_to_poll(key), io_async_task_func);
5168 }
5169 
5170 static void io_poll_req_insert(struct io_kiocb *req)
5171 {
5172 	struct io_ring_ctx *ctx = req->ctx;
5173 	struct hlist_head *list;
5174 
5175 	list = &ctx->cancel_hash[hash_long(req->user_data, ctx->cancel_hash_bits)];
5176 	hlist_add_head(&req->hash_node, list);
5177 }
5178 
5179 static __poll_t __io_arm_poll_handler(struct io_kiocb *req,
5180 				      struct io_poll_iocb *poll,
5181 				      struct io_poll_table *ipt, __poll_t mask,
5182 				      wait_queue_func_t wake_func)
5183 	__acquires(&ctx->completion_lock)
5184 {
5185 	struct io_ring_ctx *ctx = req->ctx;
5186 	bool cancel = false;
5187 
5188 	if (req->file->f_op->may_pollfree) {
5189 		spin_lock_irq(&ctx->completion_lock);
5190 		return -EOPNOTSUPP;
5191 	}
5192 
5193 	INIT_HLIST_NODE(&req->hash_node);
5194 	io_init_poll_iocb(poll, mask, wake_func);
5195 	poll->file = req->file;
5196 	poll->wait.private = req;
5197 
5198 	ipt->pt._key = mask;
5199 	ipt->req = req;
5200 	ipt->error = 0;
5201 	ipt->nr_entries = 0;
5202 
5203 	mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5204 	if (unlikely(!ipt->nr_entries) && !ipt->error)
5205 		ipt->error = -EINVAL;
5206 
5207 	spin_lock_irq(&ctx->completion_lock);
5208 	if (ipt->error)
5209 		io_poll_remove_double(req);
5210 	if (likely(poll->head)) {
5211 		spin_lock(&poll->head->lock);
5212 		if (unlikely(list_empty(&poll->wait.entry))) {
5213 			if (ipt->error)
5214 				cancel = true;
5215 			ipt->error = 0;
5216 			mask = 0;
5217 		}
5218 		if (mask || ipt->error)
5219 			list_del_init(&poll->wait.entry);
5220 		else if (cancel)
5221 			WRITE_ONCE(poll->canceled, true);
5222 		else if (!poll->done) /* actually waiting for an event */
5223 			io_poll_req_insert(req);
5224 		spin_unlock(&poll->head->lock);
5225 	}
5226 
5227 	return mask;
5228 }
5229 
5230 static bool io_arm_poll_handler(struct io_kiocb *req)
5231 {
5232 	const struct io_op_def *def = &io_op_defs[req->opcode];
5233 	struct io_ring_ctx *ctx = req->ctx;
5234 	struct async_poll *apoll;
5235 	struct io_poll_table ipt;
5236 	__poll_t mask, ret;
5237 	int rw;
5238 
5239 	if (!req->file || !file_can_poll(req->file))
5240 		return false;
5241 	if (req->flags & REQ_F_POLLED)
5242 		return false;
5243 	if (def->pollin)
5244 		rw = READ;
5245 	else if (def->pollout)
5246 		rw = WRITE;
5247 	else
5248 		return false;
5249 	/* if we can't nonblock try, then no point in arming a poll handler */
5250 	if (!io_file_supports_async(req->file, rw))
5251 		return false;
5252 
5253 	apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
5254 	if (unlikely(!apoll))
5255 		return false;
5256 	apoll->double_poll = NULL;
5257 
5258 	req->flags |= REQ_F_POLLED;
5259 	req->apoll = apoll;
5260 
5261 	mask = 0;
5262 	if (def->pollin)
5263 		mask |= POLLIN | POLLRDNORM;
5264 	if (def->pollout)
5265 		mask |= POLLOUT | POLLWRNORM;
5266 
5267 	/* If reading from MSG_ERRQUEUE using recvmsg, ignore POLLIN */
5268 	if ((req->opcode == IORING_OP_RECVMSG) &&
5269 	    (req->sr_msg.msg_flags & MSG_ERRQUEUE))
5270 		mask &= ~POLLIN;
5271 
5272 	mask |= POLLERR | POLLPRI;
5273 
5274 	ipt.pt._qproc = io_async_queue_proc;
5275 
5276 	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask,
5277 					io_async_wake);
5278 	if (ret || ipt.error) {
5279 		io_poll_remove_double(req);
5280 		spin_unlock_irq(&ctx->completion_lock);
5281 		kfree(apoll->double_poll);
5282 		kfree(apoll);
5283 		return false;
5284 	}
5285 	spin_unlock_irq(&ctx->completion_lock);
5286 	trace_io_uring_poll_arm(ctx, req->opcode, req->user_data, mask,
5287 					apoll->poll.events);
5288 	return true;
5289 }
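/*
 * Worked example for the mask built above: for a pollin opcode such as
 * IORING_OP_RECV, the armed mask is POLLIN | POLLRDNORM | POLLERR | POLLPRI,
 * so the request is retried when data arrives, but also if the file signals
 * an error or urgent condition, instead of being punted straight to io-wq.
 */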
5290 
5291 static bool __io_poll_remove_one(struct io_kiocb *req,
5292 				 struct io_poll_iocb *poll)
5293 {
5294 	bool do_complete = false;
5295 
5296 	spin_lock(&poll->head->lock);
5297 	WRITE_ONCE(poll->canceled, true);
5298 	if (!list_empty(&poll->wait.entry)) {
5299 		list_del_init(&poll->wait.entry);
5300 		do_complete = true;
5301 	}
5302 	spin_unlock(&poll->head->lock);
5303 	hash_del(&req->hash_node);
5304 	return do_complete;
5305 }
5306 
5307 static bool io_poll_remove_one(struct io_kiocb *req)
5308 {
5309 	bool do_complete;
5310 
5311 	io_poll_remove_double(req);
5312 
5313 	if (req->opcode == IORING_OP_POLL_ADD) {
5314 		do_complete = __io_poll_remove_one(req, &req->poll);
5315 	} else {
5316 		struct async_poll *apoll = req->apoll;
5317 
5318 		/* non-poll requests have submit ref still */
5319 		do_complete = __io_poll_remove_one(req, &apoll->poll);
5320 		if (do_complete) {
5321 			io_put_req(req);
5322 			kfree(apoll->double_poll);
5323 			kfree(apoll);
5324 		}
5325 	}
5326 
5327 	if (do_complete) {
5328 		io_cqring_fill_event(req, -ECANCELED);
5329 		io_commit_cqring(req->ctx);
5330 		req_set_fail_links(req);
5331 		io_put_req_deferred(req, 1);
5332 	}
5333 
5334 	return do_complete;
5335 }
5336 
5337 /*
5338  * Returns true if we found and killed one or more poll requests
5339  */
5340 static bool io_poll_remove_all(struct io_ring_ctx *ctx, struct task_struct *tsk,
5341 			       struct files_struct *files)
5342 {
5343 	struct hlist_node *tmp;
5344 	struct io_kiocb *req;
5345 	int posted = 0, i;
5346 
5347 	spin_lock_irq(&ctx->completion_lock);
5348 	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
5349 		struct hlist_head *list;
5350 
5351 		list = &ctx->cancel_hash[i];
5352 		hlist_for_each_entry_safe(req, tmp, list, hash_node) {
5353 			if (io_match_task(req, tsk, files))
5354 				posted += io_poll_remove_one(req);
5355 		}
5356 	}
5357 	spin_unlock_irq(&ctx->completion_lock);
5358 
5359 	if (posted)
5360 		io_cqring_ev_posted(ctx);
5361 
5362 	return posted != 0;
5363 }
5364 
5365 static int io_poll_cancel(struct io_ring_ctx *ctx, __u64 sqe_addr)
5366 {
5367 	struct hlist_head *list;
5368 	struct io_kiocb *req;
5369 
5370 	list = &ctx->cancel_hash[hash_long(sqe_addr, ctx->cancel_hash_bits)];
5371 	hlist_for_each_entry(req, list, hash_node) {
5372 		if (sqe_addr != req->user_data)
5373 			continue;
5374 		if (io_poll_remove_one(req))
5375 			return 0;
5376 		return -EALREADY;
5377 	}
5378 
5379 	return -ENOENT;
5380 }
5381 
5382 static int io_poll_remove_prep(struct io_kiocb *req,
5383 			       const struct io_uring_sqe *sqe)
5384 {
5385 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5386 		return -EINVAL;
5387 	if (sqe->ioprio || sqe->off || sqe->len || sqe->buf_index ||
5388 	    sqe->poll_events)
5389 		return -EINVAL;
5390 
5391 	req->poll.addr = READ_ONCE(sqe->addr);
5392 	return 0;
5393 }
5394 
5395 /*
5396  * Find a running poll command that matches one specified in sqe->addr,
5397  * and remove it if found.
5398  */
5399 static int io_poll_remove(struct io_kiocb *req)
5400 {
5401 	struct io_ring_ctx *ctx = req->ctx;
5402 	u64 addr;
5403 	int ret;
5404 
5405 	addr = req->poll.addr;
5406 	spin_lock_irq(&ctx->completion_lock);
5407 	ret = io_poll_cancel(ctx, addr);
5408 	spin_unlock_irq(&ctx->completion_lock);
5409 
5410 	if (ret < 0)
5411 		req_set_fail_links(req);
5412 	io_req_complete(req, ret);
5413 	return 0;
5414 }
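/*
 * A minimal userspace sketch of pairing POLL_ADD with POLL_REMOVE, assuming
 * liburing's helpers (exact signatures vary by liburing version):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_add(sqe, fd, POLLIN);
 *	sqe->user_data = 0x1234;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_poll_remove(sqe, (void *)0x1234);	-- matches user_data
 *	io_uring_submit(&ring);
 */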
5415 
5416 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5417 			void *key)
5418 {
5419 	struct io_kiocb *req = wait->private;
5420 	struct io_poll_iocb *poll = &req->poll;
5421 
5422 	return __io_async_wake(req, poll, key_to_poll(key), io_poll_task_func);
5423 }
5424 
5425 static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head,
5426 			       struct poll_table_struct *p)
5427 {
5428 	struct io_poll_table *pt = container_of(p, struct io_poll_table, pt);
5429 
5430 	__io_queue_proc(&pt->req->poll, pt, head, (struct io_poll_iocb **) &pt->req->async_data);
5431 }
5432 
5433 static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5434 {
5435 	struct io_poll_iocb *poll = &req->poll;
5436 	u32 events;
5437 
5438 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5439 		return -EINVAL;
5440 	if (sqe->addr || sqe->ioprio || sqe->off || sqe->len || sqe->buf_index)
5441 		return -EINVAL;
5442 
5443 	events = READ_ONCE(sqe->poll32_events);
5444 #ifdef __BIG_ENDIAN
5445 	events = swahw32(events);
5446 #endif
5447 	poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP |
5448 		       (events & EPOLLEXCLUSIVE);
5449 	return 0;
5450 }
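/*
 * The halfword swap above exists because poll32_events widened the original
 * 16-bit poll_events field; on big-endian, swahw32() presumably keeps a value
 * written through the old 16-bit layout in the expected position when it is
 * read back as 32 bits.
 */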
5451 
5452 static int io_poll_add(struct io_kiocb *req)
5453 {
5454 	struct io_poll_iocb *poll = &req->poll;
5455 	struct io_ring_ctx *ctx = req->ctx;
5456 	struct io_poll_table ipt;
5457 	__poll_t mask;
5458 
5459 	ipt.pt._qproc = io_poll_queue_proc;
5460 
5461 	mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events,
5462 					io_poll_wake);
5463 
5464 	if (mask) { /* no async, we'd stolen it */
5465 		ipt.error = 0;
5466 		io_poll_complete(req, mask, 0);
5467 	}
5468 	spin_unlock_irq(&ctx->completion_lock);
5469 
5470 	if (mask) {
5471 		io_cqring_ev_posted(ctx);
5472 		io_put_req(req);
5473 	}
5474 	return ipt.error;
5475 }
5476 
5477 static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
5478 {
5479 	struct io_timeout_data *data = container_of(timer,
5480 						struct io_timeout_data, timer);
5481 	struct io_kiocb *req = data->req;
5482 	struct io_ring_ctx *ctx = req->ctx;
5483 	unsigned long flags;
5484 
5485 	spin_lock_irqsave(&ctx->completion_lock, flags);
5486 	list_del_init(&req->timeout.list);
5487 	atomic_set(&req->ctx->cq_timeouts,
5488 		atomic_read(&req->ctx->cq_timeouts) + 1);
5489 
5490 	io_cqring_fill_event(req, -ETIME);
5491 	io_commit_cqring(ctx);
5492 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
5493 
5494 	io_cqring_ev_posted(ctx);
5495 	req_set_fail_links(req);
5496 	io_put_req(req);
5497 	return HRTIMER_NORESTART;
5498 }
5499 
5500 static int __io_timeout_cancel(struct io_kiocb *req)
5501 {
5502 	struct io_timeout_data *io = req->async_data;
5503 	int ret;
5504 
5505 	ret = hrtimer_try_to_cancel(&io->timer);
5506 	if (ret == -1)
5507 		return -EALREADY;
5508 	list_del_init(&req->timeout.list);
5509 
5510 	req_set_fail_links(req);
5511 	io_cqring_fill_event(req, -ECANCELED);
5512 	io_put_req_deferred(req, 1);
5513 	return 0;
5514 }
5515 
5516 static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data)
5517 {
5518 	struct io_kiocb *req;
5519 	int ret = -ENOENT;
5520 
5521 	list_for_each_entry(req, &ctx->timeout_list, timeout.list) {
5522 		if (user_data == req->user_data) {
5523 			ret = 0;
5524 			break;
5525 		}
5526 	}
5527 
5528 	if (ret == -ENOENT)
5529 		return ret;
5530 
5531 	return __io_timeout_cancel(req);
5532 }
5533 
5534 static int io_timeout_remove_prep(struct io_kiocb *req,
5535 				  const struct io_uring_sqe *sqe)
5536 {
5537 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5538 		return -EINVAL;
5539 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5540 		return -EINVAL;
5541 	if (sqe->ioprio || sqe->buf_index || sqe->len || sqe->timeout_flags ||
5542 	    sqe->splice_fd_in)
5543 		return -EINVAL;
5544 
5545 	req->timeout_rem.addr = READ_ONCE(sqe->addr);
5546 	return 0;
5547 }
5548 
5549 /*
5550  * Remove or update an existing timeout command
5551  */
5552 static int io_timeout_remove(struct io_kiocb *req)
5553 {
5554 	struct io_ring_ctx *ctx = req->ctx;
5555 	int ret;
5556 
5557 	spin_lock_irq(&ctx->completion_lock);
5558 	ret = io_timeout_cancel(ctx, req->timeout_rem.addr);
5559 
5560 	io_cqring_fill_event(req, ret);
5561 	io_commit_cqring(ctx);
5562 	spin_unlock_irq(&ctx->completion_lock);
5563 	io_cqring_ev_posted(ctx);
5564 	if (ret < 0)
5565 		req_set_fail_links(req);
5566 	io_put_req(req);
5567 	return 0;
5568 }
5569 
5570 static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
5571 			   bool is_timeout_link)
5572 {
5573 	struct io_timeout_data *data;
5574 	unsigned flags;
5575 	u32 off = READ_ONCE(sqe->off);
5576 
5577 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5578 		return -EINVAL;
5579 	if (sqe->ioprio || sqe->buf_index || sqe->len != 1 ||
5580 	    sqe->splice_fd_in)
5581 		return -EINVAL;
5582 	if (off && is_timeout_link)
5583 		return -EINVAL;
5584 	flags = READ_ONCE(sqe->timeout_flags);
5585 	if (flags & ~IORING_TIMEOUT_ABS)
5586 		return -EINVAL;
5587 
5588 	req->timeout.off = off;
5589 
5590 	if (!req->async_data && io_alloc_async_data(req))
5591 		return -ENOMEM;
5592 
5593 	data = req->async_data;
5594 	data->req = req;
5595 
5596 	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
5597 		return -EFAULT;
5598 
5599 	if (flags & IORING_TIMEOUT_ABS)
5600 		data->mode = HRTIMER_MODE_ABS;
5601 	else
5602 		data->mode = HRTIMER_MODE_REL;
5603 
5604 	hrtimer_init(&data->timer, CLOCK_MONOTONIC, data->mode);
5605 	return 0;
5606 }
5607 
5608 static int io_timeout(struct io_kiocb *req)
5609 {
5610 	struct io_ring_ctx *ctx = req->ctx;
5611 	struct io_timeout_data *data = req->async_data;
5612 	struct list_head *entry;
5613 	u32 tail, off = req->timeout.off;
5614 
5615 	spin_lock_irq(&ctx->completion_lock);
5616 
5617 	/*
5618 	 * sqe->off holds how many events need to occur for this
5619 	 * timeout event to be satisfied. If it isn't set, then this is
5620 	 * a pure timeout request and the sequence isn't used.
5621 	 */
5622 	if (io_is_timeout_noseq(req)) {
5623 		entry = ctx->timeout_list.prev;
5624 		goto add;
5625 	}
5626 
5627 	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
5628 	req->timeout.target_seq = tail + off;
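	/*
	 * For example, with off == 3 and a current tail of 10 (timeouts
	 * excluded), target_seq becomes 13: the request completes either when
	 * the hrtimer fires or once three more non-timeout CQEs have posted.
	 */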
5629 
5630 	/* Update the last seq here in case io_flush_timeouts() hasn't.
5631 	 * This is safe because ->completion_lock is held, and submissions
5632 	 * and completions are never mixed in the same ->completion_lock section.
5633 	 */
5634 	ctx->cq_last_tm_flush = tail;
5635 
5636 	/*
5637 	 * Insertion sort, ensuring the first entry in the list is always
5638 	 * the one we need first.
5639 	 */
5640 	list_for_each_prev(entry, &ctx->timeout_list) {
5641 		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb,
5642 						  timeout.list);
5643 
5644 		if (io_is_timeout_noseq(nxt))
5645 			continue;
5646 		/* nxt.seq is behind @tail, otherwise would've been completed */
5647 		if (off >= nxt->timeout.target_seq - tail)
5648 			break;
5649 	}
5650 add:
5651 	list_add(&req->timeout.list, entry);
5652 	data->timer.function = io_timeout_fn;
5653 	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
5654 	spin_unlock_irq(&ctx->completion_lock);
5655 	return 0;
5656 }
5657 
5658 static bool io_cancel_cb(struct io_wq_work *work, void *data)
5659 {
5660 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
5661 
5662 	return req->user_data == (unsigned long) data;
5663 }
5664 
5665 static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
5666 {
5667 	enum io_wq_cancel cancel_ret;
5668 	int ret = 0;
5669 
5670 	cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
5671 	switch (cancel_ret) {
5672 	case IO_WQ_CANCEL_OK:
5673 		ret = 0;
5674 		break;
5675 	case IO_WQ_CANCEL_RUNNING:
5676 		ret = -EALREADY;
5677 		break;
5678 	case IO_WQ_CANCEL_NOTFOUND:
5679 		ret = -ENOENT;
5680 		break;
5681 	}
5682 
5683 	return ret;
5684 }
5685 
5686 static void io_async_find_and_cancel(struct io_ring_ctx *ctx,
5687 				     struct io_kiocb *req, __u64 sqe_addr,
5688 				     int success_ret)
5689 {
5690 	unsigned long flags;
5691 	int ret;
5692 
5693 	ret = io_async_cancel_one(ctx, (void *) (unsigned long) sqe_addr);
5694 	if (ret != -ENOENT) {
5695 		spin_lock_irqsave(&ctx->completion_lock, flags);
5696 		goto done;
5697 	}
5698 
5699 	spin_lock_irqsave(&ctx->completion_lock, flags);
5700 	ret = io_timeout_cancel(ctx, sqe_addr);
5701 	if (ret != -ENOENT)
5702 		goto done;
5703 	ret = io_poll_cancel(ctx, sqe_addr);
5704 done:
5705 	if (!ret)
5706 		ret = success_ret;
5707 	io_cqring_fill_event(req, ret);
5708 	io_commit_cqring(ctx);
5709 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
5710 	io_cqring_ev_posted(ctx);
5711 
5712 	if (ret < 0)
5713 		req_set_fail_links(req);
5714 	io_put_req(req);
5715 }
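/*
 * Cancellation is attempted in order: io-wq work first, then pending
 * timeouts, then armed poll requests, and the first definitive answer wins.
 * -EALREADY means the target was found but is already running or past the
 * point of cancellation; -ENOENT means nothing matched sqe_addr.
 */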
5716 
5717 static int io_async_cancel_prep(struct io_kiocb *req,
5718 				const struct io_uring_sqe *sqe)
5719 {
5720 	if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL))
5721 		return -EINVAL;
5722 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5723 		return -EINVAL;
5724 	if (sqe->ioprio || sqe->off || sqe->len || sqe->cancel_flags ||
5725 	    sqe->splice_fd_in)
5726 		return -EINVAL;
5727 
5728 	req->cancel.addr = READ_ONCE(sqe->addr);
5729 	return 0;
5730 }
5731 
5732 static int io_async_cancel(struct io_kiocb *req)
5733 {
5734 	struct io_ring_ctx *ctx = req->ctx;
5735 
5736 	io_async_find_and_cancel(ctx, req, req->cancel.addr, 0);
5737 	return 0;
5738 }
5739 
5740 static int io_files_update_prep(struct io_kiocb *req,
5741 				const struct io_uring_sqe *sqe)
5742 {
5743 	if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL))
5744 		return -EINVAL;
5745 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
5746 		return -EINVAL;
5747 	if (sqe->ioprio || sqe->rw_flags)
5748 		return -EINVAL;
5749 
5750 	req->files_update.offset = READ_ONCE(sqe->off);
5751 	req->files_update.nr_args = READ_ONCE(sqe->len);
5752 	if (!req->files_update.nr_args)
5753 		return -EINVAL;
5754 	req->files_update.arg = READ_ONCE(sqe->addr);
5755 	return 0;
5756 }
5757 
5758 static int io_files_update(struct io_kiocb *req, bool force_nonblock,
5759 			   struct io_comp_state *cs)
5760 {
5761 	struct io_ring_ctx *ctx = req->ctx;
5762 	struct io_uring_files_update up;
5763 	int ret;
5764 
5765 	if (force_nonblock)
5766 		return -EAGAIN;
5767 
5768 	up.offset = req->files_update.offset;
5769 	up.fds = req->files_update.arg;
5770 
5771 	mutex_lock(&ctx->uring_lock);
5772 	ret = __io_sqe_files_update(ctx, &up, req->files_update.nr_args);
5773 	mutex_unlock(&ctx->uring_lock);
5774 
5775 	if (ret < 0)
5776 		req_set_fail_links(req);
5777 	__io_req_complete(req, ret, 0, cs);
5778 	return 0;
5779 }
5780 
5781 static int io_req_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5782 {
5783 	switch (req->opcode) {
5784 	case IORING_OP_NOP:
5785 		return 0;
5786 	case IORING_OP_READV:
5787 	case IORING_OP_READ_FIXED:
5788 	case IORING_OP_READ:
5789 		return io_read_prep(req, sqe);
5790 	case IORING_OP_WRITEV:
5791 	case IORING_OP_WRITE_FIXED:
5792 	case IORING_OP_WRITE:
5793 		return io_write_prep(req, sqe);
5794 	case IORING_OP_POLL_ADD:
5795 		return io_poll_add_prep(req, sqe);
5796 	case IORING_OP_POLL_REMOVE:
5797 		return io_poll_remove_prep(req, sqe);
5798 	case IORING_OP_FSYNC:
5799 		return io_prep_fsync(req, sqe);
5800 	case IORING_OP_SYNC_FILE_RANGE:
5801 		return io_prep_sfr(req, sqe);
5802 	case IORING_OP_SENDMSG:
5803 	case IORING_OP_SEND:
5804 		return io_sendmsg_prep(req, sqe);
5805 	case IORING_OP_RECVMSG:
5806 	case IORING_OP_RECV:
5807 		return io_recvmsg_prep(req, sqe);
5808 	case IORING_OP_CONNECT:
5809 		return io_connect_prep(req, sqe);
5810 	case IORING_OP_TIMEOUT:
5811 		return io_timeout_prep(req, sqe, false);
5812 	case IORING_OP_TIMEOUT_REMOVE:
5813 		return io_timeout_remove_prep(req, sqe);
5814 	case IORING_OP_ASYNC_CANCEL:
5815 		return io_async_cancel_prep(req, sqe);
5816 	case IORING_OP_LINK_TIMEOUT:
5817 		return io_timeout_prep(req, sqe, true);
5818 	case IORING_OP_ACCEPT:
5819 		return io_accept_prep(req, sqe);
5820 	case IORING_OP_FALLOCATE:
5821 		return io_fallocate_prep(req, sqe);
5822 	case IORING_OP_OPENAT:
5823 		return io_openat_prep(req, sqe);
5824 	case IORING_OP_CLOSE:
5825 		return io_close_prep(req, sqe);
5826 	case IORING_OP_FILES_UPDATE:
5827 		return io_files_update_prep(req, sqe);
5828 	case IORING_OP_STATX:
5829 		return io_statx_prep(req, sqe);
5830 	case IORING_OP_FADVISE:
5831 		return io_fadvise_prep(req, sqe);
5832 	case IORING_OP_MADVISE:
5833 		return io_madvise_prep(req, sqe);
5834 	case IORING_OP_OPENAT2:
5835 		return io_openat2_prep(req, sqe);
5836 	case IORING_OP_EPOLL_CTL:
5837 		return io_epoll_ctl_prep(req, sqe);
5838 	case IORING_OP_SPLICE:
5839 		return io_splice_prep(req, sqe);
5840 	case IORING_OP_PROVIDE_BUFFERS:
5841 		return io_provide_buffers_prep(req, sqe);
5842 	case IORING_OP_REMOVE_BUFFERS:
5843 		return io_remove_buffers_prep(req, sqe);
5844 	case IORING_OP_TEE:
5845 		return io_tee_prep(req, sqe);
5846 	}
5847 
5848 	printk_once(KERN_WARNING "io_uring: unhandled opcode %d\n",
5849 			req->opcode);
5850 	return -EINVAL;
5851 }
5852 
5853 static int io_req_defer_prep(struct io_kiocb *req,
5854 			     const struct io_uring_sqe *sqe)
5855 {
5856 	if (!sqe)
5857 		return 0;
5858 	if (io_alloc_async_data(req))
5859 		return -EAGAIN;
5860 	return io_req_prep(req, sqe);
5861 }
5862 
5863 static u32 io_get_sequence(struct io_kiocb *req)
5864 {
5865 	struct io_kiocb *pos;
5866 	struct io_ring_ctx *ctx = req->ctx;
5867 	u32 total_submitted, nr_reqs = 1;
5868 
5869 	if (req->flags & REQ_F_LINK_HEAD)
5870 		list_for_each_entry(pos, &req->link_list, link_list)
5871 			nr_reqs++;
5872 
5873 	total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped;
5874 	return total_submitted - nr_reqs;
5875 }
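/*
 * Worked example: if 10 SQEs have been consumed so far, 1 of them was
 * dropped as invalid, and this request heads a link of two, then
 * total_submitted == 9, nr_reqs == 2 and the returned sequence is 7 -- the
 * number of requests submitted before this chain.
 */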
5876 
5877 static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
5878 {
5879 	struct io_ring_ctx *ctx = req->ctx;
5880 	struct io_defer_entry *de;
5881 	int ret;
5882 	u32 seq;
5883 
5884 	/* Still need to defer if there are pending reqs in the defer list. */
5885 	if (likely(list_empty_careful(&ctx->defer_list) &&
5886 		!(req->flags & REQ_F_IO_DRAIN)))
5887 		return 0;
5888 
5889 	seq = io_get_sequence(req);
5890 	/* Still a chance to pass the sequence check */
5891 	if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list))
5892 		return 0;
5893 
5894 	if (!req->async_data) {
5895 		ret = io_req_defer_prep(req, sqe);
5896 		if (ret)
5897 			return ret;
5898 	}
5899 	io_prep_async_link(req);
5900 	de = kmalloc(sizeof(*de), GFP_KERNEL);
5901 	if (!de)
5902 		return -ENOMEM;
5903 
5904 	spin_lock_irq(&ctx->completion_lock);
5905 	if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) {
5906 		spin_unlock_irq(&ctx->completion_lock);
5907 		kfree(de);
5908 		io_queue_async_work(req);
5909 		return -EIOCBQUEUED;
5910 	}
5911 
5912 	trace_io_uring_defer(ctx, req, req->user_data);
5913 	de->req = req;
5914 	de->seq = seq;
5915 	list_add_tail(&de->list, &ctx->defer_list);
5916 	spin_unlock_irq(&ctx->completion_lock);
5917 	return -EIOCBQUEUED;
5918 }
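/*
 * A drained request (IOSQE_IO_DRAIN, or one submitted while earlier drained
 * work is still pending) is parked on ctx->defer_list together with the
 * sequence it must wait for; -EIOCBQUEUED tells the caller the request has
 * been consumed here and will be issued later.
 */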
5919 
5920 static void io_req_drop_files(struct io_kiocb *req)
5921 {
5922 	struct io_ring_ctx *ctx = req->ctx;
5923 	struct io_uring_task *tctx = req->task->io_uring;
5924 	unsigned long flags;
5925 
5926 	if (req->work.flags & IO_WQ_WORK_FILES) {
5927 		put_files_struct(req->work.identity->files);
5928 		put_nsproxy(req->work.identity->nsproxy);
5929 	}
5930 	spin_lock_irqsave(&ctx->inflight_lock, flags);
5931 	list_del(&req->inflight_entry);
5932 	spin_unlock_irqrestore(&ctx->inflight_lock, flags);
5933 	req->flags &= ~REQ_F_INFLIGHT;
5934 	req->work.flags &= ~IO_WQ_WORK_FILES;
5935 	if (atomic_read(&tctx->in_idle))
5936 		wake_up(&tctx->wait);
5937 }
5938 
5939 static void __io_clean_op(struct io_kiocb *req)
5940 {
5941 	if (req->flags & REQ_F_BUFFER_SELECTED) {
5942 		switch (req->opcode) {
5943 		case IORING_OP_READV:
5944 		case IORING_OP_READ_FIXED:
5945 		case IORING_OP_READ:
5946 			kfree((void *)(unsigned long)req->rw.addr);
5947 			break;
5948 		case IORING_OP_RECVMSG:
5949 		case IORING_OP_RECV:
5950 			kfree(req->sr_msg.kbuf);
5951 			break;
5952 		}
5953 		req->flags &= ~REQ_F_BUFFER_SELECTED;
5954 	}
5955 
5956 	if (req->flags & REQ_F_NEED_CLEANUP) {
5957 		switch (req->opcode) {
5958 		case IORING_OP_READV:
5959 		case IORING_OP_READ_FIXED:
5960 		case IORING_OP_READ:
5961 		case IORING_OP_WRITEV:
5962 		case IORING_OP_WRITE_FIXED:
5963 		case IORING_OP_WRITE: {
5964 			struct io_async_rw *io = req->async_data;
5965 			if (io->free_iovec)
5966 				kfree(io->free_iovec);
5967 			break;
5968 			}
5969 		case IORING_OP_RECVMSG:
5970 		case IORING_OP_SENDMSG: {
5971 			struct io_async_msghdr *io = req->async_data;
5972 			if (io->iov != io->fast_iov)
5973 				kfree(io->iov);
5974 			break;
5975 			}
5976 		case IORING_OP_SPLICE:
5977 		case IORING_OP_TEE:
5978 			io_put_file(req, req->splice.file_in,
5979 				    (req->splice.flags & SPLICE_F_FD_IN_FIXED));
5980 			break;
5981 		case IORING_OP_OPENAT:
5982 		case IORING_OP_OPENAT2:
5983 			if (req->open.filename)
5984 				putname(req->open.filename);
5985 			break;
5986 		}
5987 		req->flags &= ~REQ_F_NEED_CLEANUP;
5988 	}
5989 }
5990 
5991 static int io_issue_sqe(struct io_kiocb *req, bool force_nonblock,
5992 			struct io_comp_state *cs)
5993 {
5994 	struct io_ring_ctx *ctx = req->ctx;
5995 	int ret;
5996 
5997 	switch (req->opcode) {
5998 	case IORING_OP_NOP:
5999 		ret = io_nop(req, cs);
6000 		break;
6001 	case IORING_OP_READV:
6002 	case IORING_OP_READ_FIXED:
6003 	case IORING_OP_READ:
6004 		ret = io_read(req, force_nonblock, cs);
6005 		break;
6006 	case IORING_OP_WRITEV:
6007 	case IORING_OP_WRITE_FIXED:
6008 	case IORING_OP_WRITE:
6009 		ret = io_write(req, force_nonblock, cs);
6010 		break;
6011 	case IORING_OP_FSYNC:
6012 		ret = io_fsync(req, force_nonblock);
6013 		break;
6014 	case IORING_OP_POLL_ADD:
6015 		ret = io_poll_add(req);
6016 		break;
6017 	case IORING_OP_POLL_REMOVE:
6018 		ret = io_poll_remove(req);
6019 		break;
6020 	case IORING_OP_SYNC_FILE_RANGE:
6021 		ret = io_sync_file_range(req, force_nonblock);
6022 		break;
6023 	case IORING_OP_SENDMSG:
6024 		ret = io_sendmsg(req, force_nonblock, cs);
6025 		break;
6026 	case IORING_OP_SEND:
6027 		ret = io_send(req, force_nonblock, cs);
6028 		break;
6029 	case IORING_OP_RECVMSG:
6030 		ret = io_recvmsg(req, force_nonblock, cs);
6031 		break;
6032 	case IORING_OP_RECV:
6033 		ret = io_recv(req, force_nonblock, cs);
6034 		break;
6035 	case IORING_OP_TIMEOUT:
6036 		ret = io_timeout(req);
6037 		break;
6038 	case IORING_OP_TIMEOUT_REMOVE:
6039 		ret = io_timeout_remove(req);
6040 		break;
6041 	case IORING_OP_ACCEPT:
6042 		ret = io_accept(req, force_nonblock, cs);
6043 		break;
6044 	case IORING_OP_CONNECT:
6045 		ret = io_connect(req, force_nonblock, cs);
6046 		break;
6047 	case IORING_OP_ASYNC_CANCEL:
6048 		ret = io_async_cancel(req);
6049 		break;
6050 	case IORING_OP_FALLOCATE:
6051 		ret = io_fallocate(req, force_nonblock);
6052 		break;
6053 	case IORING_OP_OPENAT:
6054 		ret = io_openat(req, force_nonblock);
6055 		break;
6056 	case IORING_OP_CLOSE:
6057 		ret = io_close(req, force_nonblock, cs);
6058 		break;
6059 	case IORING_OP_FILES_UPDATE:
6060 		ret = io_files_update(req, force_nonblock, cs);
6061 		break;
6062 	case IORING_OP_STATX:
6063 		ret = io_statx(req, force_nonblock);
6064 		break;
6065 	case IORING_OP_FADVISE:
6066 		ret = io_fadvise(req, force_nonblock);
6067 		break;
6068 	case IORING_OP_MADVISE:
6069 		ret = io_madvise(req, force_nonblock);
6070 		break;
6071 	case IORING_OP_OPENAT2:
6072 		ret = io_openat2(req, force_nonblock);
6073 		break;
6074 	case IORING_OP_EPOLL_CTL:
6075 		ret = io_epoll_ctl(req, force_nonblock, cs);
6076 		break;
6077 	case IORING_OP_SPLICE:
6078 		ret = io_splice(req, force_nonblock);
6079 		break;
6080 	case IORING_OP_PROVIDE_BUFFERS:
6081 		ret = io_provide_buffers(req, force_nonblock, cs);
6082 		break;
6083 	case IORING_OP_REMOVE_BUFFERS:
6084 		ret = io_remove_buffers(req, force_nonblock, cs);
6085 		break;
6086 	case IORING_OP_TEE:
6087 		ret = io_tee(req, force_nonblock);
6088 		break;
6089 	default:
6090 		ret = -EINVAL;
6091 		break;
6092 	}
6093 
6094 	if (ret)
6095 		return ret;
6096 
6097 	/* If the op doesn't have a file, we're not polling for it */
6098 	if ((ctx->flags & IORING_SETUP_IOPOLL) && req->file) {
6099 		const bool in_async = io_wq_current_is_worker();
6100 
6101 		/* workqueue context doesn't hold uring_lock, grab it now */
6102 		if (in_async)
6103 			mutex_lock(&ctx->uring_lock);
6104 
6105 		io_iopoll_req_issued(req);
6106 
6107 		if (in_async)
6108 			mutex_unlock(&ctx->uring_lock);
6109 	}
6110 
6111 	return 0;
6112 }
6113 
6114 static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work)
6115 {
6116 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
6117 	struct io_kiocb *timeout;
6118 	int ret = 0;
6119 
6120 	timeout = io_prep_linked_timeout(req);
6121 	if (timeout)
6122 		io_queue_linked_timeout(timeout);
6123 
6124 	/* if NO_CANCEL is set, we must still run the work */
6125 	if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) ==
6126 				IO_WQ_WORK_CANCEL) {
6127 		/* io-wq is going to take down one */
6128 		refcount_inc(&req->refs);
6129 		percpu_ref_get(&req->ctx->refs);
6130 		io_req_task_work_add_fallback(req, io_req_task_cancel);
6131 		return io_steal_work(req);
6132 	}
6133 
6134 	if (!ret) {
6135 		do {
6136 			ret = io_issue_sqe(req, false, NULL);
6137 			/*
6138 			 * We can get EAGAIN for polled IO even though we're
6139 			 * forcing a sync submission from here, since we can't
6140 			 * wait for request slots on the block side.
6141 			 */
6142 			if (ret != -EAGAIN)
6143 				break;
6144 			cond_resched();
6145 		} while (1);
6146 	}
6147 
6148 	if (ret) {
6149 		struct io_ring_ctx *lock_ctx = NULL;
6150 
6151 		if (req->ctx->flags & IORING_SETUP_IOPOLL)
6152 			lock_ctx = req->ctx;
6153 
6154 		/*
6155 		 * io_iopoll_complete() does not hold completion_lock when
6156 		 * completing polled io, so for polled io we cannot call
6157 		 * io_req_complete() directly here; doing so could race with
6158 		 * concurrent access to the cqring, defer_list, etc., which is
6159 		 * not safe. Since io_iopoll_complete() is always called under
6160 		 * uring_lock, take uring_lock here as well before completing
6161 		 * polled io.
6162 		 */
6163 		if (lock_ctx)
6164 			mutex_lock(&lock_ctx->uring_lock);
6165 
6166 		req_set_fail_links(req);
6167 		io_req_complete(req, ret);
6168 
6169 		if (lock_ctx)
6170 			mutex_unlock(&lock_ctx->uring_lock);
6171 	}
6172 
6173 	return io_steal_work(req);
6174 }
6175 
6176 static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
6177 					      int index)
6178 {
6179 	struct fixed_file_table *table;
6180 
6181 	table = &ctx->file_data->table[index >> IORING_FILE_TABLE_SHIFT];
6182 	return table->files[index & IORING_FILE_TABLE_MASK];
6183 }
6184 
6185 static struct file *io_file_get(struct io_submit_state *state,
6186 				struct io_kiocb *req, int fd, bool fixed)
6187 {
6188 	struct io_ring_ctx *ctx = req->ctx;
6189 	struct file *file;
6190 
6191 	if (fixed) {
6192 		if (unlikely((unsigned int)fd >= ctx->nr_user_files))
6193 			return NULL;
6194 		fd = array_index_nospec(fd, ctx->nr_user_files);
6195 		file = io_file_from_index(ctx, fd);
6196 		if (file) {
6197 			req->fixed_file_refs = &ctx->file_data->node->refs;
6198 			percpu_ref_get(req->fixed_file_refs);
6199 		}
6200 	} else {
6201 		trace_io_uring_file_get(ctx, fd);
6202 		file = __io_file_get(state, fd);
6203 	}
6204 
6205 	if (file && file->f_op == &io_uring_fops &&
6206 	    !(req->flags & REQ_F_INFLIGHT)) {
6207 		io_req_init_async(req);
6208 		req->flags |= REQ_F_INFLIGHT;
6209 
6210 		spin_lock_irq(&ctx->inflight_lock);
6211 		list_add(&req->inflight_entry, &ctx->inflight_list);
6212 		spin_unlock_irq(&ctx->inflight_lock);
6213 	}
6214 
6215 	return file;
6216 }
6217 
6218 static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
6219 			   int fd)
6220 {
6221 	bool fixed;
6222 
6223 	fixed = (req->flags & REQ_F_FIXED_FILE) != 0;
6224 	if (unlikely(!fixed && io_async_submit(req->ctx)))
6225 		return -EBADF;
6226 
6227 	req->file = io_file_get(state, req, fd, fixed);
6228 	if (req->file || io_op_defs[req->opcode].needs_file_no_error)
6229 		return 0;
6230 	return -EBADF;
6231 }
6232 
6233 static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
6234 {
6235 	struct io_timeout_data *data = container_of(timer,
6236 						struct io_timeout_data, timer);
6237 	struct io_kiocb *req = data->req;
6238 	struct io_ring_ctx *ctx = req->ctx;
6239 	struct io_kiocb *prev = NULL;
6240 	unsigned long flags;
6241 
6242 	spin_lock_irqsave(&ctx->completion_lock, flags);
6243 
6244 	/*
6245 	 * We don't expect the list to be empty, that will only happen if we
6246 	 * race with the completion of the linked work.
6247 	 */
6248 	if (!list_empty(&req->link_list)) {
6249 		prev = list_entry(req->link_list.prev, struct io_kiocb,
6250 				  link_list);
6251 		if (refcount_inc_not_zero(&prev->refs))
6252 			list_del_init(&req->link_list);
6253 		else
6254 			prev = NULL;
6255 	}
6256 
6257 	spin_unlock_irqrestore(&ctx->completion_lock, flags);
6258 
6259 	if (prev) {
6260 		io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME);
6261 		io_put_req_deferred(prev, 1);
6262 	} else {
6263 		io_cqring_add_event(req, -ETIME, 0);
6264 		io_put_req_deferred(req, 1);
6265 	}
6266 	return HRTIMER_NORESTART;
6267 }
6268 
6269 static void __io_queue_linked_timeout(struct io_kiocb *req)
6270 {
6271 	/*
6272 	 * If the list is now empty, then our linked request finished before
6273 	 * we got a chance to set up the timer
6274 	 */
6275 	if (!list_empty(&req->link_list)) {
6276 		struct io_timeout_data *data = req->async_data;
6277 
6278 		data->timer.function = io_link_timeout_fn;
6279 		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
6280 				data->mode);
6281 	}
6282 }
6283 
6284 static void io_queue_linked_timeout(struct io_kiocb *req)
6285 {
6286 	struct io_ring_ctx *ctx = req->ctx;
6287 
6288 	spin_lock_irq(&ctx->completion_lock);
6289 	__io_queue_linked_timeout(req);
6290 	spin_unlock_irq(&ctx->completion_lock);
6291 
6292 	/* drop submission reference */
6293 	io_put_req(req);
6294 }
6295 
6296 static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req)
6297 {
6298 	struct io_kiocb *nxt;
6299 
6300 	if (!(req->flags & REQ_F_LINK_HEAD))
6301 		return NULL;
6302 	if (req->flags & REQ_F_LINK_TIMEOUT)
6303 		return NULL;
6304 
6305 	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb,
6306 					link_list);
6307 	if (!nxt || nxt->opcode != IORING_OP_LINK_TIMEOUT)
6308 		return NULL;
6309 
6310 	nxt->flags |= REQ_F_LTIMEOUT_ACTIVE;
6311 	req->flags |= REQ_F_LINK_TIMEOUT;
6312 	return nxt;
6313 }
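/*
 * A minimal userspace sketch of arming a linked timeout, assuming liburing's
 * helpers (names and signatures vary by version):
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_read(sqe, fd, buf, len, 0);
 *	sqe->flags |= IOSQE_IO_LINK;
 *
 *	sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_link_timeout(sqe, &ts, 0);
 *	io_uring_submit(&ring);
 *
 * If the read has not completed when the timer expires, io_link_timeout_fn()
 * above looks it up through the link list and cancels it.
 */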
6314 
6315 static void __io_queue_sqe(struct io_kiocb *req, struct io_comp_state *cs)
6316 {
6317 	struct io_kiocb *linked_timeout;
6318 	const struct cred *old_creds = NULL;
6319 	int ret;
6320 
6321 again:
6322 	linked_timeout = io_prep_linked_timeout(req);
6323 
6324 	if ((req->flags & REQ_F_WORK_INITIALIZED) &&
6325 	    (req->work.flags & IO_WQ_WORK_CREDS) &&
6326 	    req->work.identity->creds != current_cred()) {
6327 		if (old_creds)
6328 			revert_creds(old_creds);
6329 		if (old_creds == req->work.identity->creds)
6330 			old_creds = NULL; /* restored original creds */
6331 		else
6332 			old_creds = override_creds(req->work.identity->creds);
6333 	}
6334 
6335 	ret = io_issue_sqe(req, true, cs);
6336 
6337 	/*
6338 	 * We async punt it if the file wasn't marked NOWAIT, or if the file
6339 	 * doesn't support non-blocking read/write attempts
6340 	 */
6341 	if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
6342 		if (!io_arm_poll_handler(req)) {
6343 			/*
6344 			 * Queued up for async execution, worker will release
6345 			 * submit reference when the iocb is actually submitted.
6346 			 */
6347 			io_queue_async_work(req);
6348 		}
6349 
6350 		if (linked_timeout)
6351 			io_queue_linked_timeout(linked_timeout);
6352 	} else if (likely(!ret)) {
6353 		/* drop submission reference */
6354 		req = io_put_req_find_next(req);
6355 		if (linked_timeout)
6356 			io_queue_linked_timeout(linked_timeout);
6357 
6358 		if (req) {
6359 			if (!(req->flags & REQ_F_FORCE_ASYNC))
6360 				goto again;
6361 			io_queue_async_work(req);
6362 		}
6363 	} else {
6364 		/* un-prep timeout, so it'll be killed as any other linked */
6365 		req->flags &= ~REQ_F_LINK_TIMEOUT;
6366 		req_set_fail_links(req);
6367 		io_put_req(req);
6368 		io_req_complete(req, ret);
6369 	}
6370 
6371 	if (old_creds)
6372 		revert_creds(old_creds);
6373 }
6374 
6375 static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
6376 			 struct io_comp_state *cs)
6377 {
6378 	int ret;
6379 
6380 	ret = io_req_defer(req, sqe);
6381 	if (ret) {
6382 		if (ret != -EIOCBQUEUED) {
6383 fail_req:
6384 			req_set_fail_links(req);
6385 			io_put_req(req);
6386 			io_req_complete(req, ret);
6387 		}
6388 	} else if (req->flags & REQ_F_FORCE_ASYNC) {
6389 		if (!req->async_data) {
6390 			ret = io_req_defer_prep(req, sqe);
6391 			if (unlikely(ret))
6392 				goto fail_req;
6393 		}
6394 		io_queue_async_work(req);
6395 	} else {
6396 		if (sqe) {
6397 			ret = io_req_prep(req, sqe);
6398 			if (unlikely(ret))
6399 				goto fail_req;
6400 		}
6401 		__io_queue_sqe(req, cs);
6402 	}
6403 }
6404 
6405 static inline void io_queue_link_head(struct io_kiocb *req,
6406 				      struct io_comp_state *cs)
6407 {
6408 	if (unlikely(req->flags & REQ_F_FAIL_LINK)) {
6409 		io_put_req(req);
6410 		io_req_complete(req, -ECANCELED);
6411 	} else
6412 		io_queue_sqe(req, NULL, cs);
6413 }
6414 
6415 static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
6416 			 struct io_kiocb **link, struct io_comp_state *cs)
6417 {
6418 	struct io_ring_ctx *ctx = req->ctx;
6419 	int ret;
6420 
6421 	/*
6422 	 * If we already have a head request, queue this one for async
6423 	 * submittal once the head completes. If we don't have a head but
6424 	 * IOSQE_IO_LINK is set in the sqe, start a new head. This one will be
6425 	 * submitted sync once the chain is complete. If none of those
6426 	 * conditions are true (normal request), then just queue it.
6427 	 */
6428 	if (*link) {
6429 		struct io_kiocb *head = *link;
6430 
6431 		/*
6432 		 * Taking sequential execution of a link, draining both sides
6433 		 * of the link also fulfils IOSQE_IO_DRAIN semantics for all
6434 		 * requests in the link. So, it drains the head and the
6435 		 * next after the link request. The last one is done via
6436 		 * drain_next flag to persist the effect across calls.
6437 		 */
6438 		if (req->flags & REQ_F_IO_DRAIN) {
6439 			head->flags |= REQ_F_IO_DRAIN;
6440 			ctx->drain_next = 1;
6441 		}
6442 		ret = io_req_defer_prep(req, sqe);
6443 		if (unlikely(ret)) {
6444 			/* fail even hard links since we don't submit */
6445 			head->flags |= REQ_F_FAIL_LINK;
6446 			return ret;
6447 		}
6448 		trace_io_uring_link(ctx, req, head);
6449 		list_add_tail(&req->link_list, &head->link_list);
6450 
6451 		/* last request of a link, enqueue the link */
6452 		if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) {
6453 			io_queue_link_head(head, cs);
6454 			*link = NULL;
6455 		}
6456 	} else {
6457 		if (unlikely(ctx->drain_next)) {
6458 			req->flags |= REQ_F_IO_DRAIN;
6459 			ctx->drain_next = 0;
6460 		}
6461 		if (req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) {
6462 			req->flags |= REQ_F_LINK_HEAD;
6463 			INIT_LIST_HEAD(&req->link_list);
6464 
6465 			ret = io_req_defer_prep(req, sqe);
6466 			if (unlikely(ret))
6467 				req->flags |= REQ_F_FAIL_LINK;
6468 			*link = req;
6469 		} else {
6470 			io_queue_sqe(req, sqe, cs);
6471 		}
6472 	}
6473 
6474 	return 0;
6475 }
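/*
 * Worked example: three SQEs submitted in one batch, the first two with
 * IOSQE_IO_LINK set, form one chain. The first becomes the link head
 * (REQ_F_LINK_HEAD), the second and third are prepped and appended to
 * head->link_list, and the chain is only queued -- via io_queue_link_head()
 * -- once the third SQE, which carries no link flag, has been added.
 */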
6476 
6477 /*
6478  * Batched submission is done, ensure local IO is flushed out.
6479  */
6480 static void io_submit_state_end(struct io_submit_state *state)
6481 {
6482 	if (!list_empty(&state->comp.list))
6483 		io_submit_flush_completions(&state->comp);
6484 	blk_finish_plug(&state->plug);
6485 	io_state_file_put(state);
6486 	if (state->free_reqs)
6487 		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
6488 }
6489 
6490 /*
6491  * Start submission side cache.
6492  */
6493 static void io_submit_state_start(struct io_submit_state *state,
6494 				  struct io_ring_ctx *ctx, unsigned int max_ios)
6495 {
6496 	blk_start_plug(&state->plug);
6497 	state->comp.nr = 0;
6498 	INIT_LIST_HEAD(&state->comp.list);
6499 	state->comp.ctx = ctx;
6500 	state->free_reqs = 0;
6501 	state->file = NULL;
6502 	state->ios_left = max_ios;
6503 }
6504 
6505 static void io_commit_sqring(struct io_ring_ctx *ctx)
6506 {
6507 	struct io_rings *rings = ctx->rings;
6508 
6509 	/*
6510 	 * Ensure any loads from the SQEs are done at this point,
6511 	 * since once we write the new head, the application could
6512 	 * write new data to them.
6513 	 */
6514 	smp_store_release(&rings->sq.head, ctx->cached_sq_head);
6515 }
6516 
6517 /*
6518  * Fetch an sqe, if one is available. Note that sqe_ptr will point to memory
6519  * that is mapped by userspace. This means that care needs to be taken to
6520  * ensure that reads are stable, as we cannot rely on userspace always
6521  * being a good citizen. If members of the sqe are validated and then later
6522  * used, it's important that those reads are done through READ_ONCE() to
6523  * prevent a re-load down the line.
6524  */
6525 static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
6526 {
6527 	u32 *sq_array = ctx->sq_array;
6528 	unsigned head;
6529 
6530 	/*
6531 	 * The cached sq head (or cq tail) serves two purposes:
6532 	 *
6533 	 * 1) allows us to batch the cost of updating the user visible
6534 	 *    head updates.
6535 	 * 2) allows the kernel side to track the head on its own, even
6536 	 *    though the application is the one updating it.
6537 	 */
6538 	head = READ_ONCE(sq_array[ctx->cached_sq_head & ctx->sq_mask]);
6539 	if (likely(head < ctx->sq_entries))
6540 		return &ctx->sq_sqes[head];
6541 
6542 	/* drop invalid entries */
6543 	ctx->cached_sq_dropped++;
6544 	WRITE_ONCE(ctx->rings->sq_dropped, ctx->cached_sq_dropped);
6545 	return NULL;
6546 }
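/*
 * Worked example: with sq_entries == 8 (sq_mask == 7) and cached_sq_head ==
 * 10, the slot read above is sq_array[2]; the value the application stored
 * there is the index into sq_sqes[] that is actually used, which is why the
 * head < sq_entries bounds check is needed.
 */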
6547 
6548 static inline void io_consume_sqe(struct io_ring_ctx *ctx)
6549 {
6550 	ctx->cached_sq_head++;
6551 }
6552 
6553 /*
6554  * Check SQE restrictions (opcode and flags).
6555  *
6556  * Returns 'true' if SQE is allowed, 'false' otherwise.
6557  */
6558 static inline bool io_check_restriction(struct io_ring_ctx *ctx,
6559 					struct io_kiocb *req,
6560 					unsigned int sqe_flags)
6561 {
6562 	if (!ctx->restricted)
6563 		return true;
6564 
6565 	if (!test_bit(req->opcode, ctx->restrictions.sqe_op))
6566 		return false;
6567 
6568 	if ((sqe_flags & ctx->restrictions.sqe_flags_required) !=
6569 	    ctx->restrictions.sqe_flags_required)
6570 		return false;
6571 
6572 	if (sqe_flags & ~(ctx->restrictions.sqe_flags_allowed |
6573 			  ctx->restrictions.sqe_flags_required))
6574 		return false;
6575 
6576 	return true;
6577 }
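/*
 * Worked example: on a restricted ring where sqe_flags_required ==
 * IOSQE_FIXED_FILE and sqe_flags_allowed == IOSQE_FIXED_FILE | IOSQE_ASYNC,
 * an SQE with IOSQE_FIXED_FILE passes (assuming its opcode bit is set in
 * sqe_op), IOSQE_FIXED_FILE | IOSQE_IO_LINK fails the allowed check, and a
 * bare IOSQE_ASYNC fails the required check.
 */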
6578 
6579 #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK|	\
6580 				IOSQE_IO_HARDLINK | IOSQE_ASYNC | \
6581 				IOSQE_BUFFER_SELECT)
6582 
6583 static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
6584 		       const struct io_uring_sqe *sqe,
6585 		       struct io_submit_state *state)
6586 {
6587 	unsigned int sqe_flags;
6588 	int id, ret;
6589 
6590 	req->opcode = READ_ONCE(sqe->opcode);
6591 	req->user_data = READ_ONCE(sqe->user_data);
6592 	req->async_data = NULL;
6593 	req->file = NULL;
6594 	req->ctx = ctx;
6595 	req->flags = 0;
6596 	/* one is dropped after submission, the other at completion */
6597 	refcount_set(&req->refs, 2);
6598 	req->task = current;
6599 	req->result = 0;
6600 
6601 	if (unlikely(req->opcode >= IORING_OP_LAST))
6602 		return -EINVAL;
6603 
6604 	if (unlikely(io_sq_thread_acquire_mm(ctx, req)))
6605 		return -EFAULT;
6606 
6607 	sqe_flags = READ_ONCE(sqe->flags);
6608 	/* enforce forwards compatibility on users */
6609 	if (unlikely(sqe_flags & ~SQE_VALID_FLAGS))
6610 		return -EINVAL;
6611 
6612 	if (unlikely(!io_check_restriction(ctx, req, sqe_flags)))
6613 		return -EACCES;
6614 
6615 	if ((sqe_flags & IOSQE_BUFFER_SELECT) &&
6616 	    !io_op_defs[req->opcode].buffer_select)
6617 		return -EOPNOTSUPP;
6618 
6619 	id = READ_ONCE(sqe->personality);
6620 	if (id) {
6621 		struct io_identity *iod;
6622 
6623 		iod = xa_load(&ctx->personalities, id);
6624 		if (unlikely(!iod))
6625 			return -EINVAL;
6626 		refcount_inc(&iod->count);
6627 
6628 		__io_req_init_async(req);
6629 		get_cred(iod->creds);
6630 		req->work.identity = iod;
6631 		req->work.flags |= IO_WQ_WORK_CREDS;
6632 	}
6633 
6634 	/* same numerical values with corresponding REQ_F_*, safe to copy */
6635 	req->flags |= sqe_flags;
6636 
6637 	if (!io_op_defs[req->opcode].needs_file)
6638 		return 0;
6639 
6640 	ret = io_req_set_file(state, req, READ_ONCE(sqe->fd));
6641 	state->ios_left--;
6642 	return ret;
6643 }
6644 
6645 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
6646 {
6647 	struct io_submit_state state;
6648 	struct io_kiocb *link = NULL;
6649 	int i, submitted = 0;
6650 
6651 	/* if we have a backlog and couldn't flush it all, return BUSY */
6652 	if (test_bit(0, &ctx->sq_check_overflow)) {
6653 		if (!__io_cqring_overflow_flush(ctx, false, NULL, NULL))
6654 			return -EBUSY;
6655 	}
6656 
6657 	/* make sure SQ entry isn't read before tail */
6658 	nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
6659 
6660 	if (!percpu_ref_tryget_many(&ctx->refs, nr))
6661 		return -EAGAIN;
6662 
6663 	percpu_counter_add(&current->io_uring->inflight, nr);
6664 	refcount_add(nr, &current->usage);
6665 
6666 	io_submit_state_start(&state, ctx, nr);
6667 
6668 	for (i = 0; i < nr; i++) {
6669 		const struct io_uring_sqe *sqe;
6670 		struct io_kiocb *req;
6671 		int err;
6672 
6673 		sqe = io_get_sqe(ctx);
6674 		if (unlikely(!sqe)) {
6675 			io_consume_sqe(ctx);
6676 			break;
6677 		}
6678 		req = io_alloc_req(ctx, &state);
6679 		if (unlikely(!req)) {
6680 			if (!submitted)
6681 				submitted = -EAGAIN;
6682 			break;
6683 		}
6684 		io_consume_sqe(ctx);
6685 		/* will complete beyond this point, count as submitted */
6686 		submitted++;
6687 
6688 		err = io_init_req(ctx, req, sqe, &state);
6689 		if (unlikely(err)) {
6690 fail_req:
6691 			io_put_req(req);
6692 			io_req_complete(req, err);
6693 			break;
6694 		}
6695 
6696 		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
6697 						true, io_async_submit(ctx));
6698 		err = io_submit_sqe(req, sqe, &link, &state.comp);
6699 		if (err)
6700 			goto fail_req;
6701 	}
6702 
6703 	if (unlikely(submitted != nr)) {
6704 		int ref_used = (submitted == -EAGAIN) ? 0 : submitted;
6705 		struct io_uring_task *tctx = current->io_uring;
6706 		int unused = nr - ref_used;
6707 
6708 		percpu_ref_put_many(&ctx->refs, unused);
6709 		percpu_counter_sub(&tctx->inflight, unused);
6710 		put_task_struct_many(current, unused);
6711 	}
6712 	if (link)
6713 		io_queue_link_head(link, &state.comp);
6714 	io_submit_state_end(&state);
6715 
6716 	 /* Commit SQ ring head once we've consumed and submitted all SQEs */
6717 	io_commit_sqring(ctx);
6718 
6719 	return submitted;
6720 }
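/*
 * Worked example of the accounting above: with nr == 8 but request
 * allocation failing after 5 SQEs, the loop breaks with submitted == 5 and
 * the 3 unused ctx refs, inflight counts and task references that were
 * taken up front are handed back before returning.
 */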
6721 
6722 static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
6723 {
6724 	/* Tell userspace we may need a wakeup call */
6725 	spin_lock_irq(&ctx->completion_lock);
6726 	ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
6727 	spin_unlock_irq(&ctx->completion_lock);
6728 }
6729 
6730 static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
6731 {
6732 	spin_lock_irq(&ctx->completion_lock);
6733 	ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6734 	spin_unlock_irq(&ctx->completion_lock);
6735 }
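/*
 * These two helpers implement the SQPOLL wakeup handshake: the poll thread
 * sets IORING_SQ_NEED_WAKEUP before it sleeps (see io_sq_thread() below),
 * and an application that observes the flag is expected to call
 * io_uring_enter() with IORING_ENTER_SQ_WAKEUP to wake the thread back up.
 */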
6736 
6737 static int io_sq_wake_function(struct wait_queue_entry *wqe, unsigned mode,
6738 			       int sync, void *key)
6739 {
6740 	struct io_ring_ctx *ctx = container_of(wqe, struct io_ring_ctx, sqo_wait_entry);
6741 	int ret;
6742 
6743 	ret = autoremove_wake_function(wqe, mode, sync, key);
6744 	if (ret) {
6745 		unsigned long flags;
6746 
6747 		spin_lock_irqsave(&ctx->completion_lock, flags);
6748 		ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
6749 		spin_unlock_irqrestore(&ctx->completion_lock, flags);
6750 	}
6751 	return ret;
6752 }
6753 
6754 enum sq_ret {
6755 	SQT_IDLE	= 1,
6756 	SQT_SPIN	= 2,
6757 	SQT_DID_WORK	= 4,
6758 };
6759 
6760 static enum sq_ret __io_sq_thread(struct io_ring_ctx *ctx,
6761 				  unsigned long start_jiffies, bool cap_entries)
6762 {
6763 	unsigned long timeout = start_jiffies + ctx->sq_thread_idle;
6764 	struct io_sq_data *sqd = ctx->sq_data;
6765 	unsigned int to_submit;
6766 	int ret = 0;
6767 
6768 again:
6769 	if (!list_empty(&ctx->iopoll_list)) {
6770 		unsigned nr_events = 0;
6771 
6772 		mutex_lock(&ctx->uring_lock);
6773 		if (!list_empty(&ctx->iopoll_list) && !need_resched())
6774 			io_do_iopoll(ctx, &nr_events, 0);
6775 		mutex_unlock(&ctx->uring_lock);
6776 	}
6777 
6778 	to_submit = io_sqring_entries(ctx);
6779 
6780 	/*
6781 	 * If submit got -EBUSY, flag us as needing the application
6782 	 * to enter the kernel to reap and flush events.
6783 	 */
6784 	if (!to_submit || ret == -EBUSY || need_resched()) {
6785 		/*
6786 		 * Drop cur_mm before scheduling, we can't hold it for
6787 		 * long periods (or over schedule()). Do this before
6788 		 * adding ourselves to the waitqueue, as the unuse/drop
6789 		 * may sleep.
6790 		 */
6791 		io_sq_thread_drop_mm();
6792 
6793 		/*
6794 		 * We're polling. If we're within the defined idle
6795 		 * period, then let us spin without work before going
6796 		 * to sleep. The exception is if we got EBUSY doing
6797 		 * more IO, we should wait for the application to
6798 		 * reap events and wake us up.
6799 		 */
6800 		if (!list_empty(&ctx->iopoll_list) || need_resched() ||
6801 		    (!time_after(jiffies, timeout) && ret != -EBUSY &&
6802 		    !percpu_ref_is_dying(&ctx->refs)))
6803 			return SQT_SPIN;
6804 
6805 		prepare_to_wait(&sqd->wait, &ctx->sqo_wait_entry,
6806 					TASK_INTERRUPTIBLE);
6807 
6808 		/*
6809 		 * While doing polled IO, we need to check iopoll_list for
6810 		 * newly added reqs before going to sleep: reqs may have
6811 		 * been punted to an io worker and only get added to
6812 		 * iopoll_list later, so re-check the list here to avoid
6813 		 * missing them.
6814 		 */
6815 		if ((ctx->flags & IORING_SETUP_IOPOLL) &&
6816 		    !list_empty_careful(&ctx->iopoll_list)) {
6817 			finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
6818 			goto again;
6819 		}
6820 
6821 		to_submit = io_sqring_entries(ctx);
6822 		if (!to_submit || ret == -EBUSY)
6823 			return SQT_IDLE;
6824 	}
6825 
6826 	finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
6827 	io_ring_clear_wakeup_flag(ctx);
6828 
6829 	/* if we're handling multiple rings, cap submit size for fairness */
6830 	if (cap_entries && to_submit > 8)
6831 		to_submit = 8;
6832 
6833 	mutex_lock(&ctx->uring_lock);
6834 	if (likely(!percpu_ref_is_dying(&ctx->refs) && !ctx->sqo_dead))
6835 		ret = io_submit_sqes(ctx, to_submit);
6836 	mutex_unlock(&ctx->uring_lock);
6837 
6838 	if (!io_sqring_full(ctx) && wq_has_sleeper(&ctx->sqo_sq_wait))
6839 		wake_up(&ctx->sqo_sq_wait);
6840 
6841 	return SQT_DID_WORK;
6842 }
6843 
6844 static void io_sqd_init_new(struct io_sq_data *sqd)
6845 {
6846 	struct io_ring_ctx *ctx;
6847 
6848 	while (!list_empty(&sqd->ctx_new_list)) {
6849 		ctx = list_first_entry(&sqd->ctx_new_list, struct io_ring_ctx, sqd_list);
6850 		init_wait(&ctx->sqo_wait_entry);
6851 		ctx->sqo_wait_entry.func = io_sq_wake_function;
6852 		list_move_tail(&ctx->sqd_list, &sqd->ctx_list);
6853 		complete(&ctx->sq_thread_comp);
6854 	}
6855 }
6856 
6857 static int io_sq_thread(void *data)
6858 {
6859 	struct cgroup_subsys_state *cur_css = NULL;
6860 	const struct cred *old_cred = NULL;
6861 	struct io_sq_data *sqd = data;
6862 	struct io_ring_ctx *ctx;
6863 	unsigned long start_jiffies;
6864 
6865 	start_jiffies = jiffies;
6866 	while (!kthread_should_stop()) {
6867 		enum sq_ret ret = 0;
6868 		bool cap_entries;
6869 
6870 		/*
6871 		 * Any changes to the sqd lists are synchronized through the
6872 		 * kthread parking. This synchronizes the thread vs users,
6873 		 * the users are synchronized on the sqd->ctx_lock.
6874 		 */
6875 		if (kthread_should_park()) {
6876 			kthread_parkme();
6877 			/*
6878 			 * Once the sq thread is unparked, the previous park may have
6879 			 * come from io_put_sq_data(), which means the thread is about
6880 			 * to be stopped, so check for that here.
6881 			 */
6882 			if (kthread_should_stop())
6883 				break;
6884 		}
6885 
6886 		if (unlikely(!list_empty(&sqd->ctx_new_list)))
6887 			io_sqd_init_new(sqd);
6888 
6889 		cap_entries = !list_is_singular(&sqd->ctx_list);
6890 
6891 		list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) {
6892 			if (current->cred != ctx->creds) {
6893 				if (old_cred)
6894 					revert_creds(old_cred);
6895 				old_cred = override_creds(ctx->creds);
6896 			}
6897 			io_sq_thread_associate_blkcg(ctx, &cur_css);
6898 #ifdef CONFIG_AUDIT
6899 			current->loginuid = ctx->loginuid;
6900 			current->sessionid = ctx->sessionid;
6901 #endif
6902 
6903 			ret |= __io_sq_thread(ctx, start_jiffies, cap_entries);
6904 
6905 			io_sq_thread_drop_mm();
6906 		}
6907 
6908 		if (ret & SQT_SPIN) {
6909 			io_run_task_work();
6910 			io_sq_thread_drop_mm();
6911 			cond_resched();
6912 		} else if (ret == SQT_IDLE) {
6913 			if (kthread_should_park())
6914 				continue;
6915 			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6916 				io_ring_set_wakeup_flag(ctx);
6917 			schedule();
6918 			start_jiffies = jiffies;
6919 			list_for_each_entry(ctx, &sqd->ctx_list, sqd_list)
6920 				io_ring_clear_wakeup_flag(ctx);
6921 		}
6922 	}
6923 
6924 	io_run_task_work();
6925 	io_sq_thread_drop_mm();
6926 
6927 	if (cur_css)
6928 		io_sq_thread_unassociate_blkcg();
6929 	if (old_cred)
6930 		revert_creds(old_cred);
6931 
6932 	kthread_parkme();
6933 
6934 	return 0;
6935 }
6936 
6937 struct io_wait_queue {
6938 	struct wait_queue_entry wq;
6939 	struct io_ring_ctx *ctx;
6940 	unsigned to_wait;
6941 	unsigned nr_timeouts;
6942 };
6943 
6944 static inline bool io_should_wake(struct io_wait_queue *iowq)
6945 {
6946 	struct io_ring_ctx *ctx = iowq->ctx;
6947 
6948 	/*
6949 	 * Wake up if we have enough events, or if a timeout occurred since we
6950 	 * started waiting. For timeouts, we always want to return to userspace,
6951 	 * regardless of event count.
6952 	 */
6953 	return io_cqring_events(ctx) >= iowq->to_wait ||
6954 			atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
6955 }
6956 
6957 static int io_wake_function(struct wait_queue_entry *curr, unsigned int mode,
6958 			    int wake_flags, void *key)
6959 {
6960 	struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue,
6961 							wq);
6962 
6963 	/*
6964 	 * Cannot safely flush overflowed CQEs from here, ensure we wake up
6965 	 * the task, and the next invocation will do it.
6966 	 */
6967 	if (io_should_wake(iowq) || test_bit(0, &iowq->ctx->cq_check_overflow))
6968 		return autoremove_wake_function(curr, mode, wake_flags, key);
6969 	return -1;
6970 }
6971 
6972 static int io_run_task_work_sig(void)
6973 {
6974 	if (io_run_task_work())
6975 		return 1;
6976 	if (!signal_pending(current))
6977 		return 0;
6978 	if (current->jobctl & JOBCTL_TASK_WORK) {
6979 		spin_lock_irq(&current->sighand->siglock);
6980 		current->jobctl &= ~JOBCTL_TASK_WORK;
6981 		recalc_sigpending();
6982 		spin_unlock_irq(&current->sighand->siglock);
6983 		return 1;
6984 	}
6985 	return -EINTR;
6986 }
6987 
6988 /*
6989  * Wait until events become available, if we don't already have some. The
6990  * application must reap them itself, as they reside on the shared cq ring.
6991  */
6992 static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
6993 			  const sigset_t __user *sig, size_t sigsz)
6994 {
6995 	struct io_wait_queue iowq = {
6996 		.wq = {
6997 			.private	= current,
6998 			.func		= io_wake_function,
6999 			.entry		= LIST_HEAD_INIT(iowq.wq.entry),
7000 		},
7001 		.ctx		= ctx,
7002 		.to_wait	= min_events,
7003 	};
7004 	struct io_rings *rings = ctx->rings;
7005 	int ret = 0;
7006 
7007 	do {
7008 		io_cqring_overflow_flush(ctx, false, NULL, NULL);
7009 		if (io_cqring_events(ctx) >= min_events)
7010 			return 0;
7011 		if (!io_run_task_work())
7012 			break;
7013 	} while (1);
7014 
7015 	if (sig) {
7016 #ifdef CONFIG_COMPAT
7017 		if (in_compat_syscall())
7018 			ret = set_compat_user_sigmask((const compat_sigset_t __user *)sig,
7019 						      sigsz);
7020 		else
7021 #endif
7022 			ret = set_user_sigmask(sig, sigsz);
7023 
7024 		if (ret)
7025 			return ret;
7026 	}
7027 
7028 	iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
7029 	trace_io_uring_cqring_wait(ctx, min_events);
7030 	do {
7031 		io_cqring_overflow_flush(ctx, false, NULL, NULL);
7032 		prepare_to_wait_exclusive(&ctx->wait, &iowq.wq,
7033 						TASK_INTERRUPTIBLE);
7034 		/* make sure we run task_work before checking for signals */
7035 		ret = io_run_task_work_sig();
7036 		if (ret > 0) {
7037 			finish_wait(&ctx->wait, &iowq.wq);
7038 			continue;
7039 		}
7040 		else if (ret < 0)
7041 			break;
7042 		if (io_should_wake(&iowq))
7043 			break;
7044 		if (test_bit(0, &ctx->cq_check_overflow)) {
7045 			finish_wait(&ctx->wait, &iowq.wq);
7046 			continue;
7047 		}
7048 		schedule();
7049 	} while (1);
7050 	finish_wait(&ctx->wait, &iowq.wq);
7051 
7052 	restore_saved_sigmask_unless(ret == -EINTR);
7053 
7054 	return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
7055 }
7056 
7057 static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
7058 {
7059 #if defined(CONFIG_UNIX)
7060 	if (ctx->ring_sock) {
7061 		struct sock *sock = ctx->ring_sock->sk;
7062 		struct sk_buff *skb;
7063 
7064 		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
7065 			kfree_skb(skb);
7066 	}
7067 #else
7068 	int i;
7069 
7070 	for (i = 0; i < ctx->nr_user_files; i++) {
7071 		struct file *file;
7072 
7073 		file = io_file_from_index(ctx, i);
7074 		if (file)
7075 			fput(file);
7076 	}
7077 #endif
7078 }
7079 
7080 static void io_file_ref_kill(struct percpu_ref *ref)
7081 {
7082 	struct fixed_file_data *data;
7083 
7084 	data = container_of(ref, struct fixed_file_data, refs);
7085 	complete(&data->done);
7086 }
7087 
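/*
 * Install @ref_node as the current ref node for @file_data, and grab a
 * reference on the data that is dropped once the node's files have been put.
 */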
7088 static void io_sqe_files_set_node(struct fixed_file_data *file_data,
7089 				  struct fixed_file_ref_node *ref_node)
7090 {
7091 	spin_lock_bh(&file_data->lock);
7092 	file_data->node = ref_node;
7093 	list_add_tail(&ref_node->node, &file_data->ref_list);
7094 	spin_unlock_bh(&file_data->lock);
7095 	percpu_ref_get(&file_data->refs);
7096 }
7097 
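/*
 * Unregister the fixed file set: kill the active ref node and the data refs,
 * wait for outstanding file references to drain (running task_work so we
 * don't block our own completions), then free the file tables.
 */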
7098 static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
7099 {
7100 	struct fixed_file_data *data = ctx->file_data;
7101 	struct fixed_file_ref_node *backup_node, *ref_node = NULL;
7102 	unsigned nr_tables, i;
7103 	int ret;
7104 
7105 	if (!data)
7106 		return -ENXIO;
7107 	backup_node = alloc_fixed_file_ref_node(ctx);
7108 	if (!backup_node)
7109 		return -ENOMEM;
7110 
7111 	spin_lock_bh(&data->lock);
7112 	ref_node = data->node;
7113 	spin_unlock_bh(&data->lock);
7114 	if (ref_node)
7115 		percpu_ref_kill(&ref_node->refs);
7116 
7117 	percpu_ref_kill(&data->refs);
7118 
7119 	/* wait for all ref nodes to complete */
7120 	flush_delayed_work(&ctx->file_put_work);
7121 	do {
7122 		ret = wait_for_completion_interruptible(&data->done);
7123 		if (!ret)
7124 			break;
7125 		ret = io_run_task_work_sig();
7126 		if (ret < 0) {
7127 			percpu_ref_resurrect(&data->refs);
7128 			reinit_completion(&data->done);
7129 			io_sqe_files_set_node(data, backup_node);
7130 			return ret;
7131 		}
7132 	} while (1);
7133 
7134 	__io_sqe_files_unregister(ctx);
7135 	nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
7136 	for (i = 0; i < nr_tables; i++)
7137 		kfree(data->table[i].files);
7138 	kfree(data->table);
7139 	percpu_ref_exit(&data->refs);
7140 	kfree(data);
7141 	ctx->file_data = NULL;
7142 	ctx->nr_user_files = 0;
7143 	destroy_fixed_file_ref_node(backup_node);
7144 	return 0;
7145 }
7146 
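/* Drop a reference on the shared SQPOLL data; the final put stops the thread. */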
7147 static void io_put_sq_data(struct io_sq_data *sqd)
7148 {
7149 	if (refcount_dec_and_test(&sqd->refs)) {
7150 		/*
7151 		 * The park is a bit of a work-around; without it we get a
7152 		 * spew of warnings on shutdown with SQPOLL set and affinity
7153 		 * set to a single CPU.
7154 		 */
7155 		if (sqd->thread) {
7156 			kthread_park(sqd->thread);
7157 			kthread_stop(sqd->thread);
7158 		}
7159 
7160 		kfree(sqd);
7161 	}
7162 }
7163 
7164 static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p)
7165 {
7166 	struct io_ring_ctx *ctx_attach;
7167 	struct io_sq_data *sqd;
7168 	struct fd f;
7169 
7170 	f = fdget(p->wq_fd);
7171 	if (!f.file)
7172 		return ERR_PTR(-ENXIO);
7173 	if (f.file->f_op != &io_uring_fops) {
7174 		fdput(f);
7175 		return ERR_PTR(-EINVAL);
7176 	}
7177 
7178 	ctx_attach = f.file->private_data;
7179 	sqd = ctx_attach->sq_data;
7180 	if (!sqd) {
7181 		fdput(f);
7182 		return ERR_PTR(-EINVAL);
7183 	}
7184 
7185 	refcount_inc(&sqd->refs);
7186 	fdput(f);
7187 	return sqd;
7188 }
7189 
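/*
 * Find or allocate the SQPOLL data for this ring. With
 * IORING_SETUP_ATTACH_WQ we share the sq_data of the ring referenced by
 * p->wq_fd, otherwise a new one is allocated.
 */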
7190 static struct io_sq_data *io_get_sq_data(struct io_uring_params *p)
7191 {
7192 	struct io_sq_data *sqd;
7193 
7194 	if (p->flags & IORING_SETUP_ATTACH_WQ)
7195 		return io_attach_sq_data(p);
7196 
7197 	sqd = kzalloc(sizeof(*sqd), GFP_KERNEL);
7198 	if (!sqd)
7199 		return ERR_PTR(-ENOMEM);
7200 
7201 	refcount_set(&sqd->refs, 1);
7202 	INIT_LIST_HEAD(&sqd->ctx_list);
7203 	INIT_LIST_HEAD(&sqd->ctx_new_list);
7204 	mutex_init(&sqd->ctx_lock);
7205 	mutex_init(&sqd->lock);
7206 	init_waitqueue_head(&sqd->wait);
7207 	return sqd;
7208 }
7209 
7210 static void io_sq_thread_unpark(struct io_sq_data *sqd)
7211 	__releases(&sqd->lock)
7212 {
7213 	if (!sqd->thread)
7214 		return;
7215 	kthread_unpark(sqd->thread);
7216 	mutex_unlock(&sqd->lock);
7217 }
7218 
7219 static void io_sq_thread_park(struct io_sq_data *sqd)
7220 	__acquires(&sqd->lock)
7221 {
7222 	if (!sqd->thread)
7223 		return;
7224 	mutex_lock(&sqd->lock);
7225 	kthread_park(sqd->thread);
7226 }
7227 
7228 static void io_sq_thread_stop(struct io_ring_ctx *ctx)
7229 {
7230 	struct io_sq_data *sqd = ctx->sq_data;
7231 
7232 	if (sqd) {
7233 		if (sqd->thread) {
7234 			/*
7235 			 * We may arrive here from the error branch in
7236 			 * io_sq_offload_create() where the kthread is created
7237 			 * without being woken up, so wake it up now to make
7238 			 * sure the wait will complete.
7239 			 */
7240 			wake_up_process(sqd->thread);
7241 			wait_for_completion(&ctx->sq_thread_comp);
7242 
7243 			io_sq_thread_park(sqd);
7244 		}
7245 
7246 		mutex_lock(&sqd->ctx_lock);
7247 		list_del(&ctx->sqd_list);
7248 		mutex_unlock(&sqd->ctx_lock);
7249 
7250 		if (sqd->thread) {
7251 			finish_wait(&sqd->wait, &ctx->sqo_wait_entry);
7252 			io_sq_thread_unpark(sqd);
7253 		}
7254 
7255 		io_put_sq_data(sqd);
7256 		ctx->sq_data = NULL;
7257 	}
7258 }
7259 
7260 static void io_finish_async(struct io_ring_ctx *ctx)
7261 {
7262 	io_sq_thread_stop(ctx);
7263 
7264 	if (ctx->io_wq) {
7265 		io_wq_destroy(ctx->io_wq);
7266 		ctx->io_wq = NULL;
7267 	}
7268 }
7269 
7270 #if defined(CONFIG_UNIX)
7271 /*
7272  * Ensure the UNIX gc is aware of our file set, so we are certain that
7273  * the io_uring can be safely unregistered on process exit, even if we have
7274  * loops in the file referencing.
7275  */
7276 static int __io_sqe_files_scm(struct io_ring_ctx *ctx, int nr, int offset)
7277 {
7278 	struct sock *sk = ctx->ring_sock->sk;
7279 	struct scm_fp_list *fpl;
7280 	struct sk_buff *skb;
7281 	int i, nr_files;
7282 
7283 	fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
7284 	if (!fpl)
7285 		return -ENOMEM;
7286 
7287 	skb = alloc_skb(0, GFP_KERNEL);
7288 	if (!skb) {
7289 		kfree(fpl);
7290 		return -ENOMEM;
7291 	}
7292 
7293 	skb->sk = sk;
7294 	skb->scm_io_uring = 1;
7295 
7296 	nr_files = 0;
7297 	fpl->user = get_uid(ctx->user);
7298 	for (i = 0; i < nr; i++) {
7299 		struct file *file = io_file_from_index(ctx, i + offset);
7300 
7301 		if (!file)
7302 			continue;
7303 		fpl->fp[nr_files] = get_file(file);
7304 		unix_inflight(fpl->user, fpl->fp[nr_files]);
7305 		nr_files++;
7306 	}
7307 
7308 	if (nr_files) {
7309 		fpl->max = SCM_MAX_FD;
7310 		fpl->count = nr_files;
7311 		UNIXCB(skb).fp = fpl;
7312 		skb->destructor = unix_destruct_scm;
7313 		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
7314 		skb_queue_head(&sk->sk_receive_queue, skb);
7315 
7316 		for (i = 0; i < nr_files; i++)
7317 			fput(fpl->fp[i]);
7318 	} else {
7319 		kfree_skb(skb);
7320 		kfree(fpl);
7321 	}
7322 
7323 	return 0;
7324 }
7325 
7326 /*
7327  * If UNIX sockets are enabled, fd passing can cause a reference cycle which
7328  * causes regular reference counting to break down. We rely on the UNIX
7329  * garbage collection to take care of this problem for us.
7330  */
7331 static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7332 {
7333 	unsigned left, total;
7334 	int ret = 0;
7335 
7336 	total = 0;
7337 	left = ctx->nr_user_files;
7338 	while (left) {
7339 		unsigned this_files = min_t(unsigned, left, SCM_MAX_FD);
7340 
7341 		ret = __io_sqe_files_scm(ctx, this_files, total);
7342 		if (ret)
7343 			break;
7344 		left -= this_files;
7345 		total += this_files;
7346 	}
7347 
7348 	if (!ret)
7349 		return 0;
7350 
7351 	while (total < ctx->nr_user_files) {
7352 		struct file *file = io_file_from_index(ctx, total);
7353 
7354 		if (file)
7355 			fput(file);
7356 		total++;
7357 	}
7358 
7359 	return ret;
7360 }
7361 #else
7362 static int io_sqe_files_scm(struct io_ring_ctx *ctx)
7363 {
7364 	return 0;
7365 }
7366 #endif
7367 
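/*
 * Allocate the per-table file pointer arrays. Returns 0 on success; on
 * failure, any partially allocated tables are freed and non-zero is returned.
 */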
7368 static int io_sqe_alloc_file_tables(struct fixed_file_data *file_data,
7369 				    unsigned nr_tables, unsigned nr_files)
7370 {
7371 	int i;
7372 
7373 	for (i = 0; i < nr_tables; i++) {
7374 		struct fixed_file_table *table = &file_data->table[i];
7375 		unsigned this_files;
7376 
7377 		this_files = min(nr_files, IORING_MAX_FILES_TABLE);
7378 		table->files = kcalloc(this_files, sizeof(struct file *),
7379 					GFP_KERNEL_ACCOUNT);
7380 		if (!table->files)
7381 			break;
7382 		nr_files -= this_files;
7383 	}
7384 
7385 	if (i == nr_tables)
7386 		return 0;
7387 
7388 	for (i = 0; i < nr_tables; i++) {
7389 		struct fixed_file_table *table = &file_data->table[i];
7390 		kfree(table->files);
7391 	}
7392 	return 1;
7393 }
7394 
7395 static void io_ring_file_put(struct io_ring_ctx *ctx, struct file *file)
7396 {
7397 #if defined(CONFIG_UNIX)
7398 	struct sock *sock = ctx->ring_sock->sk;
7399 	struct sk_buff_head list, *head = &sock->sk_receive_queue;
7400 	struct sk_buff *skb;
7401 	int i;
7402 
7403 	__skb_queue_head_init(&list);
7404 
7405 	/*
7406 	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
7407 	 * remove this entry and rearrange the file array.
7408 	 */
7409 	skb = skb_dequeue(head);
7410 	while (skb) {
7411 		struct scm_fp_list *fp;
7412 
7413 		fp = UNIXCB(skb).fp;
7414 		for (i = 0; i < fp->count; i++) {
7415 			int left;
7416 
7417 			if (fp->fp[i] != file)
7418 				continue;
7419 
7420 			unix_notinflight(fp->user, fp->fp[i]);
7421 			left = fp->count - 1 - i;
7422 			if (left) {
7423 				memmove(&fp->fp[i], &fp->fp[i + 1],
7424 						left * sizeof(struct file *));
7425 			}
7426 			fp->count--;
7427 			if (!fp->count) {
7428 				kfree_skb(skb);
7429 				skb = NULL;
7430 			} else {
7431 				__skb_queue_tail(&list, skb);
7432 			}
7433 			fput(file);
7434 			file = NULL;
7435 			break;
7436 		}
7437 
7438 		if (!file)
7439 			break;
7440 
7441 		__skb_queue_tail(&list, skb);
7442 
7443 		skb = skb_dequeue(head);
7444 	}
7445 
7446 	if (skb_peek(&list)) {
7447 		spin_lock_irq(&head->lock);
7448 		while ((skb = __skb_dequeue(&list)) != NULL)
7449 			__skb_queue_tail(head, skb);
7450 		spin_unlock_irq(&head->lock);
7451 	}
7452 #else
7453 	fput(file);
7454 #endif
7455 }
7456 
7457 struct io_file_put {
7458 	struct list_head list;
7459 	struct file *file;
7460 };
7461 
7462 static void __io_file_put_work(struct fixed_file_ref_node *ref_node)
7463 {
7464 	struct fixed_file_data *file_data = ref_node->file_data;
7465 	struct io_ring_ctx *ctx = file_data->ctx;
7466 	struct io_file_put *pfile, *tmp;
7467 
7468 	list_for_each_entry_safe(pfile, tmp, &ref_node->file_list, list) {
7469 		list_del(&pfile->list);
7470 		io_ring_file_put(ctx, pfile->file);
7471 		kfree(pfile);
7472 	}
7473 
7474 	percpu_ref_exit(&ref_node->refs);
7475 	kfree(ref_node);
7476 	percpu_ref_put(&file_data->refs);
7477 }
7478 
7479 static void io_file_put_work(struct work_struct *work)
7480 {
7481 	struct io_ring_ctx *ctx;
7482 	struct llist_node *node;
7483 
7484 	ctx = container_of(work, struct io_ring_ctx, file_put_work.work);
7485 	node = llist_del_all(&ctx->file_put_llist);
7486 
7487 	while (node) {
7488 		struct fixed_file_ref_node *ref_node;
7489 		struct llist_node *next = node->next;
7490 
7491 		ref_node = llist_entry(node, struct fixed_file_ref_node, llist);
7492 		__io_file_put_work(ref_node);
7493 		node = next;
7494 	}
7495 }
7496 
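/*
 * Called when a fixed file ref node's refcount drops to zero. Nodes are
 * retired in order: completed nodes at the head of the ref list are moved to
 * the ctx put-list, and the put work is scheduled (immediately if the whole
 * file set is being torn down, otherwise delayed to batch the frees).
 */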
7497 static void io_file_data_ref_zero(struct percpu_ref *ref)
7498 {
7499 	struct fixed_file_ref_node *ref_node;
7500 	struct fixed_file_data *data;
7501 	struct io_ring_ctx *ctx;
7502 	bool first_add = false;
7503 	int delay = HZ;
7504 
7505 	ref_node = container_of(ref, struct fixed_file_ref_node, refs);
7506 	data = ref_node->file_data;
7507 	ctx = data->ctx;
7508 
7509 	spin_lock_bh(&data->lock);
7510 	ref_node->done = true;
7511 
7512 	while (!list_empty(&data->ref_list)) {
7513 		ref_node = list_first_entry(&data->ref_list,
7514 					struct fixed_file_ref_node, node);
7515 		/* recycle ref nodes in order */
7516 		if (!ref_node->done)
7517 			break;
7518 		list_del(&ref_node->node);
7519 		first_add |= llist_add(&ref_node->llist, &ctx->file_put_llist);
7520 	}
7521 	spin_unlock_bh(&data->lock);
7522 
7523 	if (percpu_ref_is_dying(&data->refs))
7524 		delay = 0;
7525 
7526 	if (!delay)
7527 		mod_delayed_work(system_wq, &ctx->file_put_work, 0);
7528 	else if (first_add)
7529 		queue_delayed_work(system_wq, &ctx->file_put_work, delay);
7530 }
7531 
7532 static struct fixed_file_ref_node *alloc_fixed_file_ref_node(
7533 			struct io_ring_ctx *ctx)
7534 {
7535 	struct fixed_file_ref_node *ref_node;
7536 
7537 	ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
7538 	if (!ref_node)
7539 		return NULL;
7540 
7541 	if (percpu_ref_init(&ref_node->refs, io_file_data_ref_zero,
7542 			    0, GFP_KERNEL)) {
7543 		kfree(ref_node);
7544 		return NULL;
7545 	}
7546 	INIT_LIST_HEAD(&ref_node->node);
7547 	INIT_LIST_HEAD(&ref_node->file_list);
7548 	ref_node->file_data = ctx->file_data;
7549 	ref_node->done = false;
7550 	return ref_node;
7551 }
7552 
7553 static void destroy_fixed_file_ref_node(struct fixed_file_ref_node *ref_node)
7554 {
7555 	percpu_ref_exit(&ref_node->refs);
7556 	kfree(ref_node);
7557 }
7558 
7559 static int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
7560 				 unsigned nr_args)
7561 {
7562 	__s32 __user *fds = (__s32 __user *) arg;
7563 	unsigned nr_tables, i;
7564 	struct file *file;
7565 	int fd, ret = -ENOMEM;
7566 	struct fixed_file_ref_node *ref_node;
7567 	struct fixed_file_data *file_data;
7568 
7569 	if (ctx->file_data)
7570 		return -EBUSY;
7571 	if (!nr_args)
7572 		return -EINVAL;
7573 	if (nr_args > IORING_MAX_FIXED_FILES)
7574 		return -EMFILE;
7575 	if (nr_args > rlimit(RLIMIT_NOFILE))
7576 		return -EMFILE;
7577 
7578 	file_data = kzalloc(sizeof(*ctx->file_data), GFP_KERNEL_ACCOUNT);
7579 	if (!file_data)
7580 		return -ENOMEM;
7581 	file_data->ctx = ctx;
7582 	init_completion(&file_data->done);
7583 	INIT_LIST_HEAD(&file_data->ref_list);
7584 	spin_lock_init(&file_data->lock);
7585 
7586 	nr_tables = DIV_ROUND_UP(nr_args, IORING_MAX_FILES_TABLE);
7587 	file_data->table = kcalloc(nr_tables, sizeof(*file_data->table),
7588 				   GFP_KERNEL_ACCOUNT);
7589 	if (!file_data->table)
7590 		goto out_free;
7591 
7592 	if (percpu_ref_init(&file_data->refs, io_file_ref_kill,
7593 				PERCPU_REF_ALLOW_REINIT, GFP_KERNEL))
7594 		goto out_free;
7595 
7596 	if (io_sqe_alloc_file_tables(file_data, nr_tables, nr_args))
7597 		goto out_ref;
7598 	ctx->file_data = file_data;
7599 
7600 	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
7601 		struct fixed_file_table *table;
7602 		unsigned index;
7603 
7604 		if (copy_from_user(&fd, &fds[i], sizeof(fd))) {
7605 			ret = -EFAULT;
7606 			goto out_fput;
7607 		}
7608 		/* allow sparse sets */
7609 		if (fd == -1)
7610 			continue;
7611 
7612 		file = fget(fd);
7613 		ret = -EBADF;
7614 		if (!file)
7615 			goto out_fput;
7616 
7617 		/*
7618 		 * Don't allow io_uring instances to be registered. If UNIX
7619 		 * isn't enabled, then this causes a reference cycle and this
7620 		 * instance can never get freed. If UNIX is enabled we'll
7621 		 * handle it just fine, but there's still no point in allowing
7622 		 * a ring fd as it doesn't support regular read/write anyway.
7623 		 */
7624 		if (file->f_op == &io_uring_fops) {
7625 			fput(file);
7626 			goto out_fput;
7627 		}
7628 		table = &file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7629 		index = i & IORING_FILE_TABLE_MASK;
7630 		table->files[index] = file;
7631 	}
7632 
7633 	ret = io_sqe_files_scm(ctx);
7634 	if (ret) {
7635 		io_sqe_files_unregister(ctx);
7636 		return ret;
7637 	}
7638 
7639 	ref_node = alloc_fixed_file_ref_node(ctx);
7640 	if (!ref_node) {
7641 		io_sqe_files_unregister(ctx);
7642 		return -ENOMEM;
7643 	}
7644 
7645 	io_sqe_files_set_node(file_data, ref_node);
7646 	return ret;
7647 out_fput:
7648 	for (i = 0; i < ctx->nr_user_files; i++) {
7649 		file = io_file_from_index(ctx, i);
7650 		if (file)
7651 			fput(file);
7652 	}
7653 	for (i = 0; i < nr_tables; i++)
7654 		kfree(file_data->table[i].files);
7655 	ctx->nr_user_files = 0;
7656 out_ref:
7657 	percpu_ref_exit(&file_data->refs);
7658 out_free:
7659 	kfree(file_data->table);
7660 	kfree(file_data);
7661 	ctx->file_data = NULL;
7662 	return ret;
7663 }
7664 
7665 static int io_sqe_file_register(struct io_ring_ctx *ctx, struct file *file,
7666 				int index)
7667 {
7668 #if defined(CONFIG_UNIX)
7669 	struct sock *sock = ctx->ring_sock->sk;
7670 	struct sk_buff_head *head = &sock->sk_receive_queue;
7671 	struct sk_buff *skb;
7672 
7673 	/*
7674 	 * See if we can merge this file into an existing skb SCM_RIGHTS
7675 	 * file set. If there's no room, fall back to allocating a new skb
7676 	 * and filling it in.
7677 	 */
7678 	spin_lock_irq(&head->lock);
7679 	skb = skb_peek(head);
7680 	if (skb) {
7681 		struct scm_fp_list *fpl = UNIXCB(skb).fp;
7682 
7683 		if (fpl->count < SCM_MAX_FD) {
7684 			__skb_unlink(skb, head);
7685 			spin_unlock_irq(&head->lock);
7686 			fpl->fp[fpl->count] = get_file(file);
7687 			unix_inflight(fpl->user, fpl->fp[fpl->count]);
7688 			fpl->count++;
7689 			spin_lock_irq(&head->lock);
7690 			__skb_queue_head(head, skb);
7691 		} else {
7692 			skb = NULL;
7693 		}
7694 	}
7695 	spin_unlock_irq(&head->lock);
7696 
7697 	if (skb) {
7698 		fput(file);
7699 		return 0;
7700 	}
7701 
7702 	return __io_sqe_files_scm(ctx, 1, index);
7703 #else
7704 	return 0;
7705 #endif
7706 }
7707 
7708 static int io_queue_file_removal(struct fixed_file_data *data,
7709 				 struct file *file)
7710 {
7711 	struct io_file_put *pfile;
7712 	struct fixed_file_ref_node *ref_node = data->node;
7713 
7714 	pfile = kzalloc(sizeof(*pfile), GFP_KERNEL);
7715 	if (!pfile)
7716 		return -ENOMEM;
7717 
7718 	pfile->file = file;
7719 	list_add(&pfile->list, &ref_node->file_list);
7720 
7721 	return 0;
7722 }
7723 
7724 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
7725 				 struct io_uring_files_update *up,
7726 				 unsigned nr_args)
7727 {
7728 	struct fixed_file_data *data = ctx->file_data;
7729 	struct fixed_file_ref_node *ref_node;
7730 	struct file *file;
7731 	__s32 __user *fds;
7732 	int fd, i, err;
7733 	__u32 done;
7734 	bool needs_switch = false;
7735 
7736 	if (check_add_overflow(up->offset, nr_args, &done))
7737 		return -EOVERFLOW;
7738 	if (done > ctx->nr_user_files)
7739 		return -EINVAL;
7740 
7741 	ref_node = alloc_fixed_file_ref_node(ctx);
7742 	if (!ref_node)
7743 		return -ENOMEM;
7744 
7745 	done = 0;
7746 	fds = u64_to_user_ptr(up->fds);
7747 	while (nr_args) {
7748 		struct fixed_file_table *table;
7749 		unsigned index;
7750 
7751 		err = 0;
7752 		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
7753 			err = -EFAULT;
7754 			break;
7755 		}
7756 		i = array_index_nospec(up->offset, ctx->nr_user_files);
7757 		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
7758 		index = i & IORING_FILE_TABLE_MASK;
7759 		if (table->files[index]) {
7760 			file = table->files[index];
7761 			err = io_queue_file_removal(data, file);
7762 			if (err)
7763 				break;
7764 			table->files[index] = NULL;
7765 			needs_switch = true;
7766 		}
7767 		if (fd != -1) {
7768 			file = fget(fd);
7769 			if (!file) {
7770 				err = -EBADF;
7771 				break;
7772 			}
7773 			/*
7774 			 * Don't allow io_uring instances to be registered. If
7775 			 * UNIX isn't enabled, then this causes a reference
7776 			 * cycle and this instance can never get freed. If UNIX
7777 			 * is enabled we'll handle it just fine, but there's
7778 			 * still no point in allowing a ring fd as it doesn't
7779 			 * support regular read/write anyway.
7780 			 */
7781 			if (file->f_op == &io_uring_fops) {
7782 				fput(file);
7783 				err = -EBADF;
7784 				break;
7785 			}
7786 			table->files[index] = file;
7787 			err = io_sqe_file_register(ctx, file, i);
7788 			if (err) {
7789 				table->files[index] = NULL;
7790 				fput(file);
7791 				break;
7792 			}
7793 		}
7794 		nr_args--;
7795 		done++;
7796 		up->offset++;
7797 	}
7798 
7799 	if (needs_switch) {
7800 		percpu_ref_kill(&data->node->refs);
7801 		io_sqe_files_set_node(data, ref_node);
7802 	} else
7803 		destroy_fixed_file_ref_node(ref_node);
7804 
7805 	return done ? done : err;
7806 }
7807 
7808 static int io_sqe_files_update(struct io_ring_ctx *ctx, void __user *arg,
7809 			       unsigned nr_args)
7810 {
7811 	struct io_uring_files_update up;
7812 
7813 	if (!ctx->file_data)
7814 		return -ENXIO;
7815 	if (!nr_args)
7816 		return -EINVAL;
7817 	if (copy_from_user(&up, arg, sizeof(up)))
7818 		return -EFAULT;
7819 	if (up.resv)
7820 		return -EINVAL;
7821 
7822 	return __io_sqe_files_update(ctx, &up, nr_args);
7823 }
7824 
7825 static void io_free_work(struct io_wq_work *work)
7826 {
7827 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
7828 
7829 	/* Consider that io_steal_work() relies on this ref */
7830 	io_put_req(req);
7831 }
7832 
7833 static int io_init_wq_offload(struct io_ring_ctx *ctx,
7834 			      struct io_uring_params *p)
7835 {
7836 	struct io_wq_data data;
7837 	struct fd f;
7838 	struct io_ring_ctx *ctx_attach;
7839 	unsigned int concurrency;
7840 	int ret = 0;
7841 
7842 	data.user = ctx->user;
7843 	data.free_work = io_free_work;
7844 	data.do_work = io_wq_submit_work;
7845 
7846 	if (!(p->flags & IORING_SETUP_ATTACH_WQ)) {
7847 		/* Do QD, or 4 * CPUs, whichever is smaller */
7848 		concurrency = min(ctx->sq_entries, 4 * num_online_cpus());
7849 
7850 		ctx->io_wq = io_wq_create(concurrency, &data);
7851 		if (IS_ERR(ctx->io_wq)) {
7852 			ret = PTR_ERR(ctx->io_wq);
7853 			ctx->io_wq = NULL;
7854 		}
7855 		return ret;
7856 	}
7857 
7858 	f = fdget(p->wq_fd);
7859 	if (!f.file)
7860 		return -EBADF;
7861 
7862 	if (f.file->f_op != &io_uring_fops) {
7863 		ret = -EINVAL;
7864 		goto out_fput;
7865 	}
7866 
7867 	ctx_attach = f.file->private_data;
7868 	/* @io_wq is protected by holding the fd */
7869 	if (!io_wq_get(ctx_attach->io_wq, &data)) {
7870 		ret = -EINVAL;
7871 		goto out_fput;
7872 	}
7873 
7874 	ctx->io_wq = ctx_attach->io_wq;
7875 out_fput:
7876 	fdput(f);
7877 	return ret;
7878 }
7879 
7880 static int io_uring_alloc_task_context(struct task_struct *task)
7881 {
7882 	struct io_uring_task *tctx;
7883 	int ret;
7884 
7885 	tctx = kmalloc(sizeof(*tctx), GFP_KERNEL);
7886 	if (unlikely(!tctx))
7887 		return -ENOMEM;
7888 
7889 	ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL);
7890 	if (unlikely(ret)) {
7891 		kfree(tctx);
7892 		return ret;
7893 	}
7894 
7895 	xa_init(&tctx->xa);
7896 	init_waitqueue_head(&tctx->wait);
7897 	tctx->last = NULL;
7898 	atomic_set(&tctx->in_idle, 0);
7899 	tctx->sqpoll = false;
7900 	io_init_identity(&tctx->__identity);
7901 	tctx->identity = &tctx->__identity;
7902 	task->io_uring = tctx;
7903 	return 0;
7904 }
7905 
7906 void __io_uring_free(struct task_struct *tsk)
7907 {
7908 	struct io_uring_task *tctx = tsk->io_uring;
7909 
7910 	WARN_ON_ONCE(!xa_empty(&tctx->xa));
7911 	WARN_ON_ONCE(refcount_read(&tctx->identity->count) != 1);
7912 	if (tctx->identity != &tctx->__identity)
7913 		kfree(tctx->identity);
7914 	percpu_counter_destroy(&tctx->inflight);
7915 	kfree(tctx);
7916 	tsk->io_uring = NULL;
7917 }
7918 
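/*
 * Set up the async offload side of the ring: the SQPOLL thread, if requested
 * (and permitted, CAP_SYS_ADMIN only), and the io-wq worker pool.
 */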
7919 static int io_sq_offload_create(struct io_ring_ctx *ctx,
7920 				struct io_uring_params *p)
7921 {
7922 	int ret;
7923 
7924 	if (ctx->flags & IORING_SETUP_SQPOLL) {
7925 		struct io_sq_data *sqd;
7926 
7927 		ret = -EPERM;
7928 		if (!capable(CAP_SYS_ADMIN))
7929 			goto err;
7930 
7931 		sqd = io_get_sq_data(p);
7932 		if (IS_ERR(sqd)) {
7933 			ret = PTR_ERR(sqd);
7934 			goto err;
7935 		}
7936 
7937 		ctx->sq_data = sqd;
7938 		io_sq_thread_park(sqd);
7939 		mutex_lock(&sqd->ctx_lock);
7940 		list_add(&ctx->sqd_list, &sqd->ctx_new_list);
7941 		mutex_unlock(&sqd->ctx_lock);
7942 		io_sq_thread_unpark(sqd);
7943 
7944 		ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle);
7945 		if (!ctx->sq_thread_idle)
7946 			ctx->sq_thread_idle = HZ;
7947 
7948 		if (sqd->thread)
7949 			goto done;
7950 
7951 		if (p->flags & IORING_SETUP_SQ_AFF) {
7952 			int cpu = p->sq_thread_cpu;
7953 
7954 			ret = -EINVAL;
7955 			if (cpu >= nr_cpu_ids)
7956 				goto err;
7957 			if (!cpu_online(cpu))
7958 				goto err;
7959 
7960 			sqd->thread = kthread_create_on_cpu(io_sq_thread, sqd,
7961 							cpu, "io_uring-sq");
7962 		} else {
7963 			sqd->thread = kthread_create(io_sq_thread, sqd,
7964 							"io_uring-sq");
7965 		}
7966 		if (IS_ERR(sqd->thread)) {
7967 			ret = PTR_ERR(sqd->thread);
7968 			sqd->thread = NULL;
7969 			goto err;
7970 		}
7971 		ret = io_uring_alloc_task_context(sqd->thread);
7972 		if (ret)
7973 			goto err;
7974 	} else if (p->flags & IORING_SETUP_SQ_AFF) {
7975 		/* Can't have SQ_AFF without SQPOLL */
7976 		ret = -EINVAL;
7977 		goto err;
7978 	}
7979 
7980 done:
7981 	ret = io_init_wq_offload(ctx, p);
7982 	if (ret)
7983 		goto err;
7984 
7985 	return 0;
7986 err:
7987 	io_finish_async(ctx);
7988 	return ret;
7989 }
7990 
7991 static void io_sq_offload_start(struct io_ring_ctx *ctx)
7992 {
7993 	struct io_sq_data *sqd = ctx->sq_data;
7994 
7995 	ctx->flags &= ~IORING_SETUP_R_DISABLED;
7996 	if ((ctx->flags & IORING_SETUP_SQPOLL) && sqd && sqd->thread)
7997 		wake_up_process(sqd->thread);
7998 }
7999 
8000 static inline void __io_unaccount_mem(struct user_struct *user,
8001 				      unsigned long nr_pages)
8002 {
8003 	atomic_long_sub(nr_pages, &user->locked_vm);
8004 }
8005 
8006 static inline int __io_account_mem(struct user_struct *user,
8007 				   unsigned long nr_pages)
8008 {
8009 	unsigned long page_limit, cur_pages, new_pages;
8010 
8011 	/* Don't allow more pages than we can safely lock */
8012 	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
8013 
8014 	do {
8015 		cur_pages = atomic_long_read(&user->locked_vm);
8016 		new_pages = cur_pages + nr_pages;
8017 		if (new_pages > page_limit)
8018 			return -ENOMEM;
8019 	} while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
8020 					new_pages) != cur_pages);
8021 
8022 	return 0;
8023 }
8024 
8025 static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
8026 			     enum io_mem_account acct)
8027 {
8028 	if (ctx->limit_mem)
8029 		__io_unaccount_mem(ctx->user, nr_pages);
8030 
8031 	if (ctx->mm_account) {
8032 		if (acct == ACCT_LOCKED)
8033 			ctx->mm_account->locked_vm -= nr_pages;
8034 		else if (acct == ACCT_PINNED)
8035 			atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
8036 	}
8037 }
8038 
8039 static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages,
8040 			  enum io_mem_account acct)
8041 {
8042 	int ret;
8043 
8044 	if (ctx->limit_mem) {
8045 		ret = __io_account_mem(ctx->user, nr_pages);
8046 		if (ret)
8047 			return ret;
8048 	}
8049 
8050 	if (ctx->mm_account) {
8051 		if (acct == ACCT_LOCKED)
8052 			ctx->mm_account->locked_vm += nr_pages;
8053 		else if (acct == ACCT_PINNED)
8054 			atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
8055 	}
8056 
8057 	return 0;
8058 }
8059 
8060 static void io_mem_free(void *ptr)
8061 {
8062 	struct page *page;
8063 
8064 	if (!ptr)
8065 		return;
8066 
8067 	page = virt_to_head_page(ptr);
8068 	if (put_page_testzero(page))
8069 		free_compound_page(page);
8070 }
8071 
8072 static void *io_mem_alloc(size_t size)
8073 {
8074 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_COMP |
8075 				__GFP_NORETRY;
8076 
8077 	return (void *) __get_free_pages(gfp_flags, get_order(size));
8078 }
8079 
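/*
 * Compute the allocation size for the rings: the ring headers and CQE array,
 * followed by the (cache line aligned) SQ index array. Returns SIZE_MAX on
 * overflow and, if requested, the SQ array offset in *sq_offset.
 */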
8080 static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
8081 				size_t *sq_offset)
8082 {
8083 	struct io_rings *rings;
8084 	size_t off, sq_array_size;
8085 
8086 	off = struct_size(rings, cqes, cq_entries);
8087 	if (off == SIZE_MAX)
8088 		return SIZE_MAX;
8089 
8090 #ifdef CONFIG_SMP
8091 	off = ALIGN(off, SMP_CACHE_BYTES);
8092 	if (off == 0)
8093 		return SIZE_MAX;
8094 #endif
8095 
8096 	if (sq_offset)
8097 		*sq_offset = off;
8098 
8099 	sq_array_size = array_size(sizeof(u32), sq_entries);
8100 	if (sq_array_size == SIZE_MAX)
8101 		return SIZE_MAX;
8102 
8103 	if (check_add_overflow(off, sq_array_size, &off))
8104 		return SIZE_MAX;
8105 
8106 	return off;
8107 }
8108 
8109 static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
8110 {
8111 	size_t pages;
8112 
8113 	pages = (size_t)1 << get_order(
8114 		rings_size(sq_entries, cq_entries, NULL));
8115 	pages += (size_t)1 << get_order(
8116 		array_size(sizeof(struct io_uring_sqe), sq_entries));
8117 
8118 	return pages;
8119 }
8120 
8121 static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
8122 {
8123 	int i, j;
8124 
8125 	if (!ctx->user_bufs)
8126 		return -ENXIO;
8127 
8128 	for (i = 0; i < ctx->nr_user_bufs; i++) {
8129 		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8130 
8131 		for (j = 0; j < imu->nr_bvecs; j++)
8132 			unpin_user_page(imu->bvec[j].bv_page);
8133 
8134 		if (imu->acct_pages)
8135 			io_unaccount_mem(ctx, imu->acct_pages, ACCT_PINNED);
8136 		kvfree(imu->bvec);
8137 		imu->nr_bvecs = 0;
8138 	}
8139 
8140 	kfree(ctx->user_bufs);
8141 	ctx->user_bufs = NULL;
8142 	ctx->nr_user_bufs = 0;
8143 	return 0;
8144 }
8145 
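/* Copy the @index'th iovec from userspace, handling the compat layout. */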
8146 static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
8147 		       void __user *arg, unsigned index)
8148 {
8149 	struct iovec __user *src;
8150 
8151 #ifdef CONFIG_COMPAT
8152 	if (ctx->compat) {
8153 		struct compat_iovec __user *ciovs;
8154 		struct compat_iovec ciov;
8155 
8156 		ciovs = (struct compat_iovec __user *) arg;
8157 		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
8158 			return -EFAULT;
8159 
8160 		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
8161 		dst->iov_len = ciov.iov_len;
8162 		return 0;
8163 	}
8164 #endif
8165 	src = (struct iovec __user *) arg;
8166 	if (copy_from_user(dst, &src[index], sizeof(*dst)))
8167 		return -EFAULT;
8168 	return 0;
8169 }
8170 
8171 /*
8172  * Not super efficient, but this only happens at registration time. And we do cache
8173  * the last compound head, so generally we'll only do a full search if we don't
8174  * match that one.
8175  *
8176  * We check if the given compound head page has already been accounted, to
8177  * avoid double accounting it. This allows us to account the full size of the
8178  * page, not just the constituent pages of a huge page.
8179  */
8180 static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
8181 				  int nr_pages, struct page *hpage)
8182 {
8183 	int i, j;
8184 
8185 	/* check current page array */
8186 	for (i = 0; i < nr_pages; i++) {
8187 		if (!PageCompound(pages[i]))
8188 			continue;
8189 		if (compound_head(pages[i]) == hpage)
8190 			return true;
8191 	}
8192 
8193 	/* check previously registered pages */
8194 	for (i = 0; i < ctx->nr_user_bufs; i++) {
8195 		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8196 
8197 		for (j = 0; j < imu->nr_bvecs; j++) {
8198 			if (!PageCompound(imu->bvec[j].bv_page))
8199 				continue;
8200 			if (compound_head(imu->bvec[j].bv_page) == hpage)
8201 				return true;
8202 		}
8203 	}
8204 
8205 	return false;
8206 }
8207 
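/*
 * Account the pages pinned for a registered buffer. Compound (huge) pages
 * are accounted once at their full size, and only if they haven't already
 * been accounted by this or a previously registered buffer.
 */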
8208 static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
8209 				 int nr_pages, struct io_mapped_ubuf *imu,
8210 				 struct page **last_hpage)
8211 {
8212 	int i, ret;
8213 
8214 	for (i = 0; i < nr_pages; i++) {
8215 		if (!PageCompound(pages[i])) {
8216 			imu->acct_pages++;
8217 		} else {
8218 			struct page *hpage;
8219 
8220 			hpage = compound_head(pages[i]);
8221 			if (hpage == *last_hpage)
8222 				continue;
8223 			*last_hpage = hpage;
8224 			if (headpage_already_acct(ctx, pages, i, hpage))
8225 				continue;
8226 			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
8227 		}
8228 	}
8229 
8230 	if (!imu->acct_pages)
8231 		return 0;
8232 
8233 	ret = io_account_mem(ctx, imu->acct_pages, ACCT_PINNED);
8234 	if (ret)
8235 		imu->acct_pages = 0;
8236 	return ret;
8237 }
8238 
8239 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
8240 				  unsigned nr_args)
8241 {
8242 	struct vm_area_struct **vmas = NULL;
8243 	struct page **pages = NULL;
8244 	struct page *last_hpage = NULL;
8245 	int i, j, got_pages = 0;
8246 	int ret = -EINVAL;
8247 
8248 	if (ctx->user_bufs)
8249 		return -EBUSY;
8250 	if (!nr_args || nr_args > UIO_MAXIOV)
8251 		return -EINVAL;
8252 
8253 	ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
8254 					GFP_KERNEL);
8255 	if (!ctx->user_bufs)
8256 		return -ENOMEM;
8257 
8258 	for (i = 0; i < nr_args; i++) {
8259 		struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
8260 		unsigned long off, start, end, ubuf;
8261 		int pret, nr_pages;
8262 		struct iovec iov;
8263 		size_t size;
8264 
8265 		ret = io_copy_iov(ctx, &iov, arg, i);
8266 		if (ret)
8267 			goto err;
8268 
8269 		/*
8270 		 * Don't impose further limits on the size and buffer
8271 		 * constraints here; we'll return -EINVAL later when IO is
8272 		 * submitted if they are wrong.
8273 		 */
8274 		ret = -EFAULT;
8275 		if (!iov.iov_base || !iov.iov_len)
8276 			goto err;
8277 
8278 		/* arbitrary limit, but we need something */
8279 		if (iov.iov_len > SZ_1G)
8280 			goto err;
8281 
8282 		ubuf = (unsigned long) iov.iov_base;
8283 		end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
8284 		start = ubuf >> PAGE_SHIFT;
8285 		nr_pages = end - start;
8286 
8287 		ret = 0;
8288 		if (!pages || nr_pages > got_pages) {
8289 			kvfree(vmas);
8290 			kvfree(pages);
8291 			pages = kvmalloc_array(nr_pages, sizeof(struct page *),
8292 						GFP_KERNEL);
8293 			vmas = kvmalloc_array(nr_pages,
8294 					sizeof(struct vm_area_struct *),
8295 					GFP_KERNEL);
8296 			if (!pages || !vmas) {
8297 				ret = -ENOMEM;
8298 				goto err;
8299 			}
8300 			got_pages = nr_pages;
8301 		}
8302 
8303 		imu->bvec = kvmalloc_array(nr_pages, sizeof(struct bio_vec),
8304 						GFP_KERNEL);
8305 		ret = -ENOMEM;
8306 		if (!imu->bvec)
8307 			goto err;
8308 
8309 		ret = 0;
8310 		mmap_read_lock(current->mm);
8311 		pret = pin_user_pages(ubuf, nr_pages,
8312 				      FOLL_WRITE | FOLL_LONGTERM,
8313 				      pages, vmas);
8314 		if (pret == nr_pages) {
8315 			/* don't support file backed memory */
8316 			for (j = 0; j < nr_pages; j++) {
8317 				struct vm_area_struct *vma = vmas[j];
8318 
8319 				if (vma->vm_file &&
8320 				    !is_file_hugepages(vma->vm_file)) {
8321 					ret = -EOPNOTSUPP;
8322 					break;
8323 				}
8324 			}
8325 		} else {
8326 			ret = pret < 0 ? pret : -EFAULT;
8327 		}
8328 		mmap_read_unlock(current->mm);
8329 		if (ret) {
8330 			/*
8331 			 * If we did a partial map, or found file-backed vmas,
8332 			 * release any pages we did get.
8333 			 */
8334 			if (pret > 0)
8335 				unpin_user_pages(pages, pret);
8336 			kvfree(imu->bvec);
8337 			goto err;
8338 		}
8339 
8340 		ret = io_buffer_account_pin(ctx, pages, pret, imu, &last_hpage);
8341 		if (ret) {
8342 			unpin_user_pages(pages, pret);
8343 			kvfree(imu->bvec);
8344 			goto err;
8345 		}
8346 
8347 		off = ubuf & ~PAGE_MASK;
8348 		size = iov.iov_len;
8349 		for (j = 0; j < nr_pages; j++) {
8350 			size_t vec_len;
8351 
8352 			vec_len = min_t(size_t, size, PAGE_SIZE - off);
8353 			imu->bvec[j].bv_page = pages[j];
8354 			imu->bvec[j].bv_len = vec_len;
8355 			imu->bvec[j].bv_offset = off;
8356 			off = 0;
8357 			size -= vec_len;
8358 		}
8359 		/* store original address for later verification */
8360 		imu->ubuf = ubuf;
8361 		imu->len = iov.iov_len;
8362 		imu->nr_bvecs = nr_pages;
8363 
8364 		ctx->nr_user_bufs++;
8365 	}
8366 	kvfree(pages);
8367 	kvfree(vmas);
8368 	return 0;
8369 err:
8370 	kvfree(pages);
8371 	kvfree(vmas);
8372 	io_sqe_buffer_unregister(ctx);
8373 	return ret;
8374 }
8375 
8376 static int io_eventfd_register(struct io_ring_ctx *ctx, void __user *arg)
8377 {
8378 	__s32 __user *fds = arg;
8379 	int fd;
8380 
8381 	if (ctx->cq_ev_fd)
8382 		return -EBUSY;
8383 
8384 	if (copy_from_user(&fd, fds, sizeof(*fds)))
8385 		return -EFAULT;
8386 
8387 	ctx->cq_ev_fd = eventfd_ctx_fdget(fd);
8388 	if (IS_ERR(ctx->cq_ev_fd)) {
8389 		int ret = PTR_ERR(ctx->cq_ev_fd);
8390 		ctx->cq_ev_fd = NULL;
8391 		return ret;
8392 	}
8393 
8394 	return 0;
8395 }
8396 
8397 static int io_eventfd_unregister(struct io_ring_ctx *ctx)
8398 {
8399 	if (ctx->cq_ev_fd) {
8400 		eventfd_ctx_put(ctx->cq_ev_fd);
8401 		ctx->cq_ev_fd = NULL;
8402 		return 0;
8403 	}
8404 
8405 	return -ENXIO;
8406 }
8407 
8408 static void io_destroy_buffers(struct io_ring_ctx *ctx)
8409 {
8410 	struct io_buffer *buf;
8411 	unsigned long index;
8412 
8413 	xa_for_each(&ctx->io_buffers, index, buf)
8414 		__io_remove_buffers(ctx, buf, index, -1U);
8415 }
8416 
8417 static void io_ring_ctx_free(struct io_ring_ctx *ctx)
8418 {
8419 	io_finish_async(ctx);
8420 	io_sqe_buffer_unregister(ctx);
8421 
8422 	if (ctx->sqo_task) {
8423 		put_task_struct(ctx->sqo_task);
8424 		ctx->sqo_task = NULL;
8425 		mmdrop(ctx->mm_account);
8426 		ctx->mm_account = NULL;
8427 	}
8428 
8429 #ifdef CONFIG_BLK_CGROUP
8430 	if (ctx->sqo_blkcg_css)
8431 		css_put(ctx->sqo_blkcg_css);
8432 #endif
8433 
8434 	io_sqe_files_unregister(ctx);
8435 	io_eventfd_unregister(ctx);
8436 	io_destroy_buffers(ctx);
8437 
8438 #if defined(CONFIG_UNIX)
8439 	if (ctx->ring_sock) {
8440 		ctx->ring_sock->file = NULL; /* so that iput() is called */
8441 		sock_release(ctx->ring_sock);
8442 	}
8443 #endif
8444 
8445 	io_mem_free(ctx->rings);
8446 	io_mem_free(ctx->sq_sqes);
8447 
8448 	percpu_ref_exit(&ctx->refs);
8449 	free_uid(ctx->user);
8450 	put_cred(ctx->creds);
8451 	kfree(ctx->cancel_hash);
8452 	kmem_cache_free(req_cachep, ctx->fallback_req);
8453 	kfree(ctx);
8454 }
8455 
8456 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
8457 {
8458 	struct io_ring_ctx *ctx = file->private_data;
8459 	__poll_t mask = 0;
8460 
8461 	poll_wait(file, &ctx->cq_wait, wait);
8462 	/*
8463 	 * synchronizes with barrier from wq_has_sleeper call in
8464 	 * io_commit_cqring
8465 	 */
8466 	smp_rmb();
8467 	if (!io_sqring_full(ctx))
8468 		mask |= EPOLLOUT | EPOLLWRNORM;
8469 
8470 	/*
8471 	 * Don't flush cqring overflow list here, just do a simple check.
8472 	 * Otherwise there could possibly be an ABBA deadlock:
8473 	 *      CPU0                    CPU1
8474 	 *      ----                    ----
8475 	 * lock(&ctx->uring_lock);
8476 	 *                              lock(&ep->mtx);
8477 	 *                              lock(&ctx->uring_lock);
8478 	 * lock(&ep->mtx);
8479 	 *
8480 	 * Users may get EPOLLIN while seeing nothing in the cqring; this
8481 	 * pushes them to do the flush.
8482 	 */
8483 	if (io_cqring_events(ctx) || test_bit(0, &ctx->cq_check_overflow))
8484 		mask |= EPOLLIN | EPOLLRDNORM;
8485 
8486 	return mask;
8487 }
8488 
8489 static int io_uring_fasync(int fd, struct file *file, int on)
8490 {
8491 	struct io_ring_ctx *ctx = file->private_data;
8492 
8493 	return fasync_helper(fd, file, on, &ctx->cq_fasync);
8494 }
8495 
8496 static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
8497 {
8498 	struct io_identity *iod;
8499 
8500 	iod = xa_erase(&ctx->personalities, id);
8501 	if (iod) {
8502 		put_cred(iod->creds);
8503 		if (refcount_dec_and_test(&iod->count))
8504 			kfree(iod);
8505 		return 0;
8506 	}
8507 
8508 	return -EINVAL;
8509 }
8510 
8511 static void io_ring_exit_work(struct work_struct *work)
8512 {
8513 	struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx,
8514 					       exit_work);
8515 
8516 	/*
8517 	 * If we're doing polled IO and end up having requests being
8518 	 * submitted async (out-of-line), then completions can come in while
8519 	 * we're waiting for refs to drop. We need to reap these manually,
8520 	 * as nobody else will be looking for them.
8521 	 */
8522 	do {
8523 		io_iopoll_try_reap_events(ctx);
8524 	} while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20));
8525 	io_ring_ctx_free(ctx);
8526 }
8527 
8528 static bool io_cancel_ctx_cb(struct io_wq_work *work, void *data)
8529 {
8530 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8531 
8532 	return req->ctx == data;
8533 }
8534 
8535 static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
8536 {
8537 	unsigned long index;
8538 	struct io_identity *iod;
8539 
8540 	mutex_lock(&ctx->uring_lock);
8541 	percpu_ref_kill(&ctx->refs);
8542 	/* if force is set, the ring is going away. always drop after that */
8543 
8544 	if (WARN_ON_ONCE((ctx->flags & IORING_SETUP_SQPOLL) && !ctx->sqo_dead))
8545 		ctx->sqo_dead = 1;
8546 
8547 	ctx->cq_overflow_flushed = 1;
8548 	if (ctx->rings)
8549 		__io_cqring_overflow_flush(ctx, true, NULL, NULL);
8550 	mutex_unlock(&ctx->uring_lock);
8551 
8552 	io_kill_timeouts(ctx, NULL, NULL);
8553 	io_poll_remove_all(ctx, NULL, NULL);
8554 
8555 	if (ctx->io_wq)
8556 		io_wq_cancel_cb(ctx->io_wq, io_cancel_ctx_cb, ctx, true);
8557 
8558 	/* if we failed setting up the ctx, we might not have any rings */
8559 	io_iopoll_try_reap_events(ctx);
8560 	xa_for_each(&ctx->personalities, index, iod)
8561 		io_unregister_personality(ctx, index);
8562 
8563 	/*
8564 	 * Do this upfront, so we won't have a grace period where the ring
8565 	 * is closed but resources aren't reaped yet. This can cause
8566 	 * spurious failures in setting up a new ring.
8567 	 */
8568 	io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries),
8569 			 ACCT_LOCKED);
8570 
8571 	INIT_WORK(&ctx->exit_work, io_ring_exit_work);
8572 	/*
8573 	 * Use system_unbound_wq to avoid spawning tons of event kworkers
8574 	 * if we're exiting a ton of rings at the same time. It just adds
8575 	 * noise and overhead; there's no discernible change in runtime
8576 	 * over using system_wq.
8577 	 */
8578 	queue_work(system_unbound_wq, &ctx->exit_work);
8579 }
8580 
8581 static int io_uring_release(struct inode *inode, struct file *file)
8582 {
8583 	struct io_ring_ctx *ctx = file->private_data;
8584 
8585 	file->private_data = NULL;
8586 	io_ring_ctx_wait_and_kill(ctx);
8587 	return 0;
8588 }
8589 
8590 struct io_task_cancel {
8591 	struct task_struct *task;
8592 	struct files_struct *files;
8593 };
8594 
8595 static bool io_cancel_task_cb(struct io_wq_work *work, void *data)
8596 {
8597 	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
8598 	struct io_task_cancel *cancel = data;
8599 	bool ret;
8600 
8601 	if (cancel->files && (req->flags & REQ_F_LINK_TIMEOUT)) {
8602 		unsigned long flags;
8603 		struct io_ring_ctx *ctx = req->ctx;
8604 
8605 		/* protect against races with linked timeouts */
8606 		spin_lock_irqsave(&ctx->completion_lock, flags);
8607 		ret = io_match_task(req, cancel->task, cancel->files);
8608 		spin_unlock_irqrestore(&ctx->completion_lock, flags);
8609 	} else {
8610 		ret = io_match_task(req, cancel->task, cancel->files);
8611 	}
8612 	return ret;
8613 }
8614 
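/*
 * Cancel deferred requests matching @task/@files: walk the defer list
 * backwards to find the most recently deferred match, splice off everything
 * up to and including it, and complete those requests with -ECANCELED.
 */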
8615 static void io_cancel_defer_files(struct io_ring_ctx *ctx,
8616 				  struct task_struct *task,
8617 				  struct files_struct *files)
8618 {
8619 	struct io_defer_entry *de = NULL;
8620 	LIST_HEAD(list);
8621 
8622 	spin_lock_irq(&ctx->completion_lock);
8623 	list_for_each_entry_reverse(de, &ctx->defer_list, list) {
8624 		if (io_match_task(de->req, task, files)) {
8625 			list_cut_position(&list, &ctx->defer_list, &de->list);
8626 			break;
8627 		}
8628 	}
8629 	spin_unlock_irq(&ctx->completion_lock);
8630 
8631 	while (!list_empty(&list)) {
8632 		de = list_first_entry(&list, struct io_defer_entry, list);
8633 		list_del_init(&de->list);
8634 		req_set_fail_links(de->req);
8635 		io_put_req(de->req);
8636 		io_req_complete(de->req, -ECANCELED);
8637 		kfree(de);
8638 	}
8639 }
8640 
8641 static int io_uring_count_inflight(struct io_ring_ctx *ctx,
8642 				   struct task_struct *task,
8643 				   struct files_struct *files)
8644 {
8645 	struct io_kiocb *req;
8646 	int cnt = 0;
8647 
8648 	spin_lock_irq(&ctx->inflight_lock);
8649 	list_for_each_entry(req, &ctx->inflight_list, inflight_entry)
8650 		cnt += io_match_task(req, task, files);
8651 	spin_unlock_irq(&ctx->inflight_lock);
8652 	return cnt;
8653 }
8654 
8655 static void io_uring_cancel_files(struct io_ring_ctx *ctx,
8656 				  struct task_struct *task,
8657 				  struct files_struct *files)
8658 {
8659 	while (!list_empty_careful(&ctx->inflight_list)) {
8660 		struct io_task_cancel cancel = { .task = task, .files = files };
8661 		DEFINE_WAIT(wait);
8662 		int inflight;
8663 
8664 		inflight = io_uring_count_inflight(ctx, task, files);
8665 		if (!inflight)
8666 			break;
8667 
8668 		io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
8669 		io_poll_remove_all(ctx, task, files);
8670 		io_kill_timeouts(ctx, task, files);
8671 		/* cancellations _may_ trigger task work */
8672 		io_run_task_work();
8673 
8674 		prepare_to_wait(&task->io_uring->wait, &wait,
8675 				TASK_UNINTERRUPTIBLE);
8676 		if (inflight == io_uring_count_inflight(ctx, task, files))
8677 			schedule();
8678 		finish_wait(&task->io_uring->wait, &wait);
8679 	}
8680 }
8681 
8682 static void __io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
8683 					    struct task_struct *task)
8684 {
8685 	while (1) {
8686 		struct io_task_cancel cancel = { .task = task, .files = NULL, };
8687 		enum io_wq_cancel cret;
8688 		bool ret = false;
8689 
8690 		cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_task_cb, &cancel, true);
8691 		if (cret != IO_WQ_CANCEL_NOTFOUND)
8692 			ret = true;
8693 
8694 		/* SQPOLL thread does its own polling */
8695 		if (!(ctx->flags & IORING_SETUP_SQPOLL)) {
8696 			while (!list_empty_careful(&ctx->iopoll_list)) {
8697 				io_iopoll_try_reap_events(ctx);
8698 				ret = true;
8699 			}
8700 		}
8701 
8702 		ret |= io_poll_remove_all(ctx, task, NULL);
8703 		ret |= io_kill_timeouts(ctx, task, NULL);
8704 		if (!ret)
8705 			break;
8706 		io_run_task_work();
8707 		cond_resched();
8708 	}
8709 }
8710 
8711 static void io_disable_sqo_submit(struct io_ring_ctx *ctx)
8712 {
8713 	mutex_lock(&ctx->uring_lock);
8714 	ctx->sqo_dead = 1;
8715 	if (ctx->flags & IORING_SETUP_R_DISABLED)
8716 		io_sq_offload_start(ctx);
8717 	mutex_unlock(&ctx->uring_lock);
8718 
8719 	/* make sure callers enter the ring to get error */
8720 	if (ctx->rings)
8721 		io_ring_set_wakeup_flag(ctx);
8722 }
8723 
8724 /*
8725  * We need to iteratively cancel requests, in case a request has dependent
8726  * hard links. These persist even if cancelation fails, hence keep
8727  * looping until none are found.
8728  */
8729 static void io_uring_cancel_task_requests(struct io_ring_ctx *ctx,
8730 					  struct files_struct *files)
8731 {
8732 	struct task_struct *task = current;
8733 
8734 	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
8735 		io_disable_sqo_submit(ctx);
8736 		task = ctx->sq_data->thread;
8737 		atomic_inc(&task->io_uring->in_idle);
8738 		io_sq_thread_park(ctx->sq_data);
8739 	}
8740 
8741 	io_cancel_defer_files(ctx, task, files);
8742 	io_cqring_overflow_flush(ctx, true, task, files);
8743 
8744 	if (!files)
8745 		__io_uring_cancel_task_requests(ctx, task);
8746 	else
8747 		io_uring_cancel_files(ctx, task, files);
8748 
8749 	if ((ctx->flags & IORING_SETUP_SQPOLL) && ctx->sq_data) {
8750 		atomic_dec(&task->io_uring->in_idle);
8751 		io_sq_thread_unpark(ctx->sq_data);
8752 	}
8753 }
8754 
8755 /*
8756  * Note that this task has used io_uring. We use it for cancelation purposes.
8757  */
8758 static int io_uring_add_task_file(struct io_ring_ctx *ctx, struct file *file)
8759 {
8760 	struct io_uring_task *tctx = current->io_uring;
8761 	int ret;
8762 
8763 	if (unlikely(!tctx)) {
8764 		ret = io_uring_alloc_task_context(current);
8765 		if (unlikely(ret))
8766 			return ret;
8767 		tctx = current->io_uring;
8768 	}
8769 	if (tctx->last != file) {
8770 		void *old = xa_load(&tctx->xa, (unsigned long)file);
8771 
8772 		if (!old) {
8773 			get_file(file);
8774 			ret = xa_err(xa_store(&tctx->xa, (unsigned long)file,
8775 						file, GFP_KERNEL));
8776 			if (ret) {
8777 				fput(file);
8778 				return ret;
8779 			}
8780 		}
8781 		tctx->last = file;
8782 	}
8783 
8784 	/*
8785 	 * This is race safe in that the task itself is doing this, hence it
8786 	 * cannot be going through the exit/cancel paths at the same time.
8787 	 * This cannot be modified while exit/cancel is running.
8788 	 */
8789 	if (!tctx->sqpoll && (ctx->flags & IORING_SETUP_SQPOLL))
8790 		tctx->sqpoll = true;
8791 
8792 	return 0;
8793 }
8794 
8795 /*
8796  * Remove this io_uring_file -> task mapping.
8797  */
8798 static void io_uring_del_task_file(struct file *file)
8799 {
8800 	struct io_uring_task *tctx = current->io_uring;
8801 
8802 	if (tctx->last == file)
8803 		tctx->last = NULL;
8804 	file = xa_erase(&tctx->xa, (unsigned long)file);
8805 	if (file)
8806 		fput(file);
8807 }
8808 
8809 static void io_uring_remove_task_files(struct io_uring_task *tctx)
8810 {
8811 	struct file *file;
8812 	unsigned long index;
8813 
8814 	xa_for_each(&tctx->xa, index, file)
8815 		io_uring_del_task_file(file);
8816 }
8817 
8818 void __io_uring_files_cancel(struct files_struct *files)
8819 {
8820 	struct io_uring_task *tctx = current->io_uring;
8821 	struct file *file;
8822 	unsigned long index;
8823 
8824 	/* make sure overflow events are dropped */
8825 	atomic_inc(&tctx->in_idle);
8826 	xa_for_each(&tctx->xa, index, file)
8827 		io_uring_cancel_task_requests(file->private_data, files);
8828 	atomic_dec(&tctx->in_idle);
8829 
8830 	if (files)
8831 		io_uring_remove_task_files(tctx);
8832 }
8833 
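/*
 * Number of requests this task has in flight. For SQPOLL rings the requests
 * accounted to the ring's SQ thread task are included as well.
 */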
8834 static s64 tctx_inflight(struct io_uring_task *tctx)
8835 {
8836 	unsigned long index;
8837 	struct file *file;
8838 	s64 inflight;
8839 
8840 	inflight = percpu_counter_sum(&tctx->inflight);
8841 	if (!tctx->sqpoll)
8842 		return inflight;
8843 
8844 	/*
8845 	 * If we have SQPOLL rings, then we need to iterate and find them, and
8846 	 * add the pending count for those.
8847 	 */
8848 	xa_for_each(&tctx->xa, index, file) {
8849 		struct io_ring_ctx *ctx = file->private_data;
8850 
8851 		if (ctx->flags & IORING_SETUP_SQPOLL) {
8852 			struct io_uring_task *__tctx = ctx->sqo_task->io_uring;
8853 
8854 			inflight += percpu_counter_sum(&__tctx->inflight);
8855 		}
8856 	}
8857 
8858 	return inflight;
8859 }
8860 
8861 /*
8862  * Find any io_uring fd that this task has registered or done IO on, and cancel
8863  * requests.
8864  */
8865 void __io_uring_task_cancel(void)
8866 {
8867 	struct io_uring_task *tctx = current->io_uring;
8868 	DEFINE_WAIT(wait);
8869 	s64 inflight;
8870 
8871 	/* make sure overflow events are dropped */
8872 	atomic_inc(&tctx->in_idle);
8873 
8874 	/* trigger io_disable_sqo_submit() */
8875 	if (tctx->sqpoll)
8876 		__io_uring_files_cancel(NULL);
8877 
8878 	do {
8879 		/* read completions before cancelations */
8880 		inflight = tctx_inflight(tctx);
8881 		if (!inflight)
8882 			break;
8883 		__io_uring_files_cancel(NULL);
8884 
8885 		prepare_to_wait(&tctx->wait, &wait, TASK_UNINTERRUPTIBLE);
8886 
8887 		/*
8888 		 * If we've seen completions, retry without waiting. This
8889 		 * avoids a race where a completion comes in before we did
8890 		 * prepare_to_wait().
8891 		 */
8892 		if (inflight == tctx_inflight(tctx))
8893 			schedule();
8894 		finish_wait(&tctx->wait, &wait);
8895 	} while (1);
8896 
8897 	atomic_dec(&tctx->in_idle);
8898 
8899 	io_uring_remove_task_files(tctx);
8900 }
8901 
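/*
 * ->flush() handler, run when an io_uring fd is closed. Cancels this task's
 * requests if the task is exiting, and drops the task's file note once the
 * only remaining references are the pending fput() and the note itself.
 */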
8902 static int io_uring_flush(struct file *file, void *data)
8903 {
8904 	struct io_uring_task *tctx = current->io_uring;
8905 	struct io_ring_ctx *ctx = file->private_data;
8906 
8907 	if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
8908 		io_uring_cancel_task_requests(ctx, NULL);
8909 
8910 	if (!tctx)
8911 		return 0;
8912 
8913 	/* we should have cancelled and erased it before PF_EXITING */
8914 	WARN_ON_ONCE((current->flags & PF_EXITING) &&
8915 		     xa_load(&tctx->xa, (unsigned long)file));
8916 
8917 	/*
8918 	 * fput() is pending, will be 2 if the only other ref is our potential
8919 	 * task file note. If the task is exiting, drop regardless of count.
8920 	 */
8921 	if (atomic_long_read(&file->f_count) != 2)
8922 		return 0;
8923 
8924 	if (ctx->flags & IORING_SETUP_SQPOLL) {
8925 		/* there is only one file note, which is owned by sqo_task */
8926 		WARN_ON_ONCE(ctx->sqo_task != current &&
8927 			     xa_load(&tctx->xa, (unsigned long)file));
8928 		/* sqo_dead check is for when this happens after cancellation */
8929 		WARN_ON_ONCE(ctx->sqo_task == current && !ctx->sqo_dead &&
8930 			     !xa_load(&tctx->xa, (unsigned long)file));
8931 
8932 		io_disable_sqo_submit(ctx);
8933 	}
8934 
8935 	if (!(ctx->flags & IORING_SETUP_SQPOLL) || ctx->sqo_task == current)
8936 		io_uring_del_task_file(file);
8937 	return 0;
8938 }
8939 
8940 static void *io_uring_validate_mmap_request(struct file *file,
8941 					    loff_t pgoff, size_t sz)
8942 {
8943 	struct io_ring_ctx *ctx = file->private_data;
8944 	loff_t offset = pgoff << PAGE_SHIFT;
8945 	struct page *page;
8946 	void *ptr;
8947 
8948 	switch (offset) {
8949 	case IORING_OFF_SQ_RING:
8950 	case IORING_OFF_CQ_RING:
8951 		ptr = ctx->rings;
8952 		break;
8953 	case IORING_OFF_SQES:
8954 		ptr = ctx->sq_sqes;
8955 		break;
8956 	default:
8957 		return ERR_PTR(-EINVAL);
8958 	}
8959 
8960 	page = virt_to_head_page(ptr);
8961 	if (sz > page_size(page))
8962 		return ERR_PTR(-EINVAL);
8963 
8964 	return ptr;
8965 }
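
/*
 * For reference, a minimal userspace sketch of how the IORING_OFF_* offsets
 * validated above are consumed (assuming 'ring_fd' came from io_uring_setup()
 * and 'p' is the io_uring_params it filled in; the names are illustrative
 * only, and error handling is omitted):
 *
 *	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(__u32);
 *	size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);
 *
 *	void *sq_ptr = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
 *	void *cq_ptr = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_CQ_RING);
 *	struct io_uring_sqe *sqes = mmap(NULL,
 *			    p.sq_entries * sizeof(struct io_uring_sqe),
 *			    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
 *			    ring_fd, IORING_OFF_SQES);
 *
 * With IORING_FEAT_SINGLE_MMAP the SQ and CQ rings share one mapping, so a
 * single mmap() of max(sq_sz, cq_sz) at IORING_OFF_SQ_RING suffices for both.
 */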
8966 
8967 #ifdef CONFIG_MMU
8968 
8969 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
8970 {
8971 	size_t sz = vma->vm_end - vma->vm_start;
8972 	unsigned long pfn;
8973 	void *ptr;
8974 
8975 	ptr = io_uring_validate_mmap_request(file, vma->vm_pgoff, sz);
8976 	if (IS_ERR(ptr))
8977 		return PTR_ERR(ptr);
8978 
8979 	pfn = virt_to_phys(ptr) >> PAGE_SHIFT;
8980 	return remap_pfn_range(vma, vma->vm_start, pfn, sz, vma->vm_page_prot);
8981 }
8982 
8983 #else /* !CONFIG_MMU */
8984 
8985 static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
8986 {
8987 	return vma->vm_flags & (VM_SHARED | VM_MAYSHARE) ? 0 : -EINVAL;
8988 }
8989 
8990 static unsigned int io_uring_nommu_mmap_capabilities(struct file *file)
8991 {
8992 	return NOMMU_MAP_DIRECT | NOMMU_MAP_READ | NOMMU_MAP_WRITE;
8993 }
8994 
8995 static unsigned long io_uring_nommu_get_unmapped_area(struct file *file,
8996 	unsigned long addr, unsigned long len,
8997 	unsigned long pgoff, unsigned long flags)
8998 {
8999 	void *ptr;
9000 
9001 	ptr = io_uring_validate_mmap_request(file, pgoff, len);
9002 	if (IS_ERR(ptr))
9003 		return PTR_ERR(ptr);
9004 
9005 	return (unsigned long) ptr;
9006 }
9007 
9008 #endif /* !CONFIG_MMU */
9009 
9010 static int io_sqpoll_wait_sq(struct io_ring_ctx *ctx)
9011 {
9012 	int ret = 0;
9013 	DEFINE_WAIT(wait);
9014 
9015 	do {
9016 		if (!io_sqring_full(ctx))
9017 			break;
9018 
9019 		prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
9020 
9021 		if (unlikely(ctx->sqo_dead)) {
9022 			ret = -EOWNERDEAD;
9023 			break;
9024 		}
9025 
9026 		if (!io_sqring_full(ctx))
9027 			break;
9028 
9029 		schedule();
9030 	} while (!signal_pending(current));
9031 
9032 	finish_wait(&ctx->sqo_sq_wait, &wait);
9033 	return ret;
9034 }
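
/*
 * A hedged userspace sketch of what reaches the wait above: with
 * IORING_SETUP_SQPOLL, an application that finds the SQ ring full can ask
 * the kernel to block until space frees up instead of busy-looping
 * ('ring_fd' is assumed to be an SQPOLL io_uring fd; illustrative only):
 *
 *	int ret = syscall(__NR_io_uring_enter, ring_fd, 0, 0,
 *			  IORING_ENTER_SQ_WAIT, NULL, 0);
 *	if (ret == -1 && errno == EOWNERDEAD)
 *		;	// SQPOLL thread is gone, the ring is unusable
 */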
9035 
9036 SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
9037 		u32, min_complete, u32, flags, const sigset_t __user *, sig,
9038 		size_t, sigsz)
9039 {
9040 	struct io_ring_ctx *ctx;
9041 	long ret = -EBADF;
9042 	int submitted = 0;
9043 	struct fd f;
9044 
9045 	io_run_task_work();
9046 
9047 	if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP |
9048 			IORING_ENTER_SQ_WAIT))
9049 		return -EINVAL;
9050 
9051 	f = fdget(fd);
9052 	if (!f.file)
9053 		return -EBADF;
9054 
9055 	ret = -EOPNOTSUPP;
9056 	if (f.file->f_op != &io_uring_fops)
9057 		goto out_fput;
9058 
9059 	ret = -ENXIO;
9060 	ctx = f.file->private_data;
9061 	if (!percpu_ref_tryget(&ctx->refs))
9062 		goto out_fput;
9063 
9064 	ret = -EBADFD;
9065 	if (ctx->flags & IORING_SETUP_R_DISABLED)
9066 		goto out;
9067 
9068 	/*
9069 	 * For SQ polling, the thread will do all submissions and completions.
9070 	 * Just return the requested submit count, and wake the thread if
9071 	 * we were asked to.
9072 	 */
9073 	ret = 0;
9074 	if (ctx->flags & IORING_SETUP_SQPOLL) {
9075 		io_cqring_overflow_flush(ctx, false, NULL, NULL);
9076 
9077 		if (unlikely(ctx->sqo_dead)) {
9078 			ret = -EOWNERDEAD;
9079 			goto out;
9080 		}
9081 		if (flags & IORING_ENTER_SQ_WAKEUP)
9082 			wake_up(&ctx->sq_data->wait);
9083 		if (flags & IORING_ENTER_SQ_WAIT) {
9084 			ret = io_sqpoll_wait_sq(ctx);
9085 			if (ret)
9086 				goto out;
9087 		}
9088 		submitted = to_submit;
9089 	} else if (to_submit) {
9090 		ret = io_uring_add_task_file(ctx, f.file);
9091 		if (unlikely(ret))
9092 			goto out;
9093 		mutex_lock(&ctx->uring_lock);
9094 		submitted = io_submit_sqes(ctx, to_submit);
9095 		mutex_unlock(&ctx->uring_lock);
9096 
9097 		if (submitted != to_submit)
9098 			goto out;
9099 	}
9100 	if (flags & IORING_ENTER_GETEVENTS) {
9101 		min_complete = min(min_complete, ctx->cq_entries);
9102 
9103 		/*
9104 		 * When SETUP_IOPOLL and SETUP_SQPOLL are both enabled, user
9105 		 * space applications don't need to poll for completion events
9106 		 * themselves; they can rely on io_sq_thread to do that polling
9107 		 * work, which reduces CPU usage and uring_lock contention.
9108 		 */
9109 		if (ctx->flags & IORING_SETUP_IOPOLL &&
9110 		    !(ctx->flags & IORING_SETUP_SQPOLL)) {
9111 			ret = io_iopoll_check(ctx, min_complete);
9112 		} else {
9113 			ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
9114 		}
9115 	}
9116 
9117 out:
9118 	percpu_ref_put(&ctx->refs);
9119 out_fput:
9120 	fdput(f);
9121 	return submitted ? submitted : ret;
9122 }
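
/*
 * A minimal, illustrative (not authoritative) userspace call sequence for
 * the syscall above, assuming one sqe has already been written and the SQ
 * tail advanced on a non-SQPOLL ring:
 *
 *	int ret = syscall(__NR_io_uring_enter, ring_fd, 1, 1,
 *			  IORING_ENTER_GETEVENTS, NULL, 0);
 *
 * On success 'ret' is the number of sqes consumed (1 here); because
 * IORING_ENTER_GETEVENTS and min_complete=1 were passed, the call also
 * waits until at least one completion is visible in the CQ ring.
 */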
9123 
9124 #ifdef CONFIG_PROC_FS
9125 static int io_uring_show_cred(struct seq_file *m, unsigned int id,
9126 		const struct io_identity *iod)
9127 {
9128 	const struct cred *cred = iod->creds;
9129 	struct user_namespace *uns = seq_user_ns(m);
9130 	struct group_info *gi;
9131 	kernel_cap_t cap;
9132 	unsigned __capi;
9133 	int g;
9134 
9135 	seq_printf(m, "%5d\n", id);
9136 	seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
9137 	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
9138 	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
9139 	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
9140 	seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
9141 	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
9142 	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
9143 	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
9144 	seq_puts(m, "\n\tGroups:\t");
9145 	gi = cred->group_info;
9146 	for (g = 0; g < gi->ngroups; g++) {
9147 		seq_put_decimal_ull(m, g ? " " : "",
9148 					from_kgid_munged(uns, gi->gid[g]));
9149 	}
9150 	seq_puts(m, "\n\tCapEff:\t");
9151 	cap = cred->cap_effective;
9152 	CAP_FOR_EACH_U32(__capi)
9153 		seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
9154 	seq_putc(m, '\n');
9155 	return 0;
9156 }
9157 
9158 static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m)
9159 {
9160 	struct io_sq_data *sq = NULL;
9161 	bool has_lock;
9162 	int i;
9163 
9164 	/*
9165 	 * Avoid ABBA deadlock between the seq lock and the io_uring mutex,
9166 	 * since fdinfo case grabs it in the opposite direction of normal use
9167 	 * cases. If we fail to get the lock, we just don't iterate any
9168 	 * structures that could be going away outside the io_uring mutex.
9169 	 */
9170 	has_lock = mutex_trylock(&ctx->uring_lock);
9171 
9172 	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL))
9173 		sq = ctx->sq_data;
9174 
9175 	seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
9176 	seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
9177 	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
9178 	for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
9179 		struct fixed_file_table *table;
9180 		struct file *f;
9181 
9182 		table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT];
9183 		f = table->files[i & IORING_FILE_TABLE_MASK];
9184 		if (f)
9185 			seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
9186 		else
9187 			seq_printf(m, "%5u: <none>\n", i);
9188 	}
9189 	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
9190 	for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
9191 		struct io_mapped_ubuf *buf = &ctx->user_bufs[i];
9192 
9193 		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf,
9194 						(unsigned int) buf->len);
9195 	}
9196 	if (has_lock && !xa_empty(&ctx->personalities)) {
9197 		unsigned long index;
9198 		const struct io_identity *iod;
9199 
9200 		seq_printf(m, "Personalities:\n");
9201 		xa_for_each(&ctx->personalities, index, iod)
9202 			io_uring_show_cred(m, index, iod);
9203 	}
9204 	seq_printf(m, "PollList:\n");
9205 	spin_lock_irq(&ctx->completion_lock);
9206 	for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) {
9207 		struct hlist_head *list = &ctx->cancel_hash[i];
9208 		struct io_kiocb *req;
9209 
9210 		hlist_for_each_entry(req, list, hash_node)
9211 			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
9212 					req->task->task_works != NULL);
9213 	}
9214 	spin_unlock_irq(&ctx->completion_lock);
9215 	if (has_lock)
9216 		mutex_unlock(&ctx->uring_lock);
9217 }
9218 
9219 static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
9220 {
9221 	struct io_ring_ctx *ctx = f->private_data;
9222 
9223 	if (percpu_ref_tryget(&ctx->refs)) {
9224 		__io_uring_show_fdinfo(ctx, m);
9225 		percpu_ref_put(&ctx->refs);
9226 	}
9227 }
9228 #endif
9229 
9230 static const struct file_operations io_uring_fops = {
9231 	.release	= io_uring_release,
9232 	.flush		= io_uring_flush,
9233 	.mmap		= io_uring_mmap,
9234 #ifndef CONFIG_MMU
9235 	.get_unmapped_area = io_uring_nommu_get_unmapped_area,
9236 	.mmap_capabilities = io_uring_nommu_mmap_capabilities,
9237 #endif
9238 	.poll		= io_uring_poll,
9239 	.fasync		= io_uring_fasync,
9240 #ifdef CONFIG_PROC_FS
9241 	.show_fdinfo	= io_uring_show_fdinfo,
9242 #endif
9243 };
9244 
9245 static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
9246 				  struct io_uring_params *p)
9247 {
9248 	struct io_rings *rings;
9249 	size_t size, sq_array_offset;
9250 
9251 	/* make sure these are sane, as we already accounted them */
9252 	ctx->sq_entries = p->sq_entries;
9253 	ctx->cq_entries = p->cq_entries;
9254 
9255 	size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
9256 	if (size == SIZE_MAX)
9257 		return -EOVERFLOW;
9258 
9259 	rings = io_mem_alloc(size);
9260 	if (!rings)
9261 		return -ENOMEM;
9262 
9263 	ctx->rings = rings;
9264 	ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
9265 	rings->sq_ring_mask = p->sq_entries - 1;
9266 	rings->cq_ring_mask = p->cq_entries - 1;
9267 	rings->sq_ring_entries = p->sq_entries;
9268 	rings->cq_ring_entries = p->cq_entries;
9269 	ctx->sq_mask = rings->sq_ring_mask;
9270 	ctx->cq_mask = rings->cq_ring_mask;
9271 
9272 	size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
9273 	if (size == SIZE_MAX) {
9274 		io_mem_free(ctx->rings);
9275 		ctx->rings = NULL;
9276 		return -EOVERFLOW;
9277 	}
9278 
9279 	ctx->sq_sqes = io_mem_alloc(size);
9280 	if (!ctx->sq_sqes) {
9281 		io_mem_free(ctx->rings);
9282 		ctx->rings = NULL;
9283 		return -ENOMEM;
9284 	}
9285 
9286 	return 0;
9287 }
9288 
9289 static int io_uring_install_fd(struct io_ring_ctx *ctx, struct file *file)
9290 {
9291 	int ret, fd;
9292 
9293 	fd = get_unused_fd_flags(O_RDWR | O_CLOEXEC);
9294 	if (fd < 0)
9295 		return fd;
9296 
9297 	ret = io_uring_add_task_file(ctx, file);
9298 	if (ret) {
9299 		put_unused_fd(fd);
9300 		return ret;
9301 	}
9302 	fd_install(fd, file);
9303 	return fd;
9304 }
9305 
9306 /*
9307  * Allocate an anonymous fd; this is what constitutes the application
9308  * visible backing of an io_uring instance. The application mmaps this
9309  * fd to gain access to the SQ/CQ ring details. If UNIX sockets are enabled,
9310  * we have to tie this fd to a socket for file garbage collection purposes.
9311  */
9312 static struct file *io_uring_get_file(struct io_ring_ctx *ctx)
9313 {
9314 	struct file *file;
9315 #if defined(CONFIG_UNIX)
9316 	int ret;
9317 
9318 	ret = sock_create_kern(&init_net, PF_UNIX, SOCK_RAW, IPPROTO_IP,
9319 				&ctx->ring_sock);
9320 	if (ret)
9321 		return ERR_PTR(ret);
9322 #endif
9323 
9324 	file = anon_inode_getfile("[io_uring]", &io_uring_fops, ctx,
9325 					O_RDWR | O_CLOEXEC);
9326 #if defined(CONFIG_UNIX)
9327 	if (IS_ERR(file)) {
9328 		sock_release(ctx->ring_sock);
9329 		ctx->ring_sock = NULL;
9330 	} else {
9331 		ctx->ring_sock->file = file;
9332 	}
9333 #endif
9334 	return file;
9335 }
9336 
9337 static int io_uring_create(unsigned entries, struct io_uring_params *p,
9338 			   struct io_uring_params __user *params)
9339 {
9340 	struct user_struct *user = NULL;
9341 	struct io_ring_ctx *ctx;
9342 	struct file *file;
9343 	bool limit_mem;
9344 	int ret;
9345 
9346 	if (!entries)
9347 		return -EINVAL;
9348 	if (entries > IORING_MAX_ENTRIES) {
9349 		if (!(p->flags & IORING_SETUP_CLAMP))
9350 			return -EINVAL;
9351 		entries = IORING_MAX_ENTRIES;
9352 	}
9353 
9354 	/*
9355 	 * Use twice as many entries for the CQ ring. It's possible for the
9356 	 * application to drive a higher depth than the size of the SQ ring,
9357 	 * since the sqes are only used at submission time. This allows for
9358 	 * some flexibility in overcommitting a bit. If the application has
9359 	 * set IORING_SETUP_CQSIZE, it will have passed in the desired number
9360 	 * of CQ ring entries manually.
9361 	 */
9362 	p->sq_entries = roundup_pow_of_two(entries);
9363 	if (p->flags & IORING_SETUP_CQSIZE) {
9364 		/*
9365 		 * If IORING_SETUP_CQSIZE is set, we do the same roundup
9366 		 * to a power-of-two, if it isn't already. We do NOT impose
9367 		 * any cq vs sq ring sizing.
9368 		 */
9369 		if (!p->cq_entries)
9370 			return -EINVAL;
9371 		if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
9372 			if (!(p->flags & IORING_SETUP_CLAMP))
9373 				return -EINVAL;
9374 			p->cq_entries = IORING_MAX_CQ_ENTRIES;
9375 		}
9376 		p->cq_entries = roundup_pow_of_two(p->cq_entries);
9377 		if (p->cq_entries < p->sq_entries)
9378 			return -EINVAL;
9379 	} else {
9380 		p->cq_entries = 2 * p->sq_entries;
9381 	}
9382 
9383 	user = get_uid(current_user());
9384 	limit_mem = !capable(CAP_IPC_LOCK);
9385 
9386 	if (limit_mem) {
9387 		ret = __io_account_mem(user,
9388 				ring_pages(p->sq_entries, p->cq_entries));
9389 		if (ret) {
9390 			free_uid(user);
9391 			return ret;
9392 		}
9393 	}
9394 
9395 	ctx = io_ring_ctx_alloc(p);
9396 	if (!ctx) {
9397 		if (limit_mem)
9398 			__io_unaccount_mem(user, ring_pages(p->sq_entries,
9399 								p->cq_entries));
9400 		free_uid(user);
9401 		return -ENOMEM;
9402 	}
9403 	ctx->compat = in_compat_syscall();
9404 	ctx->user = user;
9405 	ctx->creds = get_current_cred();
9406 #ifdef CONFIG_AUDIT
9407 	ctx->loginuid = current->loginuid;
9408 	ctx->sessionid = current->sessionid;
9409 #endif
9410 	ctx->sqo_task = get_task_struct(current);
9411 
9412 	/*
9413 	 * This is just grabbed for accounting purposes. When a process exits,
9414 	 * the mm is exited and dropped before the files, hence we need to hang
9415 	 * on to this mm purely for the purposes of being able to unaccount
9416 	 * memory (locked/pinned vm). It's not used for anything else.
9417 	 */
9418 	mmgrab(current->mm);
9419 	ctx->mm_account = current->mm;
9420 
9421 #ifdef CONFIG_BLK_CGROUP
9422 	/*
9423 	 * The sq thread will belong to the original cgroup it was inited in.
9424 	 * If the cgroup goes offline (e.g. disabling the io controller), then
9425 	 * issued bios will be associated with the closest cgroup later in the
9426 	 * block layer.
9427 	 */
9428 	rcu_read_lock();
9429 	ctx->sqo_blkcg_css = blkcg_css();
9430 	ret = css_tryget_online(ctx->sqo_blkcg_css);
9431 	rcu_read_unlock();
9432 	if (!ret) {
9433 		/* don't init against a dying cgroup, have the user try again */
9434 		ctx->sqo_blkcg_css = NULL;
9435 		ret = -ENODEV;
9436 		goto err;
9437 	}
9438 #endif
9439 
9440 	/*
9441 	 * Account memory _before_ installing the file descriptor. Once
9442 	 * the descriptor is installed, it can get closed at any time. Also
9443 	 * do this before hitting the general error path, as ring freeing
9444 	 * will un-account as well.
9445 	 */
9446 	io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries),
9447 		       ACCT_LOCKED);
9448 	ctx->limit_mem = limit_mem;
9449 
9450 	ret = io_allocate_scq_urings(ctx, p);
9451 	if (ret)
9452 		goto err;
9453 
9454 	ret = io_sq_offload_create(ctx, p);
9455 	if (ret)
9456 		goto err;
9457 
9458 	if (!(p->flags & IORING_SETUP_R_DISABLED))
9459 		io_sq_offload_start(ctx);
9460 
9461 	memset(&p->sq_off, 0, sizeof(p->sq_off));
9462 	p->sq_off.head = offsetof(struct io_rings, sq.head);
9463 	p->sq_off.tail = offsetof(struct io_rings, sq.tail);
9464 	p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
9465 	p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
9466 	p->sq_off.flags = offsetof(struct io_rings, sq_flags);
9467 	p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
9468 	p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
9469 
9470 	memset(&p->cq_off, 0, sizeof(p->cq_off));
9471 	p->cq_off.head = offsetof(struct io_rings, cq.head);
9472 	p->cq_off.tail = offsetof(struct io_rings, cq.tail);
9473 	p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
9474 	p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
9475 	p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
9476 	p->cq_off.cqes = offsetof(struct io_rings, cqes);
9477 	p->cq_off.flags = offsetof(struct io_rings, cq_flags);
9478 
9479 	p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP |
9480 			IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS |
9481 			IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL |
9482 			IORING_FEAT_POLL_32BITS;
9483 
9484 	if (copy_to_user(params, p, sizeof(*p))) {
9485 		ret = -EFAULT;
9486 		goto err;
9487 	}
9488 
9489 	file = io_uring_get_file(ctx);
9490 	if (IS_ERR(file)) {
9491 		ret = PTR_ERR(file);
9492 		goto err;
9493 	}
9494 
9495 	/*
9496 	 * Install ring fd as the very last thing, so we don't risk someone
9497 	 * having closed it before we finish setup
9498 	 */
9499 	ret = io_uring_install_fd(ctx, file);
9500 	if (ret < 0) {
9501 		io_disable_sqo_submit(ctx);
9502 		/* fput will clean it up */
9503 		fput(file);
9504 		return ret;
9505 	}
9506 
9507 	trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags);
9508 	return ret;
9509 err:
9510 	io_disable_sqo_submit(ctx);
9511 	io_ring_ctx_wait_and_kill(ctx);
9512 	return ret;
9513 }
9514 
9515 /*
9516  * Sets up an io_uring context and returns the fd. The application asks for a
9517  * ring size; we return the actual sq/cq ring sizes (among other things) in the
9518  * params structure passed in.
9519  */
9520 static long io_uring_setup(u32 entries, struct io_uring_params __user *params)
9521 {
9522 	struct io_uring_params p;
9523 	int i;
9524 
9525 	if (copy_from_user(&p, params, sizeof(p)))
9526 		return -EFAULT;
9527 	for (i = 0; i < ARRAY_SIZE(p.resv); i++) {
9528 		if (p.resv[i])
9529 			return -EINVAL;
9530 	}
9531 
9532 	if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL |
9533 			IORING_SETUP_SQ_AFF | IORING_SETUP_CQSIZE |
9534 			IORING_SETUP_CLAMP | IORING_SETUP_ATTACH_WQ |
9535 			IORING_SETUP_R_DISABLED))
9536 		return -EINVAL;
9537 
9538 	return  io_uring_create(entries, &p, params);
9539 }
9540 
9541 SYSCALL_DEFINE2(io_uring_setup, u32, entries,
9542 		struct io_uring_params __user *, params)
9543 {
9544 	return io_uring_setup(entries, params);
9545 }
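
/*
 * Illustrative userspace setup for the syscall above, assuming only that
 * <linux/io_uring.h> and <sys/syscall.h> are available (error handling
 * omitted; the values are examples, not requirements):
 *
 *	struct io_uring_params p = { 0 };
 *
 *	p.flags = IORING_SETUP_CQSIZE;
 *	p.cq_entries = 4096;		// request a deeper CQ than 2 * SQ
 *	int ring_fd = syscall(__NR_io_uring_setup, 128, &p);
 *
 * On return, p.sq_entries/p.cq_entries hold the (rounded-up) ring sizes and
 * p.sq_off/p.cq_off hold the offsets to use with mmap() as sketched earlier.
 */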
9546 
9547 static int io_probe(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
9548 {
9549 	struct io_uring_probe *p;
9550 	size_t size;
9551 	int i, ret;
9552 
9553 	size = struct_size(p, ops, nr_args);
9554 	if (size == SIZE_MAX)
9555 		return -EOVERFLOW;
9556 	p = kzalloc(size, GFP_KERNEL);
9557 	if (!p)
9558 		return -ENOMEM;
9559 
9560 	ret = -EFAULT;
9561 	if (copy_from_user(p, arg, size))
9562 		goto out;
9563 	ret = -EINVAL;
9564 	if (memchr_inv(p, 0, size))
9565 		goto out;
9566 
9567 	p->last_op = IORING_OP_LAST - 1;
9568 	if (nr_args > IORING_OP_LAST)
9569 		nr_args = IORING_OP_LAST;
9570 
9571 	for (i = 0; i < nr_args; i++) {
9572 		p->ops[i].op = i;
9573 		if (!io_op_defs[i].not_supported)
9574 			p->ops[i].flags = IO_URING_OP_SUPPORTED;
9575 	}
9576 	p->ops_len = i;
9577 
9578 	ret = 0;
9579 	if (copy_to_user(arg, p, size))
9580 		ret = -EFAULT;
9581 out:
9582 	kfree(p);
9583 	return ret;
9584 }
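
/*
 * A hedged sketch of how an application might consume the probe from
 * userspace through io_uring_register() (names illustrative, error handling
 * omitted):
 *
 *	struct io_uring_probe *probe;
 *
 *	probe = calloc(1, sizeof(*probe) + 256 * sizeof(struct io_uring_probe_op));
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_PROBE,
 *		probe, 256);
 *	if (probe->ops_len > IORING_OP_READV &&
 *	    (probe->ops[IORING_OP_READV].flags & IO_URING_OP_SUPPORTED))
 *		;	// IORING_OP_READV is available on this kernel
 */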
9585 
9586 static int io_register_personality(struct io_ring_ctx *ctx)
9587 {
9588 	struct io_identity *iod;
9589 	u32 id;
9590 	int ret;
9591 
9592 	iod = kmalloc(sizeof(*iod), GFP_KERNEL);
9593 	if (unlikely(!iod))
9594 		return -ENOMEM;
9595 
9596 	io_init_identity(iod);
9597 	iod->creds = get_current_cred();
9598 
9599 	ret = xa_alloc_cyclic(&ctx->personalities, &id, (void *)iod,
9600 			XA_LIMIT(0, USHRT_MAX), &ctx->pers_next, GFP_KERNEL);
9601 	if (ret < 0) {
9602 		put_cred(iod->creds);
9603 		kfree(iod);
9604 		return ret;
9605 	}
9606 	return id;
9607 }
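
/*
 * Usage sketch for the helper above (assumptions: 'ring_fd' is an io_uring
 * fd and 'sqe' points at a prepared submission entry; illustrative only):
 *
 *	int id = syscall(__NR_io_uring_register, ring_fd,
 *			 IORING_REGISTER_PERSONALITY, NULL, 0);
 *	// later, issue a request under that identity's credentials
 *	sqe->personality = id;
 *
 * A personality can be dropped again with IORING_UNREGISTER_PERSONALITY,
 * passing the id as nr_args.
 */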
9608 
9609 static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
9610 				    unsigned int nr_args)
9611 {
9612 	struct io_uring_restriction *res;
9613 	size_t size;
9614 	int i, ret;
9615 
9616 	/* Restrictions allowed only if rings started disabled */
9617 	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9618 		return -EBADFD;
9619 
9620 	/* We allow only a single restrictions registration */
9621 	if (ctx->restrictions.registered)
9622 		return -EBUSY;
9623 
9624 	if (!arg || nr_args > IORING_MAX_RESTRICTIONS)
9625 		return -EINVAL;
9626 
9627 	size = array_size(nr_args, sizeof(*res));
9628 	if (size == SIZE_MAX)
9629 		return -EOVERFLOW;
9630 
9631 	res = memdup_user(arg, size);
9632 	if (IS_ERR(res))
9633 		return PTR_ERR(res);
9634 
9635 	ret = 0;
9636 
9637 	for (i = 0; i < nr_args; i++) {
9638 		switch (res[i].opcode) {
9639 		case IORING_RESTRICTION_REGISTER_OP:
9640 			if (res[i].register_op >= IORING_REGISTER_LAST) {
9641 				ret = -EINVAL;
9642 				goto out;
9643 			}
9644 
9645 			__set_bit(res[i].register_op,
9646 				  ctx->restrictions.register_op);
9647 			break;
9648 		case IORING_RESTRICTION_SQE_OP:
9649 			if (res[i].sqe_op >= IORING_OP_LAST) {
9650 				ret = -EINVAL;
9651 				goto out;
9652 			}
9653 
9654 			__set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
9655 			break;
9656 		case IORING_RESTRICTION_SQE_FLAGS_ALLOWED:
9657 			ctx->restrictions.sqe_flags_allowed = res[i].sqe_flags;
9658 			break;
9659 		case IORING_RESTRICTION_SQE_FLAGS_REQUIRED:
9660 			ctx->restrictions.sqe_flags_required = res[i].sqe_flags;
9661 			break;
9662 		default:
9663 			ret = -EINVAL;
9664 			goto out;
9665 		}
9666 	}
9667 
9668 out:
9669 	/* Reset all restrictions if an error happened */
9670 	if (ret != 0)
9671 		memset(&ctx->restrictions, 0, sizeof(ctx->restrictions));
9672 	else
9673 		ctx->restrictions.registered = true;
9674 
9675 	kfree(res);
9676 	return ret;
9677 }
9678 
9679 static int io_register_enable_rings(struct io_ring_ctx *ctx)
9680 {
9681 	if (!(ctx->flags & IORING_SETUP_R_DISABLED))
9682 		return -EBADFD;
9683 
9684 	if (ctx->restrictions.registered)
9685 		ctx->restricted = 1;
9686 
9687 	io_sq_offload_start(ctx);
9688 	return 0;
9689 }
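
/*
 * Putting the two registration helpers above together, a hedged,
 * illustrative sandboxing sequence from userspace might look like
 * (error handling omitted):
 *
 *	struct io_uring_params p = { .flags = IORING_SETUP_R_DISABLED };
 *	int ring_fd = syscall(__NR_io_uring_setup, 64, &p);
 *
 *	struct io_uring_restriction res[] = {
 *		{ .opcode = IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op = IORING_OP_READV },
 *		{ .opcode = IORING_RESTRICTION_SQE_OP,
 *		  .sqe_op = IORING_OP_WRITEV },
 *	};
 *
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_RESTRICTIONS, res, 2);
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_ENABLE_RINGS, NULL, 0);
 *
 * After IORING_REGISTER_ENABLE_RINGS, submissions are accepted but only the
 * whitelisted sqe opcodes (and register opcodes, if any were allowed) pass
 * the restriction checks.
 */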
9690 
9691 static bool io_register_op_must_quiesce(int op)
9692 {
9693 	switch (op) {
9694 	case IORING_UNREGISTER_FILES:
9695 	case IORING_REGISTER_FILES_UPDATE:
9696 	case IORING_REGISTER_PROBE:
9697 	case IORING_REGISTER_PERSONALITY:
9698 	case IORING_UNREGISTER_PERSONALITY:
9699 		return false;
9700 	default:
9701 		return true;
9702 	}
9703 }
9704 
9705 static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
9706 			       void __user *arg, unsigned nr_args)
9707 	__releases(ctx->uring_lock)
9708 	__acquires(ctx->uring_lock)
9709 {
9710 	int ret;
9711 
9712 	/*
9713 	 * We're inside the ring mutex, if the ref is already dying, then
9714 	 * someone else killed the ctx or is already going through
9715 	 * io_uring_register().
9716 	 */
9717 	if (percpu_ref_is_dying(&ctx->refs))
9718 		return -ENXIO;
9719 
9720 	if (io_register_op_must_quiesce(opcode)) {
9721 		percpu_ref_kill(&ctx->refs);
9722 
9723 		/*
9724 		 * Drop uring mutex before waiting for references to exit. If
9725 		 * another thread is currently inside io_uring_enter() it might
9726 		 * need to grab the uring_lock to make progress. If we hold it
9727 		 * here across the drain wait, then we can deadlock. It's safe
9728 		 * to drop the mutex here, since no new references will come in
9729 		 * after we've killed the percpu ref.
9730 		 */
9731 		mutex_unlock(&ctx->uring_lock);
9732 		do {
9733 			ret = wait_for_completion_interruptible(&ctx->ref_comp);
9734 			if (!ret)
9735 				break;
9736 			ret = io_run_task_work_sig();
9737 			if (ret < 0)
9738 				break;
9739 		} while (1);
9740 		mutex_lock(&ctx->uring_lock);
9741 
9742 		if (ret) {
9743 			io_refs_resurrect(&ctx->refs, &ctx->ref_comp);
9744 			return ret;
9745 		}
9746 	}
9747 
9748 	if (ctx->restricted) {
9749 		if (opcode >= IORING_REGISTER_LAST) {
9750 			ret = -EINVAL;
9751 			goto out;
9752 		}
9753 
9754 		if (!test_bit(opcode, ctx->restrictions.register_op)) {
9755 			ret = -EACCES;
9756 			goto out;
9757 		}
9758 	}
9759 
9760 	switch (opcode) {
9761 	case IORING_REGISTER_BUFFERS:
9762 		ret = io_sqe_buffer_register(ctx, arg, nr_args);
9763 		break;
9764 	case IORING_UNREGISTER_BUFFERS:
9765 		ret = -EINVAL;
9766 		if (arg || nr_args)
9767 			break;
9768 		ret = io_sqe_buffer_unregister(ctx);
9769 		break;
9770 	case IORING_REGISTER_FILES:
9771 		ret = io_sqe_files_register(ctx, arg, nr_args);
9772 		break;
9773 	case IORING_UNREGISTER_FILES:
9774 		ret = -EINVAL;
9775 		if (arg || nr_args)
9776 			break;
9777 		ret = io_sqe_files_unregister(ctx);
9778 		break;
9779 	case IORING_REGISTER_FILES_UPDATE:
9780 		ret = io_sqe_files_update(ctx, arg, nr_args);
9781 		break;
9782 	case IORING_REGISTER_EVENTFD:
9783 	case IORING_REGISTER_EVENTFD_ASYNC:
9784 		ret = -EINVAL;
9785 		if (nr_args != 1)
9786 			break;
9787 		ret = io_eventfd_register(ctx, arg);
9788 		if (ret)
9789 			break;
9790 		if (opcode == IORING_REGISTER_EVENTFD_ASYNC)
9791 			ctx->eventfd_async = 1;
9792 		else
9793 			ctx->eventfd_async = 0;
9794 		break;
9795 	case IORING_UNREGISTER_EVENTFD:
9796 		ret = -EINVAL;
9797 		if (arg || nr_args)
9798 			break;
9799 		ret = io_eventfd_unregister(ctx);
9800 		break;
9801 	case IORING_REGISTER_PROBE:
9802 		ret = -EINVAL;
9803 		if (!arg || nr_args > 256)
9804 			break;
9805 		ret = io_probe(ctx, arg, nr_args);
9806 		break;
9807 	case IORING_REGISTER_PERSONALITY:
9808 		ret = -EINVAL;
9809 		if (arg || nr_args)
9810 			break;
9811 		ret = io_register_personality(ctx);
9812 		break;
9813 	case IORING_UNREGISTER_PERSONALITY:
9814 		ret = -EINVAL;
9815 		if (arg)
9816 			break;
9817 		ret = io_unregister_personality(ctx, nr_args);
9818 		break;
9819 	case IORING_REGISTER_ENABLE_RINGS:
9820 		ret = -EINVAL;
9821 		if (arg || nr_args)
9822 			break;
9823 		ret = io_register_enable_rings(ctx);
9824 		break;
9825 	case IORING_REGISTER_RESTRICTIONS:
9826 		ret = io_register_restrictions(ctx, arg, nr_args);
9827 		break;
9828 	default:
9829 		ret = -EINVAL;
9830 		break;
9831 	}
9832 
9833 out:
9834 	if (io_register_op_must_quiesce(opcode)) {
9835 		/* bring the ctx back to life */
9836 		percpu_ref_reinit(&ctx->refs);
9837 		reinit_completion(&ctx->ref_comp);
9838 	}
9839 	return ret;
9840 }
9841 
9842 SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode,
9843 		void __user *, arg, unsigned int, nr_args)
9844 {
9845 	struct io_ring_ctx *ctx;
9846 	long ret = -EBADF;
9847 	struct fd f;
9848 
9849 	f = fdget(fd);
9850 	if (!f.file)
9851 		return -EBADF;
9852 
9853 	ret = -EOPNOTSUPP;
9854 	if (f.file->f_op != &io_uring_fops)
9855 		goto out_fput;
9856 
9857 	ctx = f.file->private_data;
9858 
9859 	mutex_lock(&ctx->uring_lock);
9860 	ret = __io_uring_register(ctx, opcode, arg, nr_args);
9861 	mutex_unlock(&ctx->uring_lock);
9862 	trace_io_uring_register(ctx, opcode, ctx->nr_user_files, ctx->nr_user_bufs,
9863 							ctx->cq_ev_fd != NULL, ret);
9864 out_fput:
9865 	fdput(f);
9866 	return ret;
9867 }
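
/*
 * For completeness, a hedged sketch of the two most common registrations
 * done through this syscall (fixed buffers and fixed files); 'buf' and
 * 'pipe_fds' are illustrative userspace objects, error handling omitted:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_BUFFERS, &iov, 1);
 *
 *	int pipe_fds[2];
 *	pipe(pipe_fds);
 *	syscall(__NR_io_uring_register, ring_fd,
 *		IORING_REGISTER_FILES, pipe_fds, 2);
 *
 * Registered buffers are then used with IORING_OP_READ_FIXED/WRITE_FIXED
 * (sqe->buf_index selects the buffer), and registered files with
 * IOSQE_FIXED_FILE (sqe->fd becomes an index into the registered table).
 */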
9868 
9869 static int __init io_uring_init(void)
9870 {
9871 #define __BUILD_BUG_VERIFY_ELEMENT(stype, eoffset, etype, ename) do { \
9872 	BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \
9873 	BUILD_BUG_ON(sizeof(etype) != sizeof_field(stype, ename)); \
9874 } while (0)
9875 
9876 #define BUILD_BUG_SQE_ELEM(eoffset, etype, ename) \
9877 	__BUILD_BUG_VERIFY_ELEMENT(struct io_uring_sqe, eoffset, etype, ename)
9878 	BUILD_BUG_ON(sizeof(struct io_uring_sqe) != 64);
9879 	BUILD_BUG_SQE_ELEM(0,  __u8,   opcode);
9880 	BUILD_BUG_SQE_ELEM(1,  __u8,   flags);
9881 	BUILD_BUG_SQE_ELEM(2,  __u16,  ioprio);
9882 	BUILD_BUG_SQE_ELEM(4,  __s32,  fd);
9883 	BUILD_BUG_SQE_ELEM(8,  __u64,  off);
9884 	BUILD_BUG_SQE_ELEM(8,  __u64,  addr2);
9885 	BUILD_BUG_SQE_ELEM(16, __u64,  addr);
9886 	BUILD_BUG_SQE_ELEM(16, __u64,  splice_off_in);
9887 	BUILD_BUG_SQE_ELEM(24, __u32,  len);
9888 	BUILD_BUG_SQE_ELEM(28,     __kernel_rwf_t, rw_flags);
9889 	BUILD_BUG_SQE_ELEM(28, /* compat */   int, rw_flags);
9890 	BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags);
9891 	BUILD_BUG_SQE_ELEM(28, __u32,  fsync_flags);
9892 	BUILD_BUG_SQE_ELEM(28, /* compat */ __u16,  poll_events);
9893 	BUILD_BUG_SQE_ELEM(28, __u32,  poll32_events);
9894 	BUILD_BUG_SQE_ELEM(28, __u32,  sync_range_flags);
9895 	BUILD_BUG_SQE_ELEM(28, __u32,  msg_flags);
9896 	BUILD_BUG_SQE_ELEM(28, __u32,  timeout_flags);
9897 	BUILD_BUG_SQE_ELEM(28, __u32,  accept_flags);
9898 	BUILD_BUG_SQE_ELEM(28, __u32,  cancel_flags);
9899 	BUILD_BUG_SQE_ELEM(28, __u32,  open_flags);
9900 	BUILD_BUG_SQE_ELEM(28, __u32,  statx_flags);
9901 	BUILD_BUG_SQE_ELEM(28, __u32,  fadvise_advice);
9902 	BUILD_BUG_SQE_ELEM(28, __u32,  splice_flags);
9903 	BUILD_BUG_SQE_ELEM(32, __u64,  user_data);
9904 	BUILD_BUG_SQE_ELEM(40, __u16,  buf_index);
9905 	BUILD_BUG_SQE_ELEM(42, __u16,  personality);
9906 	BUILD_BUG_SQE_ELEM(44, __s32,  splice_fd_in);
9907 
9908 	BUILD_BUG_ON(ARRAY_SIZE(io_op_defs) != IORING_OP_LAST);
9909 	BUILD_BUG_ON(__REQ_F_LAST_BIT >= 8 * sizeof(int));
9910 	req_cachep = KMEM_CACHE(io_kiocb, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
9911 	return 0;
9912 };
9913 __initcall(io_uring_init);
9914