1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/fs.h>
5 #include <linux/file.h>
6 #include <linux/blk-mq.h>
7 #include <linux/mm.h>
8 #include <linux/slab.h>
9 #include <linux/fsnotify.h>
10 #include <linux/poll.h>
11 #include <linux/nospec.h>
12 #include <linux/compat.h>
13 #include <linux/io_uring.h>
14 
15 #include <uapi/linux/io_uring.h>
16 
17 #include "io_uring.h"
18 #include "opdef.h"
19 #include "kbuf.h"
20 #include "rsrc.h"
21 #include "rw.h"
22 
23 struct io_rw {
24 	/* NOTE: kiocb has the file as the first member, so don't do it here */
25 	struct kiocb			kiocb;
26 	u64				addr;
27 	u32				len;
28 	rwf_t				flags;
29 };
30 
31 static bool io_file_supports_nowait(struct io_kiocb *req, __poll_t mask)
32 {
33 	/* If FMODE_NOWAIT is set for a file, we're golden */
34 	if (req->flags & REQ_F_SUPPORT_NOWAIT)
35 		return true;
36 	/* No FMODE_NOWAIT, if we can poll, check the status */
37 	if (io_file_can_poll(req)) {
38 		struct poll_table_struct pt = { ._key = mask };
39 
40 		return vfs_poll(req->file, &pt) & mask;
41 	}
42 	/* No FMODE_NOWAIT support, and file isn't pollable. Tough luck. */
43 	return false;
44 }
45 
46 #ifdef CONFIG_COMPAT
47 static int io_iov_compat_buffer_select_prep(struct io_rw *rw)
48 {
49 	struct compat_iovec __user *uiov;
50 	compat_ssize_t clen;
51 
52 	uiov = u64_to_user_ptr(rw->addr);
53 	if (!access_ok(uiov, sizeof(*uiov)))
54 		return -EFAULT;
55 	if (__get_user(clen, &uiov->iov_len))
56 		return -EFAULT;
57 	if (clen < 0)
58 		return -EINVAL;
59 
60 	rw->len = clen;
61 	return 0;
62 }
63 #endif
64 
65 static int io_iov_buffer_select_prep(struct io_kiocb *req)
66 {
67 	struct iovec __user *uiov;
68 	struct iovec iov;
69 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
70 
71 	if (rw->len != 1)
72 		return -EINVAL;
73 
74 #ifdef CONFIG_COMPAT
75 	if (req->ctx->compat)
76 		return io_iov_compat_buffer_select_prep(rw);
77 #endif
78 
79 	uiov = u64_to_user_ptr(rw->addr);
80 	if (copy_from_user(&iov, uiov, sizeof(*uiov)))
81 		return -EFAULT;
82 	rw->len = iov.iov_len;
83 	return 0;
84 }
85 
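/*
 * Prep for the read/write family of opcodes: pull the offset, buffer
 * address/length and rw_flags out of the SQE, resolve the registered
 * buffer for fixed read/write, and validate the single-iovec case for
 * buffer-selected READV.
 */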
86 int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe)
87 {
88 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
89 	unsigned ioprio;
90 	int ret;
91 
92 	rw->kiocb.ki_pos = READ_ONCE(sqe->off);
93 	/* used for fixed read/write too - just read unconditionally */
94 	req->buf_index = READ_ONCE(sqe->buf_index);
95 
96 	if (req->opcode == IORING_OP_READ_FIXED ||
97 	    req->opcode == IORING_OP_WRITE_FIXED) {
98 		struct io_ring_ctx *ctx = req->ctx;
99 		u16 index;
100 
101 		if (unlikely(req->buf_index >= ctx->nr_user_bufs))
102 			return -EFAULT;
103 		index = array_index_nospec(req->buf_index, ctx->nr_user_bufs);
104 		req->imu = ctx->user_bufs[index];
105 		io_req_set_rsrc_node(req, ctx, 0);
106 	}
107 
108 	ioprio = READ_ONCE(sqe->ioprio);
109 	if (ioprio) {
110 		ret = ioprio_check_cap(ioprio);
111 		if (ret)
112 			return ret;
113 
114 		rw->kiocb.ki_ioprio = ioprio;
115 	} else {
116 		rw->kiocb.ki_ioprio = get_current_ioprio();
117 	}
118 	rw->kiocb.dio_complete = NULL;
119 
120 	rw->addr = READ_ONCE(sqe->addr);
121 	rw->len = READ_ONCE(sqe->len);
122 	rw->flags = READ_ONCE(sqe->rw_flags);
123 
124 	/* Have to do this validation here, as this is in io_read() where
125 	 * rw->len might have changed due to buffer selection
126 	 */
127 	if (req->opcode == IORING_OP_READV && req->flags & REQ_F_BUFFER_SELECT) {
128 		ret = io_iov_buffer_select_prep(req);
129 		if (ret)
130 			return ret;
131 	}
132 
133 	return 0;
134 }
135 
136 void io_readv_writev_cleanup(struct io_kiocb *req)
137 {
138 	struct io_async_rw *io = req->async_data;
139 
140 	kfree(io->free_iovec);
141 }
142 
143 static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret)
144 {
145 	switch (ret) {
146 	case -EIOCBQUEUED:
147 		break;
148 	case -ERESTARTSYS:
149 	case -ERESTARTNOINTR:
150 	case -ERESTARTNOHAND:
151 	case -ERESTART_RESTARTBLOCK:
152 		/*
153 		 * We can't just restart the syscall, since previously
154 		 * submitted sqes may already be in progress. Just fail this
155 		 * IO with EINTR.
156 		 */
157 		ret = -EINTR;
158 		fallthrough;
159 	default:
160 		kiocb->ki_complete(kiocb, ret);
161 	}
162 }
163 
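/*
 * Resolve the position to read/write at: an explicit offset (!= -1) wins,
 * otherwise non-stream files use (and later update) ->f_pos. Stream files
 * have no position, so return NULL for them.
 */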
164 static inline loff_t *io_kiocb_update_pos(struct io_kiocb *req)
165 {
166 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
167 
168 	if (rw->kiocb.ki_pos != -1)
169 		return &rw->kiocb.ki_pos;
170 
171 	if (!(req->file->f_mode & FMODE_STREAM)) {
172 		req->flags |= REQ_F_CUR_POS;
173 		rw->kiocb.ki_pos = req->file->f_pos;
174 		return &rw->kiocb.ki_pos;
175 	}
176 
177 	rw->kiocb.ki_pos = 0;
178 	return NULL;
179 }
180 
181 #ifdef CONFIG_BLOCK
182 static bool io_resubmit_prep(struct io_kiocb *req)
183 {
184 	struct io_async_rw *io = req->async_data;
185 
186 	if (!req_has_async_data(req))
187 		return !io_req_prep_async(req);
188 	iov_iter_restore(&io->s.iter, &io->s.iter_state);
189 	return true;
190 }
191 
192 static bool io_rw_should_reissue(struct io_kiocb *req)
193 {
194 	umode_t mode = file_inode(req->file)->i_mode;
195 	struct io_ring_ctx *ctx = req->ctx;
196 
197 	if (!S_ISBLK(mode) && !S_ISREG(mode))
198 		return false;
199 	if ((req->flags & REQ_F_NOWAIT) || (io_wq_current_is_worker() &&
200 	    !(ctx->flags & IORING_SETUP_IOPOLL)))
201 		return false;
202 	/*
203 	 * If ref is dying, we might be running poll reap from the exit work.
204 	 * Don't attempt to reissue from that path, just let it fail with
205 	 * -EAGAIN.
206 	 */
207 	if (percpu_ref_is_dying(&ctx->refs))
208 		return false;
209 	/*
210 	 * Play it safe and assume it's not safe to re-import and reissue if
211 	 * we're not in the original thread group (or not in task context).
212 	 */
213 	if (!same_thread_group(req->task, current) || !in_task())
214 		return false;
215 	return true;
216 }
217 #else
218 static bool io_resubmit_prep(struct io_kiocb *req)
219 {
220 	return false;
221 }
222 static bool io_rw_should_reissue(struct io_kiocb *req)
223 {
224 	return false;
225 }
226 #endif
227 
228 static void io_req_end_write(struct io_kiocb *req)
229 {
230 	if (req->flags & REQ_F_ISREG) {
231 		struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
232 
233 		kiocb_end_write(&rw->kiocb);
234 	}
235 }
236 
237 /*
238  * Trigger the notifications after having done some IO, and finish the write
239  * accounting, if any.
240  */
241 static void io_req_io_end(struct io_kiocb *req)
242 {
243 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
244 
245 	if (rw->kiocb.ki_flags & IOCB_WRITE) {
246 		io_req_end_write(req);
247 		fsnotify_modify(req->file);
248 	} else {
249 		fsnotify_access(req->file);
250 	}
251 }
252 
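/*
 * Common handling for a result that doesn't match what was asked for:
 * returns true if the request was flagged for reissue, false if completion
 * should proceed with the (possibly failed) result.
 */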
253 static bool __io_complete_rw_common(struct io_kiocb *req, long res)
254 {
255 	if (unlikely(res != req->cqe.res)) {
256 		if ((res == -EAGAIN || res == -EOPNOTSUPP) &&
257 		    io_rw_should_reissue(req)) {
258 			/*
259 			 * Reissue will start accounting again, finish the
260 			 * current cycle.
261 			 */
262 			io_req_io_end(req);
263 			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
264 			return true;
265 		}
266 		req_set_fail(req);
267 		req->cqe.res = res;
268 	}
269 	return false;
270 }
271 
272 static inline int io_fixup_rw_res(struct io_kiocb *req, long res)
273 {
274 	struct io_async_rw *io = req->async_data;
275 
276 	/* add previously done IO, if any */
277 	if (req_has_async_data(req) && io->bytes_done > 0) {
278 		if (res < 0)
279 			res = io->bytes_done;
280 		else
281 			res += io->bytes_done;
282 	}
283 	return res;
284 }
285 
286 void io_req_rw_complete(struct io_kiocb *req, struct io_tw_state *ts)
287 {
288 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
289 	struct kiocb *kiocb = &rw->kiocb;
290 
291 	if ((kiocb->ki_flags & IOCB_DIO_CALLER_COMP) && kiocb->dio_complete) {
292 		long res = kiocb->dio_complete(rw->kiocb.private);
293 
294 		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
295 	}
296 
297 	io_req_io_end(req);
298 
299 	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING)) {
300 		unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
301 
302 		req->cqe.flags |= io_put_kbuf(req, issue_flags);
303 	}
304 	io_req_task_complete(req, ts);
305 }
306 
307 static void io_complete_rw(struct kiocb *kiocb, long res)
308 {
309 	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
310 	struct io_kiocb *req = cmd_to_io_kiocb(rw);
311 
312 	if (!kiocb->dio_complete || !(kiocb->ki_flags & IOCB_DIO_CALLER_COMP)) {
313 		if (__io_complete_rw_common(req, res))
314 			return;
315 		io_req_set_res(req, io_fixup_rw_res(req, res), 0);
316 	}
317 	req->io_task_work.func = io_req_rw_complete;
318 	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
319 }
320 
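/*
 * Completion for IOPOLL requests: no task_work here, just record the
 * result (or flag a reissue) and mark the request completed so that
 * io_do_iopoll() can reap it.
 */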
321 static void io_complete_rw_iopoll(struct kiocb *kiocb, long res)
322 {
323 	struct io_rw *rw = container_of(kiocb, struct io_rw, kiocb);
324 	struct io_kiocb *req = cmd_to_io_kiocb(rw);
325 
326 	if (kiocb->ki_flags & IOCB_WRITE)
327 		io_req_end_write(req);
328 	if (unlikely(res != req->cqe.res)) {
329 		if (res == -EAGAIN && io_rw_should_reissue(req)) {
330 			req->flags |= REQ_F_REISSUE | REQ_F_PARTIAL_IO;
331 			return;
332 		}
333 		req->cqe.res = res;
334 	}
335 
336 	/* order with io_iopoll_complete() checking ->iopoll_completed */
337 	smp_store_release(&req->iopoll_completed, 1);
338 }
339 
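/*
 * Finish a read/write that was issued inline from the submission path:
 * update the file position if needed, then either complete the request
 * directly, hand completion off, or arrange a reissue.
 */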
340 static int kiocb_done(struct io_kiocb *req, ssize_t ret,
341 		       unsigned int issue_flags)
342 {
343 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
344 	unsigned final_ret = io_fixup_rw_res(req, ret);
345 
346 	if (ret >= 0 && req->flags & REQ_F_CUR_POS)
347 		req->file->f_pos = rw->kiocb.ki_pos;
348 	if (ret >= 0 && (rw->kiocb.ki_complete == io_complete_rw)) {
349 		if (!__io_complete_rw_common(req, ret)) {
350 			/*
351 			 * Safe to call io_req_io_end() from here as we're inline
352 			 * from the submission path.
353 			 */
354 			io_req_io_end(req);
355 			io_req_set_res(req, final_ret,
356 				       io_put_kbuf(req, issue_flags));
357 			return IOU_OK;
358 		}
359 	} else {
360 		io_rw_done(&rw->kiocb, ret);
361 	}
362 
363 	if (req->flags & REQ_F_REISSUE) {
364 		req->flags &= ~REQ_F_REISSUE;
365 		if (io_resubmit_prep(req))
366 			return -EAGAIN;
367 		else
368 			io_req_task_queue_fail(req, final_ret);
369 	}
370 	return IOU_ISSUE_SKIP_COMPLETE;
371 }
372 
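/*
 * Set up the iov_iter for this request: fixed opcodes map the registered
 * buffer, READ/WRITE and provided buffers import a single user buffer,
 * and READV/WRITEV import a user iovec array. Returns the allocated iovec
 * (for later freeing), NULL if none was needed, or an ERR_PTR on failure.
 */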
373 static struct iovec *__io_import_iovec(int ddir, struct io_kiocb *req,
374 				       struct io_rw_state *s,
375 				       unsigned int issue_flags)
376 {
377 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
378 	struct iov_iter *iter = &s->iter;
379 	u8 opcode = req->opcode;
380 	struct iovec *iovec;
381 	void __user *buf;
382 	size_t sqe_len;
383 	ssize_t ret;
384 
385 	if (opcode == IORING_OP_READ_FIXED || opcode == IORING_OP_WRITE_FIXED) {
386 		ret = io_import_fixed(ddir, iter, req->imu, rw->addr, rw->len);
387 		if (ret)
388 			return ERR_PTR(ret);
389 		return NULL;
390 	}
391 
392 	buf = u64_to_user_ptr(rw->addr);
393 	sqe_len = rw->len;
394 
395 	if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE ||
396 	    (req->flags & REQ_F_BUFFER_SELECT)) {
397 		if (io_do_buffer_select(req)) {
398 			buf = io_buffer_select(req, &sqe_len, issue_flags);
399 			if (!buf)
400 				return ERR_PTR(-ENOBUFS);
401 			rw->addr = (unsigned long) buf;
402 			rw->len = sqe_len;
403 		}
404 
405 		ret = import_ubuf(ddir, buf, sqe_len, iter);
406 		if (ret)
407 			return ERR_PTR(ret);
408 		return NULL;
409 	}
410 
411 	iovec = s->fast_iov;
412 	ret = __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &iovec, iter,
413 			      req->ctx->compat);
414 	if (unlikely(ret < 0))
415 		return ERR_PTR(ret);
416 	return iovec;
417 }
418 
419 static inline int io_import_iovec(int rw, struct io_kiocb *req,
420 				  struct iovec **iovec, struct io_rw_state *s,
421 				  unsigned int issue_flags)
422 {
423 	*iovec = __io_import_iovec(rw, req, s, issue_flags);
424 	if (IS_ERR(*iovec))
425 		return PTR_ERR(*iovec);
426 
427 	iov_iter_save_state(&s->iter, &s->iter_state);
428 	return 0;
429 }
430 
431 static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb)
432 {
433 	return (kiocb->ki_filp->f_mode & FMODE_STREAM) ? NULL : &kiocb->ki_pos;
434 }
435 
436 /*
437  * For files that don't have ->read_iter() and ->write_iter(), handle them
438  * by looping over ->read() or ->write() manually.
439  */
440 static ssize_t loop_rw_iter(int ddir, struct io_rw *rw, struct iov_iter *iter)
441 {
442 	struct kiocb *kiocb = &rw->kiocb;
443 	struct file *file = kiocb->ki_filp;
444 	ssize_t ret = 0;
445 	loff_t *ppos;
446 
447 	/*
448 	 * Don't support polled IO through this interface, and we can't
449 	 * support non-blocking either. For the latter, this just causes
450 	 * the kiocb to be handled from an async context.
451 	 */
452 	if (kiocb->ki_flags & IOCB_HIPRI)
453 		return -EOPNOTSUPP;
454 	if ((kiocb->ki_flags & IOCB_NOWAIT) &&
455 	    !(kiocb->ki_filp->f_flags & O_NONBLOCK))
456 		return -EAGAIN;
457 
458 	ppos = io_kiocb_ppos(kiocb);
459 
460 	while (iov_iter_count(iter)) {
461 		void __user *addr;
462 		size_t len;
463 		ssize_t nr;
464 
465 		if (iter_is_ubuf(iter)) {
466 			addr = iter->ubuf + iter->iov_offset;
467 			len = iov_iter_count(iter);
468 		} else if (!iov_iter_is_bvec(iter)) {
469 			addr = iter_iov_addr(iter);
470 			len = iter_iov_len(iter);
471 		} else {
472 			addr = u64_to_user_ptr(rw->addr);
473 			len = rw->len;
474 		}
475 
476 		if (ddir == READ)
477 			nr = file->f_op->read(file, addr, len, ppos);
478 		else
479 			nr = file->f_op->write(file, addr, len, ppos);
480 
481 		if (nr < 0) {
482 			if (!ret)
483 				ret = nr;
484 			break;
485 		}
486 		ret += nr;
487 		if (!iov_iter_is_bvec(iter)) {
488 			iov_iter_advance(iter, nr);
489 		} else {
490 			rw->addr += nr;
491 			rw->len -= nr;
492 			if (!rw->len)
493 				break;
494 		}
495 		if (nr != len)
496 			break;
497 	}
498 
499 	return ret;
500 }
501 
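/*
 * Copy the current iterator state (and, for iovec-based IO, the iovec
 * itself) into the request's async data so the IO can be retried from
 * another context later.
 */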
502 static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec,
503 			  const struct iovec *fast_iov, struct iov_iter *iter)
504 {
505 	struct io_async_rw *io = req->async_data;
506 
507 	memcpy(&io->s.iter, iter, sizeof(*iter));
508 	io->free_iovec = iovec;
509 	io->bytes_done = 0;
510 	/* can only be fixed buffers, no need to do anything */
511 	if (iov_iter_is_bvec(iter) || iter_is_ubuf(iter))
512 		return;
513 	if (!iovec) {
514 		unsigned iov_off = 0;
515 
516 		io->s.iter.__iov = io->s.fast_iov;
517 		if (iter->__iov != fast_iov) {
518 			iov_off = iter_iov(iter) - fast_iov;
519 			io->s.iter.__iov += iov_off;
520 		}
521 		if (io->s.fast_iov != fast_iov)
522 			memcpy(io->s.fast_iov + iov_off, fast_iov + iov_off,
523 			       sizeof(struct iovec) * iter->nr_segs);
524 	} else {
525 		req->flags |= REQ_F_NEED_CLEANUP;
526 	}
527 }
528 
529 static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec,
530 			     struct io_rw_state *s, bool force)
531 {
532 	if (!force && !io_cold_defs[req->opcode].prep_async)
533 		return 0;
534 	if (!req_has_async_data(req)) {
535 		struct io_async_rw *iorw;
536 
537 		if (io_alloc_async_data(req)) {
538 			kfree(iovec);
539 			return -ENOMEM;
540 		}
541 
542 		io_req_map_rw(req, iovec, s->fast_iov, &s->iter);
543 		iorw = req->async_data;
544 		/* we've copied and mapped the iter, ensure state is saved */
545 		iov_iter_save_state(&iorw->s.iter, &iorw->s.iter_state);
546 	}
547 	return 0;
548 }
549 
550 static inline int io_rw_prep_async(struct io_kiocb *req, int rw)
551 {
552 	struct io_async_rw *iorw = req->async_data;
553 	struct iovec *iov;
554 	int ret;
555 
556 	iorw->bytes_done = 0;
557 	iorw->free_iovec = NULL;
558 
559 	/* submission path, ->uring_lock should already be taken */
560 	ret = io_import_iovec(rw, req, &iov, &iorw->s, 0);
561 	if (unlikely(ret < 0))
562 		return ret;
563 
564 	if (iov) {
565 		iorw->free_iovec = iov;
566 		req->flags |= REQ_F_NEED_CLEANUP;
567 	}
568 
569 	return 0;
570 }
571 
572 int io_readv_prep_async(struct io_kiocb *req)
573 {
574 	return io_rw_prep_async(req, ITER_DEST);
575 }
576 
577 int io_writev_prep_async(struct io_kiocb *req)
578 {
579 	return io_rw_prep_async(req, ITER_SOURCE);
580 }
581 
582 /*
583  * This is our waitqueue callback handler, registered through __folio_lock_async()
584  * when we initially tried to do the IO, having armed our waitqueue via the iocb.
585  * This gets called when the page is unlocked, and we generally expect that to
586  * happen when the page IO is completed and the page is now uptodate. This will
587  * queue a task_work based retry of the operation, attempting to copy the data
588  * again. If the latter fails because the page was NOT uptodate, then we will
589  * do a thread based blocking retry of the operation. That's the unexpected
590  * slow path.
591  */
592 static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
593 			     int sync, void *arg)
594 {
595 	struct wait_page_queue *wpq;
596 	struct io_kiocb *req = wait->private;
597 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
598 	struct wait_page_key *key = arg;
599 
600 	wpq = container_of(wait, struct wait_page_queue, wait);
601 
602 	if (!wake_page_match(wpq, key))
603 		return 0;
604 
605 	rw->kiocb.ki_flags &= ~IOCB_WAITQ;
606 	list_del_init(&wait->entry);
607 	io_req_task_queue(req);
608 	return 1;
609 }
610 
611 /*
612  * This controls whether a given IO request should be armed for async page
613  * based retry. If we return false here, the request is handed to the async
614  * worker threads for retry. If we're doing buffered reads on a regular file,
615  * we prepare a private wait_page_queue entry and retry the operation. This
616  * will either succeed because the page is now uptodate and unlocked, or it
617  * will register a callback when the page is unlocked at IO completion. Through
618  * that callback, io_uring uses task_work to setup a retry of the operation.
619  * That retry will attempt the buffered read again. The retry will generally
620  * succeed, or in rare cases where it fails, we then fall back to using the
621  * async worker threads for a blocking retry.
622  */
623 static bool io_rw_should_retry(struct io_kiocb *req)
624 {
625 	struct io_async_rw *io = req->async_data;
626 	struct wait_page_queue *wait = &io->wpq;
627 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
628 	struct kiocb *kiocb = &rw->kiocb;
629 
630 	/* never retry for NOWAIT, we just complete with -EAGAIN */
631 	if (req->flags & REQ_F_NOWAIT)
632 		return false;
633 
634 	/* Only for buffered IO */
635 	if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI))
636 		return false;
637 
638 	/*
639 	 * just use poll if we can, and don't attempt if the fs doesn't
640 	 * support callback based unlocks
641 	 */
642 	if (io_file_can_poll(req) || !(req->file->f_mode & FMODE_BUF_RASYNC))
643 		return false;
644 
645 	wait->wait.func = io_async_buf_func;
646 	wait->wait.private = req;
647 	wait->wait.flags = 0;
648 	INIT_LIST_HEAD(&wait->wait.entry);
649 	kiocb->ki_flags |= IOCB_WAITQ;
650 	kiocb->ki_flags &= ~IOCB_NOWAIT;
651 	kiocb->ki_waitq = wait;
652 	return true;
653 }
654 
655 static inline int io_iter_do_read(struct io_rw *rw, struct iov_iter *iter)
656 {
657 	struct file *file = rw->kiocb.ki_filp;
658 
659 	if (likely(file->f_op->read_iter))
660 		return call_read_iter(file, &rw->kiocb, iter);
661 	else if (file->f_op->read)
662 		return loop_rw_iter(READ, rw, iter);
663 	else
664 		return -EINVAL;
665 }
666 
667 static bool need_complete_io(struct io_kiocb *req)
668 {
669 	return req->flags & REQ_F_ISREG ||
670 		S_ISBLK(file_inode(req->file)->i_mode);
671 }
672 
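/*
 * Per-issue kiocb setup: verify the file mode, apply the rw_flags from the
 * SQE, mark requests that must not block, and select the completion
 * handler (IOPOLL vs task_work based).
 */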
673 static int io_rw_init_file(struct io_kiocb *req, fmode_t mode)
674 {
675 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
676 	struct kiocb *kiocb = &rw->kiocb;
677 	struct io_ring_ctx *ctx = req->ctx;
678 	struct file *file = req->file;
679 	int ret;
680 
681 	if (unlikely(!file || !(file->f_mode & mode)))
682 		return -EBADF;
683 
684 	if (!(req->flags & REQ_F_FIXED_FILE))
685 		req->flags |= io_file_get_flags(file);
686 
687 	kiocb->ki_flags = file->f_iocb_flags;
688 	ret = kiocb_set_rw_flags(kiocb, rw->flags);
689 	if (unlikely(ret))
690 		return ret;
691 	kiocb->ki_flags |= IOCB_ALLOC_CACHE;
692 
693 	/*
694 	 * If the file is marked O_NONBLOCK, still allow retry for it if it
695 	 * supports async. Otherwise it's impossible to use O_NONBLOCK files
696 	 * reliably. If not, or if IOCB_NOWAIT is set, don't retry.
697 	 */
698 	if (kiocb->ki_flags & IOCB_NOWAIT ||
699 	    ((file->f_flags & O_NONBLOCK && !(req->flags & REQ_F_SUPPORT_NOWAIT))))
700 		req->flags |= REQ_F_NOWAIT;
701 
702 	if (ctx->flags & IORING_SETUP_IOPOLL) {
703 		if (!(kiocb->ki_flags & IOCB_DIRECT) || !file->f_op->iopoll)
704 			return -EOPNOTSUPP;
705 
706 		kiocb->private = NULL;
707 		kiocb->ki_flags |= IOCB_HIPRI;
708 		kiocb->ki_complete = io_complete_rw_iopoll;
709 		req->iopoll_completed = 0;
710 	} else {
711 		if (kiocb->ki_flags & IOCB_HIPRI)
712 			return -EINVAL;
713 		kiocb->ki_complete = io_complete_rw;
714 	}
715 
716 	return 0;
717 }
718 
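/*
 * Core of the read path: import (or re-import) the buffer, attempt the
 * read, and deal with -EAGAIN punting, partial reads and IOCB_WAITQ based
 * retries for buffered IO.
 */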
719 static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
720 {
721 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
722 	struct io_rw_state __s, *s = &__s;
723 	struct iovec *iovec;
724 	struct kiocb *kiocb = &rw->kiocb;
725 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
726 	struct io_async_rw *io;
727 	ssize_t ret, ret2;
728 	loff_t *ppos;
729 
730 	if (!req_has_async_data(req)) {
731 		ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
732 		if (unlikely(ret < 0))
733 			return ret;
734 	} else {
735 		io = req->async_data;
736 		s = &io->s;
737 
738 		/*
739 		 * Safe and required to re-import if we're using provided
740 		 * buffers, as we dropped the selected one before retry.
741 		 */
742 		if (io_do_buffer_select(req)) {
743 			ret = io_import_iovec(ITER_DEST, req, &iovec, s, issue_flags);
744 			if (unlikely(ret < 0))
745 				return ret;
746 		}
747 
748 		/*
749 		 * We come here from an earlier attempt, restore our state to
750 		 * match in case it doesn't. It's cheap enough that we don't
751 		 * need to make this conditional.
752 		 */
753 		iov_iter_restore(&s->iter, &s->iter_state);
754 		iovec = NULL;
755 	}
756 	ret = io_rw_init_file(req, FMODE_READ);
757 	if (unlikely(ret)) {
758 		kfree(iovec);
759 		return ret;
760 	}
761 	req->cqe.res = iov_iter_count(&s->iter);
762 
763 	if (force_nonblock) {
764 		/* If the file doesn't support async, just async punt */
765 		if (unlikely(!io_file_supports_nowait(req, EPOLLIN))) {
766 			ret = io_setup_async_rw(req, iovec, s, true);
767 			return ret ?: -EAGAIN;
768 		}
769 		kiocb->ki_flags |= IOCB_NOWAIT;
770 	} else {
771 		/* Ensure we clear previously set non-block flag */
772 		kiocb->ki_flags &= ~IOCB_NOWAIT;
773 	}
774 
775 	ppos = io_kiocb_update_pos(req);
776 
777 	ret = rw_verify_area(READ, req->file, ppos, req->cqe.res);
778 	if (unlikely(ret)) {
779 		kfree(iovec);
780 		return ret;
781 	}
782 
783 	ret = io_iter_do_read(rw, &s->iter);
784 
785 	/*
786 	 * Some file systems like to return -EOPNOTSUPP for an IOCB_NOWAIT
787 	 * issue, even though they should be returning -EAGAIN. To be safe,
788 	 * retry from blocking context for either.
789 	 */
790 	if (ret == -EOPNOTSUPP && force_nonblock)
791 		ret = -EAGAIN;
792 
793 	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
794 		req->flags &= ~REQ_F_REISSUE;
795 		/* if we can poll, just do that */
796 		if (req->opcode == IORING_OP_READ && io_file_can_poll(req))
797 			return -EAGAIN;
798 		/* IOPOLL retry should happen for io-wq threads */
799 		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
800 			goto done;
801 		/* no retry on NONBLOCK nor RWF_NOWAIT */
802 		if (req->flags & REQ_F_NOWAIT)
803 			goto done;
804 		ret = 0;
805 	} else if (ret == -EIOCBQUEUED) {
806 		req->flags |= REQ_F_PARTIAL_IO;
807 		io_kbuf_recycle(req, issue_flags);
808 		if (iovec)
809 			kfree(iovec);
810 		return IOU_ISSUE_SKIP_COMPLETE;
811 	} else if (ret == req->cqe.res || ret <= 0 || !force_nonblock ||
812 		   (req->flags & REQ_F_NOWAIT) || !need_complete_io(req)) {
813 		/* read all, failed, already did sync or don't want to retry */
814 		goto done;
815 	}
816 
817 	/*
818 	 * Don't depend on the iter state matching what was consumed, or being
819 	 * untouched in case of error. Restore it and we'll advance it
820 	 * manually if we need to.
821 	 */
822 	iov_iter_restore(&s->iter, &s->iter_state);
823 
824 	ret2 = io_setup_async_rw(req, iovec, s, true);
825 	iovec = NULL;
826 	if (ret2) {
827 		ret = ret > 0 ? ret : ret2;
828 		goto done;
829 	}
830 
831 	req->flags |= REQ_F_PARTIAL_IO;
832 	io_kbuf_recycle(req, issue_flags);
833 
834 	io = req->async_data;
835 	s = &io->s;
836 	/*
837 	 * Now use our persistent iterator and state, if we aren't already.
838 	 * We've restored and mapped the iter to match.
839 	 */
840 
841 	do {
842 		/*
843 		 * We end up here because of a partial read, either from
844 		 * above or inside this loop. Advance the iter by the bytes
845 		 * that were consumed.
846 		 */
847 		iov_iter_advance(&s->iter, ret);
848 		if (!iov_iter_count(&s->iter))
849 			break;
850 		io->bytes_done += ret;
851 		iov_iter_save_state(&s->iter, &s->iter_state);
852 
853 		/* if we can retry, do so with the callbacks armed */
854 		if (!io_rw_should_retry(req)) {
855 			kiocb->ki_flags &= ~IOCB_WAITQ;
856 			return -EAGAIN;
857 		}
858 
859 		req->cqe.res = iov_iter_count(&s->iter);
860 		/*
861 		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
862 		 * we get -EIOCBQUEUED, then we'll get a notification when the
863 		 * desired page gets unlocked. We can also get a partial read
864 		 * here, and if we do, then just retry at the new offset.
865 		 */
866 		ret = io_iter_do_read(rw, &s->iter);
867 		if (ret == -EIOCBQUEUED)
868 			return IOU_ISSUE_SKIP_COMPLETE;
869 		/* we got some bytes, but not all. retry. */
870 		kiocb->ki_flags &= ~IOCB_WAITQ;
871 		iov_iter_restore(&s->iter, &s->iter_state);
872 	} while (ret > 0);
873 done:
874 	/* it's faster to check here than to delegate to kfree */
875 	if (iovec)
876 		kfree(iovec);
877 	return ret;
878 }
879 
880 int io_read(struct io_kiocb *req, unsigned int issue_flags)
881 {
882 	int ret;
883 
884 	ret = __io_read(req, issue_flags);
885 	if (ret >= 0)
886 		return kiocb_done(req, ret, issue_flags);
887 
888 	return ret;
889 }
890 
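/*
 * Take superblock freeze protection for regular file writes. For
 * IOCB_NOWAIT only a trylock is attempted, so the caller can punt with
 * -EAGAIN rather than block on a frozen filesystem.
 */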
891 static bool io_kiocb_start_write(struct io_kiocb *req, struct kiocb *kiocb)
892 {
893 	struct inode *inode;
894 	bool ret;
895 
896 	if (!(req->flags & REQ_F_ISREG))
897 		return true;
898 	if (!(kiocb->ki_flags & IOCB_NOWAIT)) {
899 		kiocb_start_write(kiocb);
900 		return true;
901 	}
902 
903 	inode = file_inode(kiocb->ki_filp);
904 	ret = sb_start_write_trylock(inode->i_sb);
905 	if (ret)
906 		__sb_writers_release(inode->i_sb, SB_FREEZE_WRITE);
907 	return ret;
908 }
909 
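/*
 * Write issue path: import the buffer, take write/freeze accounting,
 * attempt the write, then complete, punt to async context, or set up a
 * retry for a partial write.
 */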
910 int io_write(struct io_kiocb *req, unsigned int issue_flags)
911 {
912 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
913 	struct io_rw_state __s, *s = &__s;
914 	struct iovec *iovec;
915 	struct kiocb *kiocb = &rw->kiocb;
916 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
917 	ssize_t ret, ret2;
918 	loff_t *ppos;
919 
920 	if (!req_has_async_data(req)) {
921 		ret = io_import_iovec(ITER_SOURCE, req, &iovec, s, issue_flags);
922 		if (unlikely(ret < 0))
923 			return ret;
924 	} else {
925 		struct io_async_rw *io = req->async_data;
926 
927 		s = &io->s;
928 		iov_iter_restore(&s->iter, &s->iter_state);
929 		iovec = NULL;
930 	}
931 	ret = io_rw_init_file(req, FMODE_WRITE);
932 	if (unlikely(ret)) {
933 		kfree(iovec);
934 		return ret;
935 	}
936 	req->cqe.res = iov_iter_count(&s->iter);
937 
938 	if (force_nonblock) {
939 		/* If the file doesn't support async, just async punt */
940 		if (unlikely(!io_file_supports_nowait(req, EPOLLOUT)))
941 			goto copy_iov;
942 
943 		/* Buffered (non-direct) IO only supports NOWAIT for block devices or files with FMODE_BUF_WASYNC set. */
944 		if (!(kiocb->ki_flags & IOCB_DIRECT) &&
945 			!(kiocb->ki_filp->f_mode & FMODE_BUF_WASYNC) &&
946 			(req->flags & REQ_F_ISREG))
947 			goto copy_iov;
948 
949 		kiocb->ki_flags |= IOCB_NOWAIT;
950 	} else {
951 		/* Ensure we clear previously set non-block flag */
952 		kiocb->ki_flags &= ~IOCB_NOWAIT;
953 	}
954 
955 	ppos = io_kiocb_update_pos(req);
956 
957 	ret = rw_verify_area(WRITE, req->file, ppos, req->cqe.res);
958 	if (unlikely(ret)) {
959 		kfree(iovec);
960 		return ret;
961 	}
962 
963 	if (unlikely(!io_kiocb_start_write(req, kiocb)))
964 		return -EAGAIN;
965 	kiocb->ki_flags |= IOCB_WRITE;
966 
967 	if (likely(req->file->f_op->write_iter))
968 		ret2 = call_write_iter(req->file, kiocb, &s->iter);
969 	else if (req->file->f_op->write)
970 		ret2 = loop_rw_iter(WRITE, rw, &s->iter);
971 	else
972 		ret2 = -EINVAL;
973 
974 	if (ret2 == -EIOCBQUEUED) {
975 		req->flags |= REQ_F_PARTIAL_IO;
976 		io_kbuf_recycle(req, issue_flags);
977 	}
978 
979 	if (req->flags & REQ_F_REISSUE) {
980 		req->flags &= ~REQ_F_REISSUE;
981 		ret2 = -EAGAIN;
982 	}
983 
984 	/*
985 	 * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
986 	 * retry them without IOCB_NOWAIT.
987 	 */
988 	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
989 		ret2 = -EAGAIN;
990 	/* no retry on NONBLOCK nor RWF_NOWAIT */
991 	if (ret2 == -EAGAIN && (req->flags & REQ_F_NOWAIT))
992 		goto done;
993 	if (!force_nonblock || ret2 != -EAGAIN) {
994 		/* IOPOLL retry should happen for io-wq threads */
995 		if (ret2 == -EAGAIN && (req->ctx->flags & IORING_SETUP_IOPOLL))
996 			goto copy_iov;
997 
998 		if (ret2 != req->cqe.res && ret2 >= 0 && need_complete_io(req)) {
999 			struct io_async_rw *io;
1000 
1001 			trace_io_uring_short_write(req->ctx, kiocb->ki_pos - ret2,
1002 						req->cqe.res, ret2);
1003 
1004 			/* This is a partial write. The file pos has already been
1005 			 * updated, set up the async struct to complete the request
1006 			 * in the worker. Also update bytes_done to account for
1007 			 * the bytes already written.
1008 			 */
1009 			iov_iter_save_state(&s->iter, &s->iter_state);
1010 			ret = io_setup_async_rw(req, iovec, s, true);
1011 
1012 			io = req->async_data;
1013 			if (io)
1014 				io->bytes_done += ret2;
1015 
1016 			if (kiocb->ki_flags & IOCB_WRITE)
1017 				io_req_end_write(req);
1018 			return ret ? ret : -EAGAIN;
1019 		}
1020 done:
1021 		ret = kiocb_done(req, ret2, issue_flags);
1022 	} else {
1023 copy_iov:
1024 		iov_iter_restore(&s->iter, &s->iter_state);
1025 		ret = io_setup_async_rw(req, iovec, s, false);
1026 		if (!ret) {
1027 			if (kiocb->ki_flags & IOCB_WRITE)
1028 				io_req_end_write(req);
1029 			return -EAGAIN;
1030 		}
1031 		return ret;
1032 	}
1033 	/* it's reportedly faster than delegating the null check to kfree() */
1034 	if (iovec)
1035 		kfree(iovec);
1036 	return ret;
1037 }
1038 
1039 void io_rw_fail(struct io_kiocb *req)
1040 {
1041 	int res;
1042 
1043 	res = io_fixup_rw_res(req, req->cqe.res);
1044 	io_req_set_res(req, res, req->cqe.flags);
1045 }
1046 
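/*
 * Reap completions for IOPOLL requests: poll each inflight request on
 * ->iopoll_list, then post CQEs for everything that has completed.
 * Returns the number of completed requests found.
 */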
1047 int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
1048 {
1049 	struct io_wq_work_node *pos, *start, *prev;
1050 	unsigned int poll_flags = 0;
1051 	DEFINE_IO_COMP_BATCH(iob);
1052 	int nr_events = 0;
1053 
1054 	/*
1055 	 * Only spin for completions if we don't have multiple devices hanging
1056 	 * off our complete list.
1057 	 */
1058 	if (ctx->poll_multi_queue || force_nonspin)
1059 		poll_flags |= BLK_POLL_ONESHOT;
1060 
1061 	wq_list_for_each(pos, start, &ctx->iopoll_list) {
1062 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1063 		struct file *file = req->file;
1064 		int ret;
1065 
1066 		/*
1067 		 * Move completed and retryable entries to our local lists.
1068 		 * If we find a request that requires polling, break out
1069 		 * and complete those lists first, if we have entries there.
1070 		 */
1071 		if (READ_ONCE(req->iopoll_completed))
1072 			break;
1073 
1074 		if (req->opcode == IORING_OP_URING_CMD) {
1075 			struct io_uring_cmd *ioucmd;
1076 
1077 			ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
1078 			ret = file->f_op->uring_cmd_iopoll(ioucmd, &iob,
1079 								poll_flags);
1080 		} else {
1081 			struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
1082 
1083 			ret = file->f_op->iopoll(&rw->kiocb, &iob, poll_flags);
1084 		}
1085 		if (unlikely(ret < 0))
1086 			return ret;
1087 		else if (ret)
1088 			poll_flags |= BLK_POLL_ONESHOT;
1089 
1090 		/* iopoll may have completed current req */
1091 		if (!rq_list_empty(iob.req_list) ||
1092 		    READ_ONCE(req->iopoll_completed))
1093 			break;
1094 	}
1095 
1096 	if (!rq_list_empty(iob.req_list))
1097 		iob.complete(&iob);
1098 	else if (!pos)
1099 		return 0;
1100 
1101 	prev = start;
1102 	wq_list_for_each_resume(pos, prev) {
1103 		struct io_kiocb *req = container_of(pos, struct io_kiocb, comp_list);
1104 
1105 		/* order with io_complete_rw_iopoll(), e.g. ->result updates */
1106 		if (!smp_load_acquire(&req->iopoll_completed))
1107 			break;
1108 		nr_events++;
1109 		req->cqe.flags = io_put_kbuf(req, 0);
1110 	}
1111 	if (unlikely(!nr_events))
1112 		return 0;
1113 
1114 	pos = start ? start->next : ctx->iopoll_list.first;
1115 	wq_list_cut(&ctx->iopoll_list, prev, start);
1116 
1117 	if (WARN_ON_ONCE(!wq_list_empty(&ctx->submit_state.compl_reqs)))
1118 		return 0;
1119 	ctx->submit_state.compl_reqs.first = pos;
1120 	__io_submit_flush_completions(ctx);
1121 	return nr_events;
1122 }
1123