1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/file.h>
5 #include <linux/slab.h>
6 #include <linux/net.h>
7 #include <linux/compat.h>
8 #include <net/compat.h>
9 #include <linux/io_uring.h>
10 
11 #include <uapi/linux/io_uring.h>
12 
13 #include "io_uring.h"
14 #include "kbuf.h"
15 #include "alloc_cache.h"
16 #include "net.h"
17 #include "notif.h"
18 #include "rsrc.h"
19 
20 #if defined(CONFIG_NET)
21 struct io_shutdown {
22 	struct file			*file;
23 	int				how;
24 };
25 
26 struct io_accept {
27 	struct file			*file;
28 	struct sockaddr __user		*addr;
29 	int __user			*addr_len;
30 	int				flags;
31 	int				iou_flags;
32 	u32				file_slot;
33 	unsigned long			nofile;
34 };
35 
36 struct io_socket {
37 	struct file			*file;
38 	int				domain;
39 	int				type;
40 	int				protocol;
41 	int				flags;
42 	u32				file_slot;
43 	unsigned long			nofile;
44 };
45 
46 struct io_connect {
47 	struct file			*file;
48 	struct sockaddr __user		*addr;
49 	int				addr_len;
50 	bool				in_progress;
51 	bool				seen_econnaborted;
52 };
53 
54 struct io_bind {
55 	struct file			*file;
56 	int				addr_len;
57 };
58 
59 struct io_listen {
60 	struct file			*file;
61 	int				backlog;
62 };
63 
64 struct io_sr_msg {
65 	struct file			*file;
66 	union {
67 		struct compat_msghdr __user	*umsg_compat;
68 		struct user_msghdr __user	*umsg;
69 		void __user			*buf;
70 	};
71 	int				len;
72 	unsigned			done_io;
73 	unsigned			msg_flags;
74 	unsigned			nr_multishot_loops;
75 	u16				flags;
76 	/* initialised and used only by !msg send variants */
77 	u16				addr_len;
78 	u16				buf_group;
79 	unsigned short			retry_flags;
80 	void __user			*addr;
81 	void __user			*msg_control;
82 	/* used only for send zerocopy */
83 	struct io_kiocb 		*notif;
84 };
85 
86 enum sr_retry_flags {
87 	IO_SR_MSG_RETRY		= 1,
88 	IO_SR_MSG_PARTIAL_MAP	= 2,
89 };
90 
91 /*
92  * Number of times we'll try and do receives if there's more data. If we
93  * exceed this limit, then add us to the back of the queue and retry from
94  * there. This helps fairness between flooding clients.
95  */
96 #define MULTISHOT_MAX_RETRY	32
97 
98 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
99 {
100 	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
101 
102 	if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
103 		     sqe->buf_index || sqe->splice_fd_in))
104 		return -EINVAL;
105 
106 	shutdown->how = READ_ONCE(sqe->len);
107 	req->flags |= REQ_F_FORCE_ASYNC;
108 	return 0;
109 }
110 
111 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
112 {
113 	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
114 	struct socket *sock;
115 	int ret;
116 
117 	WARN_ON_ONCE(issue_flags & IO_URING_F_NONBLOCK);
118 
119 	sock = sock_from_file(req->file);
120 	if (unlikely(!sock))
121 		return -ENOTSOCK;
122 
123 	ret = __sys_shutdown_sock(sock, shutdown->how);
124 	io_req_set_res(req, ret, 0);
125 	return IOU_OK;
126 }
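
/*
 * Usage sketch (assumes liburing; illustrative only, not part of this file):
 * an IORING_OP_SHUTDOWN request serviced by io_shutdown() above could be
 * queued from userspace as:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_shutdown(sqe, sockfd, SHUT_WR);
 *	io_uring_submit(&ring);
 *
 * The completion's cqe->res carries the __sys_shutdown_sock() return value.
 */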
127 
128 static bool io_net_retry(struct socket *sock, int flags)
129 {
130 	if (!(flags & MSG_WAITALL))
131 		return false;
132 	return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
133 }
134 
135 static void io_netmsg_iovec_free(struct io_async_msghdr *kmsg)
136 {
137 	if (kmsg->free_iov) {
138 		kfree(kmsg->free_iov);
139 		kmsg->free_iov_nr = 0;
140 		kmsg->free_iov = NULL;
141 	}
142 }
143 
144 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
145 {
146 	struct io_async_msghdr *hdr = req->async_data;
147 	struct iovec *iov;
148 
149 	/* can't recycle, ensure we free the iovec if we have one */
150 	if (unlikely(issue_flags & IO_URING_F_UNLOCKED)) {
151 		io_netmsg_iovec_free(hdr);
152 		return;
153 	}
154 
155 	/* Let normal cleanup path reap it if we fail adding to the cache */
156 	iov = hdr->free_iov;
157 	if (io_alloc_cache_put(&req->ctx->netmsg_cache, hdr)) {
158 		if (iov)
159 			kasan_mempool_poison_object(iov);
160 		req->async_data = NULL;
161 		req->flags &= ~(REQ_F_ASYNC_DATA|REQ_F_NEED_CLEANUP);
162 	}
163 }
164 
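/*
 * Grab an io_async_msghdr for this request, preferably from the per-ring
 * netmsg_cache; fall back to a plain allocation. A cached entry may still
 * hold a previously used iovec, which is unpoisoned for reuse and marks the
 * request as needing cleanup.
 */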
165 static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req)
166 {
167 	struct io_ring_ctx *ctx = req->ctx;
168 	struct io_async_msghdr *hdr;
169 
170 	hdr = io_alloc_cache_get(&ctx->netmsg_cache);
171 	if (hdr) {
172 		if (hdr->free_iov) {
173 			kasan_mempool_unpoison_object(hdr->free_iov,
174 				hdr->free_iov_nr * sizeof(struct iovec));
175 			req->flags |= REQ_F_NEED_CLEANUP;
176 		}
177 		req->flags |= REQ_F_ASYNC_DATA;
178 		req->async_data = hdr;
179 		return hdr;
180 	}
181 
182 	if (!io_alloc_async_data(req)) {
183 		hdr = req->async_data;
184 		hdr->free_iov_nr = 0;
185 		hdr->free_iov = NULL;
186 		return hdr;
187 	}
188 	return NULL;
189 }
190 
191 /* assign new iovec to kmsg, if we need to */
192 static int io_net_vec_assign(struct io_kiocb *req, struct io_async_msghdr *kmsg,
193 			     struct iovec *iov)
194 {
195 	if (iov) {
196 		req->flags |= REQ_F_NEED_CLEANUP;
197 		kmsg->free_iov_nr = kmsg->msg.msg_iter.nr_segs;
198 		if (kmsg->free_iov)
199 			kfree(kmsg->free_iov);
200 		kmsg->free_iov = iov;
201 	}
202 	return 0;
203 }
204 
205 static inline void io_mshot_prep_retry(struct io_kiocb *req,
206 				       struct io_async_msghdr *kmsg)
207 {
208 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
209 
210 	req->flags &= ~REQ_F_BL_EMPTY;
211 	sr->done_io = 0;
212 	sr->retry_flags = 0;
213 	sr->len = 0; /* get from the provided buffer */
214 	req->buf_index = sr->buf_group;
215 }
216 
217 #ifdef CONFIG_COMPAT
218 static int io_compat_msg_copy_hdr(struct io_kiocb *req,
219 				  struct io_async_msghdr *iomsg,
220 				  struct compat_msghdr *msg, int ddir)
221 {
222 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
223 	struct compat_iovec __user *uiov;
224 	struct iovec *iov;
225 	int ret, nr_segs;
226 
227 	if (iomsg->free_iov) {
228 		nr_segs = iomsg->free_iov_nr;
229 		iov = iomsg->free_iov;
230 	} else {
231 		iov = &iomsg->fast_iov;
232 		nr_segs = 1;
233 	}
234 
235 	if (copy_from_user(msg, sr->umsg_compat, sizeof(*msg)))
236 		return -EFAULT;
237 
238 	uiov = compat_ptr(msg->msg_iov);
239 	if (req->flags & REQ_F_BUFFER_SELECT) {
240 		compat_ssize_t clen;
241 
242 		if (msg->msg_iovlen == 0) {
243 			sr->len = iov->iov_len = 0;
244 			iov->iov_base = NULL;
245 		} else if (msg->msg_iovlen > 1) {
246 			return -EINVAL;
247 		} else {
248 			if (!access_ok(uiov, sizeof(*uiov)))
249 				return -EFAULT;
250 			if (__get_user(clen, &uiov->iov_len))
251 				return -EFAULT;
252 			if (clen < 0)
253 				return -EINVAL;
254 			sr->len = clen;
255 		}
256 
257 		return 0;
258 	}
259 
260 	ret = __import_iovec(ddir, (struct iovec __user *)uiov, msg->msg_iovlen,
261 				nr_segs, &iov, &iomsg->msg.msg_iter, true);
262 	if (unlikely(ret < 0))
263 		return ret;
264 
265 	return io_net_vec_assign(req, iomsg, iov);
266 }
267 #endif
268 
269 static int io_msg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg,
270 			   struct user_msghdr *msg, int ddir)
271 {
272 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
273 	struct iovec *iov;
274 	int ret, nr_segs;
275 
276 	if (iomsg->free_iov) {
277 		nr_segs = iomsg->free_iov_nr;
278 		iov = iomsg->free_iov;
279 	} else {
280 		iov = &iomsg->fast_iov;
281 		nr_segs = 1;
282 	}
283 
284 	if (!user_access_begin(sr->umsg, sizeof(*sr->umsg)))
285 		return -EFAULT;
286 
287 	ret = -EFAULT;
288 	unsafe_get_user(msg->msg_name, &sr->umsg->msg_name, ua_end);
289 	unsafe_get_user(msg->msg_namelen, &sr->umsg->msg_namelen, ua_end);
290 	unsafe_get_user(msg->msg_iov, &sr->umsg->msg_iov, ua_end);
291 	unsafe_get_user(msg->msg_iovlen, &sr->umsg->msg_iovlen, ua_end);
292 	unsafe_get_user(msg->msg_control, &sr->umsg->msg_control, ua_end);
293 	unsafe_get_user(msg->msg_controllen, &sr->umsg->msg_controllen, ua_end);
294 	msg->msg_flags = 0;
295 
296 	if (req->flags & REQ_F_BUFFER_SELECT) {
297 		if (msg->msg_iovlen == 0) {
298 			sr->len = iov->iov_len = 0;
299 			iov->iov_base = NULL;
300 		} else if (msg->msg_iovlen > 1) {
301 			ret = -EINVAL;
302 			goto ua_end;
303 		} else {
304 			/* we only need the length for provided buffers */
305 			if (!access_ok(&msg->msg_iov[0].iov_len, sizeof(__kernel_size_t)))
306 				goto ua_end;
307 			unsafe_get_user(iov->iov_len, &msg->msg_iov[0].iov_len,
308 					ua_end);
309 			sr->len = iov->iov_len;
310 		}
311 		ret = 0;
312 ua_end:
313 		user_access_end();
314 		return ret;
315 	}
316 
317 	user_access_end();
318 	ret = __import_iovec(ddir, msg->msg_iov, msg->msg_iovlen, nr_segs,
319 				&iov, &iomsg->msg.msg_iter, false);
320 	if (unlikely(ret < 0))
321 		return ret;
322 
323 	return io_net_vec_assign(req, iomsg, iov);
324 }
325 
326 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
327 			       struct io_async_msghdr *iomsg)
328 {
329 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
330 	struct user_msghdr msg;
331 	int ret;
332 
333 	iomsg->msg.msg_name = &iomsg->addr;
334 	iomsg->msg.msg_iter.nr_segs = 0;
335 
336 #ifdef CONFIG_COMPAT
337 	if (unlikely(req->ctx->compat)) {
338 		struct compat_msghdr cmsg;
339 
340 		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_SOURCE);
341 		if (unlikely(ret))
342 			return ret;
343 
344 		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, NULL);
345 		sr->msg_control = iomsg->msg.msg_control_user;
346 		return ret;
347 	}
348 #endif
349 
350 	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_SOURCE);
351 	if (unlikely(ret))
352 		return ret;
353 
354 	ret = __copy_msghdr(&iomsg->msg, &msg, NULL);
355 
356 	/* save msg_control as sys_sendmsg() overwrites it */
357 	sr->msg_control = iomsg->msg.msg_control_user;
358 	return ret;
359 }
360 
361 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
362 {
363 	struct io_async_msghdr *io = req->async_data;
364 
365 	io_netmsg_iovec_free(io);
366 }
367 
368 static int io_send_setup(struct io_kiocb *req)
369 {
370 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
371 	struct io_async_msghdr *kmsg = req->async_data;
372 	int ret;
373 
374 	kmsg->msg.msg_name = NULL;
375 	kmsg->msg.msg_namelen = 0;
376 	kmsg->msg.msg_control = NULL;
377 	kmsg->msg.msg_controllen = 0;
378 	kmsg->msg.msg_ubuf = NULL;
379 
380 	if (sr->addr) {
381 		ret = move_addr_to_kernel(sr->addr, sr->addr_len, &kmsg->addr);
382 		if (unlikely(ret < 0))
383 			return ret;
384 		kmsg->msg.msg_name = &kmsg->addr;
385 		kmsg->msg.msg_namelen = sr->addr_len;
386 	}
387 	if (!io_do_buffer_select(req)) {
388 		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
389 				  &kmsg->msg.msg_iter);
390 		if (unlikely(ret < 0))
391 			return ret;
392 	}
393 	return 0;
394 }
395 
396 static int io_sendmsg_prep_setup(struct io_kiocb *req, int is_msg)
397 {
398 	struct io_async_msghdr *kmsg;
399 	int ret;
400 
401 	kmsg = io_msg_alloc_async(req);
402 	if (unlikely(!kmsg))
403 		return -ENOMEM;
404 	if (!is_msg)
405 		return io_send_setup(req);
406 	ret = io_sendmsg_copy_hdr(req, kmsg);
407 	if (!ret)
408 		req->flags |= REQ_F_NEED_CLEANUP;
409 	return ret;
410 }
411 
412 #define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE)
413 
414 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
415 {
416 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
417 
418 	sr->done_io = 0;
419 	sr->retry_flags = 0;
420 
421 	if (req->opcode == IORING_OP_SEND) {
422 		if (READ_ONCE(sqe->__pad3[0]))
423 			return -EINVAL;
424 		sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
425 		sr->addr_len = READ_ONCE(sqe->addr_len);
426 	} else if (sqe->addr2 || sqe->file_index) {
427 		return -EINVAL;
428 	}
429 
430 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
431 	sr->len = READ_ONCE(sqe->len);
432 	sr->flags = READ_ONCE(sqe->ioprio);
433 	if (sr->flags & ~SENDMSG_FLAGS)
434 		return -EINVAL;
435 	sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
436 	if (sr->msg_flags & MSG_DONTWAIT)
437 		req->flags |= REQ_F_NOWAIT;
438 	if (sr->flags & IORING_RECVSEND_BUNDLE) {
439 		if (req->opcode == IORING_OP_SENDMSG)
440 			return -EINVAL;
441 		if (!(req->flags & REQ_F_BUFFER_SELECT))
442 			return -EINVAL;
443 		sr->msg_flags |= MSG_WAITALL;
444 		sr->buf_group = req->buf_index;
445 		req->buf_list = NULL;
446 		req->flags |= REQ_F_MULTISHOT;
447 	}
448 
449 #ifdef CONFIG_COMPAT
450 	if (req->ctx->compat)
451 		sr->msg_flags |= MSG_CMSG_COMPAT;
452 #endif
453 	return io_sendmsg_prep_setup(req, req->opcode == IORING_OP_SENDMSG);
454 }
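
/*
 * Usage sketch (assumes liburing and a registered provided-buffer group;
 * illustrative only): a bundle send takes its payload from the buffer group
 * and may cover several buffers with one SQE. IORING_RECVSEND_BUNDLE is
 * passed in sqe->ioprio, which io_sendmsg_prep() reads into sr->flags
 * (BGID below stands for the application's buffer group id):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_send(sqe, sockfd, NULL, 0, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = BGID;
 *	sqe->ioprio |= IORING_RECVSEND_BUNDLE;
 */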
455 
456 static void io_req_msg_cleanup(struct io_kiocb *req,
457 			       unsigned int issue_flags)
458 {
459 	io_netmsg_recycle(req, issue_flags);
460 }
461 
462 /*
463  * For bundle completions, we need to figure out how many segments we consumed.
464  * A bundle could be using a single ITER_UBUF if that's all we mapped, or it
465  * could be using an ITER_IOVEC. If the latter, then if we consumed all of
466  * the segments, then it's a trivial question to answer. If we have residual
467  * data in the iter, then loop the segments to figure out how much we
468  * transferred.
469  */
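/*
 * For example (illustrative): with three mapped iovecs of 100, 200 and 300
 * bytes and a short transfer of ret == 250, the loop below counts 2 buffers
 * (all of the first, part of the second).
 */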
470 static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
471 {
472 	struct iovec *iov;
473 	int nbufs;
474 
475 	/* no data is always zero segments, and a ubuf is always 1 segment */
476 	if (ret <= 0)
477 		return 0;
478 	if (iter_is_ubuf(&kmsg->msg.msg_iter))
479 		return 1;
480 
481 	iov = kmsg->free_iov;
482 	if (!iov)
483 		iov = &kmsg->fast_iov;
484 
485 	/* if all data was transferred, it's basic pointer math */
486 	if (!iov_iter_count(&kmsg->msg.msg_iter))
487 		return iter_iov(&kmsg->msg.msg_iter) - iov;
488 
489 	/* short transfer, count segments */
490 	nbufs = 0;
491 	do {
492 		int this_len = min_t(int, iov[nbufs].iov_len, ret);
493 
494 		nbufs++;
495 		ret -= this_len;
496 	} while (ret);
497 
498 	return nbufs;
499 }
500 
501 static int io_net_kbuf_recyle(struct io_kiocb *req,
502 			      struct io_async_msghdr *kmsg, int len)
503 {
504 	req->flags |= REQ_F_BL_NO_RECYCLE;
505 	if (req->flags & REQ_F_BUFFERS_COMMIT)
506 		io_kbuf_commit(req, req->buf_list, len, io_bundle_nbufs(kmsg, len));
507 	return -EAGAIN;
508 }
509 
510 static inline bool io_send_finish(struct io_kiocb *req, int *ret,
511 				  struct io_async_msghdr *kmsg,
512 				  unsigned issue_flags)
513 {
514 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
515 	bool bundle_finished = *ret <= 0;
516 	unsigned int cflags;
517 
518 	if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
519 		cflags = io_put_kbuf(req, *ret, issue_flags);
520 		goto finish;
521 	}
522 
523 	cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags);
524 
525 	if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
526 		goto finish;
527 
528 	/*
529 	 * Fill CQE for this receive and see if we should keep trying to
530 	 * receive from this socket.
531 	 */
532 	if (io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
533 		io_mshot_prep_retry(req, kmsg);
534 		return false;
535 	}
536 
537 	/* Otherwise stop bundle and use the current result. */
538 finish:
539 	io_req_set_res(req, *ret, cflags);
540 	*ret = IOU_OK;
541 	return true;
542 }
543 
544 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
545 {
546 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
547 	struct io_async_msghdr *kmsg = req->async_data;
548 	struct socket *sock;
549 	unsigned flags;
550 	int min_ret = 0;
551 	int ret;
552 
553 	sock = sock_from_file(req->file);
554 	if (unlikely(!sock))
555 		return -ENOTSOCK;
556 
557 	if (!(req->flags & REQ_F_POLLED) &&
558 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
559 		return -EAGAIN;
560 
561 	flags = sr->msg_flags;
562 	if (issue_flags & IO_URING_F_NONBLOCK)
563 		flags |= MSG_DONTWAIT;
564 	if (flags & MSG_WAITALL)
565 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
566 
567 	kmsg->msg.msg_control_user = sr->msg_control;
568 
569 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
570 
571 	if (ret < min_ret) {
572 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
573 			return -EAGAIN;
574 		if (ret > 0 && io_net_retry(sock, flags)) {
575 			kmsg->msg.msg_controllen = 0;
576 			kmsg->msg.msg_control = NULL;
577 			sr->done_io += ret;
578 			return io_net_kbuf_recyle(req, kmsg, ret);
579 		}
580 		if (ret == -ERESTARTSYS)
581 			ret = -EINTR;
582 		req_set_fail(req);
583 	}
584 	io_req_msg_cleanup(req, issue_flags);
585 	if (ret >= 0)
586 		ret += sr->done_io;
587 	else if (sr->done_io)
588 		ret = sr->done_io;
589 	io_req_set_res(req, ret, 0);
590 	return IOU_OK;
591 }
592 
593 int io_send(struct io_kiocb *req, unsigned int issue_flags)
594 {
595 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
596 	struct io_async_msghdr *kmsg = req->async_data;
597 	struct socket *sock;
598 	unsigned flags;
599 	int min_ret = 0;
600 	int ret;
601 
602 	sock = sock_from_file(req->file);
603 	if (unlikely(!sock))
604 		return -ENOTSOCK;
605 
606 	if (!(req->flags & REQ_F_POLLED) &&
607 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
608 		return -EAGAIN;
609 
610 	flags = sr->msg_flags;
611 	if (issue_flags & IO_URING_F_NONBLOCK)
612 		flags |= MSG_DONTWAIT;
613 
614 retry_bundle:
615 	if (io_do_buffer_select(req)) {
616 		struct buf_sel_arg arg = {
617 			.iovs = &kmsg->fast_iov,
618 			.max_len = min_not_zero(sr->len, INT_MAX),
619 			.nr_iovs = 1,
620 		};
621 
622 		if (kmsg->free_iov) {
623 			arg.nr_iovs = kmsg->free_iov_nr;
624 			arg.iovs = kmsg->free_iov;
625 			arg.mode = KBUF_MODE_FREE;
626 		}
627 
628 		if (!(sr->flags & IORING_RECVSEND_BUNDLE))
629 			arg.nr_iovs = 1;
630 		else
631 			arg.mode |= KBUF_MODE_EXPAND;
632 
633 		ret = io_buffers_select(req, &arg, issue_flags);
634 		if (unlikely(ret < 0))
635 			return ret;
636 
637 		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
638 			kmsg->free_iov_nr = ret;
639 			kmsg->free_iov = arg.iovs;
640 			req->flags |= REQ_F_NEED_CLEANUP;
641 		}
642 		sr->len = arg.out_len;
643 
644 		if (ret == 1) {
645 			sr->buf = arg.iovs[0].iov_base;
646 			ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
647 						&kmsg->msg.msg_iter);
648 			if (unlikely(ret))
649 				return ret;
650 		} else {
651 			iov_iter_init(&kmsg->msg.msg_iter, ITER_SOURCE,
652 					arg.iovs, ret, arg.out_len);
653 		}
654 	}
655 
656 	/*
657 	 * If MSG_WAITALL is set, or this is a bundle send, then we need
658 	 * the full amount. If just the bundle flag is set and we do a short
659 	 * send, then we complete the bundle sequence rather than continue on.
660 	 */
661 	if (flags & MSG_WAITALL || sr->flags & IORING_RECVSEND_BUNDLE)
662 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
663 
664 	flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
665 	kmsg->msg.msg_flags = flags;
666 	ret = sock_sendmsg(sock, &kmsg->msg);
667 	if (ret < min_ret) {
668 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
669 			return -EAGAIN;
670 
671 		if (ret > 0 && io_net_retry(sock, flags)) {
672 			sr->len -= ret;
673 			sr->buf += ret;
674 			sr->done_io += ret;
675 			return io_net_kbuf_recyle(req, kmsg, ret);
676 		}
677 		if (ret == -ERESTARTSYS)
678 			ret = -EINTR;
679 		req_set_fail(req);
680 	}
681 	if (ret >= 0)
682 		ret += sr->done_io;
683 	else if (sr->done_io)
684 		ret = sr->done_io;
685 
686 	if (!io_send_finish(req, &ret, kmsg, issue_flags))
687 		goto retry_bundle;
688 
689 	io_req_msg_cleanup(req, issue_flags);
690 	return ret;
691 }
692 
693 static int io_recvmsg_mshot_prep(struct io_kiocb *req,
694 				 struct io_async_msghdr *iomsg,
695 				 int namelen, size_t controllen)
696 {
697 	if ((req->flags & (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) ==
698 			  (REQ_F_APOLL_MULTISHOT|REQ_F_BUFFER_SELECT)) {
699 		int hdr;
700 
701 		if (unlikely(namelen < 0))
702 			return -EOVERFLOW;
703 		if (check_add_overflow(sizeof(struct io_uring_recvmsg_out),
704 					namelen, &hdr))
705 			return -EOVERFLOW;
706 		if (check_add_overflow(hdr, controllen, &hdr))
707 			return -EOVERFLOW;
708 
709 		iomsg->namelen = namelen;
710 		iomsg->controllen = controllen;
711 		return 0;
712 	}
713 
714 	return 0;
715 }
716 
717 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
718 			       struct io_async_msghdr *iomsg)
719 {
720 	struct user_msghdr msg;
721 	int ret;
722 
723 	iomsg->msg.msg_name = &iomsg->addr;
724 	iomsg->msg.msg_iter.nr_segs = 0;
725 
726 #ifdef CONFIG_COMPAT
727 	if (unlikely(req->ctx->compat)) {
728 		struct compat_msghdr cmsg;
729 
730 		ret = io_compat_msg_copy_hdr(req, iomsg, &cmsg, ITER_DEST);
731 		if (unlikely(ret))
732 			return ret;
733 
734 		ret = __get_compat_msghdr(&iomsg->msg, &cmsg, &iomsg->uaddr);
735 		if (unlikely(ret))
736 			return ret;
737 
738 		return io_recvmsg_mshot_prep(req, iomsg, cmsg.msg_namelen,
739 						cmsg.msg_controllen);
740 	}
741 #endif
742 
743 	ret = io_msg_copy_hdr(req, iomsg, &msg, ITER_DEST);
744 	if (unlikely(ret))
745 		return ret;
746 
747 	ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
748 	if (unlikely(ret))
749 		return ret;
750 
751 	return io_recvmsg_mshot_prep(req, iomsg, msg.msg_namelen,
752 					msg.msg_controllen);
753 }
754 
755 static int io_recvmsg_prep_setup(struct io_kiocb *req)
756 {
757 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
758 	struct io_async_msghdr *kmsg;
759 	int ret;
760 
761 	kmsg = io_msg_alloc_async(req);
762 	if (unlikely(!kmsg))
763 		return -ENOMEM;
764 
765 	if (req->opcode == IORING_OP_RECV) {
766 		kmsg->msg.msg_name = NULL;
767 		kmsg->msg.msg_namelen = 0;
768 		kmsg->msg.msg_inq = 0;
769 		kmsg->msg.msg_control = NULL;
770 		kmsg->msg.msg_get_inq = 1;
771 		kmsg->msg.msg_controllen = 0;
772 		kmsg->msg.msg_iocb = NULL;
773 		kmsg->msg.msg_ubuf = NULL;
774 
775 		if (!io_do_buffer_select(req)) {
776 			ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
777 					  &kmsg->msg.msg_iter);
778 			if (unlikely(ret))
779 				return ret;
780 		}
781 		return 0;
782 	}
783 
784 	ret = io_recvmsg_copy_hdr(req, kmsg);
785 	if (!ret)
786 		req->flags |= REQ_F_NEED_CLEANUP;
787 	return ret;
788 }
789 
790 #define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT | \
791 			IORING_RECVSEND_BUNDLE)
792 
793 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
794 {
795 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
796 
797 	sr->done_io = 0;
798 	sr->retry_flags = 0;
799 
800 	if (unlikely(sqe->file_index || sqe->addr2))
801 		return -EINVAL;
802 
803 	sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
804 	sr->len = READ_ONCE(sqe->len);
805 	sr->flags = READ_ONCE(sqe->ioprio);
806 	if (sr->flags & ~RECVMSG_FLAGS)
807 		return -EINVAL;
808 	sr->msg_flags = READ_ONCE(sqe->msg_flags);
809 	if (sr->msg_flags & MSG_DONTWAIT)
810 		req->flags |= REQ_F_NOWAIT;
811 	if (sr->msg_flags & MSG_ERRQUEUE)
812 		req->flags |= REQ_F_CLEAR_POLLIN;
813 	if (req->flags & REQ_F_BUFFER_SELECT) {
814 		/*
815 		 * Store the buffer group for this multishot receive separately,
816 		 * as if we end up doing an io-wq based issue that selects a
817 		 * buffer, it has to be committed immediately and that will
818 		 * clear ->buf_list. This means we lose the link to the buffer
819 		 * list, and the eventual buffer put on completion then cannot
820 		 * restore it.
821 		 */
822 		sr->buf_group = req->buf_index;
823 		req->buf_list = NULL;
824 	}
825 	if (sr->flags & IORING_RECV_MULTISHOT) {
826 		if (!(req->flags & REQ_F_BUFFER_SELECT))
827 			return -EINVAL;
828 		if (sr->msg_flags & MSG_WAITALL)
829 			return -EINVAL;
830 		if (req->opcode == IORING_OP_RECV && sr->len)
831 			return -EINVAL;
832 		req->flags |= REQ_F_APOLL_MULTISHOT;
833 	}
834 	if (sr->flags & IORING_RECVSEND_BUNDLE) {
835 		if (req->opcode == IORING_OP_RECVMSG)
836 			return -EINVAL;
837 	}
838 
839 #ifdef CONFIG_COMPAT
840 	if (req->ctx->compat)
841 		sr->msg_flags |= MSG_CMSG_COMPAT;
842 #endif
843 	sr->nr_multishot_loops = 0;
844 	return io_recvmsg_prep_setup(req);
845 }
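
/*
 * Usage sketch (assumes liburing with a registered buffer ring; illustrative
 * only): a multishot receive, as parsed by io_recvmsg_prep() above, keeps
 * posting CQEs (each flagged IORING_CQE_F_MORE) until an error or an empty
 * buffer group stops it (BGID below is the application's buffer group id):
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_recv_multishot(sqe, sockfd, NULL, 0, 0);
 *	sqe->flags |= IOSQE_BUFFER_SELECT;
 *	sqe->buf_group = BGID;
 *	io_uring_submit(&ring);
 */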
846 
847 /* bits to clear in old and inherit in new cflags on bundle retry */
848 #define CQE_F_MASK	(IORING_CQE_F_SOCK_NONEMPTY|IORING_CQE_F_MORE)
849 
850 /*
851  * Finishes io_recv and io_recvmsg.
852  *
853  * Returns true if it is actually finished, or false if it should run
854  * again (for multishot).
855  */
856 static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
857 				  struct io_async_msghdr *kmsg,
858 				  bool mshot_finished, unsigned issue_flags)
859 {
860 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
861 	unsigned int cflags = 0;
862 
863 	if (kmsg->msg.msg_inq > 0)
864 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
865 
866 	if (sr->flags & IORING_RECVSEND_BUNDLE) {
867 		size_t this_ret = *ret - sr->done_io;
868 
869 		cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret),
870 				      issue_flags);
871 		if (sr->retry_flags & IO_SR_MSG_RETRY)
872 			cflags = req->cqe.flags | (cflags & CQE_F_MASK);
873 		/* bundle with no more immediate buffers, we're done */
874 		if (req->flags & REQ_F_BL_EMPTY)
875 			goto finish;
876 		/*
877 		 * If more is available AND it was a full transfer, retry and
878 		 * append to this one
879 		 */
880 		if (!sr->retry_flags && kmsg->msg.msg_inq > 1 && this_ret > 0 &&
881 		    !iov_iter_count(&kmsg->msg.msg_iter)) {
882 			req->cqe.flags = cflags & ~CQE_F_MASK;
883 			sr->len = kmsg->msg.msg_inq;
884 			sr->done_io += this_ret;
885 			sr->retry_flags |= IO_SR_MSG_RETRY;
886 			return false;
887 		}
888 	} else {
889 		cflags |= io_put_kbuf(req, *ret, issue_flags);
890 	}
891 
892 	/*
893 	 * Fill CQE for this receive and see if we should keep trying to
894 	 * receive from this socket.
895 	 */
896 	if ((req->flags & REQ_F_APOLL_MULTISHOT) && !mshot_finished &&
897 	    io_req_post_cqe(req, *ret, cflags | IORING_CQE_F_MORE)) {
898 		int mshot_retry_ret = IOU_ISSUE_SKIP_COMPLETE;
899 
900 		io_mshot_prep_retry(req, kmsg);
901 		/* Known not-empty or unknown state, retry */
902 		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || kmsg->msg.msg_inq < 0) {
903 			if (sr->nr_multishot_loops++ < MULTISHOT_MAX_RETRY)
904 				return false;
905 			/* mshot retries exceeded, force a requeue */
906 			sr->nr_multishot_loops = 0;
907 			mshot_retry_ret = IOU_REQUEUE;
908 		}
909 		if (issue_flags & IO_URING_F_MULTISHOT)
910 			*ret = mshot_retry_ret;
911 		else
912 			*ret = -EAGAIN;
913 		return true;
914 	}
915 
916 	/* Finish the request / stop multishot. */
917 finish:
918 	io_req_set_res(req, *ret, cflags);
919 
920 	if (issue_flags & IO_URING_F_MULTISHOT)
921 		*ret = IOU_STOP_MULTISHOT;
922 	else
923 		*ret = IOU_OK;
924 	io_req_msg_cleanup(req, issue_flags);
925 	return true;
926 }
927 
928 static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
929 				     struct io_sr_msg *sr, void __user **buf,
930 				     size_t *len)
931 {
932 	unsigned long ubuf = (unsigned long) *buf;
933 	unsigned long hdr;
934 
935 	hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
936 		kmsg->controllen;
937 	if (*len < hdr)
938 		return -EFAULT;
939 
940 	if (kmsg->controllen) {
941 		unsigned long control = ubuf + hdr - kmsg->controllen;
942 
943 		kmsg->msg.msg_control_user = (void __user *) control;
944 		kmsg->msg.msg_controllen = kmsg->controllen;
945 	}
946 
947 	sr->buf = *buf; /* stash for later copy */
948 	*buf = (void __user *) (ubuf + hdr);
949 	kmsg->payloadlen = *len = *len - hdr;
950 	return 0;
951 }
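
/*
 * Resulting layout of a selected buffer for multishot recvmsg, derived from
 * the offsets computed above:
 *
 *	struct io_uring_recvmsg_out | name (namelen) | control (controllen) | payload
 *
 * Userspace can walk this with recvmsg_out accessors (e.g. liburing's
 * io_uring_recvmsg_validate()/io_uring_recvmsg_payload()), though any parser
 * honouring these offsets works.
 */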
952 
953 struct io_recvmsg_multishot_hdr {
954 	struct io_uring_recvmsg_out msg;
955 	struct sockaddr_storage addr;
956 };
957 
958 static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
959 				struct io_async_msghdr *kmsg,
960 				unsigned int flags, bool *finished)
961 {
962 	int err;
963 	int copy_len;
964 	struct io_recvmsg_multishot_hdr hdr;
965 
966 	if (kmsg->namelen)
967 		kmsg->msg.msg_name = &hdr.addr;
968 	kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
969 	kmsg->msg.msg_namelen = 0;
970 
971 	if (sock->file->f_flags & O_NONBLOCK)
972 		flags |= MSG_DONTWAIT;
973 
974 	err = sock_recvmsg(sock, &kmsg->msg, flags);
975 	*finished = err <= 0;
976 	if (err < 0)
977 		return err;
978 
979 	hdr.msg = (struct io_uring_recvmsg_out) {
980 		.controllen = kmsg->controllen - kmsg->msg.msg_controllen,
981 		.flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
982 	};
983 
984 	hdr.msg.payloadlen = err;
985 	if (err > kmsg->payloadlen)
986 		err = kmsg->payloadlen;
987 
988 	copy_len = sizeof(struct io_uring_recvmsg_out);
989 	if (kmsg->msg.msg_namelen > kmsg->namelen)
990 		copy_len += kmsg->namelen;
991 	else
992 		copy_len += kmsg->msg.msg_namelen;
993 
994 	/*
995 	 *      "fromlen shall refer to the value before truncation.."
996 	 *                      1003.1g
997 	 */
998 	hdr.msg.namelen = kmsg->msg.msg_namelen;
999 
1000 	/* ensure that there is no gap between hdr and sockaddr_storage */
1001 	BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
1002 		     sizeof(struct io_uring_recvmsg_out));
1003 	if (copy_to_user(io->buf, &hdr, copy_len)) {
1004 		*finished = true;
1005 		return -EFAULT;
1006 	}
1007 
1008 	return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
1009 			kmsg->controllen + err;
1010 }
1011 
1012 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
1013 {
1014 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1015 	struct io_async_msghdr *kmsg = req->async_data;
1016 	struct socket *sock;
1017 	unsigned flags;
1018 	int ret, min_ret = 0;
1019 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1020 	bool mshot_finished = true;
1021 
1022 	sock = sock_from_file(req->file);
1023 	if (unlikely(!sock))
1024 		return -ENOTSOCK;
1025 
1026 	if (!(req->flags & REQ_F_POLLED) &&
1027 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1028 		return -EAGAIN;
1029 
1030 	flags = sr->msg_flags;
1031 	if (force_nonblock)
1032 		flags |= MSG_DONTWAIT;
1033 
1034 retry_multishot:
1035 	if (io_do_buffer_select(req)) {
1036 		void __user *buf;
1037 		size_t len = sr->len;
1038 
1039 		buf = io_buffer_select(req, &len, issue_flags);
1040 		if (!buf)
1041 			return -ENOBUFS;
1042 
1043 		if (req->flags & REQ_F_APOLL_MULTISHOT) {
1044 			ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
1045 			if (ret) {
1046 				io_kbuf_recycle(req, issue_flags);
1047 				return ret;
1048 			}
1049 		}
1050 
1051 		iov_iter_ubuf(&kmsg->msg.msg_iter, ITER_DEST, buf, len);
1052 	}
1053 
1054 	kmsg->msg.msg_get_inq = 1;
1055 	kmsg->msg.msg_inq = -1;
1056 	if (req->flags & REQ_F_APOLL_MULTISHOT) {
1057 		ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
1058 					   &mshot_finished);
1059 	} else {
1060 		/* disable partial retry for recvmsg with cmsg attached */
1061 		if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
1062 			min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1063 
1064 		ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
1065 					 kmsg->uaddr, flags);
1066 	}
1067 
1068 	if (ret < min_ret) {
1069 		if (ret == -EAGAIN && force_nonblock) {
1070 			if (issue_flags & IO_URING_F_MULTISHOT) {
1071 				io_kbuf_recycle(req, issue_flags);
1072 				return IOU_ISSUE_SKIP_COMPLETE;
1073 			}
1074 			return -EAGAIN;
1075 		}
1076 		if (ret > 0 && io_net_retry(sock, flags)) {
1077 			sr->done_io += ret;
1078 			return io_net_kbuf_recyle(req, kmsg, ret);
1079 		}
1080 		if (ret == -ERESTARTSYS)
1081 			ret = -EINTR;
1082 		req_set_fail(req);
1083 	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
1084 		req_set_fail(req);
1085 	}
1086 
1087 	if (ret > 0)
1088 		ret += sr->done_io;
1089 	else if (sr->done_io)
1090 		ret = sr->done_io;
1091 	else
1092 		io_kbuf_recycle(req, issue_flags);
1093 
1094 	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
1095 		goto retry_multishot;
1096 
1097 	return ret;
1098 }
1099 
1100 static int io_recv_buf_select(struct io_kiocb *req, struct io_async_msghdr *kmsg,
1101 			      size_t *len, unsigned int issue_flags)
1102 {
1103 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1104 	int ret;
1105 
1106 	/*
1107 	 * If the ring isn't locked, then don't use the peek interface
1108 	 * to grab multiple buffers as we will lock/unlock between
1109 	 * this selection and posting the buffers.
1110 	 */
1111 	if (!(issue_flags & IO_URING_F_UNLOCKED) &&
1112 	    sr->flags & IORING_RECVSEND_BUNDLE) {
1113 		struct buf_sel_arg arg = {
1114 			.iovs = &kmsg->fast_iov,
1115 			.nr_iovs = 1,
1116 			.mode = KBUF_MODE_EXPAND,
1117 		};
1118 
1119 		if (kmsg->free_iov) {
1120 			arg.nr_iovs = kmsg->free_iov_nr;
1121 			arg.iovs = kmsg->free_iov;
1122 			arg.mode |= KBUF_MODE_FREE;
1123 		}
1124 
1125 		if (kmsg->msg.msg_inq > 1)
1126 			arg.max_len = min_not_zero(sr->len, kmsg->msg.msg_inq);
1127 
1128 		ret = io_buffers_peek(req, &arg);
1129 		if (unlikely(ret < 0))
1130 			return ret;
1131 
1132 		if (arg.iovs != &kmsg->fast_iov && arg.iovs != kmsg->free_iov) {
1133 			kmsg->free_iov_nr = ret;
1134 			kmsg->free_iov = arg.iovs;
1135 			req->flags |= REQ_F_NEED_CLEANUP;
1136 		}
1137 		if (arg.partial_map)
1138 			sr->retry_flags |= IO_SR_MSG_PARTIAL_MAP;
1139 
1140 		/* special case 1 vec, can be a fast path */
1141 		if (ret == 1) {
1142 			sr->buf = arg.iovs[0].iov_base;
1143 			sr->len = arg.iovs[0].iov_len;
1144 			goto map_ubuf;
1145 		}
1146 		iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, arg.iovs, ret,
1147 				arg.out_len);
1148 	} else {
1149 		void __user *buf;
1150 
1151 		*len = sr->len;
1152 		buf = io_buffer_select(req, len, issue_flags);
1153 		if (!buf)
1154 			return -ENOBUFS;
1155 		sr->buf = buf;
1156 		sr->len = *len;
1157 map_ubuf:
1158 		ret = import_ubuf(ITER_DEST, sr->buf, sr->len,
1159 				  &kmsg->msg.msg_iter);
1160 		if (unlikely(ret))
1161 			return ret;
1162 	}
1163 
1164 	return 0;
1165 }
1166 
1167 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
1168 {
1169 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1170 	struct io_async_msghdr *kmsg = req->async_data;
1171 	struct socket *sock;
1172 	unsigned flags;
1173 	int ret, min_ret = 0;
1174 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1175 	size_t len = sr->len;
1176 	bool mshot_finished;
1177 
1178 	if (!(req->flags & REQ_F_POLLED) &&
1179 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1180 		return -EAGAIN;
1181 
1182 	sock = sock_from_file(req->file);
1183 	if (unlikely(!sock))
1184 		return -ENOTSOCK;
1185 
1186 	flags = sr->msg_flags;
1187 	if (force_nonblock)
1188 		flags |= MSG_DONTWAIT;
1189 
1190 retry_multishot:
1191 	if (io_do_buffer_select(req)) {
1192 		ret = io_recv_buf_select(req, kmsg, &len, issue_flags);
1193 		if (unlikely(ret)) {
1194 			kmsg->msg.msg_inq = -1;
1195 			goto out_free;
1196 		}
1197 		sr->buf = NULL;
1198 	}
1199 
1200 	kmsg->msg.msg_flags = 0;
1201 	kmsg->msg.msg_inq = -1;
1202 
1203 	if (flags & MSG_WAITALL)
1204 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1205 
1206 	ret = sock_recvmsg(sock, &kmsg->msg, flags);
1207 	if (ret < min_ret) {
1208 		if (ret == -EAGAIN && force_nonblock) {
1209 			if (issue_flags & IO_URING_F_MULTISHOT) {
1210 				io_kbuf_recycle(req, issue_flags);
1211 				return IOU_ISSUE_SKIP_COMPLETE;
1212 			}
1213 
1214 			return -EAGAIN;
1215 		}
1216 		if (ret > 0 && io_net_retry(sock, flags)) {
1217 			sr->len -= ret;
1218 			sr->buf += ret;
1219 			sr->done_io += ret;
1220 			return io_net_kbuf_recyle(req, kmsg, ret);
1221 		}
1222 		if (ret == -ERESTARTSYS)
1223 			ret = -EINTR;
1224 		req_set_fail(req);
1225 	} else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
1226 out_free:
1227 		req_set_fail(req);
1228 	}
1229 
1230 	mshot_finished = ret <= 0;
1231 	if (ret > 0)
1232 		ret += sr->done_io;
1233 	else if (sr->done_io)
1234 		ret = sr->done_io;
1235 	else
1236 		io_kbuf_recycle(req, issue_flags);
1237 
1238 	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
1239 		goto retry_multishot;
1240 
1241 	return ret;
1242 }
1243 
1244 void io_send_zc_cleanup(struct io_kiocb *req)
1245 {
1246 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1247 	struct io_async_msghdr *io = req->async_data;
1248 
1249 	if (req_has_async_data(req))
1250 		io_netmsg_iovec_free(io);
1251 	if (zc->notif) {
1252 		io_notif_flush(zc->notif);
1253 		zc->notif = NULL;
1254 	}
1255 }
1256 
1257 #define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
1258 #define IO_ZC_FLAGS_VALID  (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE)
1259 
1260 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1261 {
1262 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1263 	struct io_ring_ctx *ctx = req->ctx;
1264 	struct io_kiocb *notif;
1265 
1266 	zc->done_io = 0;
1267 	zc->retry_flags = 0;
1268 	req->flags |= REQ_F_POLL_NO_LAZY;
1269 
1270 	if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
1271 		return -EINVAL;
1272 	/* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
1273 	if (req->flags & REQ_F_CQE_SKIP)
1274 		return -EINVAL;
1275 
1276 	notif = zc->notif = io_alloc_notif(ctx);
1277 	if (!notif)
1278 		return -ENOMEM;
1279 	notif->cqe.user_data = req->cqe.user_data;
1280 	notif->cqe.res = 0;
1281 	notif->cqe.flags = IORING_CQE_F_NOTIF;
1282 	req->flags |= REQ_F_NEED_CLEANUP;
1283 
1284 	zc->flags = READ_ONCE(sqe->ioprio);
1285 	if (unlikely(zc->flags & ~IO_ZC_FLAGS_COMMON)) {
1286 		if (zc->flags & ~IO_ZC_FLAGS_VALID)
1287 			return -EINVAL;
1288 		if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
1289 			struct io_notif_data *nd = io_notif_to_data(notif);
1290 
1291 			nd->zc_report = true;
1292 			nd->zc_used = false;
1293 			nd->zc_copied = false;
1294 		}
1295 	}
1296 
1297 	if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1298 		unsigned idx = READ_ONCE(sqe->buf_index);
1299 
1300 		if (unlikely(idx >= ctx->nr_user_bufs))
1301 			return -EFAULT;
1302 		idx = array_index_nospec(idx, ctx->nr_user_bufs);
1303 		req->imu = READ_ONCE(ctx->user_bufs[idx]);
1304 		io_req_set_rsrc_node(notif, ctx, 0);
1305 	}
1306 
1307 	if (req->opcode == IORING_OP_SEND_ZC) {
1308 		if (READ_ONCE(sqe->__pad3[0]))
1309 			return -EINVAL;
1310 		zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1311 		zc->addr_len = READ_ONCE(sqe->addr_len);
1312 	} else {
1313 		if (unlikely(sqe->addr2 || sqe->file_index))
1314 			return -EINVAL;
1315 		if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
1316 			return -EINVAL;
1317 	}
1318 
1319 	zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1320 	zc->len = READ_ONCE(sqe->len);
1321 	zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL | MSG_ZEROCOPY;
1322 	if (zc->msg_flags & MSG_DONTWAIT)
1323 		req->flags |= REQ_F_NOWAIT;
1324 
1325 #ifdef CONFIG_COMPAT
1326 	if (req->ctx->compat)
1327 		zc->msg_flags |= MSG_CMSG_COMPAT;
1328 #endif
1329 	return io_sendmsg_prep_setup(req, req->opcode == IORING_OP_SENDMSG_ZC);
1330 }
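
/*
 * Usage sketch (assumes liburing; illustrative only): a zero-copy send
 * produces two completions, the request result and a notification that the
 * kernel is done with the pages:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_send_zc(sqe, sockfd, buf, len, 0, 0);
 *	io_uring_submit(&ring);
 *
 * The first CQE (flagged IORING_CQE_F_MORE) carries the byte count; the
 * second, flagged IORING_CQE_F_NOTIF, signals that buf may be reused.
 */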
1331 
1332 static int io_sg_from_iter_iovec(struct sk_buff *skb,
1333 				 struct iov_iter *from, size_t length)
1334 {
1335 	skb_zcopy_downgrade_managed(skb);
1336 	return zerocopy_fill_skb_from_iter(skb, from, length);
1337 }
1338 
1339 static int io_sg_from_iter(struct sk_buff *skb,
1340 			   struct iov_iter *from, size_t length)
1341 {
1342 	struct skb_shared_info *shinfo = skb_shinfo(skb);
1343 	int frag = shinfo->nr_frags;
1344 	int ret = 0;
1345 	struct bvec_iter bi;
1346 	ssize_t copied = 0;
1347 	unsigned long truesize = 0;
1348 
1349 	if (!frag)
1350 		shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
1351 	else if (unlikely(!skb_zcopy_managed(skb)))
1352 		return zerocopy_fill_skb_from_iter(skb, from, length);
1353 
1354 	bi.bi_size = min(from->count, length);
1355 	bi.bi_bvec_done = from->iov_offset;
1356 	bi.bi_idx = 0;
1357 
1358 	while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1359 		struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1360 
1361 		copied += v.bv_len;
1362 		truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1363 		__skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1364 					   v.bv_offset, v.bv_len);
1365 		bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1366 	}
1367 	if (bi.bi_size)
1368 		ret = -EMSGSIZE;
1369 
1370 	shinfo->nr_frags = frag;
1371 	from->bvec += bi.bi_idx;
1372 	from->nr_segs -= bi.bi_idx;
1373 	from->count -= copied;
1374 	from->iov_offset = bi.bi_bvec_done;
1375 
1376 	skb->data_len += copied;
1377 	skb->len += copied;
1378 	skb->truesize += truesize;
1379 	return ret;
1380 }
1381 
1382 static int io_send_zc_import(struct io_kiocb *req, struct io_async_msghdr *kmsg)
1383 {
1384 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1385 	int ret;
1386 
1387 	if (sr->flags & IORING_RECVSEND_FIXED_BUF) {
1388 		ret = io_import_fixed(ITER_SOURCE, &kmsg->msg.msg_iter, req->imu,
1389 					(u64)(uintptr_t)sr->buf, sr->len);
1390 		if (unlikely(ret))
1391 			return ret;
1392 		kmsg->msg.sg_from_iter = io_sg_from_iter;
1393 	} else {
1394 		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
1395 		if (unlikely(ret))
1396 			return ret;
1397 		ret = io_notif_account_mem(sr->notif, sr->len);
1398 		if (unlikely(ret))
1399 			return ret;
1400 		kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1401 	}
1402 
1403 	return ret;
1404 }
1405 
1406 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1407 {
1408 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1409 	struct io_async_msghdr *kmsg = req->async_data;
1410 	struct socket *sock;
1411 	unsigned msg_flags;
1412 	int ret, min_ret = 0;
1413 
1414 	sock = sock_from_file(req->file);
1415 	if (unlikely(!sock))
1416 		return -ENOTSOCK;
1417 	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1418 		return -EOPNOTSUPP;
1419 
1420 	if (!(req->flags & REQ_F_POLLED) &&
1421 	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
1422 		return -EAGAIN;
1423 
1424 	if (!zc->done_io) {
1425 		ret = io_send_zc_import(req, kmsg);
1426 		if (unlikely(ret))
1427 			return ret;
1428 	}
1429 
1430 	msg_flags = zc->msg_flags;
1431 	if (issue_flags & IO_URING_F_NONBLOCK)
1432 		msg_flags |= MSG_DONTWAIT;
1433 	if (msg_flags & MSG_WAITALL)
1434 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1435 	msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
1436 
1437 	kmsg->msg.msg_flags = msg_flags;
1438 	kmsg->msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1439 	ret = sock_sendmsg(sock, &kmsg->msg);
1440 
1441 	if (unlikely(ret < min_ret)) {
1442 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1443 			return -EAGAIN;
1444 
1445 		if (ret > 0 && io_net_retry(sock, kmsg->msg.msg_flags)) {
1446 			zc->len -= ret;
1447 			zc->buf += ret;
1448 			zc->done_io += ret;
1449 			return io_net_kbuf_recyle(req, kmsg, ret);
1450 		}
1451 		if (ret == -ERESTARTSYS)
1452 			ret = -EINTR;
1453 		req_set_fail(req);
1454 	}
1455 
1456 	if (ret >= 0)
1457 		ret += zc->done_io;
1458 	else if (zc->done_io)
1459 		ret = zc->done_io;
1460 
1461 	/*
1462 	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1463 	 * flushing notif to io_send_zc_cleanup()
1464 	 */
1465 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1466 		io_notif_flush(zc->notif);
1467 		zc->notif = NULL;
1468 		io_req_msg_cleanup(req, 0);
1469 	}
1470 	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1471 	return IOU_OK;
1472 }
1473 
1474 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1475 {
1476 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1477 	struct io_async_msghdr *kmsg = req->async_data;
1478 	struct socket *sock;
1479 	unsigned flags;
1480 	int ret, min_ret = 0;
1481 
1482 	sock = sock_from_file(req->file);
1483 	if (unlikely(!sock))
1484 		return -ENOTSOCK;
1485 	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1486 		return -EOPNOTSUPP;
1487 
1488 	if (!(req->flags & REQ_F_POLLED) &&
1489 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
1490 		return -EAGAIN;
1491 
1492 	flags = sr->msg_flags;
1493 	if (issue_flags & IO_URING_F_NONBLOCK)
1494 		flags |= MSG_DONTWAIT;
1495 	if (flags & MSG_WAITALL)
1496 		min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1497 
1498 	kmsg->msg.msg_control_user = sr->msg_control;
1499 	kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1500 	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1501 	ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1502 
1503 	if (unlikely(ret < min_ret)) {
1504 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1505 			return -EAGAIN;
1506 
1507 		if (ret > 0 && io_net_retry(sock, flags)) {
1508 			sr->done_io += ret;
1509 			return io_net_kbuf_recyle(req, kmsg, ret);
1510 		}
1511 		if (ret == -ERESTARTSYS)
1512 			ret = -EINTR;
1513 		req_set_fail(req);
1514 	}
1515 
1516 	if (ret >= 0)
1517 		ret += sr->done_io;
1518 	else if (sr->done_io)
1519 		ret = sr->done_io;
1520 
1521 	/*
1522 	 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1523 	 * flushing notif to io_send_zc_cleanup()
1524 	 */
1525 	if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1526 		io_notif_flush(sr->notif);
1527 		sr->notif = NULL;
1528 		io_req_msg_cleanup(req, 0);
1529 	}
1530 	io_req_set_res(req, ret, IORING_CQE_F_MORE);
1531 	return IOU_OK;
1532 }
1533 
1534 void io_sendrecv_fail(struct io_kiocb *req)
1535 {
1536 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1537 
1538 	if (sr->done_io)
1539 		req->cqe.res = sr->done_io;
1540 
1541 	if ((req->flags & REQ_F_NEED_CLEANUP) &&
1542 	    (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1543 		req->cqe.flags |= IORING_CQE_F_MORE;
1544 }
1545 
1546 #define ACCEPT_FLAGS	(IORING_ACCEPT_MULTISHOT | IORING_ACCEPT_DONTWAIT | \
1547 			 IORING_ACCEPT_POLL_FIRST)
1548 
1549 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1550 {
1551 	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1552 
1553 	if (sqe->len || sqe->buf_index)
1554 		return -EINVAL;
1555 
1556 	accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1557 	accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1558 	accept->flags = READ_ONCE(sqe->accept_flags);
1559 	accept->nofile = rlimit(RLIMIT_NOFILE);
1560 	accept->iou_flags = READ_ONCE(sqe->ioprio);
1561 	if (accept->iou_flags & ~ACCEPT_FLAGS)
1562 		return -EINVAL;
1563 
1564 	accept->file_slot = READ_ONCE(sqe->file_index);
1565 	if (accept->file_slot) {
1566 		if (accept->flags & SOCK_CLOEXEC)
1567 			return -EINVAL;
1568 		if (accept->iou_flags & IORING_ACCEPT_MULTISHOT &&
1569 		    accept->file_slot != IORING_FILE_INDEX_ALLOC)
1570 			return -EINVAL;
1571 	}
1572 	if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1573 		return -EINVAL;
1574 	if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1575 		accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1576 	if (accept->iou_flags & IORING_ACCEPT_MULTISHOT)
1577 		req->flags |= REQ_F_APOLL_MULTISHOT;
1578 	if (accept->iou_flags & IORING_ACCEPT_DONTWAIT)
1579 		req->flags |= REQ_F_NOWAIT;
1580 	return 0;
1581 }
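
/*
 * Usage sketch (assumes liburing; illustrative only): a multishot accept,
 * matching the IORING_ACCEPT_MULTISHOT handling above, keeps producing one
 * CQE per accepted connection until it is cancelled or fails:
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_multishot_accept(sqe, listenfd, NULL, NULL, 0);
 *	io_uring_submit(&ring);
 *
 * Each CQE's res is the new file descriptor; IORING_CQE_F_MORE stays set
 * while the request remains armed.
 */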
1582 
1583 int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1584 {
1585 	struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1586 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1587 	bool fixed = !!accept->file_slot;
1588 	struct proto_accept_arg arg = {
1589 		.flags = force_nonblock ? O_NONBLOCK : 0,
1590 	};
1591 	struct file *file;
1592 	unsigned cflags;
1593 	int ret, fd;
1594 
1595 	if (!(req->flags & REQ_F_POLLED) &&
1596 	    accept->iou_flags & IORING_ACCEPT_POLL_FIRST)
1597 		return -EAGAIN;
1598 
1599 retry:
1600 	if (!fixed) {
1601 		fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1602 		if (unlikely(fd < 0))
1603 			return fd;
1604 	}
1605 	arg.err = 0;
1606 	arg.is_empty = -1;
1607 	file = do_accept(req->file, &arg, accept->addr, accept->addr_len,
1608 			 accept->flags);
1609 	if (IS_ERR(file)) {
1610 		if (!fixed)
1611 			put_unused_fd(fd);
1612 		ret = PTR_ERR(file);
1613 		if (ret == -EAGAIN && force_nonblock &&
1614 		    !(accept->iou_flags & IORING_ACCEPT_DONTWAIT)) {
1615 			/*
1616 			 * if it's multishot and polled, we don't need to
1617 			 * return EAGAIN to arm the poll infra since it
1618 			 * has already been done
1619 			 */
1620 			if (issue_flags & IO_URING_F_MULTISHOT)
1621 				return IOU_ISSUE_SKIP_COMPLETE;
1622 			return ret;
1623 		}
1624 		if (ret == -ERESTARTSYS)
1625 			ret = -EINTR;
1626 		req_set_fail(req);
1627 	} else if (!fixed) {
1628 		fd_install(fd, file);
1629 		ret = fd;
1630 	} else {
1631 		ret = io_fixed_fd_install(req, issue_flags, file,
1632 						accept->file_slot);
1633 	}
1634 
1635 	cflags = 0;
1636 	if (!arg.is_empty)
1637 		cflags |= IORING_CQE_F_SOCK_NONEMPTY;
1638 
1639 	if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
1640 		io_req_set_res(req, ret, cflags);
1641 		return IOU_OK;
1642 	}
1643 
1644 	if (ret < 0)
1645 		return ret;
1646 	if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
1647 		if (cflags & IORING_CQE_F_SOCK_NONEMPTY || arg.is_empty == -1)
1648 			goto retry;
1649 		if (issue_flags & IO_URING_F_MULTISHOT)
1650 			return IOU_ISSUE_SKIP_COMPLETE;
1651 		return -EAGAIN;
1652 	}
1653 
1654 	io_req_set_res(req, ret, cflags);
1655 	if (!(issue_flags & IO_URING_F_MULTISHOT))
1656 		return IOU_OK;
1657 	return IOU_STOP_MULTISHOT;
1658 }
1659 
1660 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1661 {
1662 	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1663 
1664 	if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1665 		return -EINVAL;
1666 
1667 	sock->domain = READ_ONCE(sqe->fd);
1668 	sock->type = READ_ONCE(sqe->off);
1669 	sock->protocol = READ_ONCE(sqe->len);
1670 	sock->file_slot = READ_ONCE(sqe->file_index);
1671 	sock->nofile = rlimit(RLIMIT_NOFILE);
1672 
1673 	sock->flags = sock->type & ~SOCK_TYPE_MASK;
1674 	if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1675 		return -EINVAL;
1676 	if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1677 		return -EINVAL;
1678 	return 0;
1679 }
1680 
1681 int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1682 {
1683 	struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1684 	bool fixed = !!sock->file_slot;
1685 	struct file *file;
1686 	int ret, fd;
1687 
1688 	if (!fixed) {
1689 		fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1690 		if (unlikely(fd < 0))
1691 			return fd;
1692 	}
1693 	file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1694 	if (IS_ERR(file)) {
1695 		if (!fixed)
1696 			put_unused_fd(fd);
1697 		ret = PTR_ERR(file);
1698 		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1699 			return -EAGAIN;
1700 		if (ret == -ERESTARTSYS)
1701 			ret = -EINTR;
1702 		req_set_fail(req);
1703 	} else if (!fixed) {
1704 		fd_install(fd, file);
1705 		ret = fd;
1706 	} else {
1707 		ret = io_fixed_fd_install(req, issue_flags, file,
1708 					    sock->file_slot);
1709 	}
1710 	io_req_set_res(req, ret, 0);
1711 	return IOU_OK;
1712 }
1713 
1714 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1715 {
1716 	struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1717 	struct io_async_msghdr *io;
1718 
1719 	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1720 		return -EINVAL;
1721 
1722 	conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1723 	conn->addr_len =  READ_ONCE(sqe->addr2);
1724 	conn->in_progress = conn->seen_econnaborted = false;
1725 
1726 	io = io_msg_alloc_async(req);
1727 	if (unlikely(!io))
1728 		return -ENOMEM;
1729 
1730 	return move_addr_to_kernel(conn->addr, conn->addr_len, &io->addr);
1731 }
1732 
1733 int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1734 {
1735 	struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1736 	struct io_async_msghdr *io = req->async_data;
1737 	unsigned file_flags;
1738 	int ret;
1739 	bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1740 
1741 	if (connect->in_progress) {
1742 		struct poll_table_struct pt = { ._key = EPOLLERR };
1743 
1744 		if (vfs_poll(req->file, &pt) & EPOLLERR)
1745 			goto get_sock_err;
1746 	}
1747 
1748 	file_flags = force_nonblock ? O_NONBLOCK : 0;
1749 
1750 	ret = __sys_connect_file(req->file, &io->addr, connect->addr_len,
1751 				 file_flags);
1752 	if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
1753 	    && force_nonblock) {
1754 		if (ret == -EINPROGRESS) {
1755 			connect->in_progress = true;
1756 		} else if (ret == -ECONNABORTED) {
1757 			if (connect->seen_econnaborted)
1758 				goto out;
1759 			connect->seen_econnaborted = true;
1760 		}
1761 		return -EAGAIN;
1762 	}
1763 	if (connect->in_progress) {
1764 		/*
1765 		 * At least bluetooth will return -EBADFD on a re-connect
1766 		 * attempt, and it's (supposedly) also valid to get -EISCONN
1767 		 * which means the previous result is good. For both of these,
1768 		 * grab the sock_error() and use that for the completion.
1769 		 */
1770 		if (ret == -EBADFD || ret == -EISCONN) {
1771 get_sock_err:
1772 			ret = sock_error(sock_from_file(req->file)->sk);
1773 		}
1774 	}
1775 	if (ret == -ERESTARTSYS)
1776 		ret = -EINTR;
1777 out:
1778 	if (ret < 0)
1779 		req_set_fail(req);
1780 	io_req_msg_cleanup(req, issue_flags);
1781 	io_req_set_res(req, ret, 0);
1782 	return IOU_OK;
1783 }
1784 
1785 int io_bind_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1786 {
1787 	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
1788 	struct sockaddr __user *uaddr;
1789 	struct io_async_msghdr *io;
1790 
1791 	if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1792 		return -EINVAL;
1793 
1794 	uaddr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1795 	bind->addr_len =  READ_ONCE(sqe->addr2);
1796 
1797 	io = io_msg_alloc_async(req);
1798 	if (unlikely(!io))
1799 		return -ENOMEM;
1800 	return move_addr_to_kernel(uaddr, bind->addr_len, &io->addr);
1801 }
1802 
1803 int io_bind(struct io_kiocb *req, unsigned int issue_flags)
1804 {
1805 	struct io_bind *bind = io_kiocb_to_cmd(req, struct io_bind);
1806 	struct io_async_msghdr *io = req->async_data;
1807 	struct socket *sock;
1808 	int ret;
1809 
1810 	sock = sock_from_file(req->file);
1811 	if (unlikely(!sock))
1812 		return -ENOTSOCK;
1813 
1814 	ret = __sys_bind_socket(sock, &io->addr, bind->addr_len);
1815 	if (ret < 0)
1816 		req_set_fail(req);
1817 	io_req_set_res(req, ret, 0);
1818 	return 0;
1819 }
1820 
1821 int io_listen_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1822 {
1823 	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
1824 
1825 	if (sqe->addr || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in || sqe->addr2)
1826 		return -EINVAL;
1827 
1828 	listen->backlog = READ_ONCE(sqe->len);
1829 	return 0;
1830 }
1831 
1832 int io_listen(struct io_kiocb *req, unsigned int issue_flags)
1833 {
1834 	struct io_listen *listen = io_kiocb_to_cmd(req, struct io_listen);
1835 	struct socket *sock;
1836 	int ret;
1837 
1838 	sock = sock_from_file(req->file);
1839 	if (unlikely(!sock))
1840 		return -ENOTSOCK;
1841 
1842 	ret = __sys_listen_socket(sock, listen->backlog);
1843 	if (ret < 0)
1844 		req_set_fail(req);
1845 	io_req_set_res(req, ret, 0);
1846 	return 0;
1847 }
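
/*
 * Usage sketch (assumes a recent liburing with bind/listen support;
 * illustrative only): the two handlers above mirror bind(2) and listen(2)
 * on an io_uring-managed socket:
 *
 *	io_uring_prep_bind(sqe, sockfd, (struct sockaddr *)&addr, sizeof(addr));
 *	...
 *	io_uring_prep_listen(sqe2, sockfd, SOMAXCONN);
 *
 * Both complete with 0 in cqe->res on success or a negative errno.
 */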
1848 
1849 void io_netmsg_cache_free(const void *entry)
1850 {
1851 	struct io_async_msghdr *kmsg = (struct io_async_msghdr *) entry;
1852 
1853 	if (kmsg->free_iov) {
1854 		kasan_mempool_unpoison_object(kmsg->free_iov,
1855 				kmsg->free_iov_nr * sizeof(struct iovec));
1856 		io_netmsg_iovec_free(kmsg);
1857 	}
1858 	kfree(kmsg);
1859 }
1860 #endif
1861