1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/file.h>
5 #include <linux/slab.h>
6 #include <linux/net.h>
7 #include <linux/compat.h>
8 #include <net/compat.h>
9 #include <linux/io_uring.h>
10
11 #include <uapi/linux/io_uring.h>
12
13 #include "io_uring.h"
14 #include "kbuf.h"
15 #include "alloc_cache.h"
16 #include "net.h"
17 #include "notif.h"
18 #include "rsrc.h"
19
20 #if defined(CONFIG_NET)
21 struct io_shutdown {
22 struct file *file;
23 int how;
24 };
25
26 struct io_accept {
27 struct file *file;
28 struct sockaddr __user *addr;
29 int __user *addr_len;
30 int flags;
31 u32 file_slot;
32 unsigned long nofile;
33 };
34
35 struct io_socket {
36 struct file *file;
37 int domain;
38 int type;
39 int protocol;
40 int flags;
41 u32 file_slot;
42 unsigned long nofile;
43 };
44
45 struct io_connect {
46 struct file *file;
47 struct sockaddr __user *addr;
48 int addr_len;
49 bool in_progress;
50 bool seen_econnaborted;
51 };
52
53 struct io_sr_msg {
54 struct file *file;
55 union {
56 struct compat_msghdr __user *umsg_compat;
57 struct user_msghdr __user *umsg;
58 void __user *buf;
59 };
60 unsigned len;
61 unsigned done_io;
62 unsigned msg_flags;
63 u16 flags;
64 /* initialised and used only by !msg send variants */
65 u16 addr_len;
66 u16 buf_group;
67 void __user *addr;
68 void __user *msg_control;
69 /* used only for send zerocopy */
70 struct io_kiocb *notif;
71 };
72
73 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
74 {
75 struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
76
77 if (unlikely(sqe->off || sqe->addr || sqe->rw_flags ||
78 sqe->buf_index || sqe->splice_fd_in))
79 return -EINVAL;
80
81 shutdown->how = READ_ONCE(sqe->len);
82 return 0;
83 }
84
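/*
 * IORING_OP_SHUTDOWN: never attempted non-blocking, so a NONBLOCK issue is
 * bounced with -EAGAIN and the shutdown runs from io-wq instead.
 */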
85 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags)
86 {
87 struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
88 struct socket *sock;
89 int ret;
90
91 if (issue_flags & IO_URING_F_NONBLOCK)
92 return -EAGAIN;
93
94 sock = sock_from_file(req->file);
95 if (unlikely(!sock))
96 return -ENOTSOCK;
97
98 ret = __sys_shutdown_sock(sock, shutdown->how);
99 io_req_set_res(req, ret, 0);
100 return IOU_OK;
101 }
102
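/*
 * A short transfer is only worth retrying when the caller asked for
 * MSG_WAITALL and the socket preserves byte/record ordering across calls
 * (stream or seqpacket); otherwise the partial result is completed as-is.
 */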
103 static bool io_net_retry(struct socket *sock, int flags)
104 {
105 if (!(flags & MSG_WAITALL))
106 return false;
107 return sock->type == SOCK_STREAM || sock->type == SOCK_SEQPACKET;
108 }
109
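/*
 * Hand the request's async msghdr back to the per-ring cache; only done when
 * the ring is still locked (i.e. not IO_URING_F_UNLOCKED).
 */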
110 static void io_netmsg_recycle(struct io_kiocb *req, unsigned int issue_flags)
111 {
112 struct io_async_msghdr *hdr = req->async_data;
113
114 if (!req_has_async_data(req) || issue_flags & IO_URING_F_UNLOCKED)
115 return;
116
117 /* Let normal cleanup path reap it if we fail adding to the cache */
118 if (io_alloc_cache_put(&req->ctx->netmsg_cache, &hdr->cache)) {
119 req->async_data = NULL;
120 req->flags &= ~REQ_F_ASYNC_DATA;
121 }
122 }
123
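/*
 * Allocate async msghdr state for a request, reusing an entry from the
 * per-ring netmsg cache when the ring lock is held and falling back to a
 * plain io_alloc_async_data() allocation otherwise.
 */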
124 static struct io_async_msghdr *io_msg_alloc_async(struct io_kiocb *req,
125 unsigned int issue_flags)
126 {
127 struct io_ring_ctx *ctx = req->ctx;
128 struct io_cache_entry *entry;
129 struct io_async_msghdr *hdr;
130
131 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
132 entry = io_alloc_cache_get(&ctx->netmsg_cache);
133 if (entry) {
134 hdr = container_of(entry, struct io_async_msghdr, cache);
135 hdr->free_iov = NULL;
136 req->flags |= REQ_F_ASYNC_DATA;
137 req->async_data = hdr;
138 return hdr;
139 }
140 }
141
142 if (!io_alloc_async_data(req)) {
143 hdr = req->async_data;
144 hdr->free_iov = NULL;
145 return hdr;
146 }
147 return NULL;
148 }
149
150 static inline struct io_async_msghdr *io_msg_alloc_async_prep(struct io_kiocb *req)
151 {
152 /* ->prep_async is always called from the submission context */
153 return io_msg_alloc_async(req, 0);
154 }
155
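/*
 * Switch to async mode: copy the on-stack msghdr into persistently allocated
 * async data, repointing msg_name and any fast_iov usage at the copy, and
 * return -EAGAIN so the request is retried later.
 */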
156 static int io_setup_async_msg(struct io_kiocb *req,
157 struct io_async_msghdr *kmsg,
158 unsigned int issue_flags)
159 {
160 struct io_async_msghdr *async_msg;
161
162 if (req_has_async_data(req))
163 return -EAGAIN;
164 async_msg = io_msg_alloc_async(req, issue_flags);
165 if (!async_msg) {
166 kfree(kmsg->free_iov);
167 return -ENOMEM;
168 }
169 req->flags |= REQ_F_NEED_CLEANUP;
170 memcpy(async_msg, kmsg, sizeof(*kmsg));
171 if (async_msg->msg.msg_name)
172 async_msg->msg.msg_name = &async_msg->addr;
173
174 if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs)
175 return -EAGAIN;
176
177 /* if we're using fast_iov, set it to the new one */
178 if (!kmsg->free_iov) {
179 size_t fast_idx = kmsg->msg.msg_iter.iov - kmsg->fast_iov;
180 async_msg->msg.msg_iter.iov = &async_msg->fast_iov[fast_idx];
181 }
182
183 return -EAGAIN;
184 }
185
186 static int io_sendmsg_copy_hdr(struct io_kiocb *req,
187 struct io_async_msghdr *iomsg)
188 {
189 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
190 int ret;
191
192 iomsg->msg.msg_name = &iomsg->addr;
193 iomsg->free_iov = iomsg->fast_iov;
194 ret = sendmsg_copy_msghdr(&iomsg->msg, sr->umsg, sr->msg_flags,
195 &iomsg->free_iov);
196 /* save msg_control as sys_sendmsg() overwrites it */
197 sr->msg_control = iomsg->msg.msg_control_user;
198 return ret;
199 }
200
201 int io_send_prep_async(struct io_kiocb *req)
202 {
203 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
204 struct io_async_msghdr *io;
205 int ret;
206
207 if (!zc->addr || req_has_async_data(req))
208 return 0;
209 io = io_msg_alloc_async_prep(req);
210 if (!io)
211 return -ENOMEM;
212 ret = move_addr_to_kernel(zc->addr, zc->addr_len, &io->addr);
213 return ret;
214 }
215
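/*
 * Stash the already-copied destination address in async data before going
 * async, so a retried send does not have to re-read it from userspace.
 */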
216 static int io_setup_async_addr(struct io_kiocb *req,
217 struct sockaddr_storage *addr_storage,
218 unsigned int issue_flags)
219 {
220 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
221 struct io_async_msghdr *io;
222
223 if (!sr->addr || req_has_async_data(req))
224 return -EAGAIN;
225 io = io_msg_alloc_async(req, issue_flags);
226 if (!io)
227 return -ENOMEM;
228 memcpy(&io->addr, addr_storage, sizeof(io->addr));
229 return -EAGAIN;
230 }
231
232 int io_sendmsg_prep_async(struct io_kiocb *req)
233 {
234 int ret;
235
236 if (!io_msg_alloc_async_prep(req))
237 return -ENOMEM;
238 ret = io_sendmsg_copy_hdr(req, req->async_data);
239 if (!ret)
240 req->flags |= REQ_F_NEED_CLEANUP;
241 return ret;
242 }
243
244 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
245 {
246 struct io_async_msghdr *io = req->async_data;
247
248 kfree(io->free_iov);
249 }
250
251 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
252 {
253 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
254
255 if (req->opcode == IORING_OP_SEND) {
256 if (READ_ONCE(sqe->__pad3[0]))
257 return -EINVAL;
258 sr->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
259 sr->addr_len = READ_ONCE(sqe->addr_len);
260 } else if (sqe->addr2 || sqe->file_index) {
261 return -EINVAL;
262 }
263
264 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
265 sr->len = READ_ONCE(sqe->len);
266 sr->flags = READ_ONCE(sqe->ioprio);
267 if (sr->flags & ~IORING_RECVSEND_POLL_FIRST)
268 return -EINVAL;
269 sr->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
270 if (sr->msg_flags & MSG_DONTWAIT)
271 req->flags |= REQ_F_NOWAIT;
272
273 #ifdef CONFIG_COMPAT
274 if (req->ctx->compat)
275 sr->msg_flags |= MSG_CMSG_COMPAT;
276 #endif
277 sr->done_io = 0;
278 return 0;
279 }
280
281 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
282 {
283 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
284 struct io_async_msghdr iomsg, *kmsg;
285 struct socket *sock;
286 unsigned flags;
287 int min_ret = 0;
288 int ret;
289
290 sock = sock_from_file(req->file);
291 if (unlikely(!sock))
292 return -ENOTSOCK;
293
294 if (req_has_async_data(req)) {
295 kmsg = req->async_data;
296 kmsg->msg.msg_control_user = sr->msg_control;
297 } else {
298 ret = io_sendmsg_copy_hdr(req, &iomsg);
299 if (ret)
300 return ret;
301 kmsg = &iomsg;
302 }
303
304 if (!(req->flags & REQ_F_POLLED) &&
305 (sr->flags & IORING_RECVSEND_POLL_FIRST))
306 return io_setup_async_msg(req, kmsg, issue_flags);
307
308 flags = sr->msg_flags;
309 if (issue_flags & IO_URING_F_NONBLOCK)
310 flags |= MSG_DONTWAIT;
311 if (flags & MSG_WAITALL)
312 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
313
314 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
315
316 if (ret < min_ret) {
317 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
318 return io_setup_async_msg(req, kmsg, issue_flags);
319 if (ret > 0 && io_net_retry(sock, flags)) {
320 kmsg->msg.msg_controllen = 0;
321 kmsg->msg.msg_control = NULL;
322 sr->done_io += ret;
323 req->flags |= REQ_F_PARTIAL_IO;
324 return io_setup_async_msg(req, kmsg, issue_flags);
325 }
326 if (ret == -ERESTARTSYS)
327 ret = -EINTR;
328 req_set_fail(req);
329 }
330 /* fast path, check for non-NULL to avoid function call */
331 if (kmsg->free_iov)
332 kfree(kmsg->free_iov);
333 req->flags &= ~REQ_F_NEED_CLEANUP;
334 io_netmsg_recycle(req, issue_flags);
335 if (ret >= 0)
336 ret += sr->done_io;
337 else if (sr->done_io)
338 ret = sr->done_io;
339 io_req_set_res(req, ret, 0);
340 return IOU_OK;
341 }
342
343 int io_send(struct io_kiocb *req, unsigned int issue_flags)
344 {
345 struct sockaddr_storage __address;
346 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
347 struct msghdr msg;
348 struct iovec iov;
349 struct socket *sock;
350 unsigned flags;
351 int min_ret = 0;
352 int ret;
353
354 msg.msg_name = NULL;
355 msg.msg_control = NULL;
356 msg.msg_controllen = 0;
357 msg.msg_namelen = 0;
358 msg.msg_ubuf = NULL;
359
360 if (sr->addr) {
361 if (req_has_async_data(req)) {
362 struct io_async_msghdr *io = req->async_data;
363
364 msg.msg_name = &io->addr;
365 } else {
366 ret = move_addr_to_kernel(sr->addr, sr->addr_len, &__address);
367 if (unlikely(ret < 0))
368 return ret;
369 msg.msg_name = (struct sockaddr *)&__address;
370 }
371 msg.msg_namelen = sr->addr_len;
372 }
373
374 if (!(req->flags & REQ_F_POLLED) &&
375 (sr->flags & IORING_RECVSEND_POLL_FIRST))
376 return io_setup_async_addr(req, &__address, issue_flags);
377
378 sock = sock_from_file(req->file);
379 if (unlikely(!sock))
380 return -ENOTSOCK;
381
382 ret = import_single_range(ITER_SOURCE, sr->buf, sr->len, &iov, &msg.msg_iter);
383 if (unlikely(ret))
384 return ret;
385
386 flags = sr->msg_flags;
387 if (issue_flags & IO_URING_F_NONBLOCK)
388 flags |= MSG_DONTWAIT;
389 if (flags & MSG_WAITALL)
390 min_ret = iov_iter_count(&msg.msg_iter);
391
392 flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
393 msg.msg_flags = flags;
394 ret = sock_sendmsg(sock, &msg);
395 if (ret < min_ret) {
396 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
397 return io_setup_async_addr(req, &__address, issue_flags);
398
399 if (ret > 0 && io_net_retry(sock, flags)) {
400 sr->len -= ret;
401 sr->buf += ret;
402 sr->done_io += ret;
403 req->flags |= REQ_F_PARTIAL_IO;
404 return io_setup_async_addr(req, &__address, issue_flags);
405 }
406 if (ret == -ERESTARTSYS)
407 ret = -EINTR;
408 req_set_fail(req);
409 }
410 if (ret >= 0)
411 ret += sr->done_io;
412 else if (sr->done_io)
413 ret = sr->done_io;
414 io_req_set_res(req, ret, 0);
415 return IOU_OK;
416 }
417
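/*
 * Check whether the requested name and control lengths still fit in front of
 * the payload when added to sizeof(struct io_uring_recvmsg_out), i.e. that
 * the header size does not overflow an int.
 */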
418 static bool io_recvmsg_multishot_overflow(struct io_async_msghdr *iomsg)
419 {
420 int hdr;
421
422 if (iomsg->namelen < 0)
423 return true;
424 if (check_add_overflow((int)sizeof(struct io_uring_recvmsg_out),
425 iomsg->namelen, &hdr))
426 return true;
427 if (check_add_overflow(hdr, (int)iomsg->controllen, &hdr))
428 return true;
429
430 return false;
431 }
432
433 static int __io_recvmsg_copy_hdr(struct io_kiocb *req,
434 struct io_async_msghdr *iomsg)
435 {
436 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
437 struct user_msghdr msg;
438 int ret;
439
440 if (copy_from_user(&msg, sr->umsg, sizeof(*sr->umsg)))
441 return -EFAULT;
442
443 ret = __copy_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
444 if (ret)
445 return ret;
446
447 if (req->flags & REQ_F_BUFFER_SELECT) {
448 if (msg.msg_iovlen == 0) {
449 sr->len = iomsg->fast_iov[0].iov_len = 0;
450 iomsg->fast_iov[0].iov_base = NULL;
451 iomsg->free_iov = NULL;
452 } else if (msg.msg_iovlen > 1) {
453 return -EINVAL;
454 } else {
455 if (copy_from_user(iomsg->fast_iov, msg.msg_iov, sizeof(*msg.msg_iov)))
456 return -EFAULT;
457 sr->len = iomsg->fast_iov[0].iov_len;
458 iomsg->free_iov = NULL;
459 }
460
461 if (req->flags & REQ_F_APOLL_MULTISHOT) {
462 iomsg->namelen = msg.msg_namelen;
463 iomsg->controllen = msg.msg_controllen;
464 if (io_recvmsg_multishot_overflow(iomsg))
465 return -EOVERFLOW;
466 }
467 } else {
468 iomsg->free_iov = iomsg->fast_iov;
469 ret = __import_iovec(ITER_DEST, msg.msg_iov, msg.msg_iovlen, UIO_FASTIOV,
470 &iomsg->free_iov, &iomsg->msg.msg_iter,
471 false);
472 if (ret > 0)
473 ret = 0;
474 }
475
476 return ret;
477 }
478
479 #ifdef CONFIG_COMPAT
480 static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req,
481 struct io_async_msghdr *iomsg)
482 {
483 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
484 struct compat_msghdr msg;
485 struct compat_iovec __user *uiov;
486 int ret;
487
488 if (copy_from_user(&msg, sr->umsg_compat, sizeof(msg)))
489 return -EFAULT;
490
491 ret = __get_compat_msghdr(&iomsg->msg, &msg, &iomsg->uaddr);
492 if (ret)
493 return ret;
494
495 uiov = compat_ptr(msg.msg_iov);
496 if (req->flags & REQ_F_BUFFER_SELECT) {
497 compat_ssize_t clen;
498
499 iomsg->free_iov = NULL;
500 if (msg.msg_iovlen == 0) {
501 sr->len = 0;
502 } else if (msg.msg_iovlen > 1) {
503 return -EINVAL;
504 } else {
505 if (!access_ok(uiov, sizeof(*uiov)))
506 return -EFAULT;
507 if (__get_user(clen, &uiov->iov_len))
508 return -EFAULT;
509 if (clen < 0)
510 return -EINVAL;
511 sr->len = clen;
512 }
513
514 if (req->flags & REQ_F_APOLL_MULTISHOT) {
515 iomsg->namelen = msg.msg_namelen;
516 iomsg->controllen = msg.msg_controllen;
517 if (io_recvmsg_multishot_overflow(iomsg))
518 return -EOVERFLOW;
519 }
520 } else {
521 iomsg->free_iov = iomsg->fast_iov;
522 ret = __import_iovec(ITER_DEST, (struct iovec __user *)uiov, msg.msg_iovlen,
523 UIO_FASTIOV, &iomsg->free_iov,
524 &iomsg->msg.msg_iter, true);
525 if (ret < 0)
526 return ret;
527 }
528
529 return 0;
530 }
531 #endif
532
533 static int io_recvmsg_copy_hdr(struct io_kiocb *req,
534 struct io_async_msghdr *iomsg)
535 {
536 iomsg->msg.msg_name = &iomsg->addr;
537 iomsg->msg.msg_iter.nr_segs = 0;
538
539 #ifdef CONFIG_COMPAT
540 if (req->ctx->compat)
541 return __io_compat_recvmsg_copy_hdr(req, iomsg);
542 #endif
543
544 return __io_recvmsg_copy_hdr(req, iomsg);
545 }
546
547 int io_recvmsg_prep_async(struct io_kiocb *req)
548 {
549 int ret;
550
551 if (!io_msg_alloc_async_prep(req))
552 return -ENOMEM;
553 ret = io_recvmsg_copy_hdr(req, req->async_data);
554 if (!ret)
555 req->flags |= REQ_F_NEED_CLEANUP;
556 return ret;
557 }
558
559 #define RECVMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECV_MULTISHOT)
560
561 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
562 {
563 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
564
565 if (unlikely(sqe->file_index || sqe->addr2))
566 return -EINVAL;
567
568 sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr));
569 sr->len = READ_ONCE(sqe->len);
570 sr->flags = READ_ONCE(sqe->ioprio);
571 if (sr->flags & ~(RECVMSG_FLAGS))
572 return -EINVAL;
573 sr->msg_flags = READ_ONCE(sqe->msg_flags);
574 if (sr->msg_flags & MSG_DONTWAIT)
575 req->flags |= REQ_F_NOWAIT;
576 if (sr->msg_flags & MSG_ERRQUEUE)
577 req->flags |= REQ_F_CLEAR_POLLIN;
578 if (sr->flags & IORING_RECV_MULTISHOT) {
579 if (!(req->flags & REQ_F_BUFFER_SELECT))
580 return -EINVAL;
581 if (sr->msg_flags & MSG_WAITALL)
582 return -EINVAL;
583 if (req->opcode == IORING_OP_RECV && sr->len)
584 return -EINVAL;
585 req->flags |= REQ_F_APOLL_MULTISHOT;
586 /*
587 * Store the buffer group for this multishot receive separately,
588 * as if we end up doing an io-wq based issue that selects a
589 * buffer, it has to be committed immediately and that will
590 * clear ->buf_list. This means we lose the link to the buffer
591 * list, and the eventual buffer put on completion then cannot
592 * restore it.
593 */
594 sr->buf_group = req->buf_index;
595 }
596
597 #ifdef CONFIG_COMPAT
598 if (req->ctx->compat)
599 sr->msg_flags |= MSG_CMSG_COMPAT;
600 #endif
601 sr->done_io = 0;
602 return 0;
603 }
604
605 static inline void io_recv_prep_retry(struct io_kiocb *req)
606 {
607 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
608
609 sr->done_io = 0;
610 sr->len = 0; /* get from the provided buffer */
611 req->buf_index = sr->buf_group;
612 }
613
614 /*
615 * Finishes io_recv and io_recvmsg.
616 *
617 * Returns true if it is actually finished, or false if it should run
618 * again (for multishot).
619 */
620 static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
621 unsigned int cflags, bool mshot_finished,
622 unsigned issue_flags)
623 {
624 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
625 io_req_set_res(req, *ret, cflags);
626 *ret = IOU_OK;
627 return true;
628 }
629
630 if (!mshot_finished) {
631 if (io_post_aux_cqe(req->ctx, req->cqe.user_data, *ret,
632 cflags | IORING_CQE_F_MORE, false)) {
633 io_recv_prep_retry(req);
634 return false;
635 }
636 /*
637 * Otherwise stop multishot but use the current result.
638 * Probably will end up going into overflow, but this means
639 * we cannot trust the ordering anymore
640 */
641 }
642
643 io_req_set_res(req, *ret, cflags);
644
645 if (issue_flags & IO_URING_F_MULTISHOT)
646 *ret = IOU_STOP_MULTISHOT;
647 else
648 *ret = IOU_OK;
649 return true;
650 }
651
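/*
 * Lay out the selected buffer for multishot recvmsg: reserve space at the
 * front for struct io_uring_recvmsg_out plus name and control data, and
 * advance *buf/*len so only the remainder is used for the payload.
 */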
652 static int io_recvmsg_prep_multishot(struct io_async_msghdr *kmsg,
653 struct io_sr_msg *sr, void __user **buf,
654 size_t *len)
655 {
656 unsigned long ubuf = (unsigned long) *buf;
657 unsigned long hdr;
658
659 hdr = sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
660 kmsg->controllen;
661 if (*len < hdr)
662 return -EFAULT;
663
664 if (kmsg->controllen) {
665 unsigned long control = ubuf + hdr - kmsg->controllen;
666
667 kmsg->msg.msg_control_user = (void __user *) control;
668 kmsg->msg.msg_controllen = kmsg->controllen;
669 }
670
671 sr->buf = *buf; /* stash for later copy */
672 *buf = (void __user *) (ubuf + hdr);
673 kmsg->payloadlen = *len = *len - hdr;
674 return 0;
675 }
676
677 struct io_recvmsg_multishot_hdr {
678 struct io_uring_recvmsg_out msg;
679 struct sockaddr_storage addr;
680 };
681
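/*
 * Receive one message for multishot recvmsg and write an io_uring_recvmsg_out
 * header (plus any source address) to the start of the selected buffer, in
 * front of the payload. Returns how many bytes of the buffer were consumed.
 */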
682 static int io_recvmsg_multishot(struct socket *sock, struct io_sr_msg *io,
683 struct io_async_msghdr *kmsg,
684 unsigned int flags, bool *finished)
685 {
686 int err;
687 int copy_len;
688 struct io_recvmsg_multishot_hdr hdr;
689
690 if (kmsg->namelen)
691 kmsg->msg.msg_name = &hdr.addr;
692 kmsg->msg.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
693 kmsg->msg.msg_namelen = 0;
694
695 if (sock->file->f_flags & O_NONBLOCK)
696 flags |= MSG_DONTWAIT;
697
698 err = sock_recvmsg(sock, &kmsg->msg, flags);
699 *finished = err <= 0;
700 if (err < 0)
701 return err;
702
703 hdr.msg = (struct io_uring_recvmsg_out) {
704 .controllen = kmsg->controllen - kmsg->msg.msg_controllen,
705 .flags = kmsg->msg.msg_flags & ~MSG_CMSG_COMPAT
706 };
707
708 hdr.msg.payloadlen = err;
709 if (err > kmsg->payloadlen)
710 err = kmsg->payloadlen;
711
712 copy_len = sizeof(struct io_uring_recvmsg_out);
713 if (kmsg->msg.msg_namelen > kmsg->namelen)
714 copy_len += kmsg->namelen;
715 else
716 copy_len += kmsg->msg.msg_namelen;
717
718 /*
719 * "fromlen shall refer to the value before truncation.."
720 * 1003.1g
721 */
722 hdr.msg.namelen = kmsg->msg.msg_namelen;
723
724 /* ensure that there is no gap between hdr and sockaddr_storage */
725 BUILD_BUG_ON(offsetof(struct io_recvmsg_multishot_hdr, addr) !=
726 sizeof(struct io_uring_recvmsg_out));
727 if (copy_to_user(io->buf, &hdr, copy_len)) {
728 *finished = true;
729 return -EFAULT;
730 }
731
732 return sizeof(struct io_uring_recvmsg_out) + kmsg->namelen +
733 kmsg->controllen + err;
734 }
735
736 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags)
737 {
738 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
739 struct io_async_msghdr iomsg, *kmsg;
740 struct socket *sock;
741 unsigned int cflags;
742 unsigned flags;
743 int ret, min_ret = 0;
744 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
745 bool mshot_finished = true;
746
747 sock = sock_from_file(req->file);
748 if (unlikely(!sock))
749 return -ENOTSOCK;
750
751 if (req_has_async_data(req)) {
752 kmsg = req->async_data;
753 } else {
754 ret = io_recvmsg_copy_hdr(req, &iomsg);
755 if (ret)
756 return ret;
757 kmsg = &iomsg;
758 }
759
760 if (!(req->flags & REQ_F_POLLED) &&
761 (sr->flags & IORING_RECVSEND_POLL_FIRST))
762 return io_setup_async_msg(req, kmsg, issue_flags);
763
764 retry_multishot:
765 if (io_do_buffer_select(req)) {
766 void __user *buf;
767 size_t len = sr->len;
768
769 buf = io_buffer_select(req, &len, issue_flags);
770 if (!buf)
771 return -ENOBUFS;
772
773 if (req->flags & REQ_F_APOLL_MULTISHOT) {
774 ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
775 if (ret) {
776 io_kbuf_recycle(req, issue_flags);
777 return ret;
778 }
779 }
780
781 kmsg->fast_iov[0].iov_base = buf;
782 kmsg->fast_iov[0].iov_len = len;
783 iov_iter_init(&kmsg->msg.msg_iter, ITER_DEST, kmsg->fast_iov, 1,
784 len);
785 }
786
787 flags = sr->msg_flags;
788 if (force_nonblock)
789 flags |= MSG_DONTWAIT;
790
791 kmsg->msg.msg_get_inq = 1;
792 if (req->flags & REQ_F_APOLL_MULTISHOT) {
793 ret = io_recvmsg_multishot(sock, sr, kmsg, flags,
794 &mshot_finished);
795 } else {
796 /* disable partial retry for recvmsg with cmsg attached */
797 if (flags & MSG_WAITALL && !kmsg->msg.msg_controllen)
798 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
799
800 ret = __sys_recvmsg_sock(sock, &kmsg->msg, sr->umsg,
801 kmsg->uaddr, flags);
802 }
803
804 if (ret < min_ret) {
805 if (ret == -EAGAIN && force_nonblock) {
806 ret = io_setup_async_msg(req, kmsg, issue_flags);
807 if (ret == -EAGAIN && (issue_flags & IO_URING_F_MULTISHOT)) {
808 io_kbuf_recycle(req, issue_flags);
809 return IOU_ISSUE_SKIP_COMPLETE;
810 }
811 return ret;
812 }
813 if (ret > 0 && io_net_retry(sock, flags)) {
814 sr->done_io += ret;
815 req->flags |= REQ_F_PARTIAL_IO;
816 return io_setup_async_msg(req, kmsg, issue_flags);
817 }
818 if (ret == -ERESTARTSYS)
819 ret = -EINTR;
820 req_set_fail(req);
821 } else if ((flags & MSG_WAITALL) && (kmsg->msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
822 req_set_fail(req);
823 }
824
825 if (ret > 0)
826 ret += sr->done_io;
827 else if (sr->done_io)
828 ret = sr->done_io;
829 else
830 io_kbuf_recycle(req, issue_flags);
831
832 cflags = io_put_kbuf(req, issue_flags);
833 if (kmsg->msg.msg_inq)
834 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
835
836 if (!io_recv_finish(req, &ret, cflags, mshot_finished, issue_flags))
837 goto retry_multishot;
838
839 if (mshot_finished) {
840 /* fast path, check for non-NULL to avoid function call */
841 if (kmsg->free_iov)
842 kfree(kmsg->free_iov);
843 io_netmsg_recycle(req, issue_flags);
844 req->flags &= ~REQ_F_NEED_CLEANUP;
845 }
846
847 return ret;
848 }
849
850 int io_recv(struct io_kiocb *req, unsigned int issue_flags)
851 {
852 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
853 struct msghdr msg;
854 struct socket *sock;
855 struct iovec iov;
856 unsigned int cflags;
857 unsigned flags;
858 int ret, min_ret = 0;
859 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
860 size_t len = sr->len;
861
862 if (!(req->flags & REQ_F_POLLED) &&
863 (sr->flags & IORING_RECVSEND_POLL_FIRST))
864 return -EAGAIN;
865
866 sock = sock_from_file(req->file);
867 if (unlikely(!sock))
868 return -ENOTSOCK;
869
870 retry_multishot:
871 if (io_do_buffer_select(req)) {
872 void __user *buf;
873
874 buf = io_buffer_select(req, &len, issue_flags);
875 if (!buf)
876 return -ENOBUFS;
877 sr->buf = buf;
878 sr->len = len;
879 }
880
881 ret = import_single_range(ITER_DEST, sr->buf, len, &iov, &msg.msg_iter);
882 if (unlikely(ret))
883 goto out_free;
884
885 msg.msg_name = NULL;
886 msg.msg_namelen = 0;
887 msg.msg_control = NULL;
888 msg.msg_get_inq = 1;
889 msg.msg_flags = 0;
890 msg.msg_controllen = 0;
891 msg.msg_iocb = NULL;
892 msg.msg_ubuf = NULL;
893
894 flags = sr->msg_flags;
895 if (force_nonblock)
896 flags |= MSG_DONTWAIT;
897 if (flags & MSG_WAITALL)
898 min_ret = iov_iter_count(&msg.msg_iter);
899
900 ret = sock_recvmsg(sock, &msg, flags);
901 if (ret < min_ret) {
902 if (ret == -EAGAIN && force_nonblock) {
903 if (issue_flags & IO_URING_F_MULTISHOT) {
904 io_kbuf_recycle(req, issue_flags);
905 return IOU_ISSUE_SKIP_COMPLETE;
906 }
907
908 return -EAGAIN;
909 }
910 if (ret > 0 && io_net_retry(sock, flags)) {
911 sr->len -= ret;
912 sr->buf += ret;
913 sr->done_io += ret;
914 req->flags |= REQ_F_PARTIAL_IO;
915 return -EAGAIN;
916 }
917 if (ret == -ERESTARTSYS)
918 ret = -EINTR;
919 req_set_fail(req);
920 } else if ((flags & MSG_WAITALL) && (msg.msg_flags & (MSG_TRUNC | MSG_CTRUNC))) {
921 out_free:
922 req_set_fail(req);
923 }
924
925 if (ret > 0)
926 ret += sr->done_io;
927 else if (sr->done_io)
928 ret = sr->done_io;
929 else
930 io_kbuf_recycle(req, issue_flags);
931
932 cflags = io_put_kbuf(req, issue_flags);
933 if (msg.msg_inq)
934 cflags |= IORING_CQE_F_SOCK_NONEMPTY;
935
936 if (!io_recv_finish(req, &ret, cflags, ret <= 0, issue_flags))
937 goto retry_multishot;
938
939 return ret;
940 }
941
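/*
 * Cleanup for a zerocopy send that is being torn down: free a copied iovec,
 * if any, and flush the notification so its CQE still gets posted.
 */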
942 void io_send_zc_cleanup(struct io_kiocb *req)
943 {
944 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
945 struct io_async_msghdr *io;
946
947 if (req_has_async_data(req)) {
948 io = req->async_data;
949 /* might be ->fast_iov if *msg_copy_hdr failed */
950 if (io->free_iov != io->fast_iov)
951 kfree(io->free_iov);
952 }
953 if (zc->notif) {
954 io_notif_flush(zc->notif);
955 zc->notif = NULL;
956 }
957 }
958
959 int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
960 {
961 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
962 struct io_ring_ctx *ctx = req->ctx;
963 struct io_kiocb *notif;
964
965 if (unlikely(READ_ONCE(sqe->__pad2[0]) || READ_ONCE(sqe->addr3)))
966 return -EINVAL;
967 /* we don't support IOSQE_CQE_SKIP_SUCCESS just yet */
968 if (req->flags & REQ_F_CQE_SKIP)
969 return -EINVAL;
970
971 zc->flags = READ_ONCE(sqe->ioprio);
972 if (zc->flags & ~(IORING_RECVSEND_POLL_FIRST |
973 IORING_RECVSEND_FIXED_BUF |
974 IORING_SEND_ZC_REPORT_USAGE))
975 return -EINVAL;
976 notif = zc->notif = io_alloc_notif(ctx);
977 if (!notif)
978 return -ENOMEM;
979 notif->cqe.user_data = req->cqe.user_data;
980 notif->cqe.res = 0;
981 notif->cqe.flags = IORING_CQE_F_NOTIF;
982 req->flags |= REQ_F_NEED_CLEANUP;
983 if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
984 unsigned idx = READ_ONCE(sqe->buf_index);
985
986 if (unlikely(idx >= ctx->nr_user_bufs))
987 return -EFAULT;
988 idx = array_index_nospec(idx, ctx->nr_user_bufs);
989 req->imu = READ_ONCE(ctx->user_bufs[idx]);
990 io_req_set_rsrc_node(notif, ctx, 0);
991 }
992 if (zc->flags & IORING_SEND_ZC_REPORT_USAGE) {
993 io_notif_to_data(notif)->zc_report = true;
994 }
995
996 if (req->opcode == IORING_OP_SEND_ZC) {
997 if (READ_ONCE(sqe->__pad3[0]))
998 return -EINVAL;
999 zc->addr = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1000 zc->addr_len = READ_ONCE(sqe->addr_len);
1001 } else {
1002 if (unlikely(sqe->addr2 || sqe->file_index))
1003 return -EINVAL;
1004 if (unlikely(zc->flags & IORING_RECVSEND_FIXED_BUF))
1005 return -EINVAL;
1006 }
1007
1008 zc->buf = u64_to_user_ptr(READ_ONCE(sqe->addr));
1009 zc->len = READ_ONCE(sqe->len);
1010 zc->msg_flags = READ_ONCE(sqe->msg_flags) | MSG_NOSIGNAL;
1011 if (zc->msg_flags & MSG_DONTWAIT)
1012 req->flags |= REQ_F_NOWAIT;
1013
1014 zc->done_io = 0;
1015
1016 #ifdef CONFIG_COMPAT
1017 if (req->ctx->compat)
1018 zc->msg_flags |= MSG_CMSG_COMPAT;
1019 #endif
1020 return 0;
1021 }
1022
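/*
 * Zerocopy fill path for plain iovec-backed sends: give up managed frag
 * references and let the generic zerocopy iterator pin the user pages.
 */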
1023 static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
1024 struct iov_iter *from, size_t length)
1025 {
1026 skb_zcopy_downgrade_managed(skb);
1027 return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1028 }
1029
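/*
 * Zerocopy fill path for registered (bvec-backed) buffers: append the bvec
 * segments directly as skb frags without taking extra page references, then
 * account the added truesize against the socket.
 */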
1030 static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1031 struct iov_iter *from, size_t length)
1032 {
1033 struct skb_shared_info *shinfo = skb_shinfo(skb);
1034 int frag = shinfo->nr_frags;
1035 int ret = 0;
1036 struct bvec_iter bi;
1037 ssize_t copied = 0;
1038 unsigned long truesize = 0;
1039
1040 if (!frag)
1041 shinfo->flags |= SKBFL_MANAGED_FRAG_REFS;
1042 else if (unlikely(!skb_zcopy_managed(skb)))
1043 return __zerocopy_sg_from_iter(NULL, sk, skb, from, length);
1044
1045 bi.bi_size = min(from->count, length);
1046 bi.bi_bvec_done = from->iov_offset;
1047 bi.bi_idx = 0;
1048
1049 while (bi.bi_size && frag < MAX_SKB_FRAGS) {
1050 struct bio_vec v = mp_bvec_iter_bvec(from->bvec, bi);
1051
1052 copied += v.bv_len;
1053 truesize += PAGE_ALIGN(v.bv_len + v.bv_offset);
1054 __skb_fill_page_desc_noacc(shinfo, frag++, v.bv_page,
1055 v.bv_offset, v.bv_len);
1056 bvec_iter_advance_single(from->bvec, &bi, v.bv_len);
1057 }
1058 if (bi.bi_size)
1059 ret = -EMSGSIZE;
1060
1061 shinfo->nr_frags = frag;
1062 from->bvec += bi.bi_idx;
1063 from->nr_segs -= bi.bi_idx;
1064 from->count -= copied;
1065 from->iov_offset = bi.bi_bvec_done;
1066
1067 skb->data_len += copied;
1068 skb->len += copied;
1069 skb->truesize += truesize;
1070
1071 if (sk && sk->sk_type == SOCK_STREAM) {
1072 sk_wmem_queued_add(sk, truesize);
1073 if (!skb_zcopy_pure(skb))
1074 sk_mem_charge(sk, truesize);
1075 } else {
1076 refcount_add(truesize, &skb->sk->sk_wmem_alloc);
1077 }
1078 return ret;
1079 }
1080
1081 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
1082 {
1083 struct sockaddr_storage __address;
1084 struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
1085 struct msghdr msg;
1086 struct iovec iov;
1087 struct socket *sock;
1088 unsigned msg_flags;
1089 int ret, min_ret = 0;
1090
1091 sock = sock_from_file(req->file);
1092 if (unlikely(!sock))
1093 return -ENOTSOCK;
1094 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1095 return -EOPNOTSUPP;
1096
1097 msg.msg_name = NULL;
1098 msg.msg_control = NULL;
1099 msg.msg_controllen = 0;
1100 msg.msg_namelen = 0;
1101
1102 if (zc->addr) {
1103 if (req_has_async_data(req)) {
1104 struct io_async_msghdr *io = req->async_data;
1105
1106 msg.msg_name = &io->addr;
1107 } else {
1108 ret = move_addr_to_kernel(zc->addr, zc->addr_len, &__address);
1109 if (unlikely(ret < 0))
1110 return ret;
1111 msg.msg_name = (struct sockaddr *)&__address;
1112 }
1113 msg.msg_namelen = zc->addr_len;
1114 }
1115
1116 if (!(req->flags & REQ_F_POLLED) &&
1117 (zc->flags & IORING_RECVSEND_POLL_FIRST))
1118 return io_setup_async_addr(req, &__address, issue_flags);
1119
1120 if (zc->flags & IORING_RECVSEND_FIXED_BUF) {
1121 ret = io_import_fixed(ITER_SOURCE, &msg.msg_iter, req->imu,
1122 (u64)(uintptr_t)zc->buf, zc->len);
1123 if (unlikely(ret))
1124 return ret;
1125 msg.sg_from_iter = io_sg_from_iter;
1126 } else {
1127 ret = import_single_range(ITER_SOURCE, zc->buf, zc->len, &iov,
1128 &msg.msg_iter);
1129 if (unlikely(ret))
1130 return ret;
1131 ret = io_notif_account_mem(zc->notif, zc->len);
1132 if (unlikely(ret))
1133 return ret;
1134 msg.sg_from_iter = io_sg_from_iter_iovec;
1135 }
1136
1137 msg_flags = zc->msg_flags | MSG_ZEROCOPY;
1138 if (issue_flags & IO_URING_F_NONBLOCK)
1139 msg_flags |= MSG_DONTWAIT;
1140 if (msg_flags & MSG_WAITALL)
1141 min_ret = iov_iter_count(&msg.msg_iter);
1142 msg_flags &= ~MSG_INTERNAL_SENDMSG_FLAGS;
1143
1144 msg.msg_flags = msg_flags;
1145 msg.msg_ubuf = &io_notif_to_data(zc->notif)->uarg;
1146 ret = sock_sendmsg(sock, &msg);
1147
1148 if (unlikely(ret < min_ret)) {
1149 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1150 return io_setup_async_addr(req, &__address, issue_flags);
1151
1152 if (ret > 0 && io_net_retry(sock, msg.msg_flags)) {
1153 zc->len -= ret;
1154 zc->buf += ret;
1155 zc->done_io += ret;
1156 req->flags |= REQ_F_PARTIAL_IO;
1157 return io_setup_async_addr(req, &__address, issue_flags);
1158 }
1159 if (ret == -ERESTARTSYS)
1160 ret = -EINTR;
1161 req_set_fail(req);
1162 }
1163
1164 if (ret >= 0)
1165 ret += zc->done_io;
1166 else if (zc->done_io)
1167 ret = zc->done_io;
1168
1169 /*
1170 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1171 * flushing notif to io_send_zc_cleanup()
1172 */
1173 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1174 io_notif_flush(zc->notif);
1175 req->flags &= ~REQ_F_NEED_CLEANUP;
1176 }
1177 io_req_set_res(req, ret, IORING_CQE_F_MORE);
1178 return IOU_OK;
1179 }
1180
1181 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
1182 {
1183 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1184 struct io_async_msghdr iomsg, *kmsg;
1185 struct socket *sock;
1186 unsigned flags;
1187 int ret, min_ret = 0;
1188
1189 sock = sock_from_file(req->file);
1190 if (unlikely(!sock))
1191 return -ENOTSOCK;
1192 if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
1193 return -EOPNOTSUPP;
1194
1195 if (req_has_async_data(req)) {
1196 kmsg = req->async_data;
1197 } else {
1198 ret = io_sendmsg_copy_hdr(req, &iomsg);
1199 if (ret)
1200 return ret;
1201 kmsg = &iomsg;
1202 }
1203
1204 if (!(req->flags & REQ_F_POLLED) &&
1205 (sr->flags & IORING_RECVSEND_POLL_FIRST))
1206 return io_setup_async_msg(req, kmsg, issue_flags);
1207
1208 flags = sr->msg_flags | MSG_ZEROCOPY;
1209 if (issue_flags & IO_URING_F_NONBLOCK)
1210 flags |= MSG_DONTWAIT;
1211 if (flags & MSG_WAITALL)
1212 min_ret = iov_iter_count(&kmsg->msg.msg_iter);
1213
1214 kmsg->msg.msg_ubuf = &io_notif_to_data(sr->notif)->uarg;
1215 kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
1216 ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags);
1217
1218 if (unlikely(ret < min_ret)) {
1219 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1220 return io_setup_async_msg(req, kmsg, issue_flags);
1221
1222 if (ret > 0 && io_net_retry(sock, flags)) {
1223 sr->done_io += ret;
1224 req->flags |= REQ_F_PARTIAL_IO;
1225 return io_setup_async_msg(req, kmsg, issue_flags);
1226 }
1227 if (ret == -ERESTARTSYS)
1228 ret = -EINTR;
1229 req_set_fail(req);
1230 }
1231 /* fast path, check for non-NULL to avoid function call */
1232 if (kmsg->free_iov) {
1233 kfree(kmsg->free_iov);
1234 kmsg->free_iov = NULL;
1235 }
1236
1237 io_netmsg_recycle(req, issue_flags);
1238 if (ret >= 0)
1239 ret += sr->done_io;
1240 else if (sr->done_io)
1241 ret = sr->done_io;
1242
1243 /*
1244 * If we're in io-wq we can't rely on tw ordering guarantees, defer
1245 * flushing notif to io_send_zc_cleanup()
1246 */
1247 if (!(issue_flags & IO_URING_F_UNLOCKED)) {
1248 io_notif_flush(sr->notif);
1249 req->flags &= ~REQ_F_NEED_CLEANUP;
1250 }
1251 io_req_set_res(req, ret, IORING_CQE_F_MORE);
1252 return IOU_OK;
1253 }
1254
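/*
 * Failure completion for send/recv opcodes: report partial progress, if any,
 * as the result, and keep IORING_CQE_F_MORE set for zerocopy sends since a
 * notification CQE will still follow.
 */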
1255 void io_sendrecv_fail(struct io_kiocb *req)
1256 {
1257 struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
1258
1259 if (req->flags & REQ_F_PARTIAL_IO)
1260 req->cqe.res = sr->done_io;
1261
1262 if ((req->flags & REQ_F_NEED_CLEANUP) &&
1263 (req->opcode == IORING_OP_SEND_ZC || req->opcode == IORING_OP_SENDMSG_ZC))
1264 req->cqe.flags |= IORING_CQE_F_MORE;
1265 }
1266
1267 int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1268 {
1269 struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1270 unsigned flags;
1271
1272 if (sqe->len || sqe->buf_index)
1273 return -EINVAL;
1274
1275 accept->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1276 accept->addr_len = u64_to_user_ptr(READ_ONCE(sqe->addr2));
1277 accept->flags = READ_ONCE(sqe->accept_flags);
1278 accept->nofile = rlimit(RLIMIT_NOFILE);
1279 flags = READ_ONCE(sqe->ioprio);
1280 if (flags & ~IORING_ACCEPT_MULTISHOT)
1281 return -EINVAL;
1282
1283 accept->file_slot = READ_ONCE(sqe->file_index);
1284 if (accept->file_slot) {
1285 if (accept->flags & SOCK_CLOEXEC)
1286 return -EINVAL;
1287 if (flags & IORING_ACCEPT_MULTISHOT &&
1288 accept->file_slot != IORING_FILE_INDEX_ALLOC)
1289 return -EINVAL;
1290 }
1291 if (accept->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1292 return -EINVAL;
1293 if (SOCK_NONBLOCK != O_NONBLOCK && (accept->flags & SOCK_NONBLOCK))
1294 accept->flags = (accept->flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
1295 if (flags & IORING_ACCEPT_MULTISHOT)
1296 req->flags |= REQ_F_APOLL_MULTISHOT;
1297 return 0;
1298 }
1299
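/*
 * Accept a connection and install the resulting file into a normal fd or a
 * fixed file slot. In multishot mode, post one CQE per accepted connection
 * and keep looping until accept would block or the CQE cannot be posted.
 */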
1300 int io_accept(struct io_kiocb *req, unsigned int issue_flags)
1301 {
1302 struct io_ring_ctx *ctx = req->ctx;
1303 struct io_accept *accept = io_kiocb_to_cmd(req, struct io_accept);
1304 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1305 unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0;
1306 bool fixed = !!accept->file_slot;
1307 struct file *file;
1308 int ret, fd;
1309
1310 retry:
1311 if (!fixed) {
1312 fd = __get_unused_fd_flags(accept->flags, accept->nofile);
1313 if (unlikely(fd < 0))
1314 return fd;
1315 }
1316 file = do_accept(req->file, file_flags, accept->addr, accept->addr_len,
1317 accept->flags);
1318 if (IS_ERR(file)) {
1319 if (!fixed)
1320 put_unused_fd(fd);
1321 ret = PTR_ERR(file);
1322 if (ret == -EAGAIN && force_nonblock) {
1323 /*
1324 * if it's multishot and polled, we don't need to
1325 * return EAGAIN to arm the poll infra since it
1326 * has already been done
1327 */
1328 if (issue_flags & IO_URING_F_MULTISHOT)
1329 return IOU_ISSUE_SKIP_COMPLETE;
1330 return ret;
1331 }
1332 if (ret == -ERESTARTSYS)
1333 ret = -EINTR;
1334 req_set_fail(req);
1335 } else if (!fixed) {
1336 fd_install(fd, file);
1337 ret = fd;
1338 } else {
1339 ret = io_fixed_fd_install(req, issue_flags, file,
1340 accept->file_slot);
1341 }
1342
1343 if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
1344 io_req_set_res(req, ret, 0);
1345 return IOU_OK;
1346 }
1347
1348 if (ret < 0)
1349 return ret;
1350 if (io_post_aux_cqe(ctx, req->cqe.user_data, ret, IORING_CQE_F_MORE, false))
1351 goto retry;
1352
1353 io_req_set_res(req, ret, 0);
1354 return IOU_STOP_MULTISHOT;
1355 }
1356
1357 int io_socket_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1358 {
1359 struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1360
1361 if (sqe->addr || sqe->rw_flags || sqe->buf_index)
1362 return -EINVAL;
1363
1364 sock->domain = READ_ONCE(sqe->fd);
1365 sock->type = READ_ONCE(sqe->off);
1366 sock->protocol = READ_ONCE(sqe->len);
1367 sock->file_slot = READ_ONCE(sqe->file_index);
1368 sock->nofile = rlimit(RLIMIT_NOFILE);
1369
1370 sock->flags = sock->type & ~SOCK_TYPE_MASK;
1371 if (sock->file_slot && (sock->flags & SOCK_CLOEXEC))
1372 return -EINVAL;
1373 if (sock->flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
1374 return -EINVAL;
1375 return 0;
1376 }
1377
1378 int io_socket(struct io_kiocb *req, unsigned int issue_flags)
1379 {
1380 struct io_socket *sock = io_kiocb_to_cmd(req, struct io_socket);
1381 bool fixed = !!sock->file_slot;
1382 struct file *file;
1383 int ret, fd;
1384
1385 if (!fixed) {
1386 fd = __get_unused_fd_flags(sock->flags, sock->nofile);
1387 if (unlikely(fd < 0))
1388 return fd;
1389 }
1390 file = __sys_socket_file(sock->domain, sock->type, sock->protocol);
1391 if (IS_ERR(file)) {
1392 if (!fixed)
1393 put_unused_fd(fd);
1394 ret = PTR_ERR(file);
1395 if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
1396 return -EAGAIN;
1397 if (ret == -ERESTARTSYS)
1398 ret = -EINTR;
1399 req_set_fail(req);
1400 } else if (!fixed) {
1401 fd_install(fd, file);
1402 ret = fd;
1403 } else {
1404 ret = io_fixed_fd_install(req, issue_flags, file,
1405 sock->file_slot);
1406 }
1407 io_req_set_res(req, ret, 0);
1408 return IOU_OK;
1409 }
1410
1411 int io_connect_prep_async(struct io_kiocb *req)
1412 {
1413 struct io_async_connect *io = req->async_data;
1414 struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1415
1416 return move_addr_to_kernel(conn->addr, conn->addr_len, &io->address);
1417 }
1418
1419 int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
1420 {
1421 struct io_connect *conn = io_kiocb_to_cmd(req, struct io_connect);
1422
1423 if (sqe->len || sqe->buf_index || sqe->rw_flags || sqe->splice_fd_in)
1424 return -EINVAL;
1425
1426 conn->addr = u64_to_user_ptr(READ_ONCE(sqe->addr));
1427 conn->addr_len = READ_ONCE(sqe->addr2);
1428 conn->in_progress = conn->seen_econnaborted = false;
1429 return 0;
1430 }
1431
1432 int io_connect(struct io_kiocb *req, unsigned int issue_flags)
1433 {
1434 struct io_connect *connect = io_kiocb_to_cmd(req, struct io_connect);
1435 struct io_async_connect __io, *io;
1436 unsigned file_flags;
1437 int ret;
1438 bool force_nonblock = issue_flags & IO_URING_F_NONBLOCK;
1439
1440 if (req_has_async_data(req)) {
1441 io = req->async_data;
1442 } else {
1443 ret = move_addr_to_kernel(connect->addr,
1444 connect->addr_len,
1445 &__io.address);
1446 if (ret)
1447 goto out;
1448 io = &__io;
1449 }
1450
1451 file_flags = force_nonblock ? O_NONBLOCK : 0;
1452
1453 ret = __sys_connect_file(req->file, &io->address,
1454 connect->addr_len, file_flags);
1455 if ((ret == -EAGAIN || ret == -EINPROGRESS || ret == -ECONNABORTED)
1456 && force_nonblock) {
1457 if (ret == -EINPROGRESS) {
1458 connect->in_progress = true;
1459 } else if (ret == -ECONNABORTED) {
1460 if (connect->seen_econnaborted)
1461 goto out;
1462 connect->seen_econnaborted = true;
1463 }
1464 if (req_has_async_data(req))
1465 return -EAGAIN;
1466 if (io_alloc_async_data(req)) {
1467 ret = -ENOMEM;
1468 goto out;
1469 }
1470 memcpy(req->async_data, &__io, sizeof(__io));
1471 return -EAGAIN;
1472 }
1473 if (connect->in_progress) {
1474 /*
1475 * At least bluetooth will return -EBADFD on a re-connect
1476 * attempt, and it's (supposedly) also valid to get -EISCONN
1477 * which means the previous result is good. For both of these,
1478 * grab the sock_error() and use that for the completion.
1479 */
1480 if (ret == -EBADFD || ret == -EISCONN)
1481 ret = sock_error(sock_from_file(req->file)->sk);
1482 }
1483 if (ret == -ERESTARTSYS)
1484 ret = -EINTR;
1485 out:
1486 if (ret < 0)
1487 req_set_fail(req);
1488 io_req_set_res(req, ret, 0);
1489 return IOU_OK;
1490 }
1491
1492 void io_netmsg_cache_free(struct io_cache_entry *entry)
1493 {
1494 kfree(container_of(entry, struct io_async_msghdr, cache));
1495 }
1496 #endif
1497