// SPDX-License-Identifier: GPL-2.0
/*
 *	SUCS NET3:
 *
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *			udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff	:	Unconnected accept() fix.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/uio.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>

#include "datagram.h"

/*
 *	Is a socket 'connection oriented'?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync,
				  void *key)
{
	/*
	 * Avoid a wakeup if the event is not interesting to us
	 */
	if (key && !(key_to_poll(key) & (EPOLLIN | EPOLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wait for the last received packet to be different from skb
 */
int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				const struct sk_buff *skb)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (READ_ONCE(sk->sk_receive_queue.prev) != skb)
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so, we report the problem.
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
EXPORT_SYMBOL(__skb_wait_for_more_packets);

static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (skb->peeked)
		return skb;

	/* We have to unshare an skb before modifying it. */
	if (!skb_shared(skb))
		goto done;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return ERR_PTR(-ENOMEM);

	skb->prev->next = nskb;
	skb->next->prev = nskb;
	nskb->prev = skb->prev;
	nskb->next = skb->next;

	consume_skb(skb);
	skb = nskb;

done:
	skb->peeked = 1;

	return skb;
}

struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  void (*destructor)(struct sock *sk,
							     struct sk_buff *skb),
					  int *off, int *err,
					  struct sk_buff **last)
{
	bool peek_at_off = false;
	struct sk_buff *skb;
	int _off = 0;

	if (unlikely(flags & MSG_PEEK && *off >= 0)) {
		peek_at_off = true;
		_off = *off;
	}

	*last = queue->prev;
	skb_queue_walk(queue, skb) {
		if (flags & MSG_PEEK) {
			if (peek_at_off && _off >= skb->len &&
			    (_off || skb->peeked)) {
				_off -= skb->len;
				continue;
			}
			if (!skb->len) {
				skb = skb_set_peeked(skb);
				if (IS_ERR(skb)) {
					*err = PTR_ERR(skb);
					return NULL;
				}
			}
			refcount_inc(&skb->users);
		} else {
			__skb_unlink(skb, queue);
			if (destructor)
				destructor(sk, skb);
		}
		*off = _off;
		return skb;
	}
	return NULL;
}

/**
 *	__skb_try_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG\_ flags
 *	@destructor: invoked under the receive lock on successful dequeue
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *	@last: set to last peeked message to inform the wait function
 *	       what to look for when peeking
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX, AX.25 and AppleTalk code. It also finally
 *	fixes the long-standing peek and read race for datagram sockets. If
 *	you alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so
 *	the caller needs to unlock the socket in that case (usually by
 *	calling skb_free_datagram). Returns NULL with @err set to
 *	-EAGAIN if no data was available or to some other value if an
 *	error was detected.
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	* 8) Great win.)
 *	*					--ANK (980729)
 *
 *	The order of the tests when we find no data waiting is specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
					void (*destructor)(struct sock *sk,
							   struct sk_buff *skb),
					int *off, int *err,
					struct sk_buff **last)
{
	struct sk_buff_head *queue = &sk->sk_receive_queue;
	struct sk_buff *skb;
	unsigned long cpu_flags;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	do {
		/* Again only user level code calls this function, so nothing
		 * at interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb = __skb_try_recv_from_queue(sk, queue, flags, destructor,
						off, &error, last);
		spin_unlock_irqrestore(&queue->lock, cpu_flags);
		if (error)
			goto no_packet;
		if (skb)
			return skb;

		if (!sk_can_busy_loop(sk))
			break;

		sk_busy_loop(sk, flags & MSG_DONTWAIT);
	} while (READ_ONCE(sk->sk_receive_queue.prev) != *last);

	error = -EAGAIN;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_try_recv_datagram);

struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
				    void (*destructor)(struct sock *sk,
						       struct sk_buff *skb),
				    int *off, int *err)
{
	struct sk_buff *skb, *last;
	long timeo;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		skb = __skb_try_recv_datagram(sk, flags, destructor, off, err,
					      &last);
		if (skb)
			return skb;

		if (*err != -EAGAIN)
			break;
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, err, &timeo, last));

	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
				  int noblock, int *err)
{
	int off = 0;

	return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				   NULL, &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
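
/* A minimal usage sketch (not part of this file): how a protocol's
 * recvmsg handler typically drives skb_recv_datagram() together with
 * skb_copy_datagram_msg() and skb_free_datagram(). The function name
 * and truncation policy below are hypothetical.
 */
#if 0
static int toy_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
		       int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (copied > len) {
		copied = len;			/* truncate to user buffer */
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_msg(skb, 0, msg, copied);
	skb_free_datagram(sk, skb);

	return err ? err : copied;
}
#endif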

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
	sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);

void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
{
	bool slow;

	if (!skb_unref(skb)) {
		sk_peek_offset_bwd(sk, len);
		return;
	}

	slow = lock_sock_fast(sk);
	sk_peek_offset_bwd(sk, len);
	skb_orphan(skb);
	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	__kfree_skb(skb);
}
EXPORT_SYMBOL(__skb_free_datagram_locked);

int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
			struct sk_buff *skb, unsigned int flags,
			void (*destructor)(struct sock *sk,
					   struct sk_buff *skb))
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk_queue->lock);
		if (skb->next) {
			__skb_unlink(skb, sk_queue);
			refcount_dec(&skb->users);
			if (destructor)
				destructor(sk, skb);
			err = 0;
		}
		spin_unlock_bh(&sk_queue->lock);
	}

	atomic_inc(&sk->sk_drops);
	return err;
}
EXPORT_SYMBOL(__sk_queue_drop_skb);

/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG\_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram. The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock. Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */

int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = __sk_queue_drop_skb(sk, &sk->sk_receive_queue, skb, flags,
				      NULL);

	kfree_skb(skb);
	sk_mem_reclaim_partial(sk);
	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
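
/* A hedged sketch of the classic skb_kill_datagram() pattern: validate a
 * received datagram and, on checksum failure, take it off the queue (even
 * if it was only peeked) and free it. Loosely modeled on UDP-style
 * receive paths; the surrounding function is hypothetical.
 */
#if 0
static int toy_validate_or_drop(struct sock *sk, unsigned int flags, int *err)
{
	struct sk_buff *skb;

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, err);
	if (!skb)
		return -1;

	if (skb_checksum_complete(skb)) {
		/* Bad checksum: unlink (if still queued) and free,
		 * bumping sk->sk_drops via __sk_queue_drop_skb().
		 */
		skb_kill_datagram(sk, skb, flags);
		*err = -EAGAIN;
		return -1;
	}

	skb_free_datagram(sk, skb);
	return 0;
}
#endif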

static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
			       struct iov_iter *to, int len, bool fault_short,
			       size_t (*cb)(const void *, size_t, void *,
					    struct iov_iter *), void *data)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset, start_off = offset, n;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		n = cb(skb->data + offset, copy, data, to);
		offset += n;
		if (n != copy)
			goto short_copy;
		if ((len -= copy) == 0)
			return 0;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			struct page *page = skb_frag_page(frag);
			u8 *vaddr = kmap(page);

			if (copy > len)
				copy = len;
			n = cb(vaddr + skb_frag_off(frag) + offset - start,
			       copy, data, to);
			kunmap(page);
			offset += n;
			if (n != copy)
				goto short_copy;
			if (!(len -= copy))
				return 0;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (__skb_datagram_iter(frag_iter, offset - start,
						to, copy, fault_short, cb, data))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

	/* This is not really a user copy fault, but rather someone
	 * gave us a bogus length on the skb. We should probably
	 * print a warning here as it may indicate a kernel bug.
	 */

fault:
	iov_iter_revert(to, offset - start_off);
	return -EFAULT;

short_copy:
	if (fault_short || iov_iter_count(to))
		goto fault;

	return 0;
}

/**
 *	skb_copy_and_hash_datagram_iter - Copy datagram to an iovec iterator
 *	    and update a hash.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 *	@hash: hash request to update
 */
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
				    struct iov_iter *to, int len,
				    struct ahash_request *hash)
{
	return __skb_datagram_iter(skb, offset, to, len, true,
				   hash_and_copy_to_iter, hash);
}
EXPORT_SYMBOL(skb_copy_and_hash_datagram_iter);

static size_t simple_copy_to_iter(const void *addr, size_t bytes,
				  void *data __always_unused, struct iov_iter *i)
{
	return copy_to_iter(addr, bytes, i);
}

/**
 *	skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 */
int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
			   struct iov_iter *to, int len)
{
	trace_skb_copy_datagram_iovec(skb, len);
	return __skb_datagram_iter(skb, offset, to, len, false,
				   simple_copy_to_iter, NULL);
}
EXPORT_SYMBOL(skb_copy_datagram_iter);
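
/* For context: most recvmsg implementations reach skb_copy_datagram_iter()
 * through the skb_copy_datagram_msg() helper in <linux/skbuff.h>, which is
 * roughly the thin wrapper sketched below.
 */
#if 0
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
					struct msghdr *msg, int size)
{
	return skb_copy_datagram_iter(from, offset, &msg->msg_iter, size);
}
#endif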

/**
 *	skb_copy_datagram_from_iter - Copy a datagram from an iov_iter.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: the copy source
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 */
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				struct iov_iter *from,
				int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (copy_from_iter(skb->data + offset, copy, from) != copy)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			size_t copied;

			if (copy > len)
				copy = len;
			copied = copy_page_from_iter(skb_frag_page(frag),
					  skb_frag_off(frag) + offset - start,
					  copy, from);
			if (copied != copy)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iter(frag_iter,
							offset - start,
							from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);

int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *from, size_t length)
{
	int frag = skb_shinfo(skb)->nr_frags;

	while (length && iov_iter_count(from)) {
		struct page *pages[MAX_SKB_FRAGS];
		size_t start;
		ssize_t copied;
		unsigned long truesize;
		int n = 0;

		if (frag == MAX_SKB_FRAGS)
			return -EMSGSIZE;

		copied = iov_iter_get_pages(from, pages, length,
					    MAX_SKB_FRAGS - frag, &start);
		if (copied < 0)
			return -EFAULT;

		iov_iter_advance(from, copied);
		length -= copied;

		truesize = PAGE_ALIGN(copied + start);
		skb->data_len += copied;
		skb->len += copied;
		skb->truesize += truesize;
		if (sk && sk->sk_type == SOCK_STREAM) {
			sk_wmem_queued_add(sk, truesize);
			sk_mem_charge(sk, truesize);
		} else {
			refcount_add(truesize, &skb->sk->sk_wmem_alloc);
		}
		while (copied) {
			int size = min_t(int, copied, PAGE_SIZE - start);

			skb_fill_page_desc(skb, frag++, pages[n], start, size);
			start = 0;
			copied -= size;
			n++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(__zerocopy_sg_from_iter);

/**
 *	zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter
 *	@skb: buffer to copy
 *	@from: the source to copy from
 *
 *	The function will first copy up to headlen, and then pin the userspace
 *	pages and build frags through them.
 *
 *	Returns 0, -EFAULT or -EMSGSIZE.
 */
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
{
	int copy = min_t(int, skb_headlen(skb), iov_iter_count(from));

	/* copy up to skb headlen */
	if (skb_copy_datagram_from_iter(skb, 0, from, copy))
		return -EFAULT;

	return __zerocopy_sg_from_iter(NULL, skb, from, ~0U);
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
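
/* A hedged sketch of a zerocopy send path: the caller reserves linear
 * space for the headers, then lets zerocopy_sg_from_iter() copy the
 * headlen and pin the remaining user pages as frags. "hdr_len" and the
 * surrounding function are illustrative, in the spirit of drivers such
 * as tun that use this helper.
 */
#if 0
static int toy_fill_zerocopy(struct sk_buff *skb, struct iov_iter *from,
			     size_t hdr_len)
{
	int err;

	skb_put(skb, hdr_len);	/* linear part will hold the headers */
	err = zerocopy_sg_from_iter(skb, from);
	if (err)
		return err;	/* -EFAULT or -EMSGSIZE */

	return 0;
}
#endif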

/**
 *	skb_copy_and_csum_datagram - Copy datagram to an iovec iterator
 *	    and update a checksum.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 *	@csump: checksum pointer
 */
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      struct iov_iter *to, int len,
				      __wsum *csump)
{
	return __skb_datagram_iter(skb, offset, to, len, true,
				   csum_and_copy_to_iter, csump);
}

/**
 *	skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@msg: destination
 *
 *	Caller _must_ check that the skb will fit into this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy.
 */
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
				   int hlen, struct msghdr *msg)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	if (msg_data_left(msg) < chunk) {
		if (__skb_checksum_complete(skb))
			return -EINVAL;
		if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
					       chunk, &csum))
			goto fault;

		if (csum_fold(csum)) {
			iov_iter_revert(&msg->msg_iter, chunk);
			return -EINVAL;
		}

		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(NULL, skb);
	}
	return 0;
fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
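
/* A hedged sketch of how a UDP-like recvmsg path uses this helper: when
 * the caller reads the full payload, checksum verification is folded into
 * the copy; -EINVAL then means the datagram should be dropped and the
 * receive retried. The label and variables below are illustrative.
 */
#if 0
	err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg);
	if (err == -EINVAL) {
		skb_kill_datagram(sk, skb, flags);
		goto try_again;		/* re-enter the receive loop */
	}
#endif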

/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets provided the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you *don't* use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
__poll_t datagram_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= EPOLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);
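
/* datagram_poll() is meant to be plugged straight into a protocol's
 * proto_ops; a minimal hedged sketch ("toy_dgram_ops" and the recvmsg
 * handler are hypothetical, remaining handlers elided):
 */
#if 0
static const struct proto_ops toy_dgram_ops = {
	.family		= PF_PACKET,		/* illustrative */
	.owner		= THIS_MODULE,
	.poll		= datagram_poll,	/* generic datagram poll */
	.recvmsg	= toy_recvmsg,		/* e.g. the sketch earlier */
	/* other handlers (bind, sendmsg, ...) omitted */
};
#endif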