/*
 *	SUCS NET3:
 *
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *			udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff	:	Unconnected accept() fix.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>

/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_t *wait, unsigned int mode, int sync,
				  void *key)
{
	unsigned long bits = (unsigned long)key;

	/*
	 * Avoid a wakeup if event not interesting for us
	 */
	if (bits && !(bits & (POLLIN | POLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wait for the last received packet to be different from skb
 */
static int wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				 const struct sk_buff *skb)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (sk->sk_receive_queue.prev != skb)
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}

static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (skb->peeked)
		return skb;

	/* We have to unshare an skb before modifying it. */
	if (!skb_shared(skb))
		goto done;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return ERR_PTR(-ENOMEM);

	skb->prev->next = nskb;
	skb->next->prev = nskb;
	nskb->prev = skb->prev;
	nskb->next = skb->next;

	consume_skb(skb);
	skb = nskb;

done:
	skb->peeked = 1;

	return skb;
}

/**
 *	__skb_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG_ flags
 *	@peeked: returns non-zero if this packet has been seen before
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as IPX, AX.25 and AppleTalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so the caller
 *	needs to unlock the socket in that case (usually by calling
 *	skb_free_datagram)
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	* 8) Great win.)
 *	*				--ANK (980729)
 *
 *	The order of the tests when we find no data waiting are specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
				    int *peeked, int *off, int *err)
{
	struct sk_buff_head *queue = &sk->sk_receive_queue;
	struct sk_buff *skb, *last;
	unsigned long cpu_flags;
	long timeo;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		int _off = *off;

		last = (struct sk_buff *)queue;
		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb_queue_walk(queue, skb) {
			last = skb;
			*peeked = skb->peeked;
			if (flags & MSG_PEEK) {
				if (_off >= skb->len && (skb->len || _off ||
							 skb->peeked)) {
					_off -= skb->len;
					continue;
				}

				skb = skb_set_peeked(skb);
				error = PTR_ERR(skb);
				if (IS_ERR(skb))
					goto unlock_err;

				atomic_inc(&skb->users);
			} else
				__skb_unlink(skb, queue);

			spin_unlock_irqrestore(&queue->lock, cpu_flags);
			*off = _off;
			return skb;
		}
		spin_unlock_irqrestore(&queue->lock, cpu_flags);

		if (sk_can_busy_loop(sk) &&
		    sk_busy_loop(sk, flags & MSG_DONTWAIT))
			continue;

		/* User doesn't want to wait */
		error = -EAGAIN;
		if (!timeo)
			goto no_packet;

	} while (!wait_for_more_packets(sk, err, &timeo, last));

	return NULL;

unlock_err:
	spin_unlock_irqrestore(&queue->lock, cpu_flags);
no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
				  int noblock, int *err)
{
	int peeked, off = 0;

	return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				   &peeked, &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
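
/*
 * Illustrative sketch (not part of this file): a minimal datagram
 * recvmsg() built on the helpers above, in the style of the udp.c and
 * ax25.c callers of this era. Names and error handling are
 * hypothetical and abbreviated:
 *
 *	static int my_proto_recvmsg(struct kiocb *iocb, struct sock *sk,
 *				    struct msghdr *msg, size_t len,
 *				    int noblock, int flags, int *addr_len)
 *	{
 *		struct sk_buff *skb;
 *		int err, copied;
 *
 *		skb = skb_recv_datagram(sk, flags, noblock, &err);
 *		if (!skb)
 *			return err;
 *
 *		copied = skb->len;
 *		if (copied > len) {
 *			copied = len;
 *			msg->msg_flags |= MSG_TRUNC;
 *		}
 *		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
 *		skb_free_datagram(sk, skb);
 *		return err ? err : copied;
 *	}
 */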

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
	sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);

void skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb)
{
	bool slow;

	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;

	slow = lock_sock_fast(sk);
	skb_orphan(skb);
	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	__kfree_skb(skb);
}
EXPORT_SYMBOL(skb_free_datagram_locked);

/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram. The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock. Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */

int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (skb == skb_peek(&sk->sk_receive_queue)) {
			__skb_unlink(skb, &sk->sk_receive_queue);
			atomic_dec(&skb->users);
			err = 0;
		}
		spin_unlock_bh(&sk->sk_receive_queue.lock);
	}

	kfree_skb(skb);
	atomic_inc(&sk->sk_drops);
	sk_mem_reclaim_partial(sk);

	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
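
/*
 * Illustrative use (udp.c style, abbreviated and hypothetical): when a
 * peeked datagram later fails its checksum, drop it and retry rather
 * than returning it to the caller:
 *
 *	err = skb_copy_and_csum_datagram_iovec(skb, off, msg->msg_iov);
 *	if (err == -EINVAL) {
 *		skb_kill_datagram(sk, skb, flags);
 *		goto try_again;
 *	}
 */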

/**
 *	skb_copy_datagram_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Note: the iovec is modified during the copy.
 */
int skb_copy_datagram_iovec(const struct sk_buff *skb, int offset,
			    struct iovec *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	trace_skb_copy_datagram_iovec(skb, len);

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovec(to, skb->data + offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovec(to, vaddr + frag->page_offset +
					     offset - start, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_iovec(frag_iter,
						    offset - start,
						    to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_iovec);

/**
 *	skb_copy_datagram_const_iovec - Copy a datagram to an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: io vector to copy to
 *	@to_offset: offset in the io vector to start copying to
 *	@len: amount of data to copy from buffer to iovec
 *
 *	Returns 0 or -EFAULT.
 *	Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_const_iovec(const struct sk_buff *skb, int offset,
				  const struct iovec *to, int to_offset,
				  int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_toiovecend(to, skb->data + offset, to_offset, copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to_offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_toiovecend(to, vaddr + frag->page_offset +
						offset - start, to_offset, copy);
			kunmap(page);
			if (err)
				goto fault;
			if (!(len -= copy))
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_const_iovec(frag_iter,
							  offset - start,
							  to, to_offset,
							  copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to_offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_const_iovec);
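
/*
 * Illustrative use (tun/macvtap receive-to-user style, hypothetical
 * names): copy an skb out at a fixed iovec offset, leaving the iovec
 * itself untouched so it stays reusable:
 *
 *	ret = skb_copy_datagram_const_iovec(skb, 0, iv, hdr_len, skb->len);
 */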

/**
 *	skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: io vector to copy from
 *	@from_offset: offset in the io vector to start copying from
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 *	Note: the iovec is not modified during the copy.
 */
int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
				 const struct iovec *from, int from_offset,
				 int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (memcpy_fromiovecend(skb->data + offset, from, from_offset,
					copy))
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from_offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			int err;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			err = memcpy_fromiovecend(vaddr + frag->page_offset +
						  offset - start,
						  from, from_offset, copy);
			kunmap(page);
			if (err)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iovec(frag_iter,
							 offset - start,
							 from,
							 from_offset,
							 copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from_offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
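
/*
 * Illustrative use (sendmsg-path style, hypothetical names): fill a
 * freshly allocated skb with user data before transmission:
 *
 *	skb = sock_alloc_send_skb(sk, len, noblock, &err);
 *	if (!skb)
 *		return err;
 *	skb_put(skb, len);
 *	err = skb_copy_datagram_from_iovec(skb, 0, msg->msg_iov, 0, len);
 *	if (err) {
 *		kfree_skb(skb);
 *		return err;
 *	}
 */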

/**
 *	zerocopy_sg_from_iovec - Build a zerocopy datagram from an iovec
 *	@skb: buffer to copy
 *	@from: io vector to copy from
 *	@offset: offset in the io vector to start copying from
 *	@count: number of io vector entries to copy from
 *
 *	The function will first copy up to headlen, and then pin the userspace
 *	pages and build frags through them.
 *
 *	Returns 0, -EFAULT or -EMSGSIZE.
 *	Note: the iovec is not modified during the copy
 */
int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
			   int offset, size_t count)
{
	int len = iov_length(from, count) - offset;
	int copy = min_t(int, skb_headlen(skb), len);
	int size;
	int i = 0;

	/* copy up to skb headlen */
	if (skb_copy_datagram_from_iovec(skb, 0, from, offset, copy))
		return -EFAULT;

	if (len == copy)
		return 0;

	offset += copy;
	while (count--) {
		struct page *page[MAX_SKB_FRAGS];
		int num_pages;
		unsigned long base;
		unsigned long truesize;

		/* Skip over from offset and copied */
		if (offset >= from->iov_len) {
			offset -= from->iov_len;
			++from;
			continue;
		}
		len = from->iov_len - offset;
		base = (unsigned long)from->iov_base + offset;
		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
		if (i + size > MAX_SKB_FRAGS)
			return -EMSGSIZE;
		num_pages = get_user_pages_fast(base, size, 0, &page[i]);
		if (num_pages != size) {
			release_pages(&page[i], num_pages, 0);
			return -EFAULT;
		}
		truesize = size * PAGE_SIZE;
		skb->data_len += len;
		skb->len += len;
		skb->truesize += truesize;
		atomic_add(truesize, &skb->sk->sk_wmem_alloc);
		while (len) {
			int off = base & ~PAGE_MASK;
			int size = min_t(int, len, PAGE_SIZE - off);
			skb_fill_page_desc(skb, i, page[i], off, size);
			base += size;
			len -= size;
			i++;
		}
		offset = 0;
		++from;
	}
	return 0;
}
EXPORT_SYMBOL(zerocopy_sg_from_iovec);
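
/*
 * Illustrative use (tun/macvtap transmit style, abbreviated, names
 * hypothetical): copy the first hdr_len bytes linearly, then pin the
 * remaining user pages as frags:
 *
 *	skb = sock_alloc_send_pskb(sk, hdr_len, 0, noblock, &err, 0);
 *	...
 *	err = zerocopy_sg_from_iovec(skb, iv, 0, count);
 *	if (err)
 *		goto drop;
 */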

static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      u8 __user *to, int len,
				      __wsum *csump)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Copy header. */
	if (copy > 0) {
		int err = 0;
		if (copy > len)
			copy = len;
		*csump = csum_and_copy_to_user(skb->data + offset, to, copy,
					       *csump, &err);
		if (err)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			int err = 0;
			u8 *vaddr;
			struct page *page = skb_frag_page(frag);

			if (copy > len)
				copy = len;
			vaddr = kmap(page);
			csum2 = csum_and_copy_to_user(vaddr +
						      frag->page_offset +
						      offset - start,
						      to, copy, 0, &err);
			kunmap(page);
			if (err)
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if (!(len -= copy))
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2 = 0;
			if (copy > len)
				copy = len;
			if (skb_copy_and_csum_datagram(frag_iter,
						       offset - start,
						       to, copy,
						       &csum2))
				goto fault;
			*csump = csum_block_add(*csump, csum2, pos);
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
	__sum16 sum;

	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}
	if (!skb_shared(skb))
		skb->csum_valid = !sum;
	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
	__wsum csum;
	__sum16 sum;

	csum = skb_checksum(skb, 0, skb->len, 0);

	/* skb->csum holds pseudo checksum */
	sum = csum_fold(csum_add(skb->csum, csum));
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev);
	}

	if (!skb_shared(skb)) {
		/* Save full packet checksum */
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum_complete_sw = 1;
		skb->csum_valid = !sum;
	}

	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete);

/**
 *	skb_copy_and_csum_datagram_iovec - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@iov: io vector
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy. Beware, in this case iovec
 *			   can be modified!
 */
int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
				     int hlen, struct iovec *iov)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	/* Skip filled elements.
	 * Pretty silly, look at memcpy_toiovec, though 8)
	 */
	while (!iov->iov_len)
		iov++;

	if (iov->iov_len < chunk) {
		if (__skb_checksum_complete(skb))
			goto csum_error;
		if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base,
					       chunk, &csum))
			goto fault;
		if (csum_fold(csum))
			goto csum_error;
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE))
			netdev_rx_csum_fault(skb->dev);
		iov->iov_len -= chunk;
		iov->iov_base += chunk;
	}
	return 0;
csum_error:
	return -EINVAL;
fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_iovec);
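
/*
 * Illustrative use (udp.c recvmsg style, abbreviated): copy plainly
 * when the checksum is already known good, otherwise verify it while
 * copying:
 *
 *	if (skb_csum_unnecessary(skb))
 *		err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
 *					      msg->msg_iov, copied);
 *	else {
 *		err = skb_copy_and_csum_datagram_iovec(skb,
 *						       sizeof(struct udphdr),
 *						       msg->msg_iov);
 *		if (err == -EINVAL)
 *			goto csum_copy_err;
 *	}
 */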

/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you _don't_ use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
unsigned int datagram_poll(struct file *file, struct socket *sock,
			   poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask;

	sock_poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	/* exceptional events? */
	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
		mask |= POLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? POLLPRI : 0);

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		mask |= POLLHUP;

	/* readable? */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		if (sk->sk_state == TCP_CLOSE)
			mask |= POLLHUP;
		/* connection hasn't started yet? */
		if (sk->sk_state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
	else
		set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);
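
/*
 * Illustrative wiring (hypothetical): datagram protocols that need no
 * special poll semantics simply point their proto_ops here:
 *
 *	static const struct proto_ops my_dgram_ops = {
 *		.family	= PF_MYPROTO,
 *		...
 *		.poll	= datagram_poll,
 *		...
 *	};
 */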