// SPDX-License-Identifier: GPL-2.0
/*
 *	SUCS NET3:
 *
 *	Generic datagram handling routines. These are generic for all
 *	protocols. Possibly a generic IP version on top of these would
 *	make sense. Not tonight however 8-).
 *	This is used because UDP, RAW, PACKET, DDP, IPX, AX.25 and
 *	NetROM layer all have identical poll code and mostly
 *	identical recvmsg() code. So we share it here. The poll was
 *	shared before but buried in udp.c so I moved it.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>. (datagram_poll() from old
 *						     udp.c code)
 *
 *	Fixes:
 *		Alan Cox	:	NULL return from skb_peek_copy()
 *					understood
 *		Alan Cox	:	Rewrote skb_read_datagram to avoid the
 *					skb_peek_copy stuff.
 *		Alan Cox	:	Added support for SOCK_SEQPACKET.
 *					IPX can no longer use the SO_TYPE hack
 *					but AX.25 now works right, and SPX is
 *					feasible.
 *		Alan Cox	:	Fixed write poll of non IP protocol
 *					crash.
 *		Florian La Roche:	Changed for my new skbuff handling.
 *		Darryl Miles	:	Fixed non-blocking SOCK_SEQPACKET.
 *		Linus Torvalds	:	BSD semantic fixes.
 *		Alan Cox	:	Datagram iovec handling
 *		Darryl Miles	:	Fixed non-blocking SOCK_STREAM.
 *		Alan Cox	:	POSIXisms
 *		Pete Wyckoff	:	Unconnected accept() fix.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/poll.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/indirect_call_wrapper.h>

#include <net/protocol.h>
#include <linux/skbuff.h>

#include <net/checksum.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <trace/events/skb.h>
#include <net/busy_poll.h>

#include "datagram.h"

/*
 *	Is a socket 'connection oriented' ?
 */
static inline int connection_based(struct sock *sk)
{
	return sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM;
}

static int receiver_wake_function(wait_queue_entry_t *wait, unsigned int mode, int sync,
				  void *key)
{
	/*
	 * Avoid a wakeup if event not interesting for us
	 */
	if (key && !(key_to_poll(key) & (EPOLLIN | EPOLLERR)))
		return 0;
	return autoremove_wake_function(wait, mode, sync, key);
}
/*
 * Wait for the last received packet to be different from skb
 */
int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
				const struct sk_buff *skb)
{
	int error;
	DEFINE_WAIT_FUNC(wait, receiver_wake_function);

	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	/* Socket errors? */
	error = sock_error(sk);
	if (error)
		goto out_err;

	if (READ_ONCE(sk->sk_receive_queue.prev) != skb)
		goto out;

	/* Socket shut down? */
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		goto out_noerr;

	/* Sequenced packets can come disconnected.
	 * If so we report the problem
	 */
	error = -ENOTCONN;
	if (connection_based(sk) &&
	    !(sk->sk_state == TCP_ESTABLISHED || sk->sk_state == TCP_LISTEN))
		goto out_err;

	/* handle signals */
	if (signal_pending(current))
		goto interrupted;

	error = 0;
	*timeo_p = schedule_timeout(*timeo_p);
out:
	finish_wait(sk_sleep(sk), &wait);
	return error;
interrupted:
	error = sock_intr_errno(*timeo_p);
out_err:
	*err = error;
	goto out;
out_noerr:
	*err = 0;
	error = 1;
	goto out;
}
EXPORT_SYMBOL(__skb_wait_for_more_packets);

static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
{
	struct sk_buff *nskb;

	if (skb->peeked)
		return skb;

	/* We have to unshare an skb before modifying it. */
	if (!skb_shared(skb))
		goto done;

	nskb = skb_clone(skb, GFP_ATOMIC);
	if (!nskb)
		return ERR_PTR(-ENOMEM);

	skb->prev->next = nskb;
	skb->next->prev = nskb;
	nskb->prev = skb->prev;
	nskb->next = skb->next;

	consume_skb(skb);
	skb = nskb;

done:
	skb->peeked = 1;

	return skb;
}

struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
					  struct sk_buff_head *queue,
					  unsigned int flags,
					  void (*destructor)(struct sock *sk,
							     struct sk_buff *skb),
					  int *off, int *err,
					  struct sk_buff **last)
{
	bool peek_at_off = false;
	struct sk_buff *skb;
	int _off = 0;

	if (unlikely(flags & MSG_PEEK && *off >= 0)) {
		peek_at_off = true;
		_off = *off;
	}

	*last = queue->prev;
	skb_queue_walk(queue, skb) {
		if (flags & MSG_PEEK) {
			if (peek_at_off && _off >= skb->len &&
			    (_off || skb->peeked)) {
				_off -= skb->len;
				continue;
			}
			if (!skb->len) {
				skb = skb_set_peeked(skb);
				if (IS_ERR(skb)) {
					*err = PTR_ERR(skb);
					return NULL;
				}
			}
			refcount_inc(&skb->users);
		} else {
			__skb_unlink(skb, queue);
			if (destructor)
				destructor(sk, skb);
		}
		*off = _off;
		return skb;
	}
	return NULL;
}

/**
 *	__skb_try_recv_datagram - Receive a datagram skbuff
 *	@sk: socket
 *	@flags: MSG\_ flags
 *	@destructor: invoked under the receive lock on successful dequeue
 *	@off: an offset in bytes to peek skb from. Returns an offset
 *	      within an skb where data actually starts
 *	@err: error code returned
 *	@last: set to last peeked message to inform the wait function
 *	       what to look for when peeking
 *
 *	Get a datagram skbuff, understands the peeking, nonblocking wakeups
 *	and possible races. This replaces identical code in packet, raw and
 *	udp, as well as the IPX, AX.25 and AppleTalk. It also finally fixes
 *	the long standing peek and read race for datagram sockets. If you
 *	alter this routine remember it must be re-entrant.
 *
 *	This function will lock the socket if a skb is returned, so
 *	the caller needs to unlock the socket in that case (usually by
 *	calling skb_free_datagram). Returns NULL with @err set to
 *	-EAGAIN if no data was available or to some other value if an
 *	error was detected.
 *
 *	* It does not lock socket since today. This function is
 *	* free of race conditions. This measure should/can improve
 *	* significantly datagram socket latencies at high loads,
 *	* when data copying to user space takes lots of time.
 *	* (BTW I've just killed the last cli() in IP/IPv6/core/netlink/packet
 *	*  8) Great win.)
 *	*					--ANK (980729)
 *
 *	The order of the tests when we find no data waiting is specified
 *	quite explicitly by POSIX 1003.1g, don't change them without having
 *	the standard around please.
 */
struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
					void (*destructor)(struct sock *sk,
							   struct sk_buff *skb),
					int *off, int *err,
					struct sk_buff **last)
{
	struct sk_buff_head *queue = &sk->sk_receive_queue;
	struct sk_buff *skb;
	unsigned long cpu_flags;
	/*
	 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
	 */
	int error = sock_error(sk);

	if (error)
		goto no_packet;

	do {
		/* Again only user level code calls this function, so nothing
		 * interrupt level will suddenly eat the receive_queue.
		 *
		 * Look at current nfs client by the way...
		 * However, this function was correct in any case. 8)
		 */
		spin_lock_irqsave(&queue->lock, cpu_flags);
		skb = __skb_try_recv_from_queue(sk, queue, flags, destructor,
						off, &error, last);
		spin_unlock_irqrestore(&queue->lock, cpu_flags);
		if (error)
			goto no_packet;
		if (skb)
			return skb;

		if (!sk_can_busy_loop(sk))
			break;

		sk_busy_loop(sk, flags & MSG_DONTWAIT);
	} while (READ_ONCE(sk->sk_receive_queue.prev) != *last);

	error = -EAGAIN;

no_packet:
	*err = error;
	return NULL;
}
EXPORT_SYMBOL(__skb_try_recv_datagram);

struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
				    void (*destructor)(struct sock *sk,
						       struct sk_buff *skb),
				    int *off, int *err)
{
	struct sk_buff *skb, *last;
	long timeo;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	do {
		skb = __skb_try_recv_datagram(sk, flags, destructor, off, err,
					      &last);
		if (skb)
			return skb;

		if (*err != -EAGAIN)
			break;
	} while (timeo &&
		 !__skb_wait_for_more_packets(sk, err, &timeo, last));

	return NULL;
}
EXPORT_SYMBOL(__skb_recv_datagram);

struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned int flags,
				  int noblock, int *err)
{
	int off = 0;

	return __skb_recv_datagram(sk, flags | (noblock ? MSG_DONTWAIT : 0),
				   NULL, &off, err);
}
EXPORT_SYMBOL(skb_recv_datagram);
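
/*
 * Usage sketch (illustrative, not part of the original file): a minimal
 * recvmsg() built on skb_recv_datagram(), in the style of the udp.c and
 * AX.25 callers this code was factored out of. The "example_recvmsg"
 * name is hypothetical.
 *
 *	static int example_recvmsg(struct socket *sock, struct msghdr *msg,
 *				   size_t len, int flags)
 *	{
 *		struct sock *sk = sock->sk;
 *		struct sk_buff *skb;
 *		int copied, err;
 *
 *		skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
 *		if (!skb)
 *			return err;
 *
 *		copied = skb->len;
 *		if (copied > len) {
 *			copied = len;
 *			msg->msg_flags |= MSG_TRUNC;
 *		}
 *		err = skb_copy_datagram_msg(skb, 0, msg, copied);
 *		skb_free_datagram(sk, skb);
 *		return err ? err : copied;
 *	}
 */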

void skb_free_datagram(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);
	sk_mem_reclaim_partial(sk);
}
EXPORT_SYMBOL(skb_free_datagram);

void __skb_free_datagram_locked(struct sock *sk, struct sk_buff *skb, int len)
{
	bool slow;

	if (!skb_unref(skb)) {
		sk_peek_offset_bwd(sk, len);
		return;
	}

	slow = lock_sock_fast(sk);
	sk_peek_offset_bwd(sk, len);
	skb_orphan(skb);
	sk_mem_reclaim_partial(sk);
	unlock_sock_fast(sk, slow);

	/* skb is now orphaned, can be freed outside of locked section */
	__kfree_skb(skb);
}
EXPORT_SYMBOL(__skb_free_datagram_locked);

int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
			struct sk_buff *skb, unsigned int flags,
			void (*destructor)(struct sock *sk,
					   struct sk_buff *skb))
{
	int err = 0;

	if (flags & MSG_PEEK) {
		err = -ENOENT;
		spin_lock_bh(&sk_queue->lock);
		if (skb->next) {
			__skb_unlink(skb, sk_queue);
			refcount_dec(&skb->users);
			if (destructor)
				destructor(sk, skb);
			err = 0;
		}
		spin_unlock_bh(&sk_queue->lock);
	}

	atomic_inc(&sk->sk_drops);
	return err;
}
EXPORT_SYMBOL(__sk_queue_drop_skb);

/**
 *	skb_kill_datagram - Free a datagram skbuff forcibly
 *	@sk: socket
 *	@skb: datagram skbuff
 *	@flags: MSG\_ flags
 *
 *	This function frees a datagram skbuff that was received by
 *	skb_recv_datagram.  The flags argument must match the one
 *	used for skb_recv_datagram.
 *
 *	If the MSG_PEEK flag is set, and the packet is still on the
 *	receive queue of the socket, it will be taken off the queue
 *	before it is freed.
 *
 *	This function currently only disables BH when acquiring the
 *	sk_receive_queue lock.  Therefore it must not be used in a
 *	context where that lock is acquired in an IRQ context.
 *
 *	It returns 0 if the packet was removed by us.
 */

int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
{
	int err = __sk_queue_drop_skb(sk, &sk->sk_receive_queue, skb, flags,
				      NULL);

	kfree_skb(skb);
	sk_mem_reclaim_partial(sk);
	return err;
}
EXPORT_SYMBOL(skb_kill_datagram);
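
/*
 * Usage sketch (illustrative): how a protocol drops a datagram it has
 * already dequeued or peeked, e.g. on a checksum failure in recvmsg().
 * @flags must be the same value given to skb_recv_datagram() so that a
 * MSG_PEEK'ed skb is also unlinked from the receive queue:
 *
 *	if (checksum_failed) {
 *		skb_kill_datagram(sk, skb, flags);
 *		return -EAGAIN;
 *	}
 *
 * where "checksum_failed" stands for the caller's own validity check.
 */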

INDIRECT_CALLABLE_DECLARE(static size_t simple_copy_to_iter(const void *addr,
						size_t bytes,
						void *data __always_unused,
						struct iov_iter *i));

static int __skb_datagram_iter(const struct sk_buff *skb, int offset,
			       struct iov_iter *to, int len, bool fault_short,
			       size_t (*cb)(const void *, size_t, void *,
					    struct iov_iter *), void *data)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset, start_off = offset, n;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
				    skb->data + offset, copy, data, to);
		offset += n;
		if (n != copy)
			goto short_copy;
		if ((len -= copy) == 0)
			return 0;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			struct page *page = skb_frag_page(frag);
			u8 *vaddr = kmap(page);

			if (copy > len)
				copy = len;
			n = INDIRECT_CALL_1(cb, simple_copy_to_iter,
					vaddr + skb_frag_off(frag) + offset - start,
					copy, data, to);
			kunmap(page);
			offset += n;
			if (n != copy)
				goto short_copy;
			if (!(len -= copy))
				return 0;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (__skb_datagram_iter(frag_iter, offset - start,
						to, copy, fault_short, cb, data))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

	/* This is not really a user copy fault, but rather someone
	 * gave us a bogus length on the skb.  We should probably
	 * print a warning here as it may indicate a kernel bug.
	 */

fault:
	iov_iter_revert(to, offset - start_off);
	return -EFAULT;

short_copy:
	if (fault_short || iov_iter_count(to))
		goto fault;

	return 0;
}

/**
 *	skb_copy_and_hash_datagram_iter - Copy datagram to an iovec iterator
 *          and update a hash.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 *	@hash: hash request to update
 */
int skb_copy_and_hash_datagram_iter(const struct sk_buff *skb, int offset,
			   struct iov_iter *to, int len,
			   struct ahash_request *hash)
{
	return __skb_datagram_iter(skb, offset, to, len, true,
			hash_and_copy_to_iter, hash);
}
EXPORT_SYMBOL(skb_copy_and_hash_datagram_iter);

static size_t simple_copy_to_iter(const void *addr, size_t bytes,
		void *data __always_unused, struct iov_iter *i)
{
	return copy_to_iter(addr, bytes, i);
}

/**
 *	skb_copy_datagram_iter - Copy a datagram to an iovec iterator.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 */
int skb_copy_datagram_iter(const struct sk_buff *skb, int offset,
			   struct iov_iter *to, int len)
{
	trace_skb_copy_datagram_iovec(skb, len);
	return __skb_datagram_iter(skb, offset, to, len, false,
			simple_copy_to_iter, NULL);
}
EXPORT_SYMBOL(skb_copy_datagram_iter);
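
/*
 * Usage sketch (illustrative): copying a received datagram into the
 * user buffer described by msg->msg_iter. Most callers use the
 * skb_copy_datagram_msg() wrapper from <linux/skbuff.h>, which expands
 * to exactly this call:
 *
 *	err = skb_copy_datagram_iter(skb, offset, &msg->msg_iter, copied);
 */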

/**
 *	skb_copy_datagram_from_iter - Copy a datagram from an iov_iter.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying to
 *	@from: the copy source
 *	@len: amount of data to copy to buffer from iovec
 *
 *	Returns 0 or -EFAULT.
 */
int skb_copy_datagram_from_iter(struct sk_buff *skb, int offset,
				 struct iov_iter *from,
				 int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		if (copy_from_iter(skb->data + offset, copy, from) != copy)
			goto fault;
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
	}

	/* Copy paged appendix. Hmm... why does this look so complicated? */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			size_t copied;

			if (copy > len)
				copy = len;
			copied = copy_page_from_iter(skb_frag_page(frag),
					  skb_frag_off(frag) + offset - start,
					  copy, from);
			if (copied != copy)
				goto fault;

			if (!(len -= copy))
				return 0;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_datagram_from_iter(frag_iter,
							offset - start,
							from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_datagram_from_iter);
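
/*
 * Usage sketch (illustrative): the transmit-side counterpart, filling a
 * freshly allocated skb from a sendmsg() iterator. "len" and "err" are
 * the caller's locals.
 *
 *	skb = sock_alloc_send_skb(sk, len, msg->msg_flags & MSG_DONTWAIT,
 *				  &err);
 *	if (!skb)
 *		return err;
 *	skb_put(skb, len);
 *	err = skb_copy_datagram_from_iter(skb, 0, &msg->msg_iter, len);
 */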

int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
			    struct iov_iter *from, size_t length)
{
	int frag = skb_shinfo(skb)->nr_frags;

	while (length && iov_iter_count(from)) {
		struct page *pages[MAX_SKB_FRAGS];
		size_t start;
		ssize_t copied;
		unsigned long truesize;
		int n = 0;

		if (frag == MAX_SKB_FRAGS)
			return -EMSGSIZE;

		copied = iov_iter_get_pages(from, pages, length,
					    MAX_SKB_FRAGS - frag, &start);
		if (copied < 0)
			return -EFAULT;

		iov_iter_advance(from, copied);
		length -= copied;

		truesize = PAGE_ALIGN(copied + start);
		skb->data_len += copied;
		skb->len += copied;
		skb->truesize += truesize;
		if (sk && sk->sk_type == SOCK_STREAM) {
			sk_wmem_queued_add(sk, truesize);
			sk_mem_charge(sk, truesize);
		} else {
			refcount_add(truesize, &skb->sk->sk_wmem_alloc);
		}
		while (copied) {
			int size = min_t(int, copied, PAGE_SIZE - start);

			skb_fill_page_desc(skb, frag++, pages[n], start, size);
			start = 0;
			copied -= size;
			n++;
		}
	}
	return 0;
}
EXPORT_SYMBOL(__zerocopy_sg_from_iter);

/**
 *	zerocopy_sg_from_iter - Build a zerocopy datagram from an iov_iter
 *	@skb: buffer to copy
 *	@from: the source to copy from
 *
 *	The function will first copy up to headlen, and then pin the userspace
 *	pages and build frags through them.
 *
 *	Returns 0, -EFAULT or -EMSGSIZE.
 */
int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
{
	int copy = min_t(int, skb_headlen(skb), iov_iter_count(from));

	/* copy up to skb headlen */
	if (skb_copy_datagram_from_iter(skb, 0, from, copy))
		return -EFAULT;

	return __zerocopy_sg_from_iter(NULL, skb, from, ~0U);
}
EXPORT_SYMBOL(zerocopy_sg_from_iter);
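
/*
 * Usage sketch (illustrative): attaching user payload to an skb without
 * copying, as the MSG_ZEROCOPY transmit paths do. Only protocol headers
 * occupy the linear area; the payload pages are pinned and hung off the
 * skb as frags, so they must stay valid until the skb is freed:
 *
 *	err = zerocopy_sg_from_iter(skb, &msg->msg_iter);
 *	if (err)
 *		goto free_skb;
 */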

/**
 *	skb_copy_and_csum_datagram - Copy datagram to an iovec iterator
 *          and update a checksum.
 *	@skb: buffer to copy
 *	@offset: offset in the buffer to start copying from
 *	@to: iovec iterator to copy to
 *	@len: amount of data to copy from buffer to iovec
 *	@csump: checksum pointer
 */
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
				      struct iov_iter *to, int len,
				      __wsum *csump)
{
	struct csum_state csdata = { .csum = *csump };
	int ret;

	ret = __skb_datagram_iter(skb, offset, to, len, true,
				  csum_and_copy_to_iter, &csdata);
	if (ret)
		return ret;

	*csump = csdata.csum;
	return 0;
}

/**
 *	skb_copy_and_csum_datagram_msg - Copy and checksum skb to user iovec.
 *	@skb: skbuff
 *	@hlen: hardware length
 *	@msg: destination
 *
 *	Caller _must_ check that skb will fit to this iovec.
 *
 *	Returns: 0       - success.
 *		 -EINVAL - checksum failure.
 *		 -EFAULT - fault during copy.
 */
int skb_copy_and_csum_datagram_msg(struct sk_buff *skb,
				   int hlen, struct msghdr *msg)
{
	__wsum csum;
	int chunk = skb->len - hlen;

	if (!chunk)
		return 0;

	if (msg_data_left(msg) < chunk) {
		if (__skb_checksum_complete(skb))
			return -EINVAL;
		if (skb_copy_datagram_msg(skb, hlen, msg, chunk))
			goto fault;
	} else {
		csum = csum_partial(skb->data, hlen, skb->csum);
		if (skb_copy_and_csum_datagram(skb, hlen, &msg->msg_iter,
					       chunk, &csum))
			goto fault;

		if (csum_fold(csum)) {
			iov_iter_revert(&msg->msg_iter, chunk);
			return -EINVAL;
		}

		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(NULL, skb);
	}
	return 0;
fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
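
/*
 * Usage sketch (illustrative): UDP-style receive where the checksum is
 * verified while copying, so the payload is only traversed once. @hlen
 * is the transport header length the caller skips over:
 *
 *	err = skb_copy_and_csum_datagram_msg(skb, sizeof(struct udphdr), msg);
 *	if (err == -EINVAL)
 *		... bad checksum: drop the skb and bump the error counters ...
 */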

/**
 *	datagram_poll - generic datagram poll
 *	@file: file struct
 *	@sock: socket
 *	@wait: poll table
 *
 *	Datagram poll: Again totally generic. This also handles
 *	sequenced packet sockets providing the socket receive queue
 *	is only ever holding data ready to receive.
 *
 *	Note: when you *don't* use this routine for this protocol,
 *	and you use a different write policy from sock_writeable()
 *	then please supply your own write_space callback.
 */
__poll_t datagram_poll(struct file *file, struct socket *sock,
		       poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask;
	u8 shutdown;

	sock_poll_wait(file, sock, wait);
	mask = 0;

	/* exceptional events? */
	if (READ_ONCE(sk->sk_err) ||
	    !skb_queue_empty_lockless(&sk->sk_error_queue))
		mask |= EPOLLERR |
			(sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? EPOLLPRI : 0);

	shutdown = READ_ONCE(sk->sk_shutdown);
	if (shutdown & RCV_SHUTDOWN)
		mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (shutdown == SHUTDOWN_MASK)
		mask |= EPOLLHUP;

	/* readable? */
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Connection-based need to check for termination and startup */
	if (connection_based(sk)) {
		int state = READ_ONCE(sk->sk_state);

		if (state == TCP_CLOSE)
			mask |= EPOLLHUP;
		/* connection hasn't started yet? */
		if (state == TCP_SYN_SENT)
			return mask;
	}

	/* writable? */
	if (sock_writeable(sk))
		mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
	else
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	return mask;
}
EXPORT_SYMBOL(datagram_poll);
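
/*
 * Usage sketch (illustrative): datagram protocols typically plug this
 * routine straight into their struct proto_ops, in the spirit of the
 * AX.25 and NetROM sockets mentioned in the header above. The
 * "example" names are hypothetical.
 *
 *	static const struct proto_ops example_dgram_ops = {
 *		.family	= PF_EXAMPLE,
 *		.poll	= datagram_poll,
 *		...
 *	};
 */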