/*
 *	Routines having to do with the 'struct sk_buff' memory handlers.
 *
 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *			Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 *	Fixes:
 *		Alan Cox	:	Fixed the worst of the load
 *					balancer bugs.
 *		Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman	:	Timestamp fixes.
 *		Alan Cox	:	Changed buffer format.
 *		Alan Cox	:	destructor hook for AF_UNIX etc.
 *		Linus Torvalds	:	Better skb_clone.
 *		Alan Cox	:	Added skb_copy.
 *		Alan Cox	:	Added all the changed routines Linus
 *					only put in the headers
 *		Ray VanTassle	:	Fixed --skb->lock in free
 *		Alan Cox	:	skb_copy copy arp field
 *		Andi Kleen	:	slabified it.
 *		Robert Olsson	:	Removed skb_head_pool
 *
 *	NOTE:
 *		The __skb_ routines should be called with interrupts
 *	disabled, or you better be *real* sure that the operation is atomic
 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
 *	or via disabling bottom half handlers, etc).
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 *	The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 *	skb_panic - private function for out-of-line support
 *	@skb:	buffer
 *	@sz:	size
 *	@addr:	address
 *	@msg:	skb_over_panic or skb_under_panic
 *
 *	Out-of-line support for skb_put() and skb_push().
 *	Called via the wrapper skb_over_panic() or skb_under_panic().
 *	Keep out of line to prevent kernel bloat.
 *	__builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation; when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
 *	'private' fields and also do memory statistics to find all the
 *	[BEEP] leaks.
 *
 */

struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
{
	struct sk_buff *skb;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(skbuff_head_cache,
				    gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->head = NULL;
	skb->truesize = sizeof(struct sk_buff);
	atomic_set(&skb->users, 1);

	skb->mac_header = (typeof(skb->mac_header))~0U;
out:
	return skb;
}

/**
 *	__alloc_skb	-	allocate a network buffer
 *	@size: size to allocate
 *	@gfp_mask: allocation mask
 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *		instead of head cache and allocate a cloned (child) skb.
 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *		allocations in case the data is required for writeback
 *	@node: numa node to allocate memory on
 *
 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 *	tail room of at least size bytes. The object has a reference count
 *	of one. The return is the buffer. On a failure the return is %NULL.
 *
 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 *	%GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
		skb->fclone = SKB_FCLONE_ORIG;
		atomic_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_FREE;
		fclones->skb2.pfmemalloc = pfmemalloc;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
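
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * most callers reach this through the alloc_skb() wrapper rather than
 * calling __alloc_skb() directly. From process context:
 *
 *	struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	...
 *	kfree_skb(skb);
 *
 * From interrupt context, %GFP_ATOMIC must be used instead, as the
 * kernel-doc above notes.
 */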

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, the driver allocates only the data buffer where the NIC
 *  puts the incoming frame.
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	atomic_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);
	kmemcheck_annotate_variable(shinfo->destructor_arg);

	return skb;
}

/* build_skb() is a wrapper over __build_skb() that specifically
 * takes care of skb->head_frag and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc().
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (virt_to_head_page(data)->pfmemalloc)
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
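
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * a driver RX path pairs a page-fragment allocator with build_skb(),
 * along the lines described in the __build_skb() notes above:
 *
 *	unsigned int fragsz = SKB_DATA_ALIGN(buflen + NET_SKB_PAD) +
 *			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *data = netdev_alloc_frag(fragsz);
 *	struct sk_buff *skb;
 *
 *	if (!data)
 *		return NULL;
 *	skb = build_skb(data, fragsz);
 *	if (!skb) {
 *		put_page(virt_to_head_page(data));
 *		return NULL;
 *	}
 *	skb_reserve(skb, NET_SKB_PAD);
 */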

struct netdev_alloc_cache {
	struct page_frag	frag;
	/* we maintain a pagecount bias, so that we don't dirty the cache line
	 * containing page->_count every time we allocate a fragment.
	 */
	unsigned int		pagecnt_bias;
};
static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct netdev_alloc_cache *nc;
	void *data = NULL;
	int order;
	unsigned long flags;

	local_irq_save(flags);
	nc = this_cpu_ptr(&netdev_alloc_cache);
	if (unlikely(!nc->frag.page)) {
refill:
		for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
			gfp_t gfp = gfp_mask;

			if (order)
				gfp |= __GFP_COMP | __GFP_NOWARN |
				       __GFP_NOMEMALLOC;
			nc->frag.page = alloc_pages(gfp, order);
			if (likely(nc->frag.page))
				break;
			if (--order < 0)
				goto end;
		}
		nc->frag.size = PAGE_SIZE << order;
		/* Even if we own the page, we do not use atomic_set().
		 * This would break get_page_unless_zero() users.
		 */
		atomic_add(NETDEV_PAGECNT_MAX_BIAS - 1,
			   &nc->frag.page->_count);
		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
		nc->frag.offset = 0;
	}

	if (nc->frag.offset + fragsz > nc->frag.size) {
		if (atomic_read(&nc->frag.page->_count) != nc->pagecnt_bias) {
			if (!atomic_sub_and_test(nc->pagecnt_bias,
						 &nc->frag.page->_count))
				goto refill;
			/* OK, page count is 0, we can safely set it */
			atomic_set(&nc->frag.page->_count,
				   NETDEV_PAGECNT_MAX_BIAS);
		} else {
			atomic_add(NETDEV_PAGECNT_MAX_BIAS - nc->pagecnt_bias,
				   &nc->frag.page->_count);
		}
		nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
		nc->frag.offset = 0;
	}

	data = page_address(nc->frag.page) + nc->frag.offset;
	nc->frag.offset += fragsz;
	nc->pagecnt_bias--;
end:
	local_irq_restore(flags);
	return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
}
EXPORT_SYMBOL(netdev_alloc_frag);

/**
 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
				   unsigned int length, gfp_t gfp_mask)
{
	struct sk_buff *skb = NULL;
	unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
		void *data;

		if (sk_memalloc_socks())
			gfp_mask |= __GFP_MEMALLOC;

		data = __netdev_alloc_frag(fragsz, gfp_mask);

		if (likely(data)) {
			skb = build_skb(data, fragsz);
			if (unlikely(!skb))
				put_page(virt_to_head_page(data));
		}
	} else {
		skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
				  SKB_ALLOC_RX, NUMA_NO_NODE);
	}
	if (likely(skb)) {
		skb_reserve(skb, NET_SKB_PAD);
		skb->dev = dev;
	}
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
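
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * drivers normally call the netdev_alloc_skb() wrapper, which supplies
 * %GFP_ATOMIC. The NET_SKB_PAD headroom is already reserved on return,
 * so a simple RX path only adds what it needs itself:
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_IP_ALIGN);
 *		memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
 *		skb->protocol = eth_type_trans(skb, dev);
 *		netif_rx(skb);
 *	}
 */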

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	if (skb->head_frag)
		put_page(virt_to_head_page(skb->head));
	else
		kfree(skb->head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	/*
	 * If the skb buffer is from userspace, we need to notify the
	 * caller that the lower device's DMA is done.
	 */
	if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = shinfo->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, true);
	}

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_free_head(skb);
}

/*
 *	Free an skbuff's memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		break;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);
		if (atomic_dec_and_test(&fclones->fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, fclones);
		break;

	case SKB_FCLONE_CLONE:
		fclones = container_of(skb, struct sk_buff_fclones, skb2);

		/* The clone portion is available for
		 * fast-cloning again.
		 */
		skb->fclone = SKB_FCLONE_FREE;

		if (atomic_dec_and_test(&fclones->fclone_ref))
			kmem_cache_free(skbuff_fclone_cache, fclones);
		break;
	}
}

static void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
#endif
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb->nfct);
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
#endif
/* XXX: Is this still necessary? - JHS */
#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 *	__kfree_skb - private function
 *	@skb: buffer
 *
 *	Free an sk_buff. Release anything attached to the buffer.
 *	Clean the state. This is an internal helper function. Users should
 *	always call kfree_skb().
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 *	kfree_skb - free an sk_buff
 *	@skb: buffer to free
 *
 *	Drop a reference to the buffer and free it if the usage count has
 *	hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);

/**
 *	skb_tx_error - report an sk_buff xmit error
 *	@skb: buffer that triggered an error
 *
 *	Report an xmit error if a device callback is tracking this skb.
 *	skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
		struct ubuf_info *uarg;

		uarg = skb_shinfo(skb)->destructor_arg;
		if (uarg->callback)
			uarg->callback(uarg, false);
		skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	}
}
EXPORT_SYMBOL(skb_tx_error);

/**
 *	consume_skb - free an skbuff
 *	@skb: buffer to free
 *
 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 *	Functions identically to kfree_skb(), but kfree_skb() assumes that
 *	the frame is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (unlikely(!skb))
		return;
	if (likely(atomic_read(&skb->users) == 1))
		smp_rmb();
	else if (likely(!atomic_dec_and_test(&skb->users)))
		return;
	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);
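
/*
 * Editorial note (not part of the original file): the split between the
 * two free paths matters mainly for tracing and drop monitoring. A TX
 * completion handler would typically do:
 *
 *	if (tx_ok)
 *		consume_skb(skb);	// normal end of life, not a drop
 *	else
 *		kfree_skb(skb);		// recorded as a packet drop
 */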

/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp		= old->tstamp;
	/* We do not copy old->sk */
	new->dev		= old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
#ifdef CONFIG_XFRM
	new->sp			= secpath_get(old->sp);
#endif
	__nf_copy(new, old, false);

	/* Note: this field could be in the headers_start/headers_end section.
	 * It is not yet, because we do not want to have a 16 bit hole.
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#ifdef CONFIG_NET_CLS_ACT
	CHECK_SKB_FIELD(tc_verd);
#endif
#endif
}

/*
 * You should not add any new code to this function.  Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	atomic_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 *	skb_morph	-	morph one skb into another
 *	@dst: the skb to receive the contents
 *	@src: the skb to supply the contents
 *
 *	This is identical to skb_clone except that the target skb is
 *	supplied by the user.
 *
 *	The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

/**
 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 *	@skb: the skb to modify
 *	@gfp_mask: allocation priority
 *
 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 *	It will copy all frags into the kernel and drop the reference
 *	to userspace pages.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 *
 *	Returns 0 on success or a negative error code on failure
 *	to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int i;
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;

	for (i = 0; i < num_frags; i++) {
		u8 *vaddr;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		vaddr = kmap_atomic(skb_frag_page(f));
		memcpy(page_address(page),
		       vaddr + f->page_offset, skb_frag_size(f));
		kunmap_atomic(vaddr);
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	uarg->callback(uarg, false);

	/* skb frags point to kernel buffers */
	for (i = num_frags - 1; i >= 0; i--) {
		__skb_fill_page_desc(skb, i, head, 0,
				     skb_shinfo(skb)->frags[i].size);
		head = (struct page *)page_private(head);
	}

	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

/**
 *	skb_clone	-	duplicate an sk_buff
 *	@skb: buffer to clone
 *	@gfp_mask: allocation priority
 *
 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 *	copies share the same packet data but not structure. The new
 *	buffer has a reference count of 1. If the allocation fails the
 *	function returns %NULL otherwise the new buffer is returned.
 *
 *	If this function is called from an interrupt, @gfp_mask must be
 *	%GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff_fclones *fclones = container_of(skb,
						       struct sk_buff_fclones,
						       skb1);
	struct sk_buff *n = &fclones->skb2;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	if (skb->fclone == SKB_FCLONE_ORIG &&
	    n->fclone == SKB_FCLONE_FREE) {
		n->fclone = SKB_FCLONE_CLONE;
		atomic_inc(&fclones->fclone_ref);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		kmemcheck_annotate_bitfield(n, flags1);
		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
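
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * a clone shares its packet data with the original, so a caller that
 * wants to edit even the headers must unshare them first, e.g. with
 * skb_cow_head():
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (!nskb)
 *		return -ENOMEM;
 *	if (skb_cow_head(nskb, 0)) {	// make the header writable
 *		kfree_skb(nskb);
 *		return -ENOMEM;
 *	}
 *	// header bytes of nskb may now be modified safely
 */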

static void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header   += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

/**
 *	skb_copy	-	create private copy of an sk_buff
 *	@skb: buffer to copy
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data. This is used when the
 *	caller wishes to modify the data and needs a private copy of the
 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	As a by-product, this function converts a non-linear &sk_buff to a
 *	linear one, so that the &sk_buff becomes completely private and the
 *	caller is allowed to modify all the data of the returned buffer. This
 *	means that this function is not recommended for use in circumstances
 *	when only the header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
		BUG();

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);
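
/*
 * Editorial note (not part of the original file): skb_copy() is the
 * heavy hammer, linearizing and privatizing head and fragments alike.
 * When only the header will be edited, pskb_copy() is the cheaper
 * alternative, as the kernel-doc above suggests:
 *
 *	struct sk_buff *full = skb_copy(skb, GFP_ATOMIC);	// everything private
 *	struct sk_buff *hdrs = pskb_copy(skb, GFP_ATOMIC);	// header private, frags shared
 */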

/**
 *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
 *	@skb: buffer to copy
 *	@headroom: headroom of new skb
 *	@gfp_mask: allocation priority
 *	@fclone: if true allocate the copy of the skb from the fclone
 *	cache instead of the head cache; it is recommended to set this
 *	to true for the cases where the copy will likely be cloned
 *
 *	Make a copy of both an &sk_buff and part of its data, located
 *	in header. Fragmented data remain shared. This is used when
 *	the caller wishes to modify only header of &sk_buff and needs
 *	private copy of the header to alter. Returns %NULL on failure
 *	or the pointer to the buffer on success.
 *	The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone)
{
	unsigned int size = skb_headlen(skb) + headroom;
	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len  = skb->data_len;
	n->len	     = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy_fclone);

/**
 *	pskb_expand_head - reallocate header of &sk_buff
 *	@skb: buffer to reallocate
 *	@nhead: room to add at head
 *	@ntail: room to add at tail
 *	@gfp_mask: allocation priority
 *
 *	Expands (or creates identical copy, if @nhead and @ntail are zero)
 *	header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 *	reference count of 1. Returns zero in the case of success, or a
 *	negative error code if expansion failed. In the latter case, the
 *	&sk_buff is not changed.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i;
	u8 *data;
	int size = nhead + skb_end_offset(skb) + ntail;
	long off;

	BUG_ON(nhead < 0);

	if (skb_shared(skb))
		BUG();

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		/* copy these zero-copy skb frags */
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head     = data;
	skb->head_frag = 0;
	skb->data    += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end      = size;
	off           = nhead;
#else
	skb->end      = skb->head + size;
#endif
	skb->tail	      += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned   = 0;
	skb->hdr_len  = 0;
	skb->nohdr    = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);
	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
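
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * since pskb_expand_head() may move skb->head, cached pointers into the
 * buffer must be re-derived after a successful call. "needed" below is a
 * hypothetical headroom requirement:
 *
 *	struct iphdr *iph;
 *
 *	if (skb_headroom(skb) < needed &&
 *	    pskb_expand_head(skb, needed - skb_headroom(skb), 0, GFP_ATOMIC))
 *		return -ENOMEM;
 *	iph = ip_hdr(skb);	// reload; pre-call pointers may be stale
 */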

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 *	skb_copy_expand	-	copy and expand sk_buff
 *	@skb: buffer to copy
 *	@newheadroom: new free bytes at head
 *	@newtailroom: new free bytes at tail
 *	@gfp_mask: allocation priority
 *
 *	Make a copy of both an &sk_buff and its data and while doing so
 *	allocate additional space.
 *
 *	This is used when the caller wishes to modify the data and needs a
 *	private copy of the data to alter as well as more space for new fields.
 *	Returns %NULL on failure or the pointer to the buffer
 *	on success. The returned buffer has a reference count of 1.
 *
 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 *	is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 *	Allocate the copy buffer
	 */
	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
					gfp_mask, skb_alloc_rx_flag(skb),
					NUMA_NO_NODE);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			  skb->len + head_copy_len))
		BUG();

	copy_skb_header(n, skb);

	skb_headers_offset_update(n, newheadroom - oldheadroom);

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 *	skb_pad			-	zero pad the tail of an skb
 *	@skb: buffer to pad
 *	@pad: space to pad
 *
 *	Ensure that a buffer is followed by a padding area that is zero
 *	filled. Used by network drivers which may DMA or transfer data
 *	beyond the buffer end onto the wire.
 *
 *	May return an error in out-of-memory cases. The skb is freed on error.
 */

int skb_pad(struct sk_buff *skb, int pad)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(skb_pad);

/**
 *	pskb_put - add data to the tail of a potentially fragmented buffer
 *	@skb: start of the buffer to use
 *	@tail: tail fragment of the buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the potentially
 *	fragmented buffer. @tail must be the last fragment of @skb -- or
 *	@skb itself. If this would exceed the total buffer size the kernel
 *	will panic. A pointer to the first byte of the extra data is
 *	returned.
 */

unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);

/**
 *	skb_put - add data to a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer. If this would
 *	exceed the total buffer size the kernel will panic. A pointer to the
 *	first byte of the extra data is returned.
 */
unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);

/**
 *	skb_push - add data to the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to add
 *
 *	This function extends the used data area of the buffer at the buffer
 *	start. If this would exceed the total buffer headroom the kernel will
 *	panic. A pointer to the first byte of the extra data is returned.
 */
unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);
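
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * the classic pattern for composing a packet pairs skb_reserve() with
 * skb_put() and skb_push(): reserve headroom up front, append the
 * payload, then prepend each lower-layer header in turn:
 *
 *	struct sk_buff *skb = alloc_skb(hlen + plen, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, hlen);				// headroom for headers
 *	memcpy(skb_put(skb, plen), payload, plen);	// tail grows forward
 *	memcpy(skb_push(skb, hlen), header, hlen);	// data grows backward
 */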

/**
 *	skb_pull - remove data from the start of a buffer
 *	@skb: buffer to use
 *	@len: amount of data to remove
 *
 *	This function removes data from the start of a buffer, returning
 *	the memory to the headroom. A pointer to the next data in the buffer
 *	is returned. Once the data has been pulled future pushes will overwrite
 *	the old data.
 */
unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 *	skb_trim - remove end from a buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	Cut the length of a buffer down by removing data from the tail. If
 *	the buffer is already under the length specified it is not modified.
 *	The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			consume_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len       = len;
	} else {
		skb->len       = len;
		skb->data_len  = 0;
		skb_set_tail_pointer(skb, len);
	}

	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/**
 *	__pskb_pull_tail - advance tail of skb header
 *	@skb: buffer to reallocate
 *	@delta: number of bytes to advance tail
 *
 *	The function makes sense only on a fragmented &sk_buff;
 *	it expands the header, moving its tail forward and copying necessary
 *	data from the fragmented part.
 *
 *	&sk_buff MUST have reference count of 1.
 *
 *	Returns %NULL (and &sk_buff does not change) if the pull failed,
 *	or the value of the new tail of the skb in the case of success.
 *
 *	All the pointers pointing into skb header may change and must be
 *	reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If the skb does not have enough free space at the tail, get a new
	 * one plus 128 bytes for future expansions. If we have enough room
	 * at the tail, reallocate without expansion only if skb is cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
		BUG();

	/* Optimization: no fragments, no reason to pre-estimate
	 * the size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to the skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork the list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail     += delta;
	skb->data_len -= delta;

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);

/**
 *	skb_copy_bits - copy bits from skb to kernel buffer
 *	@skb: source skb
 *	@offset: offset in source
 *	@to: destination buffer
 *	@len: number of bytes to copy
 *
 *	Copy the specified number of bytes from the source skb to the
 *	destination buffer.
 *
 *	CAUTION ! :
 *		If its prototype is ever changed,
 *		check arch/{*}/net/{*}.S files,
 *		since it is called from BPF assembly code.
 */
int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	/* Copy header. */
	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_from_linear_data_offset(skb, offset, to, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		to     += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(f);
		if ((copy = end - offset) > 0) {
			u8 *vaddr;

			if (copy > len)
				copy = len;

			vaddr = kmap_atomic(skb_frag_page(f));
			memcpy(to,
			       vaddr + f->page_offset + offset - start,
			       copy);
			kunmap_atomic(vaddr);

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_copy_bits(frag_iter, offset - start, to, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			to     += copy;
		}
		start = end;
	}

	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_copy_bits);
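
/*
 * Usage sketch (editorial illustration, not part of the original file):
 * because skb_copy_bits() walks the linear part, the page frags and the
 * frag list, it is the safe way to read data that may not be linear:
 *
 *	struct tcphdr th;
 *
 *	if (skb_copy_bits(skb, skb_transport_offset(skb), &th, sizeof(th)))
 *		return -EINVAL;		// offset/len out of range
 */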
1685 
1686 /*
1687  * Callback from splice_to_pipe(), if we need to release some pages
1688  * at the end of the spd in case we error'ed out in filling the pipe.
1689  */
sock_spd_release(struct splice_pipe_desc * spd,unsigned int i)1690 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1691 {
1692 	put_page(spd->pages[i]);
1693 }
1694 
linear_to_page(struct page * page,unsigned int * len,unsigned int * offset,struct sock * sk)1695 static struct page *linear_to_page(struct page *page, unsigned int *len,
1696 				   unsigned int *offset,
1697 				   struct sock *sk)
1698 {
1699 	struct page_frag *pfrag = sk_page_frag(sk);
1700 
1701 	if (!sk_page_frag_refill(sk, pfrag))
1702 		return NULL;
1703 
1704 	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
1705 
1706 	memcpy(page_address(pfrag->page) + pfrag->offset,
1707 	       page_address(page) + *offset, *len);
1708 	*offset = pfrag->offset;
1709 	pfrag->offset += *len;
1710 
1711 	return pfrag->page;
1712 }
1713 
spd_can_coalesce(const struct splice_pipe_desc * spd,struct page * page,unsigned int offset)1714 static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
1715 			     struct page *page,
1716 			     unsigned int offset)
1717 {
1718 	return	spd->nr_pages &&
1719 		spd->pages[spd->nr_pages - 1] == page &&
1720 		(spd->partial[spd->nr_pages - 1].offset +
1721 		 spd->partial[spd->nr_pages - 1].len == offset);
1722 }
1723 
1724 /*
1725  * Fill page/offset/length into spd, if it can hold more pages.
1726  */
spd_fill_page(struct splice_pipe_desc * spd,struct pipe_inode_info * pipe,struct page * page,unsigned int * len,unsigned int offset,bool linear,struct sock * sk)1727 static bool spd_fill_page(struct splice_pipe_desc *spd,
1728 			  struct pipe_inode_info *pipe, struct page *page,
1729 			  unsigned int *len, unsigned int offset,
1730 			  bool linear,
1731 			  struct sock *sk)
1732 {
1733 	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
1734 		return true;
1735 
1736 	if (linear) {
1737 		page = linear_to_page(page, len, &offset, sk);
1738 		if (!page)
1739 			return true;
1740 	}
1741 	if (spd_can_coalesce(spd, page, offset)) {
1742 		spd->partial[spd->nr_pages - 1].len += *len;
1743 		return false;
1744 	}
1745 	get_page(page);
1746 	spd->pages[spd->nr_pages] = page;
1747 	spd->partial[spd->nr_pages].len = *len;
1748 	spd->partial[spd->nr_pages].offset = offset;
1749 	spd->nr_pages++;
1750 
1751 	return false;
1752 }
1753 
__splice_segment(struct page * page,unsigned int poff,unsigned int plen,unsigned int * off,unsigned int * len,struct splice_pipe_desc * spd,bool linear,struct sock * sk,struct pipe_inode_info * pipe)1754 static bool __splice_segment(struct page *page, unsigned int poff,
1755 			     unsigned int plen, unsigned int *off,
1756 			     unsigned int *len,
1757 			     struct splice_pipe_desc *spd, bool linear,
1758 			     struct sock *sk,
1759 			     struct pipe_inode_info *pipe)
1760 {
1761 	if (!*len)
1762 		return true;
1763 
1764 	/* skip this segment if already processed */
1765 	if (*off >= plen) {
1766 		*off -= plen;
1767 		return false;
1768 	}
1769 
1770 	/* ignore any bits we already processed */
1771 	poff += *off;
1772 	plen -= *off;
1773 	*off = 0;
1774 
1775 	do {
1776 		unsigned int flen = min(*len, plen);
1777 
1778 		if (spd_fill_page(spd, pipe, page, &flen, poff,
1779 				  linear, sk))
1780 			return true;
1781 		poff += flen;
1782 		plen -= flen;
1783 		*len -= flen;
1784 	} while (*len && plen);
1785 
1786 	return false;
1787 }
1788 
1789 /*
1790  * Map linear and fragment data from the skb to spd. It reports true if the
1791  * pipe is full or if we already spliced the requested length.
1792  */
1793 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1794 			      unsigned int *offset, unsigned int *len,
1795 			      struct splice_pipe_desc *spd, struct sock *sk)
1796 {
1797 	int seg;
1798 
1799 	/* map the linear part:
1800 	 * If skb->head_frag is set, this 'linear' part is backed by a
1801 	 * fragment, and if the head is not shared with any clones then
1802 	 * we can avoid a copy since we own the head portion of this page.
1803 	 */
1804 	if (__splice_segment(virt_to_page(skb->data),
1805 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
1806 			     skb_headlen(skb),
1807 			     offset, len, spd,
1808 			     skb_head_is_locked(skb),
1809 			     sk, pipe))
1810 		return true;
1811 
1812 	/*
1813 	 * then map the fragments
1814 	 */
1815 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1816 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1817 
1818 		if (__splice_segment(skb_frag_page(f),
1819 				     f->page_offset, skb_frag_size(f),
1820 				     offset, len, spd, false, sk, pipe))
1821 			return true;
1822 	}
1823 
1824 	return false;
1825 }
1826 
1827 /*
1828  * Map data from the skb to a pipe. Should handle both the linear part,
1829  * the fragments, and the frag list. It does NOT handle frag lists within
1830  * the frag list, if such a thing exists. We'd probably need to recurse to
1831  * handle that cleanly.
1832  */
1833 int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1834 		    struct pipe_inode_info *pipe, unsigned int tlen,
1835 		    unsigned int flags)
1836 {
1837 	struct partial_page partial[MAX_SKB_FRAGS];
1838 	struct page *pages[MAX_SKB_FRAGS];
1839 	struct splice_pipe_desc spd = {
1840 		.pages = pages,
1841 		.partial = partial,
1842 		.nr_pages_max = MAX_SKB_FRAGS,
1843 		.flags = flags,
1844 		.ops = &nosteal_pipe_buf_ops,
1845 		.spd_release = sock_spd_release,
1846 	};
1847 	struct sk_buff *frag_iter;
1848 	struct sock *sk = skb->sk;
1849 	int ret = 0;
1850 
1851 	/*
1852 	 * __skb_splice_bits() only fails if the output has no room left,
1853 	 * so no point in going over the frag_list for the error case.
1854 	 */
1855 	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
1856 		goto done;
1857 	else if (!tlen)
1858 		goto done;
1859 
1860 	/*
1861 	 * now see if we have a frag_list to map
1862 	 */
1863 	skb_walk_frags(skb, frag_iter) {
1864 		if (!tlen)
1865 			break;
1866 		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
1867 			break;
1868 	}
1869 
1870 done:
1871 	if (spd.nr_pages) {
1872 		/*
1873 		 * Drop the socket lock, otherwise we have reverse
1874 		 * locking dependencies between sk_lock and i_mutex
1875 		 * here as compared to sendfile(). We enter here
1876 		 * with the socket lock held, and splice_to_pipe() will
1877 		 * grab the pipe inode lock. For sendfile() emulation,
1878 		 * we call into ->sendpage() with the i_mutex lock held
1879 		 * and networking will grab the socket lock.
1880 		 */
1881 		release_sock(sk);
1882 		ret = splice_to_pipe(pipe, &spd);
1883 		lock_sock(sk);
1884 	}
1885 
1886 	return ret;
1887 }
1888 
1889 /**
1890  *	skb_store_bits - store bits from kernel buffer to skb
1891  *	@skb: destination buffer
1892  *	@offset: offset in destination
1893  *	@from: source buffer
1894  *	@len: number of bytes to copy
1895  *
1896  *	Copy the specified number of bytes from the source buffer to the
1897  *	destination skb.  This function handles all the messy bits of
1898  *	traversing fragment lists and such.
1899  */
1900 
1901 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1902 {
1903 	int start = skb_headlen(skb);
1904 	struct sk_buff *frag_iter;
1905 	int i, copy;
1906 
1907 	if (offset > (int)skb->len - len)
1908 		goto fault;
1909 
1910 	if ((copy = start - offset) > 0) {
1911 		if (copy > len)
1912 			copy = len;
1913 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
1914 		if ((len -= copy) == 0)
1915 			return 0;
1916 		offset += copy;
1917 		from += copy;
1918 	}
1919 
1920 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1921 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1922 		int end;
1923 
1924 		WARN_ON(start > offset + len);
1925 
1926 		end = start + skb_frag_size(frag);
1927 		if ((copy = end - offset) > 0) {
1928 			u8 *vaddr;
1929 
1930 			if (copy > len)
1931 				copy = len;
1932 
1933 			vaddr = kmap_atomic(skb_frag_page(frag));
1934 			memcpy(vaddr + frag->page_offset + offset - start,
1935 			       from, copy);
1936 			kunmap_atomic(vaddr);
1937 
1938 			if ((len -= copy) == 0)
1939 				return 0;
1940 			offset += copy;
1941 			from += copy;
1942 		}
1943 		start = end;
1944 	}
1945 
1946 	skb_walk_frags(skb, frag_iter) {
1947 		int end;
1948 
1949 		WARN_ON(start > offset + len);
1950 
1951 		end = start + frag_iter->len;
1952 		if ((copy = end - offset) > 0) {
1953 			if (copy > len)
1954 				copy = len;
1955 			if (skb_store_bits(frag_iter, offset - start,
1956 					   from, copy))
1957 				goto fault;
1958 			if ((len -= copy) == 0)
1959 				return 0;
1960 			offset += copy;
1961 			from += copy;
1962 		}
1963 		start = end;
1964 	}
1965 	if (!len)
1966 		return 0;
1967 
1968 fault:
1969 	return -EFAULT;
1970 }
1971 EXPORT_SYMBOL(skb_store_bits);
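
/*
 * Example (editorial sketch, not in the original file): rewriting bytes at
 * a given offset with skb_store_bits().  The caller is assumed to have
 * made the data writable first (e.g. by un-cloning the skb); the helper
 * name is made up.
 */
static inline int example_patch_bytes(struct sk_buff *skb, int offset,
				      const void *buf, int len)
{
	/* Fails with -EFAULT if offset + len exceeds the skb length. */
	return skb_store_bits(skb, offset, buf, len);
}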
1972 
1973 /* Checksum skb data. */
1974 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
1975 		      __wsum csum, const struct skb_checksum_ops *ops)
1976 {
1977 	int start = skb_headlen(skb);
1978 	int i, copy = start - offset;
1979 	struct sk_buff *frag_iter;
1980 	int pos = 0;
1981 
1982 	/* Checksum header. */
1983 	if (copy > 0) {
1984 		if (copy > len)
1985 			copy = len;
1986 		csum = ops->update(skb->data + offset, copy, csum);
1987 		if ((len -= copy) == 0)
1988 			return csum;
1989 		offset += copy;
1990 		pos	= copy;
1991 	}
1992 
1993 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1994 		int end;
1995 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1996 
1997 		WARN_ON(start > offset + len);
1998 
1999 		end = start + skb_frag_size(frag);
2000 		if ((copy = end - offset) > 0) {
2001 			__wsum csum2;
2002 			u8 *vaddr;
2003 
2004 			if (copy > len)
2005 				copy = len;
2006 			vaddr = kmap_atomic(skb_frag_page(frag));
2007 			csum2 = ops->update(vaddr + frag->page_offset +
2008 					    offset - start, copy, 0);
2009 			kunmap_atomic(vaddr);
2010 			csum = ops->combine(csum, csum2, pos, copy);
2011 			if (!(len -= copy))
2012 				return csum;
2013 			offset += copy;
2014 			pos    += copy;
2015 		}
2016 		start = end;
2017 	}
2018 
2019 	skb_walk_frags(skb, frag_iter) {
2020 		int end;
2021 
2022 		WARN_ON(start > offset + len);
2023 
2024 		end = start + frag_iter->len;
2025 		if ((copy = end - offset) > 0) {
2026 			__wsum csum2;
2027 			if (copy > len)
2028 				copy = len;
2029 			csum2 = __skb_checksum(frag_iter, offset - start,
2030 					       copy, 0, ops);
2031 			csum = ops->combine(csum, csum2, pos, copy);
2032 			if ((len -= copy) == 0)
2033 				return csum;
2034 			offset += copy;
2035 			pos    += copy;
2036 		}
2037 		start = end;
2038 	}
2039 	BUG_ON(len);
2040 
2041 	return csum;
2042 }
2043 EXPORT_SYMBOL(__skb_checksum);
2044 
2045 __wsum skb_checksum(const struct sk_buff *skb, int offset,
2046 		    int len, __wsum csum)
2047 {
2048 	const struct skb_checksum_ops ops = {
2049 		.update  = csum_partial_ext,
2050 		.combine = csum_block_add_ext,
2051 	};
2052 
2053 	return __skb_checksum(skb, offset, len, csum, &ops);
2054 }
2055 EXPORT_SYMBOL(skb_checksum);
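
/*
 * Example (editorial sketch, not in the original file): computing the
 * Internet checksum of an entire skb payload and folding the 32-bit
 * partial sum into the final 16-bit value with csum_fold().
 */
static inline __sum16 example_csum_skb(const struct sk_buff *skb)
{
	__wsum csum = skb_checksum(skb, 0, skb->len, 0);

	return csum_fold(csum);
}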
2056 
2057 /* Both of the above in one bottle. */
2058 
2059 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
2060 				    u8 *to, int len, __wsum csum)
2061 {
2062 	int start = skb_headlen(skb);
2063 	int i, copy = start - offset;
2064 	struct sk_buff *frag_iter;
2065 	int pos = 0;
2066 
2067 	/* Copy header. */
2068 	if (copy > 0) {
2069 		if (copy > len)
2070 			copy = len;
2071 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
2072 						 copy, csum);
2073 		if ((len -= copy) == 0)
2074 			return csum;
2075 		offset += copy;
2076 		to     += copy;
2077 		pos	= copy;
2078 	}
2079 
2080 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2081 		int end;
2082 
2083 		WARN_ON(start > offset + len);
2084 
2085 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2086 		if ((copy = end - offset) > 0) {
2087 			__wsum csum2;
2088 			u8 *vaddr;
2089 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2090 
2091 			if (copy > len)
2092 				copy = len;
2093 			vaddr = kmap_atomic(skb_frag_page(frag));
2094 			csum2 = csum_partial_copy_nocheck(vaddr +
2095 							  frag->page_offset +
2096 							  offset - start, to,
2097 							  copy, 0);
2098 			kunmap_atomic(vaddr);
2099 			csum = csum_block_add(csum, csum2, pos);
2100 			if (!(len -= copy))
2101 				return csum;
2102 			offset += copy;
2103 			to     += copy;
2104 			pos    += copy;
2105 		}
2106 		start = end;
2107 	}
2108 
2109 	skb_walk_frags(skb, frag_iter) {
2110 		__wsum csum2;
2111 		int end;
2112 
2113 		WARN_ON(start > offset + len);
2114 
2115 		end = start + frag_iter->len;
2116 		if ((copy = end - offset) > 0) {
2117 			if (copy > len)
2118 				copy = len;
2119 			csum2 = skb_copy_and_csum_bits(frag_iter,
2120 						       offset - start,
2121 						       to, copy, 0);
2122 			csum = csum_block_add(csum, csum2, pos);
2123 			if ((len -= copy) == 0)
2124 				return csum;
2125 			offset += copy;
2126 			to     += copy;
2127 			pos    += copy;
2128 		}
2129 		start = end;
2130 	}
2131 	BUG_ON(len);
2132 	return csum;
2133 }
2134 EXPORT_SYMBOL(skb_copy_and_csum_bits);
2135 
2136 /**
2137  *	skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2138  *	@from: source buffer
2139  *
2140  *	Calculates the amount of linear headroom needed in the 'to' skb passed
2141  *	into skb_zerocopy().
2142  */
2143 unsigned int
2144 skb_zerocopy_headlen(const struct sk_buff *from)
2145 {
2146 	unsigned int hlen = 0;
2147 
2148 	if (!from->head_frag ||
2149 	    skb_headlen(from) < L1_CACHE_BYTES ||
2150 	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
2151 		hlen = skb_headlen(from);
2152 
2153 	if (skb_has_frag_list(from))
2154 		hlen = from->len;
2155 
2156 	return hlen;
2157 }
2158 EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2159 
2160 /**
2161  *	skb_zerocopy - Zero copy skb to skb
2162  *	@to: destination buffer
2163  *	@from: source buffer
2164  *	@len: number of bytes to copy from source buffer
2165  *	@hlen: size of linear headroom in destination buffer
2166  *
2167  *	Copies up to `len` bytes from `from` to `to` by creating references
2168  *	to the frags in the source buffer.
2169  *
2170  *	The `hlen` as calculated by skb_zerocopy_headlen() specifies the
2171  *	headroom in the `to` buffer.
2172  *
2173  *	Return value:
2174  *	0: everything is OK
2175  *	-ENOMEM: couldn't orphan frags of @from due to lack of memory
2176  *	-EFAULT: skb_copy_bits() found some problem with skb geometry
2177  */
2178 int
2179 skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2180 {
2181 	int i, j = 0;
2182 	int plen = 0; /* length of skb->head fragment */
2183 	int ret;
2184 	struct page *page;
2185 	unsigned int offset;
2186 
2187 	BUG_ON(!from->head_frag && !hlen);
2188 
2189 	/* don't bother with small payloads */
2190 	if (len <= skb_tailroom(to))
2191 		return skb_copy_bits(from, 0, skb_put(to, len), len);
2192 
2193 	if (hlen) {
2194 		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
2195 		if (unlikely(ret))
2196 			return ret;
2197 		len -= hlen;
2198 	} else {
2199 		plen = min_t(int, skb_headlen(from), len);
2200 		if (plen) {
2201 			page = virt_to_head_page(from->head);
2202 			offset = from->data - (unsigned char *)page_address(page);
2203 			__skb_fill_page_desc(to, 0, page, offset, plen);
2204 			get_page(page);
2205 			j = 1;
2206 			len -= plen;
2207 		}
2208 	}
2209 
2210 	to->truesize += len + plen;
2211 	to->len += len + plen;
2212 	to->data_len += len + plen;
2213 
2214 	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
2215 		skb_tx_error(from);
2216 		return -ENOMEM;
2217 	}
2218 
2219 	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
2220 		if (!len)
2221 			break;
2222 		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
2223 		skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
2224 		len -= skb_shinfo(to)->frags[j].size;
2225 		skb_frag_ref(to, j);
2226 		j++;
2227 	}
2228 	skb_shinfo(to)->nr_frags = j;
2229 
2230 	return 0;
2231 }
2232 EXPORT_SYMBOL_GPL(skb_zerocopy);
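
/*
 * Example (editorial sketch, not in the original file): the intended
 * pairing of skb_zerocopy_headlen() and skb_zerocopy(), loosely following
 * callers such as openvswitch.  Error handling is reduced to a minimum
 * and the helper name is made up.
 */
static struct sk_buff *example_zerocopy_clone(struct sk_buff *from, gfp_t gfp)
{
	unsigned int hlen = skb_zerocopy_headlen(from);
	struct sk_buff *to = alloc_skb(hlen, gfp);

	if (!to)
		return NULL;
	if (skb_zerocopy(to, from, from->len, hlen)) {
		kfree_skb(to);
		return NULL;
	}
	return to;
}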
2233 
2234 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2235 {
2236 	__wsum csum;
2237 	long csstart;
2238 
2239 	if (skb->ip_summed == CHECKSUM_PARTIAL)
2240 		csstart = skb_checksum_start_offset(skb);
2241 	else
2242 		csstart = skb_headlen(skb);
2243 
2244 	BUG_ON(csstart > skb_headlen(skb));
2245 
2246 	skb_copy_from_linear_data(skb, to, csstart);
2247 
2248 	csum = 0;
2249 	if (csstart != skb->len)
2250 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
2251 					      skb->len - csstart, 0);
2252 
2253 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2254 		long csstuff = csstart + skb->csum_offset;
2255 
2256 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
2257 	}
2258 }
2259 EXPORT_SYMBOL(skb_copy_and_csum_dev);
2260 
2261 /**
2262  *	skb_dequeue - remove from the head of the queue
2263  *	@list: list to dequeue from
2264  *
2265  *	Remove the head of the list. The list lock is taken so the function
2266  *	may be used safely with other locking list functions. The head item is
2267  *	returned or %NULL if the list is empty.
2268  */
2269 
2270 struct sk_buff *skb_dequeue(struct sk_buff_head *list)
2271 {
2272 	unsigned long flags;
2273 	struct sk_buff *result;
2274 
2275 	spin_lock_irqsave(&list->lock, flags);
2276 	result = __skb_dequeue(list);
2277 	spin_unlock_irqrestore(&list->lock, flags);
2278 	return result;
2279 }
2280 EXPORT_SYMBOL(skb_dequeue);
2281 
2282 /**
2283  *	skb_dequeue_tail - remove from the tail of the queue
2284  *	@list: list to dequeue from
2285  *
2286  *	Remove the tail of the list. The list lock is taken so the function
2287  *	may be used safely with other locking list functions. The tail item is
2288  *	returned or %NULL if the list is empty.
2289  */
2290 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
2291 {
2292 	unsigned long flags;
2293 	struct sk_buff *result;
2294 
2295 	spin_lock_irqsave(&list->lock, flags);
2296 	result = __skb_dequeue_tail(list);
2297 	spin_unlock_irqrestore(&list->lock, flags);
2298 	return result;
2299 }
2300 EXPORT_SYMBOL(skb_dequeue_tail);
2301 
2302 /**
2303  *	skb_queue_purge - empty a list
2304  *	@list: list to empty
2305  *
2306  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
2307  *	the list and one reference dropped. This function takes the list
2308  *	lock and is atomic with respect to other list locking functions.
2309  */
2310 void skb_queue_purge(struct sk_buff_head *list)
2311 {
2312 	struct sk_buff *skb;
2313 	while ((skb = skb_dequeue(list)) != NULL)
2314 		kfree_skb(skb);
2315 }
2316 EXPORT_SYMBOL(skb_queue_purge);
2317 
2318 /**
2319  *	skb_queue_head - queue a buffer at the list head
2320  *	@list: list to use
2321  *	@newsk: buffer to queue
2322  *
2323  *	Queue a buffer at the start of the list. This function takes the
2324  *	list lock and can be used safely with other locking &sk_buff
2325  *	functions.
2326  *
2327  *	A buffer cannot be placed on two lists at the same time.
2328  */
2329 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2330 {
2331 	unsigned long flags;
2332 
2333 	spin_lock_irqsave(&list->lock, flags);
2334 	__skb_queue_head(list, newsk);
2335 	spin_unlock_irqrestore(&list->lock, flags);
2336 }
2337 EXPORT_SYMBOL(skb_queue_head);
2338 
2339 /**
2340  *	skb_queue_tail - queue a buffer at the list tail
2341  *	@list: list to use
2342  *	@newsk: buffer to queue
2343  *
2344  *	Queue a buffer at the tail of the list. This function takes the
2345  *	list lock and can be used safely with other locking &sk_buff
2346  *	functions.
2347  *
2348  *	A buffer cannot be placed on two lists at the same time.
2349  */
2350 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2351 {
2352 	unsigned long flags;
2353 
2354 	spin_lock_irqsave(&list->lock, flags);
2355 	__skb_queue_tail(list, newsk);
2356 	spin_unlock_irqrestore(&list->lock, flags);
2357 }
2358 EXPORT_SYMBOL(skb_queue_tail);
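
/*
 * Example (editorial sketch, not in the original file): a minimal FIFO
 * round trip on an &sk_buff_head.  Because skb_queue_tail() and
 * skb_dequeue() take the list lock themselves, no extra locking is
 * needed around them.
 */
static void example_queue_roundtrip(struct sk_buff *skb)
{
	struct sk_buff_head q;

	skb_queue_head_init(&q);
	skb_queue_tail(&q, skb);	/* enqueue at the tail */
	skb = skb_dequeue(&q);		/* dequeue from the head */
	kfree_skb(skb);
}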
2359 
2360 /**
2361  *	skb_unlink	-	remove a buffer from a list
2362  *	@skb: buffer to remove
2363  *	@list: list to use
2364  *
2365  *	Remove a packet from a list. The list locks are taken and this
2366  *	function is atomic with respect to other list locked calls.
2367  *
2368  *	You must know what list the SKB is on.
2369  */
2370 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2371 {
2372 	unsigned long flags;
2373 
2374 	spin_lock_irqsave(&list->lock, flags);
2375 	__skb_unlink(skb, list);
2376 	spin_unlock_irqrestore(&list->lock, flags);
2377 }
2378 EXPORT_SYMBOL(skb_unlink);
2379 
2380 /**
2381  *	skb_append	-	append a buffer
2382  *	@old: buffer to insert after
2383  *	@newsk: buffer to insert
2384  *	@list: list to use
2385  *
2386  *	Place a packet after a given packet in a list. The list locks are taken
2387  *	and this function is atomic with respect to other list locked calls.
2388  *	A buffer cannot be placed on two lists at the same time.
2389  */
2390 void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2391 {
2392 	unsigned long flags;
2393 
2394 	spin_lock_irqsave(&list->lock, flags);
2395 	__skb_queue_after(list, old, newsk);
2396 	spin_unlock_irqrestore(&list->lock, flags);
2397 }
2398 EXPORT_SYMBOL(skb_append);
2399 
2400 /**
2401  *	skb_insert	-	insert a buffer
2402  *	@old: buffer to insert before
2403  *	@newsk: buffer to insert
2404  *	@list: list to use
2405  *
2406  *	Place a packet before a given packet in a list. The list locks are
2407  * 	taken and this function is atomic with respect to other list locked
2408  *	calls.
2409  *
2410  *	A buffer cannot be placed on two lists at the same time.
2411  */
2412 void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2413 {
2414 	unsigned long flags;
2415 
2416 	spin_lock_irqsave(&list->lock, flags);
2417 	__skb_insert(newsk, old->prev, old, list);
2418 	spin_unlock_irqrestore(&list->lock, flags);
2419 }
2420 EXPORT_SYMBOL(skb_insert);
2421 
2422 static inline void skb_split_inside_header(struct sk_buff *skb,
2423 					   struct sk_buff* skb1,
2424 					   const u32 len, const int pos)
2425 {
2426 	int i;
2427 
2428 	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2429 					 pos - len);
2430 	/* And move data appendix as is. */
2431 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2432 		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2433 
2434 	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2435 	skb_shinfo(skb)->nr_frags  = 0;
2436 	skb1->data_len		   = skb->data_len;
2437 	skb1->len		   += skb1->data_len;
2438 	skb->data_len		   = 0;
2439 	skb->len		   = len;
2440 	skb_set_tail_pointer(skb, len);
2441 }
2442 
2443 static inline void skb_split_no_header(struct sk_buff *skb,
2444 				       struct sk_buff* skb1,
2445 				       const u32 len, int pos)
2446 {
2447 	int i, k = 0;
2448 	const int nfrags = skb_shinfo(skb)->nr_frags;
2449 
2450 	skb_shinfo(skb)->nr_frags = 0;
2451 	skb1->len		  = skb1->data_len = skb->len - len;
2452 	skb->len		  = len;
2453 	skb->data_len		  = len - pos;
2454 
2455 	for (i = 0; i < nfrags; i++) {
2456 		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2457 
2458 		if (pos + size > len) {
2459 			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2460 
2461 			if (pos < len) {
2462 				/* Split frag.
2463 				 * We have two variants in this case:
2464 				 * 1. Move the whole frag to the second
2465 				 *    part, if possible. E.g.
2466 				 *    this approach is mandatory for TUX,
2467 				 *    where splitting is expensive.
2468 				 * 2. Split accurately. This is what we do.
2469 				 */
2470 				skb_frag_ref(skb, i);
2471 				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2472 				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
2473 				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
2474 				skb_shinfo(skb)->nr_frags++;
2475 			}
2476 			k++;
2477 		} else
2478 			skb_shinfo(skb)->nr_frags++;
2479 		pos += size;
2480 	}
2481 	skb_shinfo(skb1)->nr_frags = k;
2482 }
2483 
2484 /**
2485  * skb_split - Split fragmented skb to two parts at length len.
2486  * @skb: the buffer to split
2487  * @skb1: the buffer to receive the second part
2488  * @len: new length for skb
2489  */
2490 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2491 {
2492 	int pos = skb_headlen(skb);
2493 
2494 	skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2495 	if (len < pos)	/* Split line is inside header. */
2496 		skb_split_inside_header(skb, skb1, len, pos);
2497 	else		/* Second chunk has no header, nothing to copy. */
2498 		skb_split_no_header(skb, skb1, len, pos);
2499 }
2500 EXPORT_SYMBOL(skb_split);
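
/*
 * Example (editorial sketch, not in the original file): splitting an skb
 * at an MSS boundary, in the spirit of TCP's use of skb_split().  skb1 is
 * assumed to be a freshly allocated skb with enough linear space for any
 * header bytes that land in the second part.
 */
static int example_split_at_mss(struct sk_buff *skb, struct sk_buff *skb1,
				u32 mss)
{
	if (mss >= skb->len)
		return -EINVAL;		/* nothing to split off */
	skb_split(skb, skb1, mss);	/* skb keeps [0, mss), skb1 the rest */
	return 0;
}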
2501 
2502 /* Shifting from/to a cloned skb is a no-go.
2503  *
2504  * Caller cannot keep skb_shinfo related pointers past calling here!
2505  */
2506 static int skb_prepare_for_shift(struct sk_buff *skb)
2507 {
2508 	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2509 }
2510 
2511 /**
2512  * skb_shift - Shifts paged data partially from skb to another
2513  * @tgt: buffer into which tail data gets added
2514  * @skb: buffer from which the paged data comes from
2515  * @shiftlen: shift up to this many bytes
2516  *
2517  * Attempts to shift up to shiftlen worth of bytes, which may be less than
2518  * the length of the skb, from skb to tgt. Returns the number of bytes
2519  * shifted. It's up to the caller to free skb if everything was shifted.
2520  *
2521  * If @tgt runs out of frags, the whole operation is aborted.
2522  *
2523  * The skb may not include anything but paged data, while tgt is allowed
2524  * to have non-paged data as well.
2525  *
2526  * TODO: full sized shift could be optimized but that would need
2527  * specialized skb free'er to handle frags without up-to-date nr_frags.
2528  */
2529 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2530 {
2531 	int from, to, merge, todo;
2532 	struct skb_frag_struct *fragfrom, *fragto;
2533 
2534 	BUG_ON(shiftlen > skb->len);
2535 	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */
2536 
2537 	todo = shiftlen;
2538 	from = 0;
2539 	to = skb_shinfo(tgt)->nr_frags;
2540 	fragfrom = &skb_shinfo(skb)->frags[from];
2541 
2542 	/* Actual merge is delayed until the point when we know we can
2543 	 * commit all, so that we don't have to undo partial changes
2544 	 */
2545 	if (!to ||
2546 	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
2547 			      fragfrom->page_offset)) {
2548 		merge = -1;
2549 	} else {
2550 		merge = to - 1;
2551 
2552 		todo -= skb_frag_size(fragfrom);
2553 		if (todo < 0) {
2554 			if (skb_prepare_for_shift(skb) ||
2555 			    skb_prepare_for_shift(tgt))
2556 				return 0;
2557 
2558 			/* All previous frag pointers might be stale! */
2559 			fragfrom = &skb_shinfo(skb)->frags[from];
2560 			fragto = &skb_shinfo(tgt)->frags[merge];
2561 
2562 			skb_frag_size_add(fragto, shiftlen);
2563 			skb_frag_size_sub(fragfrom, shiftlen);
2564 			fragfrom->page_offset += shiftlen;
2565 
2566 			goto onlymerged;
2567 		}
2568 
2569 		from++;
2570 	}
2571 
2572 	/* Skip full, not-fitting skb to avoid expensive operations */
2573 	if ((shiftlen == skb->len) &&
2574 	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2575 		return 0;
2576 
2577 	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2578 		return 0;
2579 
2580 	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2581 		if (to == MAX_SKB_FRAGS)
2582 			return 0;
2583 
2584 		fragfrom = &skb_shinfo(skb)->frags[from];
2585 		fragto = &skb_shinfo(tgt)->frags[to];
2586 
2587 		if (todo >= skb_frag_size(fragfrom)) {
2588 			*fragto = *fragfrom;
2589 			todo -= skb_frag_size(fragfrom);
2590 			from++;
2591 			to++;
2592 
2593 		} else {
2594 			__skb_frag_ref(fragfrom);
2595 			fragto->page = fragfrom->page;
2596 			fragto->page_offset = fragfrom->page_offset;
2597 			skb_frag_size_set(fragto, todo);
2598 
2599 			fragfrom->page_offset += todo;
2600 			skb_frag_size_sub(fragfrom, todo);
2601 			todo = 0;
2602 
2603 			to++;
2604 			break;
2605 		}
2606 	}
2607 
2608 	/* Ready to "commit" this state change to tgt */
2609 	skb_shinfo(tgt)->nr_frags = to;
2610 
2611 	if (merge >= 0) {
2612 		fragfrom = &skb_shinfo(skb)->frags[0];
2613 		fragto = &skb_shinfo(tgt)->frags[merge];
2614 
2615 		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
2616 		__skb_frag_unref(fragfrom);
2617 	}
2618 
2619 	/* Reposition in the original skb */
2620 	to = 0;
2621 	while (from < skb_shinfo(skb)->nr_frags)
2622 		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2623 	skb_shinfo(skb)->nr_frags = to;
2624 
2625 	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2626 
2627 onlymerged:
2628 	/* Most likely the tgt won't ever need its checksum anymore, skb on
2629 	 * the other hand might need it if it needs to be resent
2630 	 */
2631 	tgt->ip_summed = CHECKSUM_PARTIAL;
2632 	skb->ip_summed = CHECKSUM_PARTIAL;
2633 
2634 	/* Yak, is it really working this way? Some helper please? */
2635 	skb->len -= shiftlen;
2636 	skb->data_len -= shiftlen;
2637 	skb->truesize -= shiftlen;
2638 	tgt->len += shiftlen;
2639 	tgt->data_len += shiftlen;
2640 	tgt->truesize += shiftlen;
2641 
2642 	return shiftlen;
2643 }
2644 
2645 /**
2646  * skb_prepare_seq_read - Prepare a sequential read of skb data
2647  * @skb: the buffer to read
2648  * @from: lower offset of data to be read
2649  * @to: upper offset of data to be read
2650  * @st: state variable
2651  *
2652  * Initializes the specified state variable. Must be called before
2653  * invoking skb_seq_read() for the first time.
2654  */
2655 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2656 			  unsigned int to, struct skb_seq_state *st)
2657 {
2658 	st->lower_offset = from;
2659 	st->upper_offset = to;
2660 	st->root_skb = st->cur_skb = skb;
2661 	st->frag_idx = st->stepped_offset = 0;
2662 	st->frag_data = NULL;
2663 }
2664 EXPORT_SYMBOL(skb_prepare_seq_read);
2665 
2666 /**
2667  * skb_seq_read - Sequentially read skb data
2668  * @consumed: number of bytes consumed by the caller so far
2669  * @data: destination pointer for data to be returned
2670  * @st: state variable
2671  *
2672  * Reads a block of skb data at @consumed relative to the
2673  * lower offset specified to skb_prepare_seq_read(). Assigns
2674  * the head of the data block to @data and returns the length
2675  * of the block or 0 if the end of the skb data or the upper
2676  * offset has been reached.
2677  *
2678  * The caller is not required to consume all of the data
2679  * returned, i.e. @consumed is typically set to the number
2680  * of bytes already consumed and the next call to
2681  * skb_seq_read() will return the remaining part of the block.
2682  *
2683  * Note 1: The size of each block of data returned can be arbitrary;
2684  *       this limitation is the cost of zerocopy sequential
2685  *       reads of potentially non-linear data.
2686  *
2687  * Note 2: Fragment lists within fragments are not implemented
2688  *       at the moment, state->root_skb could be replaced with
2689  *       a stack for this purpose.
2690  */
2691 unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2692 			  struct skb_seq_state *st)
2693 {
2694 	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2695 	skb_frag_t *frag;
2696 
2697 	if (unlikely(abs_offset >= st->upper_offset)) {
2698 		if (st->frag_data) {
2699 			kunmap_atomic(st->frag_data);
2700 			st->frag_data = NULL;
2701 		}
2702 		return 0;
2703 	}
2704 
2705 next_skb:
2706 	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2707 
2708 	if (abs_offset < block_limit && !st->frag_data) {
2709 		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2710 		return block_limit - abs_offset;
2711 	}
2712 
2713 	if (st->frag_idx == 0 && !st->frag_data)
2714 		st->stepped_offset += skb_headlen(st->cur_skb);
2715 
2716 	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2717 		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2718 		block_limit = skb_frag_size(frag) + st->stepped_offset;
2719 
2720 		if (abs_offset < block_limit) {
2721 			if (!st->frag_data)
2722 				st->frag_data = kmap_atomic(skb_frag_page(frag));
2723 
2724 			*data = (u8 *) st->frag_data + frag->page_offset +
2725 				(abs_offset - st->stepped_offset);
2726 
2727 			return block_limit - abs_offset;
2728 		}
2729 
2730 		if (st->frag_data) {
2731 			kunmap_atomic(st->frag_data);
2732 			st->frag_data = NULL;
2733 		}
2734 
2735 		st->frag_idx++;
2736 		st->stepped_offset += skb_frag_size(frag);
2737 	}
2738 
2739 	if (st->frag_data) {
2740 		kunmap_atomic(st->frag_data);
2741 		st->frag_data = NULL;
2742 	}
2743 
2744 	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2745 		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2746 		st->frag_idx = 0;
2747 		goto next_skb;
2748 	} else if (st->cur_skb->next) {
2749 		st->cur_skb = st->cur_skb->next;
2750 		st->frag_idx = 0;
2751 		goto next_skb;
2752 	}
2753 
2754 	return 0;
2755 }
2756 EXPORT_SYMBOL(skb_seq_read);
2757 
2758 /**
2759  * skb_abort_seq_read - Abort a sequential read of skb data
2760  * @st: state variable
2761  *
2762  * Must be called if the sequential read was aborted before
2763  * skb_seq_read() returned 0.
2764  */
2765 void skb_abort_seq_read(struct skb_seq_state *st)
2766 {
2767 	if (st->frag_data)
2768 		kunmap_atomic(st->frag_data);
2769 }
2770 EXPORT_SYMBOL(skb_abort_seq_read);
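
/*
 * Example (editorial sketch, not in the original file): the canonical
 * prepare/read loop over all skb data.  consume() is a hypothetical
 * callback; since the loop runs until skb_seq_read() returns 0, no
 * skb_abort_seq_read() is required afterwards.
 */
static void example_seq_read_all(struct sk_buff *skb,
				 void (*consume)(const u8 *data,
						 unsigned int len))
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		consume(data, len);
		consumed += len;
	}
}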
2771 
2772 #define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
2773 
2774 static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2775 					  struct ts_config *conf,
2776 					  struct ts_state *state)
2777 {
2778 	return skb_seq_read(offset, text, TS_SKB_CB(state));
2779 }
2780 
2781 static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2782 {
2783 	skb_abort_seq_read(TS_SKB_CB(state));
2784 }
2785 
2786 /**
2787  * skb_find_text - Find a text pattern in skb data
2788  * @skb: the buffer to look in
2789  * @from: search offset
2790  * @to: search limit
2791  * @config: textsearch configuration
2792  * @state: uninitialized textsearch state variable
2793  *
2794  * Finds a pattern in the skb data according to the specified
2795  * textsearch configuration. Use textsearch_next() to retrieve
2796  * subsequent occurrences of the pattern. Returns the offset
2797  * to the first occurrence or UINT_MAX if no match was found.
2798  */
2799 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2800 			   unsigned int to, struct ts_config *config,
2801 			   struct ts_state *state)
2802 {
2803 	unsigned int ret;
2804 
2805 	config->get_next_block = skb_ts_get_next_block;
2806 	config->finish = skb_ts_finish;
2807 
2808 	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2809 
2810 	ret = textsearch_find(config, state);
2811 	return (ret <= to - from ? ret : UINT_MAX);
2812 }
2813 EXPORT_SYMBOL(skb_find_text);
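
/*
 * Example (editorial sketch, not in the original file): searching skb data
 * for a byte pattern.  textsearch_prepare()/textsearch_destroy() come from
 * <linux/textsearch.h>, and "kmp" names one of the in-tree algorithms.
 */
static unsigned int example_find_pattern(struct sk_buff *skb,
					 const char *pattern)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", pattern, strlen(pattern),
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);
	return pos;
}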
2814 
2815 /**
2816  * skb_append_datato_frags - append the user data to a skb
2817  * @sk: sock structure
2818  * @skb: skb structure to be appended with user data.
2819  * @getfrag: callback function to be used for getting the user data
2820  * @from: pointer to user message iov
2821  * @length: length of the iov message
2822  *
2823  * Description: This procedure appends the user data to the fragment part
2824  * of the skb. If any page allocation fails, it returns -ENOMEM.
2825  */
2826 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2827 			int (*getfrag)(void *from, char *to, int offset,
2828 					int len, int odd, struct sk_buff *skb),
2829 			void *from, int length)
2830 {
2831 	int frg_cnt = skb_shinfo(skb)->nr_frags;
2832 	int copy;
2833 	int offset = 0;
2834 	int ret;
2835 	struct page_frag *pfrag = &current->task_frag;
2836 
2837 	do {
2838 		/* Return error if we don't have space for new frag */
2839 		if (frg_cnt >= MAX_SKB_FRAGS)
2840 			return -EMSGSIZE;
2841 
2842 		if (!sk_page_frag_refill(sk, pfrag))
2843 			return -ENOMEM;
2844 
2845 		/* copy the user data to page */
2846 		copy = min_t(int, length, pfrag->size - pfrag->offset);
2847 
2848 		ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
2849 			      offset, copy, 0, skb);
2850 		if (ret < 0)
2851 			return -EFAULT;
2852 
2853 		/* copy was successful so update the size parameters */
2854 		skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
2855 				   copy);
2856 		frg_cnt++;
2857 		pfrag->offset += copy;
2858 		get_page(pfrag->page);
2859 
2860 		skb->truesize += copy;
2861 		atomic_add(copy, &sk->sk_wmem_alloc);
2862 		skb->len += copy;
2863 		skb->data_len += copy;
2864 		offset += copy;
2865 		length -= copy;
2866 
2867 	} while (length > 0);
2868 
2869 	return 0;
2870 }
2871 EXPORT_SYMBOL(skb_append_datato_frags);
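
/*
 * Example (editorial sketch, not in the original file): a trivial
 * getfrag() callback for skb_append_datato_frags() that copies from a
 * kernel buffer.  Real users (e.g. UDP) copy from a user iov and may
 * fold a checksum in at the same time.
 */
static int example_getfrag(void *from, char *to, int offset, int len,
			   int odd, struct sk_buff *skb)
{
	memcpy(to, (char *)from + offset, len);
	return 0;
}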
2872 
2873 /**
2874  *	skb_pull_rcsum - pull skb and update receive checksum
2875  *	@skb: buffer to update
2876  *	@len: length of data pulled
2877  *
2878  *	This function performs an skb_pull on the packet and updates
2879  *	the CHECKSUM_COMPLETE checksum.  It should be used on
2880  *	receive path processing instead of skb_pull unless you know
2881  *	that the checksum difference is zero (e.g., a valid IP header)
2882  *	or you are setting ip_summed to CHECKSUM_NONE.
2883  */
2884 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2885 {
2886 	unsigned char *data = skb->data;
2887 
2888 	BUG_ON(len > skb->len);
2889 	__skb_pull(skb, len);
2890 	skb_postpull_rcsum(skb, data, len);
2891 	return skb->data;
2892 }
2893 EXPORT_SYMBOL_GPL(skb_pull_rcsum);
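
/*
 * Example (editorial sketch, not in the original file): stripping an
 * encapsulation header on the receive path while keeping a
 * CHECKSUM_COMPLETE value consistent, as tunnel drivers do.  hdrlen is
 * the length of the outer header being removed.
 */
static inline void example_strip_encap(struct sk_buff *skb,
				       unsigned int hdrlen)
{
	skb_pull_rcsum(skb, hdrlen);
	skb_reset_network_header(skb);
}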
2894 
2895 /**
2896  *	skb_segment - Perform protocol segmentation on skb.
2897  *	@head_skb: buffer to segment
2898  *	@features: features for the output path (see dev->features)
2899  *
2900  *	This function performs segmentation on the given skb.  It returns
2901  *	a pointer to the first in a list of new skbs for the segments.
2902  *	In case of error it returns ERR_PTR(err).
2903  */
2904 struct sk_buff *skb_segment(struct sk_buff *head_skb,
2905 			    netdev_features_t features)
2906 {
2907 	struct sk_buff *segs = NULL;
2908 	struct sk_buff *tail = NULL;
2909 	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
2910 	skb_frag_t *frag = skb_shinfo(head_skb)->frags;
2911 	unsigned int mss = skb_shinfo(head_skb)->gso_size;
2912 	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
2913 	struct sk_buff *frag_skb = head_skb;
2914 	unsigned int offset = doffset;
2915 	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
2916 	unsigned int headroom;
2917 	unsigned int len;
2918 	__be16 proto;
2919 	bool csum;
2920 	int sg = !!(features & NETIF_F_SG);
2921 	int nfrags = skb_shinfo(head_skb)->nr_frags;
2922 	int err = -ENOMEM;
2923 	int i = 0;
2924 	int pos;
2925 	int dummy;
2926 
2927 	__skb_push(head_skb, doffset);
2928 	proto = skb_network_protocol(head_skb, &dummy);
2929 	if (unlikely(!proto))
2930 		return ERR_PTR(-EINVAL);
2931 
2932 	csum = !head_skb->encap_hdr_csum &&
2933 	    !!can_checksum_protocol(features, proto);
2934 
2935 	headroom = skb_headroom(head_skb);
2936 	pos = skb_headlen(head_skb);
2937 
2938 	do {
2939 		struct sk_buff *nskb;
2940 		skb_frag_t *nskb_frag;
2941 		int hsize;
2942 		int size;
2943 
2944 		len = head_skb->len - offset;
2945 		if (len > mss)
2946 			len = mss;
2947 
2948 		hsize = skb_headlen(head_skb) - offset;
2949 		if (hsize < 0)
2950 			hsize = 0;
2951 		if (hsize > len || !sg)
2952 			hsize = len;
2953 
2954 		if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
2955 		    (skb_headlen(list_skb) == len || sg)) {
2956 			BUG_ON(skb_headlen(list_skb) > len);
2957 
2958 			i = 0;
2959 			nfrags = skb_shinfo(list_skb)->nr_frags;
2960 			frag = skb_shinfo(list_skb)->frags;
2961 			frag_skb = list_skb;
2962 			pos += skb_headlen(list_skb);
2963 
2964 			while (pos < offset + len) {
2965 				BUG_ON(i >= nfrags);
2966 
2967 				size = skb_frag_size(frag);
2968 				if (pos + size > offset + len)
2969 					break;
2970 
2971 				i++;
2972 				pos += size;
2973 				frag++;
2974 			}
2975 
2976 			nskb = skb_clone(list_skb, GFP_ATOMIC);
2977 			list_skb = list_skb->next;
2978 
2979 			if (unlikely(!nskb))
2980 				goto err;
2981 
2982 			if (unlikely(pskb_trim(nskb, len))) {
2983 				kfree_skb(nskb);
2984 				goto err;
2985 			}
2986 
2987 			hsize = skb_end_offset(nskb);
2988 			if (skb_cow_head(nskb, doffset + headroom)) {
2989 				kfree_skb(nskb);
2990 				goto err;
2991 			}
2992 
2993 			nskb->truesize += skb_end_offset(nskb) - hsize;
2994 			skb_release_head_state(nskb);
2995 			__skb_push(nskb, doffset);
2996 		} else {
2997 			nskb = __alloc_skb(hsize + doffset + headroom,
2998 					   GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
2999 					   NUMA_NO_NODE);
3000 
3001 			if (unlikely(!nskb))
3002 				goto err;
3003 
3004 			skb_reserve(nskb, headroom);
3005 			__skb_put(nskb, doffset);
3006 		}
3007 
3008 		if (segs)
3009 			tail->next = nskb;
3010 		else
3011 			segs = nskb;
3012 		tail = nskb;
3013 
3014 		__copy_skb_header(nskb, head_skb);
3015 
3016 		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
3017 		skb_reset_mac_len(nskb);
3018 
3019 		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
3020 						 nskb->data - tnl_hlen,
3021 						 doffset + tnl_hlen);
3022 
3023 		if (nskb->len == len + doffset)
3024 			goto perform_csum_check;
3025 
3026 		if (!sg) {
3027 			nskb->ip_summed = CHECKSUM_NONE;
3028 			nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
3029 							    skb_put(nskb, len),
3030 							    len, 0);
3031 			SKB_GSO_CB(nskb)->csum_start =
3032 			    skb_headroom(nskb) + doffset;
3033 			continue;
3034 		}
3035 
3036 		nskb_frag = skb_shinfo(nskb)->frags;
3037 
3038 		skb_copy_from_linear_data_offset(head_skb, offset,
3039 						 skb_put(nskb, hsize), hsize);
3040 
3041 		skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
3042 			SKBTX_SHARED_FRAG;
3043 
3044 		while (pos < offset + len) {
3045 			if (i >= nfrags) {
3046 				BUG_ON(skb_headlen(list_skb));
3047 
3048 				i = 0;
3049 				nfrags = skb_shinfo(list_skb)->nr_frags;
3050 				frag = skb_shinfo(list_skb)->frags;
3051 				frag_skb = list_skb;
3052 
3053 				BUG_ON(!nfrags);
3054 
3055 				list_skb = list_skb->next;
3056 			}
3057 
3058 			if (unlikely(skb_shinfo(nskb)->nr_frags >=
3059 				     MAX_SKB_FRAGS)) {
3060 				net_warn_ratelimited(
3061 					"skb_segment: too many frags: %u %u\n",
3062 					pos, mss);
3063 				goto err;
3064 			}
3065 
3066 			if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
3067 				goto err;
3068 
3069 			*nskb_frag = *frag;
3070 			__skb_frag_ref(nskb_frag);
3071 			size = skb_frag_size(nskb_frag);
3072 
3073 			if (pos < offset) {
3074 				nskb_frag->page_offset += offset - pos;
3075 				skb_frag_size_sub(nskb_frag, offset - pos);
3076 			}
3077 
3078 			skb_shinfo(nskb)->nr_frags++;
3079 
3080 			if (pos + size <= offset + len) {
3081 				i++;
3082 				frag++;
3083 				pos += size;
3084 			} else {
3085 				skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
3086 				goto skip_fraglist;
3087 			}
3088 
3089 			nskb_frag++;
3090 		}
3091 
3092 skip_fraglist:
3093 		nskb->data_len = len - hsize;
3094 		nskb->len += nskb->data_len;
3095 		nskb->truesize += nskb->data_len;
3096 
3097 perform_csum_check:
3098 		if (!csum) {
3099 			nskb->csum = skb_checksum(nskb, doffset,
3100 						  nskb->len - doffset, 0);
3101 			nskb->ip_summed = CHECKSUM_NONE;
3102 			SKB_GSO_CB(nskb)->csum_start =
3103 			    skb_headroom(nskb) + doffset;
3104 		}
3105 	} while ((offset += len) < head_skb->len);
3106 
3107 	/* Some callers want to get the end of the list.
3108 	 * Put it in segs->prev to avoid walking the list.
3109 	 * (see validate_xmit_skb_list() for example)
3110 	 */
3111 	segs->prev = tail;
3112 	return segs;
3113 
3114 err:
3115 	kfree_skb_list(segs);
3116 	return ERR_PTR(err);
3117 }
3118 EXPORT_SYMBOL_GPL(skb_segment);
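
/*
 * Example (editorial sketch, not in the original file): segmenting a GSO
 * skb and walking the resulting list, roughly as transmit-path callers
 * do.  xmit_one() is a hypothetical per-segment hook and errors are
 * ignored for brevity.
 */
static int example_segment_and_xmit(struct sk_buff *skb,
				    netdev_features_t features,
				    int (*xmit_one)(struct sk_buff *seg))
{
	struct sk_buff *segs, *seg;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	while ((seg = segs) != NULL) {
		segs = seg->next;
		seg->next = NULL;
		xmit_one(seg);
	}
	consume_skb(skb);	/* the original skb is no longer needed */
	return 0;
}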
3119 
3120 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
3121 {
3122 	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
3123 	unsigned int offset = skb_gro_offset(skb);
3124 	unsigned int headlen = skb_headlen(skb);
3125 	struct sk_buff *nskb, *lp, *p = *head;
3126 	unsigned int len = skb_gro_len(skb);
3127 	unsigned int delta_truesize;
3128 	unsigned int headroom;
3129 
3130 	if (unlikely(p->len + len >= 65536))
3131 		return -E2BIG;
3132 
3133 	lp = NAPI_GRO_CB(p)->last;
3134 	pinfo = skb_shinfo(lp);
3135 
3136 	if (headlen <= offset) {
3137 		skb_frag_t *frag;
3138 		skb_frag_t *frag2;
3139 		int i = skbinfo->nr_frags;
3140 		int nr_frags = pinfo->nr_frags + i;
3141 
3142 		if (nr_frags > MAX_SKB_FRAGS)
3143 			goto merge;
3144 
3145 		offset -= headlen;
3146 		pinfo->nr_frags = nr_frags;
3147 		skbinfo->nr_frags = 0;
3148 
3149 		frag = pinfo->frags + nr_frags;
3150 		frag2 = skbinfo->frags + i;
3151 		do {
3152 			*--frag = *--frag2;
3153 		} while (--i);
3154 
3155 		frag->page_offset += offset;
3156 		skb_frag_size_sub(frag, offset);
3157 
3158 		/* all fragments' truesize: remove (head size + sk_buff) */
3159 		delta_truesize = skb->truesize -
3160 				 SKB_TRUESIZE(skb_end_offset(skb));
3161 
3162 		skb->truesize -= skb->data_len;
3163 		skb->len -= skb->data_len;
3164 		skb->data_len = 0;
3165 
3166 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
3167 		goto done;
3168 	} else if (skb->head_frag) {
3169 		int nr_frags = pinfo->nr_frags;
3170 		skb_frag_t *frag = pinfo->frags + nr_frags;
3171 		struct page *page = virt_to_head_page(skb->head);
3172 		unsigned int first_size = headlen - offset;
3173 		unsigned int first_offset;
3174 
3175 		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
3176 			goto merge;
3177 
3178 		first_offset = skb->data -
3179 			       (unsigned char *)page_address(page) +
3180 			       offset;
3181 
3182 		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
3183 
3184 		frag->page.p	  = page;
3185 		frag->page_offset = first_offset;
3186 		skb_frag_size_set(frag, first_size);
3187 
3188 		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
3189 		/* We don't need to clear skbinfo->nr_frags here */
3190 
3191 		delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3192 		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
3193 		goto done;
3194 	}
3195 	/* switch back to head shinfo */
3196 	pinfo = skb_shinfo(p);
3197 
3198 	if (pinfo->frag_list)
3199 		goto merge;
3200 	if (skb_gro_len(p) != pinfo->gso_size)
3201 		return -E2BIG;
3202 
3203 	headroom = skb_headroom(p);
3204 	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
3205 	if (unlikely(!nskb))
3206 		return -ENOMEM;
3207 
3208 	__copy_skb_header(nskb, p);
3209 	nskb->mac_len = p->mac_len;
3210 
3211 	skb_reserve(nskb, headroom);
3212 	__skb_put(nskb, skb_gro_offset(p));
3213 
3214 	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
3215 	skb_set_network_header(nskb, skb_network_offset(p));
3216 	skb_set_transport_header(nskb, skb_transport_offset(p));
3217 
3218 	__skb_pull(p, skb_gro_offset(p));
3219 	memcpy(skb_mac_header(nskb), skb_mac_header(p),
3220 	       p->data - skb_mac_header(p));
3221 
3222 	skb_shinfo(nskb)->frag_list = p;
3223 	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
3224 	pinfo->gso_size = 0;
3225 	__skb_header_release(p);
3226 	NAPI_GRO_CB(nskb)->last = p;
3227 
3228 	nskb->data_len += p->len;
3229 	nskb->truesize += p->truesize;
3230 	nskb->len += p->len;
3231 
3232 	*head = nskb;
3233 	nskb->next = p->next;
3234 	p->next = NULL;
3235 
3236 	p = nskb;
3237 
3238 merge:
3239 	delta_truesize = skb->truesize;
3240 	if (offset > headlen) {
3241 		unsigned int eat = offset - headlen;
3242 
3243 		skbinfo->frags[0].page_offset += eat;
3244 		skb_frag_size_sub(&skbinfo->frags[0], eat);
3245 		skb->data_len -= eat;
3246 		skb->len -= eat;
3247 		offset = headlen;
3248 	}
3249 
3250 	__skb_pull(skb, offset);
3251 
3252 	if (NAPI_GRO_CB(p)->last == p)
3253 		skb_shinfo(p)->frag_list = skb;
3254 	else
3255 		NAPI_GRO_CB(p)->last->next = skb;
3256 	NAPI_GRO_CB(p)->last = skb;
3257 	__skb_header_release(skb);
3258 	lp = p;
3259 
3260 done:
3261 	NAPI_GRO_CB(p)->count++;
3262 	p->data_len += len;
3263 	p->truesize += delta_truesize;
3264 	p->len += len;
3265 	if (lp != p) {
3266 		lp->data_len += len;
3267 		lp->truesize += delta_truesize;
3268 		lp->len += len;
3269 	}
3270 	NAPI_GRO_CB(skb)->same_flow = 1;
3271 	return 0;
3272 }
3273 
3274 void __init skb_init(void)
3275 {
3276 	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
3277 					      sizeof(struct sk_buff),
3278 					      0,
3279 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3280 					      NULL);
3281 	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
3282 						sizeof(struct sk_buff_fclones),
3283 						0,
3284 						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3285 						NULL);
3286 }
3287 
3288 /**
3289  *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
3290  *	@skb: Socket buffer containing the buffers to be mapped
3291  *	@sg: The scatter-gather list to map into
3292  *	@offset: The offset into the buffer's contents to start mapping
3293  *	@len: Length of buffer space to be mapped
3294  *
3295  *	Fill the specified scatter-gather list with mappings/pointers into a
3296  *	region of the buffer space attached to a socket buffer.
3297  */
3298 static int
3299 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3300 {
3301 	int start = skb_headlen(skb);
3302 	int i, copy = start - offset;
3303 	struct sk_buff *frag_iter;
3304 	int elt = 0;
3305 
3306 	if (copy > 0) {
3307 		if (copy > len)
3308 			copy = len;
3309 		sg_set_buf(sg, skb->data + offset, copy);
3310 		elt++;
3311 		if ((len -= copy) == 0)
3312 			return elt;
3313 		offset += copy;
3314 	}
3315 
3316 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3317 		int end;
3318 
3319 		WARN_ON(start > offset + len);
3320 
3321 		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3322 		if ((copy = end - offset) > 0) {
3323 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3324 
3325 			if (copy > len)
3326 				copy = len;
3327 			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
3328 					frag->page_offset+offset-start);
3329 			elt++;
3330 			if (!(len -= copy))
3331 				return elt;
3332 			offset += copy;
3333 		}
3334 		start = end;
3335 	}
3336 
3337 	skb_walk_frags(skb, frag_iter) {
3338 		int end;
3339 
3340 		WARN_ON(start > offset + len);
3341 
3342 		end = start + frag_iter->len;
3343 		if ((copy = end - offset) > 0) {
3344 			if (copy > len)
3345 				copy = len;
3346 			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
3347 					      copy);
3348 			if ((len -= copy) == 0)
3349 				return elt;
3350 			offset += copy;
3351 		}
3352 		start = end;
3353 	}
3354 	BUG_ON(len);
3355 	return elt;
3356 }
3357 
3358 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to
3359  * the given sglist without marking the sg entry holding the last skb data
3360  * as the end. So the caller can manipulate the sg list at will when adding
3361  * new data after the first call, without calling sg_unmark_end to extend it.
3362  *
3363  * Scenario to use skb_to_sgvec_nomark:
3364  * 1. sg_init_table
3365  * 2. skb_to_sgvec_nomark(payload1)
3366  * 3. skb_to_sgvec_nomark(payload2)
3367  *
3368  * This is equivalent to:
3369  * 1. sg_init_table
3370  * 2. skb_to_sgvec(payload1)
3371  * 3. sg_unmark_end
3372  * 4. skb_to_sgvec(payload2)
3373  *
3374  * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
3375  * is preferable.
3376  */
3377 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
3378 			int offset, int len)
3379 {
3380 	return __skb_to_sgvec(skb, sg, offset, len);
3381 }
3382 EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
3383 
3384 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3385 {
3386 	int nsg = __skb_to_sgvec(skb, sg, offset, len);
3387 
3388 	sg_mark_end(&sg[nsg - 1]);
3389 
3390 	return nsg;
3391 }
3392 EXPORT_SYMBOL_GPL(skb_to_sgvec);
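
/*
 * Example (editorial sketch, not in the original file): mapping an entire
 * skb into a caller-provided scatterlist, e.g. to hand the data to the
 * crypto layer.  nents must cover the head, the frags, and any frag list.
 */
static int example_skb_to_sg(struct sk_buff *skb, struct scatterlist *sg,
			     int nents)
{
	sg_init_table(sg, nents);
	return skb_to_sgvec(skb, sg, 0, skb->len); /* entries actually used */
}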
3393 
3394 /**
3395  *	skb_cow_data - Check that a socket buffer's data buffers are writable
3396  *	@skb: The socket buffer to check.
3397  *	@tailbits: Amount of trailing space to be added
3398  *	@trailer: Returned pointer to the skb where the @tailbits space begins
3399  *
3400  *	Make sure that the data buffers attached to a socket buffer are
3401  *	writable. If they are not, private copies are made of the data buffers
3402  *	and the socket buffer is set to use these instead.
3403  *
3404  *	If @tailbits is given, make sure that there is space to write @tailbits
3405  *	bytes of data beyond current end of socket buffer.  @trailer will be
3406  *	set to point to the skb in which this space begins.
3407  *
3408  *	The number of scatterlist elements required to completely map the
3409  *	COW'd and extended socket buffer will be returned.
3410  */
3411 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
3412 {
3413 	int copyflag;
3414 	int elt;
3415 	struct sk_buff *skb1, **skb_p;
3416 
3417 	/* If skb is cloned or its head is paged, reallocate
3418 	 * head pulling out all the pages (pages are considered not writable
3419 	 * at the moment even if they are anonymous).
3420 	 */
3421 	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
3422 	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
3423 		return -ENOMEM;
3424 
3425 	/* Easy case. Most of packets will go this way. */
3426 	if (!skb_has_frag_list(skb)) {
3427 		/* A little trouble: not enough space for the trailer.
3428 		 * This should not happen when the stack is tuned to generate
3429 		 * good frames. On a miss we reallocate and reserve even more
3430 		 * space; 128 bytes is fair. */
3431 
3432 		if (skb_tailroom(skb) < tailbits &&
3433 		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
3434 			return -ENOMEM;
3435 
3436 		/* Voila! */
3437 		*trailer = skb;
3438 		return 1;
3439 	}
3440 
3441 	/* Misery. We are in trouble, going to mince fragments... */
3442 
3443 	elt = 1;
3444 	skb_p = &skb_shinfo(skb)->frag_list;
3445 	copyflag = 0;
3446 
3447 	while ((skb1 = *skb_p) != NULL) {
3448 		int ntail = 0;
3449 
3450 		/* The fragment is partially pulled by someone;
3451 		 * this can happen on input. Copy it and everything
3452 		 * after it. */
3453 
3454 		if (skb_shared(skb1))
3455 			copyflag = 1;
3456 
3457 		/* If the skb is the last, worry about trailer. */
3458 
3459 		if (skb1->next == NULL && tailbits) {
3460 			if (skb_shinfo(skb1)->nr_frags ||
3461 			    skb_has_frag_list(skb1) ||
3462 			    skb_tailroom(skb1) < tailbits)
3463 				ntail = tailbits + 128;
3464 		}
3465 
3466 		if (copyflag ||
3467 		    skb_cloned(skb1) ||
3468 		    ntail ||
3469 		    skb_shinfo(skb1)->nr_frags ||
3470 		    skb_has_frag_list(skb1)) {
3471 			struct sk_buff *skb2;
3472 
3473 			/* Fuck, we are miserable poor guys... */
3474 			if (ntail == 0)
3475 				skb2 = skb_copy(skb1, GFP_ATOMIC);
3476 			else
3477 				skb2 = skb_copy_expand(skb1,
3478 						       skb_headroom(skb1),
3479 						       ntail,
3480 						       GFP_ATOMIC);
3481 			if (unlikely(skb2 == NULL))
3482 				return -ENOMEM;
3483 
3484 			if (skb1->sk)
3485 				skb_set_owner_w(skb2, skb1->sk);
3486 
3487 			/* Looking around. Are we still alive?
3488 			 * OK, link new skb, drop old one */
3489 
3490 			skb2->next = skb1->next;
3491 			*skb_p = skb2;
3492 			kfree_skb(skb1);
3493 			skb1 = skb2;
3494 		}
3495 		elt++;
3496 		*trailer = skb1;
3497 		skb_p = &skb1->next;
3498 	}
3499 
3500 	return elt;
3501 }
3502 EXPORT_SYMBOL_GPL(skb_cow_data);
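
/*
 * Example (editorial sketch, not in the original file): the IPsec-style
 * pattern of making all skb data writable with skb_cow_data() and then
 * mapping it with skb_to_sgvec().  sg is assumed to have room for the
 * number of entries skb_cow_data() reports.
 */
static int example_cow_and_map(struct sk_buff *skb, int tailbits,
			       struct scatterlist *sg)
{
	struct sk_buff *trailer;
	int nsg;

	nsg = skb_cow_data(skb, tailbits, &trailer);
	if (nsg < 0)
		return nsg;

	sg_init_table(sg, nsg);
	return skb_to_sgvec(skb, sg, 0, skb->len);
}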
3503 
3504 static void sock_rmem_free(struct sk_buff *skb)
3505 {
3506 	struct sock *sk = skb->sk;
3507 
3508 	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
3509 }
3510 
3511 /*
3512  * Note: We don't mem-charge error packets (no sk_forward_alloc changes)
3513  */
3514 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3515 {
3516 	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
3517 	    (unsigned int)sk->sk_rcvbuf)
3518 		return -ENOMEM;
3519 
3520 	skb_orphan(skb);
3521 	skb->sk = sk;
3522 	skb->destructor = sock_rmem_free;
3523 	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
3524 
3525 	/* before exiting rcu section, make sure dst is refcounted */
3526 	skb_dst_force(skb);
3527 
3528 	skb_queue_tail(&sk->sk_error_queue, skb);
3529 	if (!sock_flag(sk, SOCK_DEAD))
3530 		sk->sk_data_ready(sk);
3531 	return 0;
3532 }
3533 EXPORT_SYMBOL(sock_queue_err_skb);
3534 
sock_dequeue_err_skb(struct sock * sk)3535 struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
3536 {
3537 	struct sk_buff_head *q = &sk->sk_error_queue;
3538 	struct sk_buff *skb, *skb_next;
3539 	unsigned long flags;
3540 	int err = 0;
3541 
3542 	spin_lock_irqsave(&q->lock, flags);
3543 	skb = __skb_dequeue(q);
3544 	if (skb && (skb_next = skb_peek(q)))
3545 		err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
3546 	spin_unlock_irqrestore(&q->lock, flags);
3547 
3548 	sk->sk_err = err;
3549 	if (err)
3550 		sk->sk_error_report(sk);
3551 
3552 	return skb;
3553 }
3554 EXPORT_SYMBOL(sock_dequeue_err_skb);
3555 
3556 /**
3557  * skb_clone_sk - create clone of skb, and take reference to socket
3558  * @skb: the skb to clone
3559  *
3560  * This function creates a clone of a buffer that holds a reference on
3561  * sk_refcnt.  Buffers created via this function are meant to be
3562  * returned using sock_queue_err_skb, or free via kfree_skb.
3563  *
3564  * When passing buffers allocated with this function to sock_queue_err_skb
3565  * it is necessary to wrap the call with sock_hold/sock_put in order to
3566  * prevent the socket from being released prior to being enqueued on
3567  * the sk_error_queue.
3568  */
skb_clone_sk(struct sk_buff * skb)3569 struct sk_buff *skb_clone_sk(struct sk_buff *skb)
3570 {
3571 	struct sock *sk = skb->sk;
3572 	struct sk_buff *clone;
3573 
3574 	if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt))
3575 		return NULL;
3576 
3577 	clone = skb_clone(skb, GFP_ATOMIC);
3578 	if (!clone) {
3579 		sock_put(sk);
3580 		return NULL;
3581 	}
3582 
3583 	clone->sk = sk;
3584 	clone->destructor = sock_efree;
3585 
3586 	return clone;
3587 }
3588 EXPORT_SYMBOL(skb_clone_sk);
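
/* Example (sketch): return a completion clone via the error queue. As the
 * comment above requires, sock_hold/sock_put bracket the enqueue so that
 * skb_orphan() inside sock_queue_err_skb() cannot drop the last socket
 * reference before the clone is queued.
 *
 *	struct sock *sk = skb->sk;
 *	struct sk_buff *clone = skb_clone_sk(skb);
 *
 *	if (clone) {
 *		sock_hold(sk);
 *		if (sock_queue_err_skb(sk, clone))
 *			kfree_skb(clone);
 *		sock_put(sk);
 *	}
 */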

static void __skb_complete_tx_timestamp(struct sk_buff *skb,
					struct sock *sk,
					int tstype)
{
	struct sock_exterr_skb *serr;
	int err;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
	serr->ee.ee_info = tstype;
	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
		serr->ee.ee_data = skb_shinfo(skb)->tskey;
		if (sk->sk_protocol == IPPROTO_TCP &&
		    sk->sk_type == SOCK_STREAM)
			serr->ee.ee_data -= sk->sk_tskey;
	}

	err = sock_queue_err_skb(sk, skb);

	if (err)
		kfree_skb(skb);
}

void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps)
{
	struct sock *sk = skb->sk;

	/* Take a reference to prevent skb_orphan() from freeing the socket,
	 * but only if the socket refcount is not zero.
	 */
	if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
		*skb_hwtstamps(skb) = *hwtstamps;
		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
		sock_put(sk);
	}
}
EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);

void __skb_tstamp_tx(struct sk_buff *orig_skb,
		     struct skb_shared_hwtstamps *hwtstamps,
		     struct sock *sk, int tstype)
{
	struct sk_buff *skb;

	if (!sk)
		return;

	if (hwtstamps)
		*skb_hwtstamps(orig_skb) = *hwtstamps;
	else
		orig_skb->tstamp = ktime_get_real();

	skb = skb_clone(orig_skb, GFP_ATOMIC);
	if (!skb)
		return;

	__skb_complete_tx_timestamp(skb, sk, tstype);
}
EXPORT_SYMBOL_GPL(__skb_tstamp_tx);

void skb_tstamp_tx(struct sk_buff *orig_skb,
		   struct skb_shared_hwtstamps *hwtstamps)
{
	return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
			       SCM_TSTAMP_SND);
}
EXPORT_SYMBOL_GPL(skb_tstamp_tx);
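
/* Example (hypothetical driver tx-completion path): report a hardware
 * transmit timestamp for a socket that requested one. "hw_ns" is an
 * assumed nanosecond value already converted from the device clock.
 *
 *	struct skb_shared_hwtstamps hwts = {};
 *
 *	hwts.hwtstamp = ns_to_ktime(hw_ns);
 *	skb_tstamp_tx(skb, &hwts);
 *	dev_kfree_skb_any(skb);
 */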

void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
{
	struct sock *sk = skb->sk;
	struct sock_exterr_skb *serr;
	int err = 1;

	skb->wifi_acked_valid = 1;
	skb->wifi_acked = acked;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = ENOMSG;
	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;

	/* Take a reference to prevent skb_orphan() from freeing the socket,
	 * but only if the socket refcount is not zero.
	 */
	if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
		err = sock_queue_err_skb(sk, skb);
		sock_put(sk);
	}
	if (err)
		kfree_skb(skb);
}
EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);


/**
 * skb_partial_csum_set - set up and verify partial csum values for packet
 * @skb: the skb to set
 * @start: the number of bytes after skb->data to start checksumming.
 * @off: the offset from start to place the checksum.
 *
 * For untrusted partially-checksummed packets, we need to make sure the values
 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
 *
 * This function checks and sets those values and skb->ip_summed: if this
 * returns false you should drop the packet.
 */
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
	if (unlikely(start > skb_headlen(skb)) ||
	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
		net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
				     start, off, skb_headlen(skb));
		return false;
	}
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_headroom(skb) + start;
	skb->csum_offset = off;
	skb_set_transport_header(skb, start);
	return true;
}
EXPORT_SYMBOL_GPL(skb_partial_csum_set);
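
/* Example (sketch, virtio-style receive): csum_start/csum_offset arrive in
 * an untrusted device header, so they must be validated before use; on
 * failure the packet is dropped, as the kernel-doc above instructs.
 *
 *	if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 */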

static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
			       unsigned int max)
{
	if (skb_headlen(skb) >= len)
		return 0;

	/* If we need to pull up, pull up to the max so we
	 * won't need to do it again.
	 */
	if (max > skb->len)
		max = skb->len;

	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
		return -ENOMEM;

	if (skb_headlen(skb) < len)
		return -EPROTO;

	return 0;
}

#define MAX_TCP_HDR_LEN (15 * 4)

static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
				      typeof(IPPROTO_IP) proto,
				      unsigned int off)
{
	switch (proto) {
		int err;

	case IPPROTO_TCP:
		err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
					  off + MAX_TCP_HDR_LEN);
		if (!err && !skb_partial_csum_set(skb, off,
						  offsetof(struct tcphdr,
							   check)))
			err = -EPROTO;
		return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;

	case IPPROTO_UDP:
		err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
					  off + sizeof(struct udphdr));
		if (!err && !skb_partial_csum_set(skb, off,
						  offsetof(struct udphdr,
							   check)))
			err = -EPROTO;
		return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
	}

	return ERR_PTR(-EPROTO);
}

/* This value should be large enough to cover a tagged ethernet header plus
 * maximally sized IP and TCP or UDP headers.
 */
#define MAX_IP_HDR_LEN 128

static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
{
	unsigned int off;
	bool fragment;
	__sum16 *csum;
	int err;

	fragment = false;

	err = skb_maybe_pull_tail(skb,
				  sizeof(struct iphdr),
				  MAX_IP_HDR_LEN);
	if (err < 0)
		goto out;

	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
		fragment = true;

	off = ip_hdrlen(skb);

	err = -EPROTO;

	if (fragment)
		goto out;

	csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
	if (IS_ERR(csum))
		return PTR_ERR(csum);

	if (recalculate)
		*csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   skb->len - off,
					   ip_hdr(skb)->protocol, 0);
	err = 0;

out:
	return err;
}

/* This value should be large enough to cover a tagged ethernet header plus
 * an IPv6 header, all options, and a maximal TCP or UDP header.
 */
#define MAX_IPV6_HDR_LEN 256

#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))

static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
{
	int err;
	u8 nexthdr;
	unsigned int off;
	unsigned int len;
	bool fragment;
	bool done;
	__sum16 *csum;

	fragment = false;
	done = false;

	off = sizeof(struct ipv6hdr);

	err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
	if (err < 0)
		goto out;

	nexthdr = ipv6_hdr(skb)->nexthdr;

	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
	while (off <= len && !done) {
		switch (nexthdr) {
		case IPPROTO_DSTOPTS:
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING: {
			struct ipv6_opt_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct ipv6_opt_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			nexthdr = hp->nexthdr;
			off += ipv6_optlen(hp);
			break;
		}
		case IPPROTO_AH: {
			struct ip_auth_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct ip_auth_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
			nexthdr = hp->nexthdr;
			off += ipv6_authlen(hp);
			break;
		}
		case IPPROTO_FRAGMENT: {
			struct frag_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct frag_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct frag_hdr, skb, off);

			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
				fragment = true;

			nexthdr = hp->nexthdr;
			off += sizeof(struct frag_hdr);
			break;
		}
		default:
			done = true;
			break;
		}
	}

	err = -EPROTO;

	if (!done || fragment)
		goto out;

	csum = skb_checksum_setup_ip(skb, nexthdr, off);
	if (IS_ERR(csum))
		return PTR_ERR(csum);

	if (recalculate)
		*csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr,
					 skb->len - off, nexthdr, 0);
	err = 0;

out:
	return err;
}

/**
 * skb_checksum_setup - set up partial checksum offset
 * @skb: the skb to set up
 * @recalculate: if true the pseudo-header checksum will be recalculated
 */
int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
{
	int err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		err = skb_checksum_setup_ipv4(skb, recalculate);
		break;

	case htons(ETH_P_IPV6):
		err = skb_checksum_setup_ipv6(skb, recalculate);
		break;

	default:
		err = -EPROTO;
		break;
	}

	return err;
}
EXPORT_SYMBOL(skb_checksum_setup);
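
/* Example (sketch, backend-style receive path): a frontend handed us a
 * CHECKSUM_PARTIAL packet, so verify its offsets and recompute the
 * pseudo-header checksum before passing it up the stack.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    skb_checksum_setup(skb, true)) {
 *		kfree_skb(skb);
 *		return;
 *	}
 */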

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
			     skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
	if (head_stolen) {
		skb_release_head_state(skb);
		kmem_cache_free(skbuff_head_cache, skb);
	} else {
		__kfree_skb(skb);
	}
}
EXPORT_SYMBOL(kfree_skb_partial);

/**
 * skb_try_coalesce - try to merge skb to prior one
 * @to: prior buffer
 * @from: buffer to add
 * @fragstolen: pointer to boolean
 * @delta_truesize: how much more was allocated than was requested
 */
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize)
{
	int i, delta, len = from->len;

	*fragstolen = false;

	if (skb_cloned(to))
		return false;

	if (len <= skb_tailroom(to)) {
		if (len)
			BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
		*delta_truesize = 0;
		return true;
	}

	if (skb_has_frag_list(to) || skb_has_frag_list(from))
		return false;

	if (skb_headlen(from) != 0) {
		struct page *page;
		unsigned int offset;

		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
			return false;

		if (skb_head_is_locked(from))
			return false;

		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));

		page = virt_to_head_page(from->head);
		offset = from->data - (unsigned char *)page_address(page);

		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
				   page, offset, skb_headlen(from));
		*fragstolen = true;
	} else {
		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
			return false;

		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
	}

	WARN_ON_ONCE(delta < len);

	memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
	       skb_shinfo(from)->frags,
	       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
	skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;

	if (!skb_cloned(from))
		skb_shinfo(from)->nr_frags = 0;

	/* If the skb is not cloned this does nothing,
	 * since we set nr_frags to 0 above.
	 */
	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
		skb_frag_ref(from, i);

	to->truesize += delta;
	to->len += len;
	to->data_len += len;

	*delta_truesize = delta;
	return true;
}
EXPORT_SYMBOL(skb_try_coalesce);
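
/* Example (sketch, receive-queue style caller): merge a new skb into the
 * queue tail and release what is left of it. "tail" is an assumed
 * previously queued skb belonging to the same flow.
 *
 *	bool fragstolen;
 *	int delta;
 *
 *	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
 *		kfree_skb_partial(skb, fragstolen);
 *		// charge "delta" extra truesize to the queue owner
 *	}
 */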

/**
 * skb_scrub_packet - scrub an skb
 *
 * @skb: buffer to clean
 * @xnet: packet is crossing netns
 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
 * skb_scrub_packet can also be used to clean an skb before injecting it into
 * another namespace (@xnet == true). We have to clear all information in the
 * skb that could impact namespace isolation.
 */
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb->ignore_df = 0;
	skb_dst_drop(skb);
	secpath_reset(skb);
	nf_reset(skb);
	nf_reset_trace(skb);

	if (!xnet)
		return;

	ipvs_reset(skb);
	skb_orphan(skb);
	skb->mark = 0;
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);
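
/* Example (sketch, tunnel receive path): scrub before re-injecting,
 * clearing the namespace-sensitive state only when the packet actually
 * changes netns. "in_dev" and "out_dev" are hypothetical devices.
 *
 *	skb_scrub_packet(skb, !net_eq(dev_net(in_dev), dev_net(out_dev)));
 */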

/**
 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_transport_seglen is used to determine the real size of the
 * individual segments, including Layer4 headers (TCP/UDP).
 *
 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
 */
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int thlen = 0;

	if (skb->encapsulation) {
		thlen = skb_inner_transport_header(skb) -
			skb_transport_header(skb);

		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			thlen += inner_tcp_hdrlen(skb);
	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
		thlen = tcp_hdrlen(skb);
	}
	/* UFO sets gso_size to the size of the fragmentation
	 * payload, i.e. the size of the L4 (UDP) header is already
	 * accounted for.
	 */
	return thlen + shinfo->gso_size;
}
EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
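
/* Example (sketch, shaper-style accounting): derive the on-wire size of
 * one segment by adding back the MAC and network headers this helper
 * deliberately excludes.
 *
 *	unsigned int hdr_len = skb_transport_header(skb) -
 *			       skb_mac_header(skb);
 *	unsigned int seg_len = hdr_len + skb_gso_transport_seglen(skb);
 */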

static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0) {
		kfree_skb(skb);
		return NULL;
	}

	memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
		2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	return skb;
}

struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set up, so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = skb_reorder_vlan_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(skb_vlan_untag);
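
/* Example (sketch, rx path): strip an in-band VLAN header before protocol
 * demux; a NULL return means the skb has already been freed.
 *
 *	if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) {
 *		skb = skb_vlan_untag(skb);
 *		if (unlikely(!skb))
 *			return;
 *	}
 */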

/**
 * alloc_skb_with_frags - allocate skb with page frags
 *
 * @header_len: size of linear part
 * @data_len: needed length in frags
 * @max_page_order: max page order desired.
 * @errcode: pointer to error code if any
 * @gfp_mask: allocation mask
 *
 * This can be used to allocate a paged skb, given a maximal order for frags.
 */
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask)
{
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	unsigned long chunk;
	struct sk_buff *skb;
	struct page *page;
	gfp_t gfp_head;
	int i;

	*errcode = -EMSGSIZE;
	/* Note this test could be relaxed if we succeeded in allocating
	 * high-order pages...
	 */
	if (npages > MAX_SKB_FRAGS)
		return NULL;

	gfp_head = gfp_mask;
	if (gfp_head & __GFP_WAIT)
		gfp_head |= __GFP_REPEAT;

	*errcode = -ENOBUFS;
	skb = alloc_skb(header_len, gfp_head);
	if (!skb)
		return NULL;

	skb->truesize += npages << PAGE_SHIFT;

	for (i = 0; npages > 0; i++) {
		int order = max_page_order;

		while (order) {
			if (npages >= 1 << order) {
				page = alloc_pages((gfp_mask & ~__GFP_WAIT) |
						   __GFP_COMP |
						   __GFP_NOWARN |
						   __GFP_NORETRY,
						   order);
				if (page)
					goto fill_page;
				/* Do not retry other high-order allocations */
				order = 1;
				max_page_order = 0;
			}
			order--;
		}
		page = alloc_page(gfp_mask);
		if (!page)
			goto failure;
fill_page:
		chunk = min_t(unsigned long, data_len,
			      PAGE_SIZE << order);
		skb_fill_page_desc(skb, i, page, 0, chunk);
		data_len -= chunk;
		npages -= 1 << order;
	}
	return skb;

failure:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(alloc_skb_with_frags);
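
/* Example (sketch): allocate a mostly paged skb with a small linear
 * header, propagating the reason for failure to the caller. The sizes and
 * the PAGE_ALLOC_COSTLY_ORDER cap are assumptions chosen for illustration.
 *
 *	int errcode;
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb_with_frags(128, 64 * 1024, PAGE_ALLOC_COSTLY_ORDER,
 *				   &errcode, GFP_KERNEL);
 *	if (!skb)
 *		return ERR_PTR(errcode);
 */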