1 /*
2  *	Routines having to do with the 'struct sk_buff' memory handlers.
3  *
4  *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
5  *			Florian La Roche <rzsfl@rz.uni-sb.de>
6  *
7  *	Fixes:
8  *		Alan Cox	:	Fixed the worst of the load
9  *					balancer bugs.
10  *		Dave Platt	:	Interrupt stacking fix.
11  *	Richard Kooijman	:	Timestamp fixes.
12  *		Alan Cox	:	Changed buffer format.
13  *		Alan Cox	:	destructor hook for AF_UNIX etc.
14  *		Linus Torvalds	:	Better skb_clone.
15  *		Alan Cox	:	Added skb_copy.
16  *		Alan Cox	:	Added all the changed routines Linus
17  *					only put in the headers
18  *		Ray VanTassle	:	Fixed --skb->lock in free
19  *		Alan Cox	:	skb_copy copy arp field
20  *		Andi Kleen	:	slabified it.
21  *		Robert Olsson	:	Removed skb_head_pool
22  *
23  *	NOTE:
24  *		The __skb_ routines should be called with interrupts
25  *	disabled, or you better be *real* sure that the operation is atomic
26  *	with respect to whatever list is being frobbed (e.g. via lock_sock()
27  *	or via disabling bottom half handlers, etc).
28  *
29  *	This program is free software; you can redistribute it and/or
30  *	modify it under the terms of the GNU General Public License
31  *	as published by the Free Software Foundation; either version
32  *	2 of the License, or (at your option) any later version.
33  */
34 
35 /*
36  *	The functions in this file will not compile correctly with gcc 2.4.x
37  */
38 
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/kernel.h>
42 #include <linux/mm.h>
43 #include <linux/interrupt.h>
44 #include <linux/in.h>
45 #include <linux/inet.h>
46 #include <linux/slab.h>
47 #include <linux/netdevice.h>
48 #ifdef CONFIG_NET_CLS_ACT
49 #include <net/pkt_sched.h>
50 #endif
51 #include <linux/string.h>
52 #include <linux/skbuff.h>
53 #include <linux/splice.h>
54 #include <linux/cache.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/init.h>
57 #include <linux/scatterlist.h>
58 
59 #include <net/protocol.h>
60 #include <net/dst.h>
61 #include <net/sock.h>
62 #include <net/checksum.h>
63 #include <net/xfrm.h>
64 
65 #include <asm/uaccess.h>
66 #include <asm/system.h>
67 
68 #include "kmap_skb.h"
69 
70 static struct kmem_cache *skbuff_head_cache __read_mostly;
71 static struct kmem_cache *skbuff_fclone_cache __read_mostly;
72 
73 static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
74 				  struct pipe_buffer *buf)
75 {
76 	put_page(buf->page);
77 }
78 
79 static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
80 				struct pipe_buffer *buf)
81 {
82 	get_page(buf->page);
83 }
84 
85 static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
86 			       struct pipe_buffer *buf)
87 {
88 	return 1;
89 }
90 
91 
92 /* Pipe buffer operations for a socket. */
93 static struct pipe_buf_operations sock_pipe_buf_ops = {
94 	.can_merge = 0,
95 	.map = generic_pipe_buf_map,
96 	.unmap = generic_pipe_buf_unmap,
97 	.confirm = generic_pipe_buf_confirm,
98 	.release = sock_pipe_buf_release,
99 	.steal = sock_pipe_buf_steal,
100 	.get = sock_pipe_buf_get,
101 };
102 
103 /*
104  *	Keep out-of-line to prevent kernel bloat.
105  *	__builtin_return_address is not used because it is not always
106  *	reliable.
107  */
108 
109 /**
110  *	skb_over_panic	- 	private function
111  *	@skb: buffer
112  *	@sz: size
113  *	@here: address
114  *
115  *	Out of line support code for skb_put(). Not user callable.
116  */
117 void skb_over_panic(struct sk_buff *skb, int sz, void *here)
118 {
119 	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
120 			  "data:%p tail:%#lx end:%#lx dev:%s\n",
121 	       here, skb->len, sz, skb->head, skb->data,
122 	       (unsigned long)skb->tail, (unsigned long)skb->end,
123 	       skb->dev ? skb->dev->name : "<NULL>");
124 	BUG();
125 }
126 
127 /**
128  *	skb_under_panic	- 	private function
129  *	@skb: buffer
130  *	@sz: size
131  *	@here: address
132  *
133  *	Out of line support code for skb_push(). Not user callable.
134  */
135 
136 void skb_under_panic(struct sk_buff *skb, int sz, void *here)
137 {
138 	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
139 			  "data:%p tail:%#lx end:%#lx dev:%s\n",
140 	       here, skb->len, sz, skb->head, skb->data,
141 	       (unsigned long)skb->tail, (unsigned long)skb->end,
142 	       skb->dev ? skb->dev->name : "<NULL>");
143 	BUG();
144 }
145 
146 /* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
147  *	'private' fields and also do memory statistics to find all the
148  *	[BEEP] leaks.
149  *
150  */
151 
152 /**
153  *	__alloc_skb	-	allocate a network buffer
154  *	@size: size to allocate
155  *	@gfp_mask: allocation mask
156  *	@fclone: allocate from fclone cache instead of head cache
157  *		and allocate a cloned (child) skb
158  *	@node: numa node to allocate memory on
159  *
160  *	Allocate a new &sk_buff. The returned buffer has no headroom and a
161  *	tail room of size bytes. The object has a reference count of one.
162  *	Returns the buffer on success and %NULL on failure.
163  *
164  *	Buffers may only be allocated from interrupts using a @gfp_mask of
165  *	%GFP_ATOMIC.
166  */
167 struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
168 			    int fclone, int node)
169 {
170 	struct kmem_cache *cache;
171 	struct skb_shared_info *shinfo;
172 	struct sk_buff *skb;
173 	u8 *data;
174 
175 	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
176 
177 	/* Get the HEAD */
178 	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
179 	if (!skb)
180 		goto out;
181 
182 	size = SKB_DATA_ALIGN(size);
183 	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
184 			gfp_mask, node);
185 	if (!data)
186 		goto nodata;
187 
188 	/*
189 	 * Only clear those fields we need to clear, not those that we will
190 	 * actually initialise below. Hence, don't put any more fields after
191 	 * the tail pointer in struct sk_buff!
192 	 */
193 	memset(skb, 0, offsetof(struct sk_buff, tail));
194 	skb->truesize = size + sizeof(struct sk_buff);
195 	atomic_set(&skb->users, 1);
196 	skb->head = data;
197 	skb->data = data;
198 	skb_reset_tail_pointer(skb);
199 	skb->end = skb->tail + size;
200 	/* make sure we initialize shinfo sequentially */
201 	shinfo = skb_shinfo(skb);
202 	atomic_set(&shinfo->dataref, 1);
203 	shinfo->nr_frags  = 0;
204 	shinfo->gso_size = 0;
205 	shinfo->gso_segs = 0;
206 	shinfo->gso_type = 0;
207 	shinfo->ip6_frag_id = 0;
208 	shinfo->frag_list = NULL;
209 
210 	if (fclone) {
211 		struct sk_buff *child = skb + 1;
212 		atomic_t *fclone_ref = (atomic_t *) (child + 1);
213 
214 		skb->fclone = SKB_FCLONE_ORIG;
215 		atomic_set(fclone_ref, 1);
216 
217 		child->fclone = SKB_FCLONE_UNAVAILABLE;
218 	}
219 out:
220 	return skb;
221 nodata:
222 	kmem_cache_free(cache, skb);
223 	skb = NULL;
224 	goto out;
225 }
226 
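/*
 * Usage sketch (illustrative only, not part of this file): a typical caller
 * goes through the alloc_skb() wrapper, reserves headroom, then appends
 * payload with skb_put(). "payload" here is a hypothetical source buffer.
 *
 *	struct sk_buff *skb = alloc_skb(128 + NET_IP_ALIGN, GFP_ATOMIC);
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_IP_ALIGN);
 *		memcpy(skb_put(skb, 64), payload, 64);
 *	}
 */
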
227 /**
228  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
229  *	@dev: network device to receive on
230  *	@length: length to allocate
231  *	@gfp_mask: get_free_pages mask, passed to alloc_skb
232  *
233  *	Allocate a new &sk_buff and assign it a usage count of one. The
234  *	buffer has unspecified headroom built in. Users should allocate
235  *	the headroom they think they need without accounting for the
236  *	built in space. The built in space is used for optimisations.
237  *
238  *	%NULL is returned if there is no free memory.
239  */
240 struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
241 		unsigned int length, gfp_t gfp_mask)
242 {
243 	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
244 	struct sk_buff *skb;
245 
246 	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
247 	if (likely(skb)) {
248 		skb_reserve(skb, NET_SKB_PAD);
249 		skb->dev = dev;
250 	}
251 	return skb;
252 }
253 
254 struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask)
255 {
256 	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
257 	struct page *page;
258 
259 	page = alloc_pages_node(node, gfp_mask, 0);
260 	return page;
261 }
262 EXPORT_SYMBOL(__netdev_alloc_page);
263 
264 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
265 		int size)
266 {
267 	skb_fill_page_desc(skb, i, page, off, size);
268 	skb->len += size;
269 	skb->data_len += size;
270 	skb->truesize += size;
271 }
272 EXPORT_SYMBOL(skb_add_rx_frag);
273 
274 /**
275  *	dev_alloc_skb - allocate an skbuff for receiving
276  *	@length: length to allocate
277  *
278  *	Allocate a new &sk_buff and assign it a usage count of one. The
279  *	buffer has unspecified headroom built in. Users should allocate
280  *	the headroom they think they need without accounting for the
281  *	built in space. The built in space is used for optimisations.
282  *
283  *	%NULL is returned if there is no free memory. Although this function
284  *	allocates memory it can be called from an interrupt.
285  */
286 struct sk_buff *dev_alloc_skb(unsigned int length)
287 {
288 	/*
289 	 * There is more code here than it seems:
290 	 * __dev_alloc_skb is an inline
291 	 */
292 	return __dev_alloc_skb(length, GFP_ATOMIC);
293 }
294 EXPORT_SYMBOL(dev_alloc_skb);
295 
296 static void skb_drop_list(struct sk_buff **listp)
297 {
298 	struct sk_buff *list = *listp;
299 
300 	*listp = NULL;
301 
302 	do {
303 		struct sk_buff *this = list;
304 		list = list->next;
305 		kfree_skb(this);
306 	} while (list);
307 }
308 
309 static inline void skb_drop_fraglist(struct sk_buff *skb)
310 {
311 	skb_drop_list(&skb_shinfo(skb)->frag_list);
312 }
313 
314 static void skb_clone_fraglist(struct sk_buff *skb)
315 {
316 	struct sk_buff *list;
317 
318 	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
319 		skb_get(list);
320 }
321 
322 static void skb_release_data(struct sk_buff *skb)
323 {
324 	if (!skb->cloned ||
325 	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
326 			       &skb_shinfo(skb)->dataref)) {
327 		if (skb_shinfo(skb)->nr_frags) {
328 			int i;
329 			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
330 				put_page(skb_shinfo(skb)->frags[i].page);
331 		}
332 
333 		if (skb_shinfo(skb)->frag_list)
334 			skb_drop_fraglist(skb);
335 
336 		kfree(skb->head);
337 	}
338 }
339 
340 /*
341  *	Free an skbuff by memory without cleaning the state.
342  */
343 static void kfree_skbmem(struct sk_buff *skb)
344 {
345 	struct sk_buff *other;
346 	atomic_t *fclone_ref;
347 
348 	switch (skb->fclone) {
349 	case SKB_FCLONE_UNAVAILABLE:
350 		kmem_cache_free(skbuff_head_cache, skb);
351 		break;
352 
353 	case SKB_FCLONE_ORIG:
354 		fclone_ref = (atomic_t *) (skb + 2);
355 		if (atomic_dec_and_test(fclone_ref))
356 			kmem_cache_free(skbuff_fclone_cache, skb);
357 		break;
358 
359 	case SKB_FCLONE_CLONE:
360 		fclone_ref = (atomic_t *) (skb + 1);
361 		other = skb - 1;
362 
363 		/* The clone portion is available for
364 		 * fast-cloning again.
365 		 */
366 		skb->fclone = SKB_FCLONE_UNAVAILABLE;
367 
368 		if (atomic_dec_and_test(fclone_ref))
369 			kmem_cache_free(skbuff_fclone_cache, other);
370 		break;
371 	}
372 }
373 
374 static void skb_release_head_state(struct sk_buff *skb)
375 {
376 	dst_release(skb->dst);
377 #ifdef CONFIG_XFRM
378 	secpath_put(skb->sp);
379 #endif
380 	if (skb->destructor) {
381 		WARN_ON(in_irq());
382 		skb->destructor(skb);
383 	}
384 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
385 	nf_conntrack_put(skb->nfct);
386 	nf_conntrack_put_reasm(skb->nfct_reasm);
387 #endif
388 #ifdef CONFIG_BRIDGE_NETFILTER
389 	nf_bridge_put(skb->nf_bridge);
390 #endif
391 /* XXX: Is this still necessary? - JHS */
392 #ifdef CONFIG_NET_SCHED
393 	skb->tc_index = 0;
394 #ifdef CONFIG_NET_CLS_ACT
395 	skb->tc_verd = 0;
396 #endif
397 #endif
398 }
399 
400 /* Free everything but the sk_buff shell. */
401 static void skb_release_all(struct sk_buff *skb)
402 {
403 	skb_release_head_state(skb);
404 	skb_release_data(skb);
405 }
406 
407 /**
408  *	__kfree_skb - private function
409  *	@skb: buffer
410  *
411  *	Free an sk_buff. Release anything attached to the buffer.
412  *	Clean the state. This is an internal helper function. Users should
413  *	always call kfree_skb.
414  */
415 
416 void __kfree_skb(struct sk_buff *skb)
417 {
418 	skb_release_all(skb);
419 	kfree_skbmem(skb);
420 }
421 
422 /**
423  *	kfree_skb - free an sk_buff
424  *	@skb: buffer to free
425  *
426  *	Drop a reference to the buffer and free it if the usage count has
427  *	hit zero.
428  */
429 void kfree_skb(struct sk_buff *skb)
430 {
431 	if (unlikely(!skb))
432 		return;
433 	if (likely(atomic_read(&skb->users) == 1))
434 		smp_rmb();
435 	else if (likely(!atomic_dec_and_test(&skb->users)))
436 		return;
437 	__kfree_skb(skb);
438 }
439 
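/*
 * Note on kfree_skb(): when the reference count is already 1, the atomic
 * decrement is skipped entirely; the smp_rmb() pairs with the barriers
 * implied by the atomic_dec_and_test() of other CPUs that dropped their
 * references earlier, so all of their writes to the skb are visible
 * before the final free.
 */
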
440 /**
441  *	skb_recycle_check - check if skb can be reused for receive
442  *	@skb: buffer
443  *	@skb_size: minimum receive buffer size
444  *
445  *	Checks that the skb passed in is not shared or cloned, and
446  *	that it is linear and its head portion at least as large as
447  *	skb_size so that it can be recycled as a receive buffer.
448  *	If these conditions are met, this function does any necessary
449  *	reference count dropping and cleans up the skbuff as if it
450  *	just came from __alloc_skb().
451  */
452 int skb_recycle_check(struct sk_buff *skb, int skb_size)
453 {
454 	struct skb_shared_info *shinfo;
455 
456 	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
457 		return 0;
458 
459 	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
460 	if (skb_end_pointer(skb) - skb->head < skb_size)
461 		return 0;
462 
463 	if (skb_shared(skb) || skb_cloned(skb))
464 		return 0;
465 
466 	skb_release_head_state(skb);
467 	shinfo = skb_shinfo(skb);
468 	atomic_set(&shinfo->dataref, 1);
469 	shinfo->nr_frags = 0;
470 	shinfo->gso_size = 0;
471 	shinfo->gso_segs = 0;
472 	shinfo->gso_type = 0;
473 	shinfo->ip6_frag_id = 0;
474 	shinfo->frag_list = NULL;
475 
476 	memset(skb, 0, offsetof(struct sk_buff, tail));
477 	skb->data = skb->head + NET_SKB_PAD;
478 	skb_reset_tail_pointer(skb);
479 
480 	return 1;
481 }
482 EXPORT_SYMBOL(skb_recycle_check);
483 
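/*
 * Usage sketch (illustrative only, not part of this file): a driver with a
 * private receive pool may try to recycle a completed skb instead of
 * freeing it. "priv" and "rx_buf_size" are hypothetical driver fields.
 *
 *	if (skb_recycle_check(skb, rx_buf_size))
 *		__skb_queue_head(&priv->rx_recycle, skb);
 *	else
 *		dev_kfree_skb(skb);
 */
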
484 static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
485 {
486 	new->tstamp		= old->tstamp;
487 	new->dev		= old->dev;
488 	new->transport_header	= old->transport_header;
489 	new->network_header	= old->network_header;
490 	new->mac_header		= old->mac_header;
491 	new->dst		= dst_clone(old->dst);
492 #ifdef CONFIG_XFRM
493 	new->sp			= secpath_get(old->sp);
494 #endif
495 	memcpy(new->cb, old->cb, sizeof(old->cb));
496 	new->csum_start		= old->csum_start;
497 	new->csum_offset	= old->csum_offset;
498 	new->local_df		= old->local_df;
499 	new->pkt_type		= old->pkt_type;
500 	new->ip_summed		= old->ip_summed;
501 	skb_copy_queue_mapping(new, old);
502 	new->priority		= old->priority;
503 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
504 	new->ipvs_property	= old->ipvs_property;
505 #endif
506 	new->protocol		= old->protocol;
507 	new->mark		= old->mark;
508 	__nf_copy(new, old);
509 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
510     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
511 	new->nf_trace		= old->nf_trace;
512 #endif
513 #ifdef CONFIG_NET_SCHED
514 	new->tc_index		= old->tc_index;
515 #ifdef CONFIG_NET_CLS_ACT
516 	new->tc_verd		= old->tc_verd;
517 #endif
518 #endif
519 	new->vlan_tci		= old->vlan_tci;
520 
521 	skb_copy_secmark(new, old);
522 }
523 
524 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
525 {
526 #define C(x) n->x = skb->x
527 
528 	n->next = n->prev = NULL;
529 	n->sk = NULL;
530 	__copy_skb_header(n, skb);
531 
532 	C(len);
533 	C(data_len);
534 	C(mac_len);
535 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
536 	n->cloned = 1;
537 	n->nohdr = 0;
538 	n->destructor = NULL;
539 	C(iif);
540 	C(tail);
541 	C(end);
542 	C(head);
543 	C(data);
544 	C(truesize);
545 #if defined(CONFIG_MAC80211) || defined(CONFIG_MAC80211_MODULE)
546 	C(do_not_encrypt);
547 	C(requeue);
548 #endif
549 	atomic_set(&n->users, 1);
550 
551 	atomic_inc(&(skb_shinfo(skb)->dataref));
552 	skb->cloned = 1;
553 
554 	return n;
555 #undef C
556 }
557 
558 /**
559  *	skb_morph	-	morph one skb into another
560  *	@dst: the skb to receive the contents
561  *	@src: the skb to supply the contents
562  *
563  *	This is identical to skb_clone except that the target skb is
564  *	supplied by the user.
565  *
566  *	The target skb is returned upon exit.
567  */
568 struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
569 {
570 	skb_release_all(dst);
571 	return __skb_clone(dst, src);
572 }
573 EXPORT_SYMBOL_GPL(skb_morph);
574 
575 /**
576  *	skb_clone	-	duplicate an sk_buff
577  *	@skb: buffer to clone
578  *	@gfp_mask: allocation priority
579  *
580  *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
581  *	copies share the same packet data but not structure. The new
582  *	buffer has a reference count of 1. If the allocation fails the
583  *	function returns %NULL otherwise the new buffer is returned.
584  *
585  *	If this function is called from an interrupt, @gfp_mask must be
586  *	%GFP_ATOMIC.
587  */
588 
589 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
590 {
591 	struct sk_buff *n;
592 
593 	n = skb + 1;
594 	if (skb->fclone == SKB_FCLONE_ORIG &&
595 	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
596 		atomic_t *fclone_ref = (atomic_t *) (n + 1);
597 		n->fclone = SKB_FCLONE_CLONE;
598 		atomic_inc(fclone_ref);
599 	} else {
600 		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
601 		if (!n)
602 			return NULL;
603 		n->fclone = SKB_FCLONE_UNAVAILABLE;
604 	}
605 
606 	return __skb_clone(n, skb);
607 }
608 
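/*
 * Usage sketch (illustrative only, not part of this file): cloning lets a
 * protocol hand a packet to the device while keeping its own copy queued,
 * e.g. for retransmission, since both clones share the same packet data.
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (clone)
 *		dev_queue_xmit(clone);
 */
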
609 static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
610 {
611 #ifndef NET_SKBUFF_DATA_USES_OFFSET
612 	/*
613 	 *	Shift between the two data areas in bytes
614 	 */
615 	unsigned long offset = new->data - old->data;
616 #endif
617 
618 	__copy_skb_header(new, old);
619 
620 #ifndef NET_SKBUFF_DATA_USES_OFFSET
621 	/* {transport,network,mac}_header are relative to skb->head */
622 	new->transport_header += offset;
623 	new->network_header   += offset;
624 	new->mac_header	      += offset;
625 #endif
626 	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
627 	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
628 	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
629 }
630 
631 /**
632  *	skb_copy	-	create private copy of an sk_buff
633  *	@skb: buffer to copy
634  *	@gfp_mask: allocation priority
635  *
636  *	Make a copy of both an &sk_buff and its data. This is used when the
637  *	caller wishes to modify the data and needs a private copy of the
638  *	data to alter. Returns %NULL on failure or the pointer to the buffer
639  *	on success. The returned buffer has a reference count of 1.
640  *
641  *	As a by-product this function converts the non-linear &sk_buff to a
642  *	linear one, so that the &sk_buff becomes completely private and the
643  *	caller is allowed to modify all the data of the returned buffer. This
644  *	means that this function is not recommended in circumstances when only
645  *	the header is going to be modified. Use pskb_copy() instead.
646  */
647 
648 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
649 {
650 	int headerlen = skb->data - skb->head;
651 	/*
652 	 *	Allocate the copy buffer
653 	 */
654 	struct sk_buff *n;
655 #ifdef NET_SKBUFF_DATA_USES_OFFSET
656 	n = alloc_skb(skb->end + skb->data_len, gfp_mask);
657 #else
658 	n = alloc_skb(skb->end - skb->head + skb->data_len, gfp_mask);
659 #endif
660 	if (!n)
661 		return NULL;
662 
663 	/* Set the data pointer */
664 	skb_reserve(n, headerlen);
665 	/* Set the tail pointer and length */
666 	skb_put(n, skb->len);
667 
668 	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
669 		BUG();
670 
671 	copy_skb_header(n, skb);
672 	return n;
673 }
674 
675 
676 /**
677  *	pskb_copy	-	create copy of an sk_buff with private head.
678  *	@skb: buffer to copy
679  *	@gfp_mask: allocation priority
680  *
681  *	Make a copy of both an &sk_buff and part of its data, located
682  *	in the header. Fragmented data remains shared. This is used when
683  *	the caller wishes to modify only the header of the &sk_buff and needs
684  *	a private copy of the header to alter. Returns %NULL on failure
685  *	or the pointer to the buffer on success.
686  *	The returned buffer has a reference count of 1.
687  */
688 
689 struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
690 {
691 	/*
692 	 *	Allocate the copy buffer
693 	 */
694 	struct sk_buff *n;
695 #ifdef NET_SKBUFF_DATA_USES_OFFSET
696 	n = alloc_skb(skb->end, gfp_mask);
697 #else
698 	n = alloc_skb(skb->end - skb->head, gfp_mask);
699 #endif
700 	if (!n)
701 		goto out;
702 
703 	/* Set the data pointer */
704 	skb_reserve(n, skb->data - skb->head);
705 	/* Set the tail pointer and length */
706 	skb_put(n, skb_headlen(skb));
707 	/* Copy the bytes */
708 	skb_copy_from_linear_data(skb, n->data, n->len);
709 
710 	n->truesize += skb->data_len;
711 	n->data_len  = skb->data_len;
712 	n->len	     = skb->len;
713 
714 	if (skb_shinfo(skb)->nr_frags) {
715 		int i;
716 
717 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
718 			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
719 			get_page(skb_shinfo(n)->frags[i].page);
720 		}
721 		skb_shinfo(n)->nr_frags = i;
722 	}
723 
724 	if (skb_shinfo(skb)->frag_list) {
725 		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
726 		skb_clone_fraglist(n);
727 	}
728 
729 	copy_skb_header(n, skb);
730 out:
731 	return n;
732 }
733 
734 /**
735  *	pskb_expand_head - reallocate header of &sk_buff
736  *	@skb: buffer to reallocate
737  *	@nhead: room to add at head
738  *	@ntail: room to add at tail
739  *	@gfp_mask: allocation priority
740  *
741  *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
742  *	the header of the skb. The &sk_buff itself is not changed. The &sk_buff
743  *	MUST have a reference count of 1. Returns zero on success or an error
744  *	code if expansion failed. In the latter case, the &sk_buff is not changed.
745  *
746  *	All the pointers pointing into skb header may change and must be
747  *	reloaded after call to this function.
748  */
749 
750 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
751 		     gfp_t gfp_mask)
752 {
753 	int i;
754 	u8 *data;
755 #ifdef NET_SKBUFF_DATA_USES_OFFSET
756 	int size = nhead + skb->end + ntail;
757 #else
758 	int size = nhead + (skb->end - skb->head) + ntail;
759 #endif
760 	long off;
761 
762 	BUG_ON(nhead < 0);
763 
764 	if (skb_shared(skb))
765 		BUG();
766 
767 	size = SKB_DATA_ALIGN(size);
768 
769 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
770 	if (!data)
771 		goto nodata;
772 
773 	/* Copy only real data... and, alas, header. This should be
774 	 * optimized for the cases when header is void. */
775 #ifdef NET_SKBUFF_DATA_USES_OFFSET
776 	memcpy(data + nhead, skb->head, skb->tail);
777 #else
778 	memcpy(data + nhead, skb->head, skb->tail - skb->head);
779 #endif
780 	memcpy(data + size, skb_end_pointer(skb),
781 	       sizeof(struct skb_shared_info));
782 
783 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
784 		get_page(skb_shinfo(skb)->frags[i].page);
785 
786 	if (skb_shinfo(skb)->frag_list)
787 		skb_clone_fraglist(skb);
788 
789 	skb_release_data(skb);
790 
791 	off = (data + nhead) - skb->head;
792 
793 	skb->head     = data;
794 	skb->data    += off;
795 #ifdef NET_SKBUFF_DATA_USES_OFFSET
796 	skb->end      = size;
797 	off           = nhead;
798 #else
799 	skb->end      = skb->head + size;
800 #endif
801 	/* {transport,network,mac}_header and tail are relative to skb->head */
802 	skb->tail	      += off;
803 	skb->transport_header += off;
804 	skb->network_header   += off;
805 	skb->mac_header	      += off;
806 	skb->csum_start       += nhead;
807 	skb->cloned   = 0;
808 	skb->hdr_len  = 0;
809 	skb->nohdr    = 0;
810 	atomic_set(&skb_shinfo(skb)->dataref, 1);
811 	return 0;
812 
813 nodata:
814 	return -ENOMEM;
815 }
816 
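/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * must push a header but may lack headroom expands first, assuming the skb
 * is not shared, and re-derives any cached pointers afterwards since
 * skb->head may have moved. "hdr_len" is a hypothetical header size.
 *
 *	if (skb_headroom(skb) < hdr_len &&
 *	    pskb_expand_head(skb, hdr_len - skb_headroom(skb), 0, GFP_ATOMIC))
 *		goto drop;
 *	skb_push(skb, hdr_len);
 */
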
817 /* Make private copy of skb with writable head and some headroom */
818 
819 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
820 {
821 	struct sk_buff *skb2;
822 	int delta = headroom - skb_headroom(skb);
823 
824 	if (delta <= 0)
825 		skb2 = pskb_copy(skb, GFP_ATOMIC);
826 	else {
827 		skb2 = skb_clone(skb, GFP_ATOMIC);
828 		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
829 					     GFP_ATOMIC)) {
830 			kfree_skb(skb2);
831 			skb2 = NULL;
832 		}
833 	}
834 	return skb2;
835 }
836 
837 
838 /**
839  *	skb_copy_expand	-	copy and expand sk_buff
840  *	@skb: buffer to copy
841  *	@newheadroom: new free bytes at head
842  *	@newtailroom: new free bytes at tail
843  *	@gfp_mask: allocation priority
844  *
845  *	Make a copy of both an &sk_buff and its data and while doing so
846  *	allocate additional space.
847  *
848  *	This is used when the caller wishes to modify the data and needs a
849  *	private copy of the data to alter as well as more space for new fields.
850  *	Returns %NULL on failure or the pointer to the buffer
851  *	on success. The returned buffer has a reference count of 1.
852  *
853  *	You must pass %GFP_ATOMIC as the allocation priority if this function
854  *	is called from an interrupt.
855  */
856 struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
857 				int newheadroom, int newtailroom,
858 				gfp_t gfp_mask)
859 {
860 	/*
861 	 *	Allocate the copy buffer
862 	 */
863 	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
864 				      gfp_mask);
865 	int oldheadroom = skb_headroom(skb);
866 	int head_copy_len, head_copy_off;
867 	int off;
868 
869 	if (!n)
870 		return NULL;
871 
872 	skb_reserve(n, newheadroom);
873 
874 	/* Set the tail pointer and length */
875 	skb_put(n, skb->len);
876 
877 	head_copy_len = oldheadroom;
878 	head_copy_off = 0;
879 	if (newheadroom <= head_copy_len)
880 		head_copy_len = newheadroom;
881 	else
882 		head_copy_off = newheadroom - head_copy_len;
883 
884 	/* Copy the linear header and data. */
885 	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
886 			  skb->len + head_copy_len))
887 		BUG();
888 
889 	copy_skb_header(n, skb);
890 
891 	off                  = newheadroom - oldheadroom;
892 	n->csum_start       += off;
893 #ifdef NET_SKBUFF_DATA_USES_OFFSET
894 	n->transport_header += off;
895 	n->network_header   += off;
896 	n->mac_header	    += off;
897 #endif
898 
899 	return n;
900 }
901 
902 /**
903  *	skb_pad			-	zero pad the tail of an skb
904  *	@skb: buffer to pad
905  *	@pad: space to pad
906  *
907  *	Ensure that a buffer is followed by a padding area that is zero
908  *	filled. Used by network drivers which may DMA or transfer data
909  *	beyond the buffer end onto the wire.
910  *
911  *	May return error in out of memory cases. The skb is freed on error.
912  */
913 
914 int skb_pad(struct sk_buff *skb, int pad)
915 {
916 	int err;
917 	int ntail;
918 
919 	/* If the skbuff is non-linear, tailroom is always zero. */
920 	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
921 		memset(skb->data+skb->len, 0, pad);
922 		return 0;
923 	}
924 
925 	ntail = skb->data_len + pad - (skb->end - skb->tail);
926 	if (likely(skb_cloned(skb) || ntail > 0)) {
927 		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
928 		if (unlikely(err))
929 			goto free_skb;
930 	}
931 
932 	/* FIXME: The use of this function with non-linear skb's really needs
933 	 * to be audited.
934 	 */
935 	err = skb_linearize(skb);
936 	if (unlikely(err))
937 		goto free_skb;
938 
939 	memset(skb->data + skb->len, 0, pad);
940 	return 0;
941 
942 free_skb:
943 	kfree_skb(skb);
944 	return err;
945 }
946 
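/*
 * Usage sketch (illustrative only, not part of this file): drivers commonly
 * pad short ethernet frames up to the minimum length before handing them to
 * hardware. Note that skb_pad() has already freed the skb on error.
 *
 *	if (skb->len < ETH_ZLEN && skb_pad(skb, ETH_ZLEN - skb->len))
 *		return NETDEV_TX_OK;
 */
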
947 /**
948  *	skb_put - add data to a buffer
949  *	@skb: buffer to use
950  *	@len: amount of data to add
951  *
952  *	This function extends the used data area of the buffer. If this would
953  *	exceed the total buffer size the kernel will panic. A pointer to the
954  *	first byte of the extra data is returned.
955  */
956 unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
957 {
958 	unsigned char *tmp = skb_tail_pointer(skb);
959 	SKB_LINEAR_ASSERT(skb);
960 	skb->tail += len;
961 	skb->len  += len;
962 	if (unlikely(skb->tail > skb->end))
963 		skb_over_panic(skb, len, __builtin_return_address(0));
964 	return tmp;
965 }
966 EXPORT_SYMBOL(skb_put);
967 
968 /**
969  *	skb_push - add data to the start of a buffer
970  *	@skb: buffer to use
971  *	@len: amount of data to add
972  *
973  *	This function extends the used data area of the buffer at the buffer
974  *	start. If this would exceed the total buffer headroom the kernel will
975  *	panic. A pointer to the first byte of the extra data is returned.
976  */
977 unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
978 {
979 	skb->data -= len;
980 	skb->len  += len;
981 	if (unlikely(skb->data<skb->head))
982 		skb_under_panic(skb, len, __builtin_return_address(0));
983 	return skb->data;
984 }
985 EXPORT_SYMBOL(skb_push);
986 
987 /**
988  *	skb_pull - remove data from the start of a buffer
989  *	@skb: buffer to use
990  *	@len: amount of data to remove
991  *
992  *	This function removes data from the start of a buffer, returning
993  *	the memory to the headroom. A pointer to the next data in the buffer
994  *	is returned. Once the data has been pulled future pushes will overwrite
995  *	the old data.
996  */
997 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
998 {
999 	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
1000 }
1001 EXPORT_SYMBOL(skb_pull);
1002 
1003 /**
1004  *	skb_trim - remove end from a buffer
1005  *	@skb: buffer to alter
1006  *	@len: new length
1007  *
1008  *	Cut the length of a buffer down by removing data from the tail. If
1009  *	the buffer is already under the length specified it is not modified.
1010  *	The skb must be linear.
1011  */
1012 void skb_trim(struct sk_buff *skb, unsigned int len)
1013 {
1014 	if (skb->len > len)
1015 		__skb_trim(skb, len);
1016 }
1017 EXPORT_SYMBOL(skb_trim);
1018 
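/*
 * Usage sketch (illustrative only, not part of this file): skb_push() and
 * skb_pull() mirror each other across the stack; transmit paths prepend
 * each header in front of the payload, receive paths consume headers as
 * they parse inwards.
 *
 *	skb_push(skb, sizeof(struct ethhdr));	(transmit: prepend header)
 *	skb_pull(skb, sizeof(struct ethhdr));	(receive: strip header)
 */
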
1019 /* Trims skb to length len. It can change skb pointers.
1020  */
1021 
1022 int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1023 {
1024 	struct sk_buff **fragp;
1025 	struct sk_buff *frag;
1026 	int offset = skb_headlen(skb);
1027 	int nfrags = skb_shinfo(skb)->nr_frags;
1028 	int i;
1029 	int err;
1030 
1031 	if (skb_cloned(skb) &&
1032 	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1033 		return err;
1034 
1035 	i = 0;
1036 	if (offset >= len)
1037 		goto drop_pages;
1038 
1039 	for (; i < nfrags; i++) {
1040 		int end = offset + skb_shinfo(skb)->frags[i].size;
1041 
1042 		if (end < len) {
1043 			offset = end;
1044 			continue;
1045 		}
1046 
1047 		skb_shinfo(skb)->frags[i++].size = len - offset;
1048 
1049 drop_pages:
1050 		skb_shinfo(skb)->nr_frags = i;
1051 
1052 		for (; i < nfrags; i++)
1053 			put_page(skb_shinfo(skb)->frags[i].page);
1054 
1055 		if (skb_shinfo(skb)->frag_list)
1056 			skb_drop_fraglist(skb);
1057 		goto done;
1058 	}
1059 
1060 	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1061 	     fragp = &frag->next) {
1062 		int end = offset + frag->len;
1063 
1064 		if (skb_shared(frag)) {
1065 			struct sk_buff *nfrag;
1066 
1067 			nfrag = skb_clone(frag, GFP_ATOMIC);
1068 			if (unlikely(!nfrag))
1069 				return -ENOMEM;
1070 
1071 			nfrag->next = frag->next;
1072 			kfree_skb(frag);
1073 			frag = nfrag;
1074 			*fragp = frag;
1075 		}
1076 
1077 		if (end < len) {
1078 			offset = end;
1079 			continue;
1080 		}
1081 
1082 		if (end > len &&
1083 		    unlikely((err = pskb_trim(frag, len - offset))))
1084 			return err;
1085 
1086 		if (frag->next)
1087 			skb_drop_list(&frag->next);
1088 		break;
1089 	}
1090 
1091 done:
1092 	if (len > skb_headlen(skb)) {
1093 		skb->data_len -= skb->len - len;
1094 		skb->len       = len;
1095 	} else {
1096 		skb->len       = len;
1097 		skb->data_len  = 0;
1098 		skb_set_tail_pointer(skb, len);
1099 	}
1100 
1101 	return 0;
1102 }
1103 
1104 /**
1105  *	__pskb_pull_tail - advance tail of skb header
1106  *	@skb: buffer to reallocate
1107  *	@delta: number of bytes to advance tail
1108  *
1109  *	The function makes sense only on a fragmented &sk_buff;
1110  *	it expands the header, moving its tail forward and copying the
1111  *	necessary data from the fragmented part.
1112  *
1113  *	&sk_buff MUST have reference count of 1.
1114  *
1115  *	Returns %NULL (and the &sk_buff is unchanged) if the pull failed,
1116  *	or the value of the new tail of the skb on success.
1117  *
1118  *	All the pointers pointing into skb header may change and must be
1119  *	reloaded after call to this function.
1120  */
1121 
1122 /* Moves tail of skb head forward, copying data from fragmented part,
1123  * when it is necessary.
1124  * 1. It may fail due to malloc failure.
1125  * 2. It may change skb pointers.
1126  *
1127  * It is pretty complicated. Luckily, it is called only in exceptional cases.
1128  */
1129 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1130 {
1131 	/* If the skb does not have enough free space at the tail, get a new
1132 	 * one plus 128 bytes for future expansions. If we have enough
1133 	 * room at the tail, reallocate without expansion only if skb is cloned.
1134 	 */
1135 	int i, k, eat = (skb->tail + delta) - skb->end;
1136 
1137 	if (eat > 0 || skb_cloned(skb)) {
1138 		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1139 				     GFP_ATOMIC))
1140 			return NULL;
1141 	}
1142 
1143 	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
1144 		BUG();
1145 
1146 	/* Optimization: no fragments, no reason to preestimate
1147 	 * the size of pulled pages. Superb.
1148 	 */
1149 	if (!skb_shinfo(skb)->frag_list)
1150 		goto pull_pages;
1151 
1152 	/* Estimate size of pulled pages. */
1153 	eat = delta;
1154 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1155 		if (skb_shinfo(skb)->frags[i].size >= eat)
1156 			goto pull_pages;
1157 		eat -= skb_shinfo(skb)->frags[i].size;
1158 	}
1159 
1160 	/* If we need to update the frag list, we are in trouble.
1161 	 * Certainly, it is possible to add an offset to the skb data,
1162 	 * but taking into account that pulling is expected to
1163 	 * be a very rare operation, it is worth fighting against
1164 	 * further bloating of the skb head and crucifying ourselves here instead.
1165 	 * Pure masochism, indeed. 8)8)
1166 	 */
1167 	if (eat) {
1168 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1169 		struct sk_buff *clone = NULL;
1170 		struct sk_buff *insp = NULL;
1171 
1172 		do {
1173 			BUG_ON(!list);
1174 
1175 			if (list->len <= eat) {
1176 				/* Eaten as whole. */
1177 				eat -= list->len;
1178 				list = list->next;
1179 				insp = list;
1180 			} else {
1181 				/* Eaten partially. */
1182 
1183 				if (skb_shared(list)) {
1184 					/* Sucks! We need to fork list. :-( */
1185 					clone = skb_clone(list, GFP_ATOMIC);
1186 					if (!clone)
1187 						return NULL;
1188 					insp = list->next;
1189 					list = clone;
1190 				} else {
1191 					/* This may be pulled without
1192 					 * problems. */
1193 					insp = list;
1194 				}
1195 				if (!pskb_pull(list, eat)) {
1196 					if (clone)
1197 						kfree_skb(clone);
1198 					return NULL;
1199 				}
1200 				break;
1201 			}
1202 		} while (eat);
1203 
1204 		/* Free pulled out fragments. */
1205 		while ((list = skb_shinfo(skb)->frag_list) != insp) {
1206 			skb_shinfo(skb)->frag_list = list->next;
1207 			kfree_skb(list);
1208 		}
1209 		/* And insert new clone at head. */
1210 		if (clone) {
1211 			clone->next = list;
1212 			skb_shinfo(skb)->frag_list = clone;
1213 		}
1214 	}
1215 	/* Success! Now we may commit changes to skb data. */
1216 
1217 pull_pages:
1218 	eat = delta;
1219 	k = 0;
1220 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1221 		if (skb_shinfo(skb)->frags[i].size <= eat) {
1222 			put_page(skb_shinfo(skb)->frags[i].page);
1223 			eat -= skb_shinfo(skb)->frags[i].size;
1224 		} else {
1225 			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1226 			if (eat) {
1227 				skb_shinfo(skb)->frags[k].page_offset += eat;
1228 				skb_shinfo(skb)->frags[k].size -= eat;
1229 				eat = 0;
1230 			}
1231 			k++;
1232 		}
1233 	}
1234 	skb_shinfo(skb)->nr_frags = k;
1235 
1236 	skb->tail     += delta;
1237 	skb->data_len -= delta;
1238 
1239 	return skb_tail_pointer(skb);
1240 }
1241 
1242 /* Copy some data bits from skb to kernel buffer. */
1243 
1244 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1245 {
1246 	int i, copy;
1247 	int start = skb_headlen(skb);
1248 
1249 	if (offset > (int)skb->len - len)
1250 		goto fault;
1251 
1252 	/* Copy header. */
1253 	if ((copy = start - offset) > 0) {
1254 		if (copy > len)
1255 			copy = len;
1256 		skb_copy_from_linear_data_offset(skb, offset, to, copy);
1257 		if ((len -= copy) == 0)
1258 			return 0;
1259 		offset += copy;
1260 		to     += copy;
1261 	}
1262 
1263 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1264 		int end;
1265 
1266 		WARN_ON(start > offset + len);
1267 
1268 		end = start + skb_shinfo(skb)->frags[i].size;
1269 		if ((copy = end - offset) > 0) {
1270 			u8 *vaddr;
1271 
1272 			if (copy > len)
1273 				copy = len;
1274 
1275 			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
1276 			memcpy(to,
1277 			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
1278 			       offset - start, copy);
1279 			kunmap_skb_frag(vaddr);
1280 
1281 			if ((len -= copy) == 0)
1282 				return 0;
1283 			offset += copy;
1284 			to     += copy;
1285 		}
1286 		start = end;
1287 	}
1288 
1289 	if (skb_shinfo(skb)->frag_list) {
1290 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1291 
1292 		for (; list; list = list->next) {
1293 			int end;
1294 
1295 			WARN_ON(start > offset + len);
1296 
1297 			end = start + list->len;
1298 			if ((copy = end - offset) > 0) {
1299 				if (copy > len)
1300 					copy = len;
1301 				if (skb_copy_bits(list, offset - start,
1302 						  to, copy))
1303 					goto fault;
1304 				if ((len -= copy) == 0)
1305 					return 0;
1306 				offset += copy;
1307 				to     += copy;
1308 			}
1309 			start = end;
1310 		}
1311 	}
1312 	if (!len)
1313 		return 0;
1314 
1315 fault:
1316 	return -EFAULT;
1317 }
1318 
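/*
 * Usage sketch (illustrative only, not part of this file): skb_copy_bits()
 * is the safe way to read a header that may straddle the linear area and
 * fragments. "offset" is a hypothetical offset to the header of interest.
 *
 *	struct udphdr uh;
 *
 *	if (skb_copy_bits(skb, offset, &uh, sizeof(uh)) < 0)
 *		goto drop;
 */
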
1319 /*
1320  * Callback from splice_to_pipe(), if we need to release some pages
1321  * at the end of the spd in case we errored out while filling the pipe.
1322  */
1323 static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1324 {
1325 	put_page(spd->pages[i]);
1326 }
1327 
1328 static inline struct page *linear_to_page(struct page *page, unsigned int len,
1329 					  unsigned int offset)
1330 {
1331 	struct page *p = alloc_pages(GFP_KERNEL, 0);
1332 
1333 	if (!p)
1334 		return NULL;
1335 	memcpy(page_address(p) + offset, page_address(page) + offset, len);
1336 
1337 	return p;
1338 }
1339 
1340 /*
1341  * Fill page/offset/length into spd, if it can hold more pages.
1342  */
1343 static inline int spd_fill_page(struct splice_pipe_desc *spd, struct page *page,
1344 				unsigned int len, unsigned int offset,
1345 				struct sk_buff *skb, int linear)
1346 {
1347 	if (unlikely(spd->nr_pages == PIPE_BUFFERS))
1348 		return 1;
1349 
1350 	if (linear) {
1351 		page = linear_to_page(page, len, offset);
1352 		if (!page)
1353 			return 1;
1354 	} else
1355 		get_page(page);
1356 
1357 	spd->pages[spd->nr_pages] = page;
1358 	spd->partial[spd->nr_pages].len = len;
1359 	spd->partial[spd->nr_pages].offset = offset;
1360 	spd->nr_pages++;
1361 
1362 	return 0;
1363 }
1364 
1365 static inline void __segment_seek(struct page **page, unsigned int *poff,
1366 				  unsigned int *plen, unsigned int off)
1367 {
1368 	*poff += off;
1369 	*page += *poff / PAGE_SIZE;
1370 	*poff = *poff % PAGE_SIZE;
1371 	*plen -= off;
1372 }
1373 
1374 static inline int __splice_segment(struct page *page, unsigned int poff,
1375 				   unsigned int plen, unsigned int *off,
1376 				   unsigned int *len, struct sk_buff *skb,
1377 				   struct splice_pipe_desc *spd, int linear)
1378 {
1379 	if (!*len)
1380 		return 1;
1381 
1382 	/* skip this segment if already processed */
1383 	if (*off >= plen) {
1384 		*off -= plen;
1385 		return 0;
1386 	}
1387 
1388 	/* ignore any bits we already processed */
1389 	if (*off) {
1390 		__segment_seek(&page, &poff, &plen, *off);
1391 		*off = 0;
1392 	}
1393 
1394 	do {
1395 		unsigned int flen = min(*len, plen);
1396 
1397 		/* the linear region may spread across several pages  */
1398 		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
1399 
1400 		if (spd_fill_page(spd, page, flen, poff, skb, linear))
1401 			return 1;
1402 
1403 		__segment_seek(&page, &poff, &plen, flen);
1404 		*len -= flen;
1405 
1406 	} while (*len && plen);
1407 
1408 	return 0;
1409 }
1410 
1411 /*
1412  * Map linear and fragment data from the skb to spd. It reports failure if the
1413  * pipe is full or if we already spliced the requested length.
1414  */
1415 static int __skb_splice_bits(struct sk_buff *skb, unsigned int *offset,
1416 		      unsigned int *len,
1417 		      struct splice_pipe_desc *spd)
1418 {
1419 	int seg;
1420 
1421 	/*
1422 	 * map the linear part
1423 	 */
1424 	if (__splice_segment(virt_to_page(skb->data),
1425 			     (unsigned long) skb->data & (PAGE_SIZE - 1),
1426 			     skb_headlen(skb),
1427 			     offset, len, skb, spd, 1))
1428 		return 1;
1429 
1430 	/*
1431 	 * then map the fragments
1432 	 */
1433 	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1434 		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1435 
1436 		if (__splice_segment(f->page, f->page_offset, f->size,
1437 				     offset, len, skb, spd, 0))
1438 			return 1;
1439 	}
1440 
1441 	return 0;
1442 }
1443 
1444 /*
1445  * Map data from the skb to a pipe. Should handle the linear part,
1446  * the fragments, and the frag list. It does NOT handle frag lists within
1447  * the frag list, if such a thing exists. We'd probably need to recurse to
1448  * handle that cleanly.
1449  */
1450 int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1451 		    struct pipe_inode_info *pipe, unsigned int tlen,
1452 		    unsigned int flags)
1453 {
1454 	struct partial_page partial[PIPE_BUFFERS];
1455 	struct page *pages[PIPE_BUFFERS];
1456 	struct splice_pipe_desc spd = {
1457 		.pages = pages,
1458 		.partial = partial,
1459 		.flags = flags,
1460 		.ops = &sock_pipe_buf_ops,
1461 		.spd_release = sock_spd_release,
1462 	};
1463 
1464 	/*
1465 	 * __skb_splice_bits() only fails if the output has no room left,
1466 	 * so no point in going over the frag_list for the error case.
1467 	 */
1468 	if (__skb_splice_bits(skb, &offset, &tlen, &spd))
1469 		goto done;
1470 	else if (!tlen)
1471 		goto done;
1472 
1473 	/*
1474 	 * now see if we have a frag_list to map
1475 	 */
1476 	if (skb_shinfo(skb)->frag_list) {
1477 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1478 
1479 		for (; list && tlen; list = list->next) {
1480 			if (__skb_splice_bits(list, &offset, &tlen, &spd))
1481 				break;
1482 		}
1483 	}
1484 
1485 done:
1486 	if (spd.nr_pages) {
1487 		struct sock *sk = skb->sk;
1488 		int ret;
1489 
1490 		/*
1491 		 * Drop the socket lock, otherwise we have reverse
1492 		 * locking dependencies between sk_lock and i_mutex
1493 		 * here as compared to sendfile(). We enter here
1494 		 * with the socket lock held, and splice_to_pipe() will
1495 		 * grab the pipe inode lock. For sendfile() emulation,
1496 		 * we call into ->sendpage() with the i_mutex lock held
1497 		 * and networking will grab the socket lock.
1498 		 */
1499 		release_sock(sk);
1500 		ret = splice_to_pipe(pipe, &spd);
1501 		lock_sock(sk);
1502 		return ret;
1503 	}
1504 
1505 	return 0;
1506 }
1507 
1508 /**
1509  *	skb_store_bits - store bits from kernel buffer to skb
1510  *	@skb: destination buffer
1511  *	@offset: offset in destination
1512  *	@from: source buffer
1513  *	@len: number of bytes to copy
1514  *
1515  *	Copy the specified number of bytes from the source buffer to the
1516  *	destination skb.  This function handles all the messy bits of
1517  *	traversing fragment lists and such.
1518  */
1519 
1520 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1521 {
1522 	int i, copy;
1523 	int start = skb_headlen(skb);
1524 
1525 	if (offset > (int)skb->len - len)
1526 		goto fault;
1527 
1528 	if ((copy = start - offset) > 0) {
1529 		if (copy > len)
1530 			copy = len;
1531 		skb_copy_to_linear_data_offset(skb, offset, from, copy);
1532 		if ((len -= copy) == 0)
1533 			return 0;
1534 		offset += copy;
1535 		from += copy;
1536 	}
1537 
1538 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1539 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1540 		int end;
1541 
1542 		WARN_ON(start > offset + len);
1543 
1544 		end = start + frag->size;
1545 		if ((copy = end - offset) > 0) {
1546 			u8 *vaddr;
1547 
1548 			if (copy > len)
1549 				copy = len;
1550 
1551 			vaddr = kmap_skb_frag(frag);
1552 			memcpy(vaddr + frag->page_offset + offset - start,
1553 			       from, copy);
1554 			kunmap_skb_frag(vaddr);
1555 
1556 			if ((len -= copy) == 0)
1557 				return 0;
1558 			offset += copy;
1559 			from += copy;
1560 		}
1561 		start = end;
1562 	}
1563 
1564 	if (skb_shinfo(skb)->frag_list) {
1565 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1566 
1567 		for (; list; list = list->next) {
1568 			int end;
1569 
1570 			WARN_ON(start > offset + len);
1571 
1572 			end = start + list->len;
1573 			if ((copy = end - offset) > 0) {
1574 				if (copy > len)
1575 					copy = len;
1576 				if (skb_store_bits(list, offset - start,
1577 						   from, copy))
1578 					goto fault;
1579 				if ((len -= copy) == 0)
1580 					return 0;
1581 				offset += copy;
1582 				from += copy;
1583 			}
1584 			start = end;
1585 		}
1586 	}
1587 	if (!len)
1588 		return 0;
1589 
1590 fault:
1591 	return -EFAULT;
1592 }
1593 
1594 EXPORT_SYMBOL(skb_store_bits);
1595 
1596 /* Checksum skb data. */
1597 
1598 __wsum skb_checksum(const struct sk_buff *skb, int offset,
1599 			  int len, __wsum csum)
1600 {
1601 	int start = skb_headlen(skb);
1602 	int i, copy = start - offset;
1603 	int pos = 0;
1604 
1605 	/* Checksum header. */
1606 	if (copy > 0) {
1607 		if (copy > len)
1608 			copy = len;
1609 		csum = csum_partial(skb->data + offset, copy, csum);
1610 		if ((len -= copy) == 0)
1611 			return csum;
1612 		offset += copy;
1613 		pos	= copy;
1614 	}
1615 
1616 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1617 		int end;
1618 
1619 		WARN_ON(start > offset + len);
1620 
1621 		end = start + skb_shinfo(skb)->frags[i].size;
1622 		if ((copy = end - offset) > 0) {
1623 			__wsum csum2;
1624 			u8 *vaddr;
1625 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1626 
1627 			if (copy > len)
1628 				copy = len;
1629 			vaddr = kmap_skb_frag(frag);
1630 			csum2 = csum_partial(vaddr + frag->page_offset +
1631 					     offset - start, copy, 0);
1632 			kunmap_skb_frag(vaddr);
1633 			csum = csum_block_add(csum, csum2, pos);
1634 			if (!(len -= copy))
1635 				return csum;
1636 			offset += copy;
1637 			pos    += copy;
1638 		}
1639 		start = end;
1640 	}
1641 
1642 	if (skb_shinfo(skb)->frag_list) {
1643 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1644 
1645 		for (; list; list = list->next) {
1646 			int end;
1647 
1648 			WARN_ON(start > offset + len);
1649 
1650 			end = start + list->len;
1651 			if ((copy = end - offset) > 0) {
1652 				__wsum csum2;
1653 				if (copy > len)
1654 					copy = len;
1655 				csum2 = skb_checksum(list, offset - start,
1656 						     copy, 0);
1657 				csum = csum_block_add(csum, csum2, pos);
1658 				if ((len -= copy) == 0)
1659 					return csum;
1660 				offset += copy;
1661 				pos    += copy;
1662 			}
1663 			start = end;
1664 		}
1665 	}
1666 	BUG_ON(len);
1667 
1668 	return csum;
1669 }
1670 
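/*
 * Usage sketch (illustrative only, not part of this file): a receiver can
 * validate a packet whose checksum covers the whole skb by folding the
 * 32-bit partial sum; a folded result of zero means the checksum is good.
 *
 *	if (csum_fold(skb_checksum(skb, 0, skb->len, 0)))
 *		goto csum_error;
 */
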
1671 /* Both of above in one bottle. */
1672 
1673 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1674 				    u8 *to, int len, __wsum csum)
1675 {
1676 	int start = skb_headlen(skb);
1677 	int i, copy = start - offset;
1678 	int pos = 0;
1679 
1680 	/* Copy header. */
1681 	if (copy > 0) {
1682 		if (copy > len)
1683 			copy = len;
1684 		csum = csum_partial_copy_nocheck(skb->data + offset, to,
1685 						 copy, csum);
1686 		if ((len -= copy) == 0)
1687 			return csum;
1688 		offset += copy;
1689 		to     += copy;
1690 		pos	= copy;
1691 	}
1692 
1693 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1694 		int end;
1695 
1696 		WARN_ON(start > offset + len);
1697 
1698 		end = start + skb_shinfo(skb)->frags[i].size;
1699 		if ((copy = end - offset) > 0) {
1700 			__wsum csum2;
1701 			u8 *vaddr;
1702 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1703 
1704 			if (copy > len)
1705 				copy = len;
1706 			vaddr = kmap_skb_frag(frag);
1707 			csum2 = csum_partial_copy_nocheck(vaddr +
1708 							  frag->page_offset +
1709 							  offset - start, to,
1710 							  copy, 0);
1711 			kunmap_skb_frag(vaddr);
1712 			csum = csum_block_add(csum, csum2, pos);
1713 			if (!(len -= copy))
1714 				return csum;
1715 			offset += copy;
1716 			to     += copy;
1717 			pos    += copy;
1718 		}
1719 		start = end;
1720 	}
1721 
1722 	if (skb_shinfo(skb)->frag_list) {
1723 		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1724 
1725 		for (; list; list = list->next) {
1726 			__wsum csum2;
1727 			int end;
1728 
1729 			WARN_ON(start > offset + len);
1730 
1731 			end = start + list->len;
1732 			if ((copy = end - offset) > 0) {
1733 				if (copy > len)
1734 					copy = len;
1735 				csum2 = skb_copy_and_csum_bits(list,
1736 							       offset - start,
1737 							       to, copy, 0);
1738 				csum = csum_block_add(csum, csum2, pos);
1739 				if ((len -= copy) == 0)
1740 					return csum;
1741 				offset += copy;
1742 				to     += copy;
1743 				pos    += copy;
1744 			}
1745 			start = end;
1746 		}
1747 	}
1748 	BUG_ON(len);
1749 	return csum;
1750 }
1751 
1752 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
1753 {
1754 	__wsum csum;
1755 	long csstart;
1756 
1757 	if (skb->ip_summed == CHECKSUM_PARTIAL)
1758 		csstart = skb->csum_start - skb_headroom(skb);
1759 	else
1760 		csstart = skb_headlen(skb);
1761 
1762 	BUG_ON(csstart > skb_headlen(skb));
1763 
1764 	skb_copy_from_linear_data(skb, to, csstart);
1765 
1766 	csum = 0;
1767 	if (csstart != skb->len)
1768 		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
1769 					      skb->len - csstart, 0);
1770 
1771 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1772 		long csstuff = csstart + skb->csum_offset;
1773 
1774 		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
1775 	}
1776 }
1777 
1778 /**
1779  *	skb_dequeue - remove from the head of the queue
1780  *	@list: list to dequeue from
1781  *
1782  *	Remove the head of the list. The list lock is taken so the function
1783  *	may be used safely with other locking list functions. The head item is
1784  *	returned or %NULL if the list is empty.
1785  */
1786 
1787 struct sk_buff *skb_dequeue(struct sk_buff_head *list)
1788 {
1789 	unsigned long flags;
1790 	struct sk_buff *result;
1791 
1792 	spin_lock_irqsave(&list->lock, flags);
1793 	result = __skb_dequeue(list);
1794 	spin_unlock_irqrestore(&list->lock, flags);
1795 	return result;
1796 }
1797 
1798 /**
1799  *	skb_dequeue_tail - remove from the tail of the queue
1800  *	@list: list to dequeue from
1801  *
1802  *	Remove the tail of the list. The list lock is taken so the function
1803  *	may be used safely with other locking list functions. The tail item is
1804  *	returned or %NULL if the list is empty.
1805  */
1806 struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
1807 {
1808 	unsigned long flags;
1809 	struct sk_buff *result;
1810 
1811 	spin_lock_irqsave(&list->lock, flags);
1812 	result = __skb_dequeue_tail(list);
1813 	spin_unlock_irqrestore(&list->lock, flags);
1814 	return result;
1815 }
1816 
1817 /**
1818  *	skb_queue_purge - empty a list
1819  *	@list: list to empty
1820  *
1821  *	Delete all buffers on an &sk_buff list. Each buffer is removed from
1822  *	the list and one reference dropped. This function takes the list
1823  *	lock and is atomic with respect to other list locking functions.
1824  */
1825 void skb_queue_purge(struct sk_buff_head *list)
1826 {
1827 	struct sk_buff *skb;
1828 	while ((skb = skb_dequeue(list)) != NULL)
1829 		kfree_skb(skb);
1830 }
1831 
1832 /**
1833  *	skb_queue_head - queue a buffer at the list head
1834  *	@list: list to use
1835  *	@newsk: buffer to queue
1836  *
1837  *	Queue a buffer at the start of the list. This function takes the
1838  *	list lock and can be used safely with other locking &sk_buff
1839  *	functions.
1840  *
1841  *	A buffer cannot be placed on two lists at the same time.
1842  */
1843 void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
1844 {
1845 	unsigned long flags;
1846 
1847 	spin_lock_irqsave(&list->lock, flags);
1848 	__skb_queue_head(list, newsk);
1849 	spin_unlock_irqrestore(&list->lock, flags);
1850 }
1851 
1852 /**
1853  *	skb_queue_tail - queue a buffer at the list tail
1854  *	@list: list to use
1855  *	@newsk: buffer to queue
1856  *
1857  *	Queue a buffer at the tail of the list. This function takes the
1858  *	list lock and can be used safely with other locking &sk_buff
1859  *	functions.
1860  *
1861  *	A buffer cannot be placed on two lists at the same time.
1862  */
skb_queue_tail(struct sk_buff_head * list,struct sk_buff * newsk)1863 void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
1864 {
1865 	unsigned long flags;
1866 
1867 	spin_lock_irqsave(&list->lock, flags);
1868 	__skb_queue_tail(list, newsk);
1869 	spin_unlock_irqrestore(&list->lock, flags);
1870 }
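
/*
 * Illustrative sketch (not part of this file's API): typical
 * producer/consumer use of the locked queue helpers above.  All names
 * here are hypothetical.
 */
static void __maybe_unused example_queue_usage(void)
{
	struct sk_buff_head q;
	struct sk_buff *skb;

	skb_queue_head_init(&q);

	skb = alloc_skb(128, GFP_ATOMIC);	/* producer side */
	if (skb)
		skb_queue_tail(&q, skb);

	while ((skb = skb_dequeue(&q)) != NULL)	/* consumer side */
		kfree_skb(skb);

	skb_queue_purge(&q);			/* drop any leftovers */
}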

/**
 *	skb_unlink	-	remove a buffer from a list
 *	@skb: buffer to remove
 *	@list: list to use
 *
 *	Remove a packet from a list. The list locks are taken and this
 *	function is atomic with respect to other list locked calls.
 *
 *	You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}

/**
 *	skb_append	-	append a buffer
 *	@old: buffer to insert after
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet after a given packet in a list. The list locks are taken
 *	and this function is atomic with respect to other list locked calls.
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}


/**
 *	skb_insert	-	insert a buffer
 *	@old: buffer to insert before
 *	@newsk: buffer to insert
 *	@list: list to use
 *
 *	Place a packet before a given packet in a list. The list locks are
 *	taken and this function is atomic with respect to other list locked
 *	calls.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_insert(newsk, old->prev, old, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
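
/*
 * Illustrative sketch (not part of this file's API): replacing one
 * queued buffer with another using the locked helpers above.  The
 * caller must already know that 'old' sits on 'list'.
 */
static void __maybe_unused example_replace_on_list(struct sk_buff *old,
						   struct sk_buff *newsk,
						   struct sk_buff_head *list)
{
	skb_insert(old, newsk, list);	/* newsk now precedes old */
	skb_unlink(old, list);
	kfree_skb(old);
}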

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff* skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags  = 0;
	skb1->data_len		   = skb->data_len;
	skb1->len		   += skb1->data_len;
	skb->data_len		   = 0;
	skb->len		   = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff* skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len		  = skb1->data_len = skb->len - len;
	skb->len		  = len;
	skb->data_len		  = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_shinfo(skb)->frags[i].size;

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. F.e.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split accurately. That is what we do here.
				 */
				get_page(skb_shinfo(skb)->frags[i].page);
				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
				skb_shinfo(skb1)->frags[0].size -= len - pos;
				skb_shinfo(skb)->frags[i].size	= len - pos;
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
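
/*
 * Illustrative sketch (not part of this file's API): splitting a buffer
 * at a given offset.  skb1 must be freshly allocated; sizing its linear
 * area to skb_headlen(skb) guarantees room for any header bytes that
 * move when the split line falls inside the header.
 */
static struct sk_buff *__maybe_unused example_split_at(struct sk_buff *skb,
						       u32 len)
{
	struct sk_buff *skb1 = alloc_skb(skb_headlen(skb), GFP_ATOMIC);

	if (!skb1)
		return NULL;

	skb_split(skb, skb1, len);	/* skb keeps [0, len), skb1 the rest */
	return skb1;
}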

/* Shifting from/to a cloned skb is a no-go.
 *
 * Caller cannot keep skb_shinfo related pointers past calling here!
 */
static int skb_prepare_for_shift(struct sk_buff *skb)
{
	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}

/**
 * skb_shift - Shifts paged data partially from skb to another
 * @tgt: buffer into which tail data gets added
 * @skb: buffer from which the paged data comes
 * @shiftlen: shift up to this many bytes
 *
 * Attempts to shift up to shiftlen worth of bytes, which may be less than
 * the length of the skb, from skb to tgt. Returns the number of bytes
 * shifted. It is up to the caller to free skb if everything was shifted.
 *
 * If @tgt runs out of frags, the whole operation is aborted.
 *
 * The skb may carry only paged data, while tgt is allowed to contain
 * non-paged data as well.
 *
 * TODO: full sized shift could be optimized but that would need
 * specialized skb free'er to handle frags without up-to-date nr_frags.
 */
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
{
	int from, to, merge, todo;
	struct skb_frag_struct *fragfrom, *fragto;

	BUG_ON(shiftlen > skb->len);
	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */

	todo = shiftlen;
	from = 0;
	to = skb_shinfo(tgt)->nr_frags;
	fragfrom = &skb_shinfo(skb)->frags[from];

	/* Actual merge is delayed until the point when we know we can
	 * commit all, so that we don't have to undo partial changes.
	 */
	if (!to ||
	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
		merge = -1;
	} else {
		merge = to - 1;

		todo -= fragfrom->size;
		if (todo < 0) {
			if (skb_prepare_for_shift(skb) ||
			    skb_prepare_for_shift(tgt))
				return 0;

			/* All previous frag pointers might be stale! */
			fragfrom = &skb_shinfo(skb)->frags[from];
			fragto = &skb_shinfo(tgt)->frags[merge];

			fragto->size += shiftlen;
			fragfrom->size -= shiftlen;
			fragfrom->page_offset += shiftlen;

			goto onlymerged;
		}

		from++;
	}

	/* Skip full, not-fitting skb to avoid expensive operations */
	if ((shiftlen == skb->len) &&
	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
		return 0;

	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
		return 0;

	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
		if (to == MAX_SKB_FRAGS)
			return 0;

		fragfrom = &skb_shinfo(skb)->frags[from];
		fragto = &skb_shinfo(tgt)->frags[to];

		if (todo >= fragfrom->size) {
			*fragto = *fragfrom;
			todo -= fragfrom->size;
			from++;
			to++;

		} else {
			get_page(fragfrom->page);
			fragto->page = fragfrom->page;
			fragto->page_offset = fragfrom->page_offset;
			fragto->size = todo;

			fragfrom->page_offset += todo;
			fragfrom->size -= todo;
			todo = 0;

			to++;
			break;
		}
	}

	/* Ready to "commit" this state change to tgt */
	skb_shinfo(tgt)->nr_frags = to;

	if (merge >= 0) {
		fragfrom = &skb_shinfo(skb)->frags[0];
		fragto = &skb_shinfo(tgt)->frags[merge];

		fragto->size += fragfrom->size;
		put_page(fragfrom->page);
	}

	/* Reposition in the original skb */
	to = 0;
	while (from < skb_shinfo(skb)->nr_frags)
		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
	skb_shinfo(skb)->nr_frags = to;

	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);

onlymerged:
	/* Most likely the tgt won't ever need its checksum anymore, skb on
	 * the other hand might need it if it needs to be resent.
	 */
	tgt->ip_summed = CHECKSUM_PARTIAL;
	skb->ip_summed = CHECKSUM_PARTIAL;

	/* Yak, is it really working this way? Some helper please? */
	skb->len -= shiftlen;
	skb->data_len -= shiftlen;
	skb->truesize -= shiftlen;
	tgt->len += shiftlen;
	tgt->data_len += shiftlen;
	tgt->truesize += shiftlen;

	return shiftlen;
}
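
/*
 * Illustrative sketch (not part of this file's API): coalescing two
 * purely paged buffers, e.g. while collapsing a retransmit queue.  Note
 * that skb_shift() updates skb->len, so the original length is saved
 * before the call.
 */
static void __maybe_unused example_coalesce(struct sk_buff *prev,
					    struct sk_buff *skb)
{
	int len = skb->len;

	/* skb must have no linear data (skb_headlen(skb) == 0). */
	if (skb_shift(prev, skb, len) == len)
		kfree_skb(skb);		/* everything moved into prev */
}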

/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
}

/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at &consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to &data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
 * returned, i.e. &consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
 * Note 1: The size of each block of data returned can be arbitrary,
 *       this limitation is the cost for zerocopy sequential
 *       reads of potentially non-linear data.
 *
 * Note 2: Fragment lists within fragments are not implemented
 *       at the moment, state->root_skb could be replaced with
 *       a stack for this purpose.
 */
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st)
{
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
	skb_frag_t *frag;

	if (unlikely(abs_offset >= st->upper_offset))
		return 0;

next_skb:
	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;

	if (abs_offset < block_limit) {
		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
		return block_limit - abs_offset;
	}

	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
		block_limit = frag->size + st->stepped_offset;

		if (abs_offset < block_limit) {
			if (!st->frag_data)
				st->frag_data = kmap_skb_frag(frag);

			*data = (u8 *) st->frag_data + frag->page_offset +
				(abs_offset - st->stepped_offset);

			return block_limit - abs_offset;
		}

		if (st->frag_data) {
			kunmap_skb_frag(st->frag_data);
			st->frag_data = NULL;
		}

		st->frag_idx++;
		st->stepped_offset += frag->size;
	}

	if (st->frag_data) {
		kunmap_skb_frag(st->frag_data);
		st->frag_data = NULL;
	}

	if (st->root_skb == st->cur_skb &&
	    skb_shinfo(st->root_skb)->frag_list) {
		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
		st->frag_idx = 0;
		goto next_skb;
	} else if (st->cur_skb->next) {
		st->cur_skb = st->cur_skb->next;
		st->frag_idx = 0;
		goto next_skb;
	}

	return 0;
}

/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called if the sequential read is aborted before
 * skb_seq_read() has returned 0.
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_skb_frag(st->frag_data);
}
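
/*
 * Illustrative sketch (not part of this file's API): walking all of an
 * skb's data zerocopy with the sequential-read interface.  Because the
 * loop runs until skb_seq_read() returns 0, no skb_abort_seq_read() is
 * needed here.
 */
static void __maybe_unused example_seq_walk(struct sk_buff *skb)
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0, len;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* ... process 'len' bytes starting at 'data' ... */
		consumed += len;
	}
}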

#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}

/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 * @state: uninitialized textsearch state variable
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config,
			   struct ts_state *state)
{
	unsigned int ret;

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));

	ret = textsearch_find(config, state);
	return (ret <= to - from ? ret : UINT_MAX);
}
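
/*
 * Illustrative sketch (not part of this file's API): searching an skb
 * for a fixed pattern.  The "kmp" algorithm name and the pattern are
 * hypothetical; any algorithm registered with the textsearch
 * infrastructure works the same way.
 */
static unsigned int __maybe_unused example_find_pattern(struct sk_buff *skb)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "HTTP", 4, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf, &state);
	textsearch_destroy(conf);
	return pos;	/* offset of first match, or UINT_MAX */
}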

/**
 * skb_append_datato_frags - append the user data to a skb
 * @sk: sock structure
 * @skb: skb structure to be appended with user data.
 * @getfrag: callback function to be used for getting the user data
 * @from: pointer to user message iov
 * @length: length of the iov message
 *
 * Description: This procedure appends the user data in the fragment part
 * of the skb. If any page allocation fails, this procedure returns -ENOMEM.
 */
int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int (*getfrag)(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			void *from, int length)
{
	int frg_cnt = 0;
	skb_frag_t *frag = NULL;
	struct page *page = NULL;
	int copy, left;
	int offset = 0;
	int ret;

	do {
		/* Return error if we don't have space for new frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		if (frg_cnt >= MAX_SKB_FRAGS)
			return -EFAULT;

		/* allocate a new page for next frag */
		page = alloc_pages(sk->sk_allocation, 0);

		/* If alloc_page fails just return failure and caller will
		 * free previous allocated pages by doing kfree_skb()
		 */
		if (page == NULL)
			return -ENOMEM;

		/* initialize the next frag */
		sk->sk_sndmsg_page = page;
		sk->sk_sndmsg_off = 0;
		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
		skb->truesize += PAGE_SIZE;
		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);

		/* get the new initialized frag */
		frg_cnt = skb_shinfo(skb)->nr_frags;
		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];

		/* copy the user data to page */
		left = PAGE_SIZE - frag->page_offset;
		copy = (length > left) ? left : length;

		ret = getfrag(from, (page_address(frag->page) +
			    frag->page_offset + frag->size),
			    offset, copy, 0, skb);
		if (ret < 0)
			return -EFAULT;

		/* copy was successful so update the size parameters */
		sk->sk_sndmsg_off += copy;
		frag->size += copy;
		skb->len += copy;
		skb->data_len += copy;
		offset += copy;
		length -= copy;

	} while (length > 0);

	return 0;
}
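
/*
 * Illustrative sketch (not part of this file's API): a minimal @getfrag
 * callback for skb_append_datato_frags() where 'from' is a plain kernel
 * buffer rather than a user iovec.  Real callers, such as the UDP path,
 * use checksumming copy helpers here instead of memcpy().
 */
static int __maybe_unused example_getfrag(void *from, char *to, int offset,
					  int len, int odd, struct sk_buff *skb)
{
	memcpy(to, (char *)from + offset, len);
	return 0;
}

static int __maybe_unused example_append(struct sock *sk, struct sk_buff *skb,
					 void *buf, int len)
{
	return skb_append_datato_frags(sk, skb, example_getfrag, buf, len);
}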

/**
 *	skb_pull_rcsum - pull skb and update receive checksum
 *	@skb: buffer to update
 *	@len: length of data pulled
 *
 *	This function performs an skb_pull on the packet and updates
 *	the CHECKSUM_COMPLETE checksum.  It should be used in
 *	receive path processing instead of skb_pull unless you know
 *	that the checksum difference is zero (e.g., a valid IP header)
 *	or you are setting ip_summed to CHECKSUM_NONE.
 */
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	BUG_ON(len > skb->len);
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	skb_postpull_rcsum(skb, skb->data, len);
	return skb->data += len;
}

EXPORT_SYMBOL_GPL(skb_pull_rcsum);
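
/*
 * Illustrative sketch (not part of this file's API): stripping an
 * encapsulation header on the receive path.  Using skb_pull_rcsum()
 * instead of skb_pull() keeps a CHECKSUM_COMPLETE skb->csum coherent.
 * The header length is hypothetical.
 */
static void __maybe_unused example_strip_header(struct sk_buff *skb,
						unsigned int hdrlen)
{
	if (pskb_may_pull(skb, hdrlen))
		skb_pull_rcsum(skb, hdrlen);
}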

/**
 *	skb_segment - Perform protocol segmentation on skb.
 *	@skb: buffer to segment
 *	@features: features for the output path (see dev->features)
 *
 *	This function performs segmentation on the given skb.  It returns
 *	a pointer to the first in a list of new skbs for the segments.
 *	In case of error it returns ERR_PTR(err).
 */
struct sk_buff *skb_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = NULL;
	struct sk_buff *tail = NULL;
	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int doffset = skb->data - skb_mac_header(skb);
	unsigned int offset = doffset;
	unsigned int headroom;
	unsigned int len;
	int sg = features & NETIF_F_SG;
	int nfrags = skb_shinfo(skb)->nr_frags;
	int err = -ENOMEM;
	int i = 0;
	int pos;

	__skb_push(skb, doffset);
	headroom = skb_headroom(skb);
	pos = skb_headlen(skb);

	do {
		struct sk_buff *nskb;
		skb_frag_t *frag;
		int hsize;
		int size;

		len = skb->len - offset;
		if (len > mss)
			len = mss;

		hsize = skb_headlen(skb) - offset;
		if (hsize < 0)
			hsize = 0;
		if (hsize > len || !sg)
			hsize = len;

		if (!hsize && i >= nfrags) {
			BUG_ON(fskb->len != len);

			pos += len;
			nskb = skb_clone(fskb, GFP_ATOMIC);
			fskb = fskb->next;

			if (unlikely(!nskb))
				goto err;

			hsize = skb_end_pointer(nskb) - nskb->head;
			if (skb_cow_head(nskb, doffset + headroom)) {
				kfree_skb(nskb);
				goto err;
			}

			nskb->truesize += skb_end_pointer(nskb) - nskb->head -
					  hsize;
			skb_release_head_state(nskb);
			__skb_push(nskb, doffset);
		} else {
			nskb = alloc_skb(hsize + doffset + headroom,
					 GFP_ATOMIC);

			if (unlikely(!nskb))
				goto err;

			skb_reserve(nskb, headroom);
			__skb_put(nskb, doffset);
		}

		if (segs)
			tail->next = nskb;
		else
			segs = nskb;
		tail = nskb;

		__copy_skb_header(nskb, skb);
		nskb->mac_len = skb->mac_len;

		skb_reset_mac_header(nskb);
		skb_set_network_header(nskb, skb->mac_len);
		nskb->transport_header = (nskb->network_header +
					  skb_network_header_len(skb));
		skb_copy_from_linear_data(skb, nskb->data, doffset);

		if (pos >= offset + len)
			continue;

		if (!sg) {
			nskb->ip_summed = CHECKSUM_NONE;
			nskb->csum = skb_copy_and_csum_bits(skb, offset,
							    skb_put(nskb, len),
							    len, 0);
			continue;
		}

		frag = skb_shinfo(nskb)->frags;

		skb_copy_from_linear_data_offset(skb, offset,
						 skb_put(nskb, hsize), hsize);

		while (pos < offset + len && i < nfrags) {
			*frag = skb_shinfo(skb)->frags[i];
			get_page(frag->page);
			size = frag->size;

			if (pos < offset) {
				frag->page_offset += offset - pos;
				frag->size -= offset - pos;
			}

			skb_shinfo(nskb)->nr_frags++;

			if (pos + size <= offset + len) {
				i++;
				pos += size;
			} else {
				frag->size -= pos + size - (offset + len);
				goto skip_fraglist;
			}

			frag++;
		}

		if (pos < offset + len) {
			struct sk_buff *fskb2 = fskb;

			BUG_ON(pos + fskb->len != offset + len);

			pos += fskb->len;
			fskb = fskb->next;

			if (fskb2->next) {
				fskb2 = skb_clone(fskb2, GFP_ATOMIC);
				if (!fskb2)
					goto err;
			} else
				skb_get(fskb2);

			BUG_ON(skb_shinfo(nskb)->frag_list);
			skb_shinfo(nskb)->frag_list = fskb2;
		}

skip_fraglist:
		nskb->data_len = len - hsize;
		nskb->len += nskb->data_len;
		nskb->truesize += nskb->data_len;
	} while ((offset += len) < skb->len);

	return segs;

err:
	while ((skb = segs)) {
		segs = skb->next;
		kfree_skb(skb);
	}
	return ERR_PTR(err);
}

EXPORT_SYMBOL_GPL(skb_segment);
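
/*
 * Illustrative sketch (not part of this file's API): consuming the
 * segment list produced by skb_segment().  In practice this is reached
 * through the GSO machinery (e.g. skb_gso_segment()), which has already
 * pushed the MAC header; the transmit step here is elided.
 */
static int __maybe_unused example_segment_and_send(struct sk_buff *skb,
						   int features)
{
	struct sk_buff *segs = skb_segment(skb, features);

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	while (segs) {
		struct sk_buff *nskb = segs;

		segs = nskb->next;
		nskb->next = NULL;
		/* ... hand nskb to the driver; consumed here instead ... */
		kfree_skb(nskb);
	}
	return 0;
}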

int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff *p = *head;
	struct sk_buff *nskb;
	unsigned int headroom;
	unsigned int hlen = p->data - skb_mac_header(p);
	unsigned int len = skb->len;

	if (hlen + p->len + len >= 65536)
		return -E2BIG;

	if (skb_shinfo(p)->frag_list)
		goto merge;
	else if (!skb_headlen(p) && !skb_headlen(skb) &&
		 skb_shinfo(p)->nr_frags + skb_shinfo(skb)->nr_frags <
		 MAX_SKB_FRAGS) {
		memcpy(skb_shinfo(p)->frags + skb_shinfo(p)->nr_frags,
		       skb_shinfo(skb)->frags,
		       skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));

		skb_shinfo(p)->nr_frags += skb_shinfo(skb)->nr_frags;
		skb_shinfo(skb)->nr_frags = 0;

		skb->truesize -= skb->data_len;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = 1;
		goto done;
	}

	headroom = skb_headroom(p);
	nskb = netdev_alloc_skb(p->dev, headroom);
	if (unlikely(!nskb))
		return -ENOMEM;

	__copy_skb_header(nskb, p);
	nskb->mac_len = p->mac_len;

	skb_reserve(nskb, headroom);

	skb_set_mac_header(nskb, -hlen);
	skb_set_network_header(nskb, skb_network_offset(p));
	skb_set_transport_header(nskb, skb_transport_offset(p));

	memcpy(skb_mac_header(nskb), skb_mac_header(p), hlen);

	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
	skb_shinfo(nskb)->frag_list = p;
	skb_shinfo(nskb)->gso_size = skb_shinfo(p)->gso_size;
	skb_header_release(p);
	nskb->prev = p;

	nskb->data_len += p->len;
	nskb->truesize += p->len;
	nskb->len += p->len;

	*head = nskb;
	nskb->next = p->next;
	p->next = NULL;

	p = nskb;

merge:
	p->prev->next = skb;
	p->prev = skb;
	skb_header_release(skb);

done:
	NAPI_GRO_CB(p)->count++;
	p->data_len += len;
	p->truesize += len;
	p->len += len;

	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}
EXPORT_SYMBOL_GPL(skb_gro_receive);

void __init skb_init(void)
{
	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
					      sizeof(struct sk_buff),
					      0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					      NULL);
	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
						(2*sizeof(struct sk_buff)) +
						sizeof(atomic_t),
						0,
						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
						NULL);
}

/**
 *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
 *	@skb: Socket buffer containing the buffers to be mapped
 *	@sg: The scatter-gather list to map into
 *	@offset: The offset into the buffer's contents to start mapping
 *	@len: Length of buffer space to be mapped
 *
 *	Fill the specified scatter-gather list with mappings/pointers into a
 *	region of the buffer space attached to a socket buffer.
 */
static int
__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	int elt = 0;

	if (copy > 0) {
		if (copy > len)
			copy = len;
		sg_set_buf(sg, skb->data + offset, copy);
		elt++;
		if ((len -= copy) == 0)
			return elt;
		offset += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_shinfo(skb)->frags[i].size;
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

			if (copy > len)
				copy = len;
			sg_set_page(&sg[elt], frag->page, copy,
					frag->page_offset+offset-start);
			elt++;
			if (!(len -= copy))
				return elt;
			offset += copy;
		}
		start = end;
	}

	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;

		for (; list; list = list->next) {
			int end;

			WARN_ON(start > offset + len);

			end = start + list->len;
			if ((copy = end - offset) > 0) {
				if (copy > len)
					copy = len;
				elt += __skb_to_sgvec(list, sg+elt, offset - start,
						      copy);
				if ((len -= copy) == 0)
					return elt;
				offset += copy;
			}
			start = end;
		}
	}
	BUG_ON(len);
	return elt;
}

int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
	int nsg = __skb_to_sgvec(skb, sg, offset, len);

	sg_mark_end(&sg[nsg - 1]);

	return nsg;
}
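
/*
 * Illustrative sketch (not part of this file's API): mapping an entire
 * skb into a caller-provided scatterlist, e.g. before handing it to a
 * DMA or crypto engine.  'nents' must be large enough for the head, all
 * page fragments and any fragment list; skb_cow_data() below reports a
 * suitable count.
 */
static int __maybe_unused example_map_to_sg(struct sk_buff *skb,
					    struct scatterlist *sg, int nents)
{
	sg_init_table(sg, nents);
	return skb_to_sgvec(skb, sg, 0, skb->len);
}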

/**
 *	skb_cow_data - Check that a socket buffer's data buffers are writable
 *	@skb: The socket buffer to check.
 *	@tailbits: Amount of trailing space to be added
 *	@trailer: Returned pointer to the skb where the @tailbits space begins
 *
 *	Make sure that the data buffers attached to a socket buffer are
 *	writable. If they are not, private copies are made of the data buffers
 *	and the socket buffer is set to use these instead.
 *
 *	If @tailbits is given, make sure that there is space to write @tailbits
 *	bytes of data beyond current end of socket buffer.  @trailer will be
 *	set to point to the skb in which this space begins.
 *
 *	The number of scatterlist elements required to completely map the
 *	COW'd and extended socket buffer will be returned.
 */
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames. OK, on miss we reallocate and reserve even more
		 * space, 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery. We are in trouble, going to mince fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* We are miserable poor guys... */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
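
/*
 * Illustrative sketch (not part of this file's API): the usual
 * IPsec-style pattern of making an skb writable, reserving trailer
 * space and then mapping it for a transform.  The scatterlist is a
 * hypothetical caller-owned array sized from skb_cow_data()'s return
 * value.
 */
static int __maybe_unused example_cow_and_map(struct sk_buff *skb,
					      int tailbits,
					      struct scatterlist *sg)
{
	struct sk_buff *trailer;
	int nsg = skb_cow_data(skb, tailbits, &trailer);

	if (nsg < 0)
		return nsg;

	sg_init_table(sg, nsg);
	skb_to_sgvec(skb, sg, 0, skb->len);
	/* tailbits bytes may now be written after 'trailer's data. */
	return nsg;
}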

/**
 * skb_partial_csum_set - set up and verify partial csum values for packet
 * @skb: the skb to set
 * @start: the number of bytes after skb->data at which to start checksumming.
 * @off: the offset from start at which to place the checksum.
 *
 * For untrusted partially-checksummed packets, we need to make sure the values
 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
 *
 * This function checks and sets those values and skb->ip_summed: if this
 * returns false you should drop the packet.
 */
bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
{
	if (unlikely(start > skb->len - 2) ||
	    unlikely((int)start + off > skb->len - 2)) {
		if (net_ratelimit())
			printk(KERN_WARNING
			       "bad partial csum: csum=%u/%u len=%u\n",
			       start, off, skb->len);
		return false;
	}
	skb->ip_summed = CHECKSUM_PARTIAL;
	skb->csum_start = skb_headroom(skb) + start;
	skb->csum_offset = off;
	return true;
}
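
/*
 * Illustrative sketch (not part of this file's API): validating checksum
 * offsets supplied by an untrusted source (e.g. a virtio guest header)
 * before accepting the packet.  The parameter names are hypothetical.
 */
static bool __maybe_unused example_validate_csum(struct sk_buff *skb,
						 u16 csum_start, u16 csum_off)
{
	if (!skb_partial_csum_set(skb, csum_start, csum_off)) {
		kfree_skb(skb);		/* bogus offsets: drop the packet */
		return false;
	}
	return true;
}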

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	if (net_ratelimit())
		pr_warning("%s: received packets cannot be forwarded"
			   " while LRO is enabled\n", skb->dev->name);
}

EXPORT_SYMBOL(___pskb_trim);
EXPORT_SYMBOL(__kfree_skb);
EXPORT_SYMBOL(kfree_skb);
EXPORT_SYMBOL(__pskb_pull_tail);
EXPORT_SYMBOL(__alloc_skb);
EXPORT_SYMBOL(__netdev_alloc_skb);
EXPORT_SYMBOL(pskb_copy);
EXPORT_SYMBOL(pskb_expand_head);
EXPORT_SYMBOL(skb_checksum);
EXPORT_SYMBOL(skb_clone);
EXPORT_SYMBOL(skb_copy);
EXPORT_SYMBOL(skb_copy_and_csum_bits);
EXPORT_SYMBOL(skb_copy_and_csum_dev);
EXPORT_SYMBOL(skb_copy_bits);
EXPORT_SYMBOL(skb_copy_expand);
EXPORT_SYMBOL(skb_over_panic);
EXPORT_SYMBOL(skb_pad);
EXPORT_SYMBOL(skb_realloc_headroom);
EXPORT_SYMBOL(skb_under_panic);
EXPORT_SYMBOL(skb_dequeue);
EXPORT_SYMBOL(skb_dequeue_tail);
EXPORT_SYMBOL(skb_insert);
EXPORT_SYMBOL(skb_queue_purge);
EXPORT_SYMBOL(skb_queue_head);
EXPORT_SYMBOL(skb_queue_tail);
EXPORT_SYMBOL(skb_unlink);
EXPORT_SYMBOL(skb_append);
EXPORT_SYMBOL(skb_split);
EXPORT_SYMBOL(skb_prepare_seq_read);
EXPORT_SYMBOL(skb_seq_read);
EXPORT_SYMBOL(skb_abort_seq_read);
EXPORT_SYMBOL(skb_find_text);
EXPORT_SYMBOL(skb_append_datato_frags);
EXPORT_SYMBOL(__skb_warn_lro_forwarding);

EXPORT_SYMBOL_GPL(skb_to_sgvec);
EXPORT_SYMBOL_GPL(skb_cow_data);
EXPORT_SYMBOL_GPL(skb_partial_csum_set);