/*
 *	Definitions for the 'struct sk_buff' memory handlers.
 *
 *	Authors:
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Florian La Roche, <rzsfl@rz.uni-sb.de>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#ifndef _LINUX_SKBUFF_H
#define _LINUX_SKBUFF_H

#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/compiler.h>
#include <linux/time.h>
#include <linux/bug.h>
#include <linux/cache.h>

#include <linux/atomic.h>
#include <asm/types.h>
#include <linux/spinlock.h>
#include <linux/net.h>
#include <linux/textsearch.h>
#include <net/checksum.h>
#include <linux/rcupdate.h>
#include <linux/dmaengine.h>
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
#include <net/flow_keys.h>

/* Don't change this without changing skb_csum_unnecessary! */
#define CHECKSUM_NONE 0
#define CHECKSUM_UNNECESSARY 1
#define CHECKSUM_COMPLETE 2
#define CHECKSUM_PARTIAL 3

#define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))
#define SKB_WITH_OVERHEAD(X)	\
	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define SKB_MAX_ORDER(X, ORDER) \
	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
#define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
#define SKB_MAX_ALLOC		(SKB_MAX_ORDER(0, 2))

/* return minimum truesize of one skb containing X bytes of data */
#define SKB_TRUESIZE(X) ((X) +						\
			 SKB_DATA_ALIGN(sizeof(struct sk_buff)) +	\
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

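/*
 * Editor's illustrative sketch (not part of the original header): the
 * value SKB_TRUESIZE() yields is what the allocator charges against a
 * socket for a freshly allocated linear skb -- the payload plus the
 * cache-aligned sizes of struct sk_buff and struct skb_shared_info.
 * For exposition only; in this header struct sk_buff is defined
 * further down, so this could not actually live at this spot.
 */
static inline unsigned int example_linear_skb_truesize(unsigned int payload)
{
	return SKB_TRUESIZE(payload);
}
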
/* A. Checksumming of received packets by device.
 *
 *	NONE: device failed to checksum this packet.
 *		skb->csum is undefined.
 *
 *	UNNECESSARY: device parsed the packet and verified the checksum.
 *		skb->csum is undefined.
 *	      This is a bad option, but, unfortunately, many vendors do it.
 *	      Apparently with the secret goal of selling you a new device
 *	      when you add a new protocol to your host. F.e. IPv6. 8)
 *
 *	COMPLETE: the most generic way. Device supplied checksum of _all_
 *	    the packet as seen by netif_rx in skb->csum.
 *	    NOTE: Even if device supports only some protocols, but
 *	    is able to produce some skb->csum, it MUST use COMPLETE,
 *	    not UNNECESSARY.
 *
 *	PARTIAL: identical to the case for output below.  This may occur
 *	    on a packet received directly from another Linux OS, e.g.,
 *	    a virtualised Linux kernel on the same host.  The packet can
 *	    be treated in the same way as UNNECESSARY except that on
 *	    output (i.e., forwarding) the checksum must be filled in
 *	    by the OS or the hardware.
 *
 * B. Checksumming on output.
 *
 *	NONE: skb is checksummed by protocol or csum is not required.
 *
 *	PARTIAL: device is required to csum packet as seen by hard_start_xmit
 *	from skb->csum_start to the end and to record the checksum
 *	at skb->csum_start + skb->csum_offset.
 *
 *	Device must show its capabilities in dev->features, set
 *	at device setup time.
 *	NETIF_F_HW_CSUM	- it is a clever device, it is able to checksum
 *			  everything.
 *	NETIF_F_IP_CSUM - device is dumb. It is able to csum only
 *			  TCP/UDP over IPv4. Sigh. Vendors like this
 *			  way for an unknown reason. Though, see comment above
 *			  about CHECKSUM_UNNECESSARY. 8)
 *	NETIF_F_IPV6_CSUM - about as dumb as the last one but does IPv6 instead.
 *
 *	UNNECESSARY: device will do per-protocol specific csum. Protocol drivers
 *	that do not want the stack to perform the checksum calculation should
 *	use this flag in their outgoing skbs.
 *	NETIF_F_FCOE_CRC - this indicates the device can do FCoE FC CRC
 *			  offload. Correspondingly, the FCoE protocol driver
 *			  stack should use CHECKSUM_UNNECESSARY.
 *
 *	Any questions? No questions, good.		--ANK
 */

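/*
 * Editor's illustrative sketch (not from the original header): one way
 * a driver that cannot offload a given packet could resolve
 * CHECKSUM_PARTIAL in its xmit path. Assumes skb_checksum_help() from
 * <linux/netdevice.h>, which computes the checksum starting at
 * skb->csum_start and stores it at skb->csum_start + skb->csum_offset,
 * exactly as the contract above requires of the device. For exposition
 * only; struct sk_buff is defined further down in this header.
 */
static inline int example_xmit_resolve_partial(struct sk_buff *skb)
{
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		return skb_checksum_help(skb);
	return 0;
}
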
struct net_device;
struct scatterlist;
struct pipe_inode_info;

#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
struct nf_conntrack {
	atomic_t use;
};
#endif

#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info {
	atomic_t		use;
	unsigned int		mask;
	struct net_device	*physindev;
	struct net_device	*physoutdev;
	unsigned long		data[32 / sizeof(unsigned long)];
};
#endif

struct sk_buff_head {
	/* These two members must be first. */
	struct sk_buff	*next;
	struct sk_buff	*prev;

	__u32		qlen;
	spinlock_t	lock;
};

struct sk_buff;

/* To allow a 64K frame to be packed as a single skb without frag_list we
 * require 64K/PAGE_SIZE pages plus 1 additional page to allow for
 * buffers which do not start on a page boundary (with 4 KiB pages that
 * is 65536/4096 + 1 = 17 fragments).
 *
 * Since GRO uses frags we allocate at least 16 regardless of page
 * size.
 */
#if (65536/PAGE_SIZE + 1) < 16
#define MAX_SKB_FRAGS 16UL
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif

typedef struct skb_frag_struct skb_frag_t;

struct skb_frag_struct {
	struct {
		struct page *p;
	} page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
	__u32 size;
#else
	__u16 page_offset;
	__u16 size;
#endif
};

static inline unsigned int skb_frag_size(const skb_frag_t *frag)
{
	return frag->size;
}

static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size)
{
	frag->size = size;
}

static inline void skb_frag_size_add(skb_frag_t *frag, int delta)
{
	frag->size += delta;
}

static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
{
	frag->size -= delta;
}

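/*
 * Editor's illustrative sketch (not part of the original header): the
 * accessors above exist because the size field is __u16 or __u32
 * depending on BITS_PER_LONG and PAGE_SIZE, so callers should never
 * touch frag->size directly.
 */
static inline unsigned int example_total_frag_bytes(const skb_frag_t *frags,
						    unsigned int nr)
{
	unsigned int i, total = 0;

	for (i = 0; i < nr; i++)
		total += skb_frag_size(&frags[i]);
	return total;
}
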
#define HAVE_HW_TIME_STAMP

/**
 * struct skb_shared_hwtstamps - hardware time stamps
 * @hwtstamp:	hardware time stamp transformed into duration
 *		since arbitrary point in time
 * @syststamp:	hwtstamp transformed to system time base
 *
 * Software time stamps generated by ktime_get_real() are stored in
 * skb->tstamp. The relation between the different kinds of time
 * stamps is as follows:
 *
 * syststamp and tstamp can be compared against each other in
 * arbitrary combinations.  The accuracy of a
 * syststamp/tstamp/"syststamp from other device" comparison is
 * limited by the accuracy of the transformation into system time
 * base. This depends on the device driver and its underlying
 * hardware.
 *
 * hwtstamps can only be compared against other hwtstamps from
 * the same device.
 *
 * This structure is attached to packets as part of the
 * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
 */
struct skb_shared_hwtstamps {
	ktime_t	hwtstamp;
	ktime_t	syststamp;
};

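/*
 * Editor's illustrative sketch (hypothetical helper, not part of the
 * original header): per the rule above, raw hwtstamps may only be
 * compared when both were taken by the same device.
 */
static inline s64 example_same_nic_delta_ns(const struct skb_shared_hwtstamps *a,
					    const struct skb_shared_hwtstamps *b)
{
	/* nanoseconds elapsed between two packets stamped by one NIC */
	return ktime_to_ns(ktime_sub(b->hwtstamp, a->hwtstamp));
}
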
/* Definitions for tx_flags in struct skb_shared_info */
enum {
	/* generate hardware time stamp */
	SKBTX_HW_TSTAMP = 1 << 0,

	/* generate software time stamp */
	SKBTX_SW_TSTAMP = 1 << 1,

	/* device driver is going to provide hardware time stamp */
	SKBTX_IN_PROGRESS = 1 << 2,

	/* device driver supports TX zero-copy buffers */
	SKBTX_DEV_ZEROCOPY = 1 << 3,

	/* generate wifi status information (where possible) */
	SKBTX_WIFI_STATUS = 1 << 4,

	/* This indicates at least one fragment might be overwritten
	 * (as in vmsplice(), sendfile() ...)
	 * If we need to compute a TX checksum, we'll need to copy
	 * all frags to avoid possible bad checksum
	 */
	SKBTX_SHARED_FRAG = 1 << 5,
};

/*
 * The callback notifies userspace to release buffers when skb DMA is done in
 * the lower device; the skb's last reference should be 0 when calling this.
 * The zerocopy_success argument is true if zero copy transmit occurred,
 * false on data copy or out of memory error caused by data copy attempt.
 * The ctx field is used to track device context.
 * The desc field is used to track userspace buffer index.
 */
struct ubuf_info {
	void (*callback)(struct ubuf_info *, bool zerocopy_success);
	void *ctx;
	unsigned long desc;
};

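/*
 * Editor's illustrative sketch (hypothetical provider, not part of the
 * original header): a minimal zerocopy completion callback matching the
 * contract described above.
 */
struct example_zc_pool {
	atomic_t inflight;
};

static void example_zerocopy_done(struct ubuf_info *uarg, bool zerocopy_success)
{
	struct example_zc_pool *pool = uarg->ctx;

	/* uarg->desc names the userspace buffer; whether the data went
	 * out zerocopy or from a private copy, the slot is now free. */
	atomic_dec(&pool->inflight);
}
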
/* This data is invariant across clones and lives at
 * the end of the header data, ie. at skb->end.
 */
struct skb_shared_info {
	unsigned char	nr_frags;
	__u8		tx_flags;
	unsigned short	gso_size;
	/* Warning: this field is not always filled in (UFO)! */
	unsigned short	gso_segs;
	unsigned short  gso_type;
	struct sk_buff	*frag_list;
	struct skb_shared_hwtstamps hwtstamps;
	__be32          ip6_frag_id;

	/*
	 * Warning : all fields before dataref are cleared in __alloc_skb()
	 */
	atomic_t	dataref;

	/* Intermediate layers must ensure that destructor_arg
	 * remains valid until skb destructor */
	void *		destructor_arg;

	/* must be last field, see pskb_expand_head() */
	skb_frag_t	frags[MAX_SKB_FRAGS];
};

/* We divide dataref into two halves.  The higher 16 bits hold references
 * to the payload part of skb->data.  The lower 16 bits hold references to
 * the entire skb->data.  A clone of a headerless skb holds the length of
 * the header in skb->hdr_len.
 *
 * All users must obey the rule that the skb->data reference count must be
 * greater than or equal to the payload reference count.
 *
 * Holding a reference to the payload part means that the user does not
 * care about modifications to the header part of skb->data.
 */
#define SKB_DATAREF_SHIFT 16
#define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)

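/*
 * Editor's illustrative sketch (not part of the original header):
 * decomposing a skb_shinfo(skb)->dataref value into the two halves
 * described above.
 */
static inline void example_dataref_split(int dataref,
					 int *payload_refs, int *data_refs)
{
	*payload_refs = dataref >> SKB_DATAREF_SHIFT;	/* high 16 bits */
	*data_refs = dataref & SKB_DATAREF_MASK;	/* low 16 bits */
}
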
enum {
	SKB_FCLONE_UNAVAILABLE,
	SKB_FCLONE_ORIG,
	SKB_FCLONE_CLONE,
};

enum {
	SKB_GSO_TCPV4 = 1 << 0,
	SKB_GSO_UDP = 1 << 1,

	/* This indicates the skb is from an untrusted source. */
	SKB_GSO_DODGY = 1 << 2,

	/* This indicates the tcp segment has CWR set. */
	SKB_GSO_TCP_ECN = 1 << 3,

	SKB_GSO_TCPV6 = 1 << 4,

	SKB_GSO_FCOE = 1 << 5,

	SKB_GSO_GRE = 1 << 6,

	SKB_GSO_UDP_TUNNEL = 1 << 7,
};

#if BITS_PER_LONG > 32
#define NET_SKBUFF_DATA_USES_OFFSET 1
#endif

#ifdef NET_SKBUFF_DATA_USES_OFFSET
typedef unsigned int sk_buff_data_t;
#else
typedef unsigned char *sk_buff_data_t;
#endif

#if defined(CONFIG_NF_DEFRAG_IPV4) || defined(CONFIG_NF_DEFRAG_IPV4_MODULE) || \
    defined(CONFIG_NF_DEFRAG_IPV6) || defined(CONFIG_NF_DEFRAG_IPV6_MODULE)
#define NET_SKBUFF_NF_DEFRAG_NEEDED 1
#endif

/**
 *	struct sk_buff - socket buffer
 *	@next: Next buffer in list
 *	@prev: Previous buffer in list
 *	@tstamp: Time we arrived
 *	@sk: Socket we are owned by
 *	@dev: Device we arrived on/are leaving by
 *	@cb: Control buffer. Free for use by every layer. Put private vars here
 *	@_skb_refdst: destination entry (with norefcount bit)
 *	@sp: the security path, used for xfrm
 *	@len: Length of actual data
 *	@data_len: Data length
 *	@mac_len: Length of link layer header
 *	@hdr_len: writable header length of cloned skb
 *	@csum: Checksum (must include start/offset pair)
 *	@csum_start: Offset from skb->head where checksumming should start
 *	@csum_offset: Offset from csum_start where checksum should be stored
 *	@priority: Packet queueing priority
 *	@local_df: allow local fragmentation
 *	@cloned: Head may be cloned (check refcnt to be sure)
 *	@ip_summed: Driver fed us an IP checksum
 *	@nohdr: Payload reference only, must not modify header
 *	@nfctinfo: Relationship of this skb to the connection
 *	@pkt_type: Packet class
 *	@fclone: skbuff clone status
 *	@ipvs_property: skbuff is owned by ipvs
 *	@peeked: this packet has been seen already, so stats have been
 *		done for it, don't do them again
 *	@nf_trace: netfilter packet trace flag
 *	@protocol: Packet protocol from driver
 *	@destructor: Destruct function
 *	@nfct: Associated connection, if any
 *	@nfct_reasm: netfilter conntrack re-assembly pointer
 *	@nf_bridge: Saved data about a bridged frame - see br_netfilter.c
 *	@skb_iif: ifindex of device we arrived on
 *	@tc_index: Traffic control index
 *	@tc_verd: traffic control verdict
 *	@rxhash: the packet hash computed on receive
 *	@queue_mapping: Queue mapping for multiqueue devices
 *	@ndisc_nodetype: router type (from link layer)
 *	@ooo_okay: allow the mapping of a socket to a queue to be changed
 *	@l4_rxhash: indicate rxhash is a canonical 4-tuple hash over transport
 *		ports.
 *	@wifi_acked_valid: wifi_acked was set
 *	@wifi_acked: whether frame was acked on wifi or not
 *	@no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
 *	@dma_cookie: a cookie to one of several possible DMA operations
 *		done by skb DMA functions
 *	@secmark: security marking
 *	@mark: Generic packet mark
 *	@dropcount: total number of sk_receive_queue overflows
 *	@vlan_proto: vlan encapsulation protocol
 *	@vlan_tci: vlan tag control information
 *	@inner_transport_header: Inner transport layer header (encapsulation)
 *	@inner_network_header: Network layer header (encapsulation)
 *	@inner_mac_header: Link layer header (encapsulation)
 *	@transport_header: Transport layer header
 *	@network_header: Network layer header
 *	@mac_header: Link layer header
 *	@tail: Tail pointer
 *	@end: End pointer
 *	@head: Head of buffer
 *	@data: Data head pointer
 *	@truesize: Buffer size
 *	@users: User count - see {datagram,tcp}.c
 */

struct sk_buff {
	/* These two members must be first. */
	struct sk_buff		*next;
	struct sk_buff		*prev;

	ktime_t			tstamp;

	struct sock		*sk;
	struct net_device	*dev;

	/*
	 * This is the control buffer. It is free to use for every
	 * layer. Please put your private variables there. If you
	 * want to keep them across layers you have to do a skb_clone()
	 * first. This is owned by whoever has the skb queued ATM.
	 */
	char			cb[48] __aligned(8);

	unsigned long		_skb_refdst;
#ifdef CONFIG_XFRM
	struct	sec_path	*sp;
#endif
	unsigned int		len,
				data_len;
	__u16			mac_len,
				hdr_len;
	union {
		__wsum		csum;
		struct {
			__u16	csum_start;
			__u16	csum_offset;
		};
	};
	__u32			priority;
	kmemcheck_bitfield_begin(flags1);
	__u8			local_df:1,
				cloned:1,
				ip_summed:2,
				nohdr:1,
				nfctinfo:3;
	__u8			pkt_type:3,
				fclone:2,
				ipvs_property:1,
				peeked:1,
				nf_trace:1;
	kmemcheck_bitfield_end(flags1);
	__be16			protocol;

	void			(*destructor)(struct sk_buff *skb);
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	struct nf_conntrack	*nfct;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	struct sk_buff		*nfct_reasm;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	struct nf_bridge_info	*nf_bridge;
#endif

	int			skb_iif;

	__u32			rxhash;

	__be16			vlan_proto;
	__u16			vlan_tci;

#ifdef CONFIG_NET_SCHED
	__u16			tc_index;	/* traffic control index */
#ifdef CONFIG_NET_CLS_ACT
	__u16			tc_verd;	/* traffic control verdict */
#endif
#endif

	__u16			queue_mapping;
	kmemcheck_bitfield_begin(flags2);
#ifdef CONFIG_IPV6_NDISC_NODETYPE
	__u8			ndisc_nodetype:2;
#endif
	__u8			pfmemalloc:1;
	__u8			ooo_okay:1;
	__u8			l4_rxhash:1;
	__u8			wifi_acked_valid:1;
	__u8			wifi_acked:1;
	__u8			no_fcs:1;
	__u8			head_frag:1;
	/* Encapsulation protocol and NIC drivers should use
	 * this flag to indicate to each other if the skb contains
	 * an encapsulated packet or not, and maybe use the inner packet
	 * headers if needed
	 */
	__u8			encapsulation:1;
	/* 7/9 bit hole (depending on ndisc_nodetype presence) */
	kmemcheck_bitfield_end(flags2);

#ifdef CONFIG_NET_DMA
	dma_cookie_t		dma_cookie;
#endif
#ifdef CONFIG_NETWORK_SECMARK
	__u32			secmark;
#endif
	union {
		__u32		mark;
		__u32		dropcount;
		__u32		reserved_tailroom;
	};

	sk_buff_data_t		inner_transport_header;
	sk_buff_data_t		inner_network_header;
	sk_buff_data_t		inner_mac_header;
	sk_buff_data_t		transport_header;
	sk_buff_data_t		network_header;
	sk_buff_data_t		mac_header;
	/* These elements must be at the end, see alloc_skb() for details.  */
	sk_buff_data_t		tail;
	sk_buff_data_t		end;
	unsigned char		*head,
				*data;
	unsigned int		truesize;
	atomic_t		users;
};

#ifdef __KERNEL__
/*
 *	Handling routines are only of interest to the kernel
 */
#include <linux/slab.h>


#define SKB_ALLOC_FCLONE	0x01
#define SKB_ALLOC_RX		0x02

/* Returns true if the skb was allocated from PFMEMALLOC reserves */
static inline bool skb_pfmemalloc(const struct sk_buff *skb)
{
	return unlikely(skb->pfmemalloc);
}

/*
 * skb might have a dst pointer attached, refcounted or not.
 * _skb_refdst low order bit is set if refcount was _not_ taken
 */
#define SKB_DST_NOREF	1UL
#define SKB_DST_PTRMASK	~(SKB_DST_NOREF)

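/*
 * Editor's illustrative sketch (not part of the original header): how
 * the noref encoding works. dst_entry pointers are word-aligned, so
 * bit 0 is free to carry the "no refcount was taken" flag that
 * skb_dst() masks off below; __skb_dst_set_noref() is the real setter.
 */
static inline void example_encode_dst_noref(struct sk_buff *skb,
					    struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
}
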
/**
 * skb_dst - returns skb dst_entry
 * @skb: buffer
 *
 * Returns skb dst_entry, regardless of reference taken or not.
 */
static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
{
	/* If refdst was not refcounted, check we still are in a
	 * rcu_read_lock section
	 */
	WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) &&
		!rcu_read_lock_held() &&
		!rcu_read_lock_bh_held());
	return (struct dst_entry *)(skb->_skb_refdst & SKB_DST_PTRMASK);
}

/**
 * skb_dst_set - sets skb dst
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was taken on dst and should
 * be released by skb_dst_drop()
 */
static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
{
	skb->_skb_refdst = (unsigned long)dst;
}

extern void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst,
				bool force);

/**
 * skb_dst_set_noref - sets skb dst, hopefully, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * If dst entry is cached, we do not take reference and dst_release
 * will be avoided by refdst_drop. If dst entry is not cached, we take
 * reference, so that last dst_release can destroy the dst immediately.
 */
static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst)
{
	__skb_dst_set_noref(skb, dst, false);
}

/**
 * skb_dst_set_noref_force - sets skb dst, without taking reference
 * @skb: buffer
 * @dst: dst entry
 *
 * Sets skb dst, assuming a reference was not taken on dst.
 * No reference is taken and no dst_release will be called. While for
 * cached dsts deferred reclaim is a basic feature, for entries that are
 * not cached it is the caller's job to guarantee that the last dst_release
 * for the provided dst happens when nobody uses it, e.g. after an RCU grace
 * period.
 */
static inline void skb_dst_set_noref_force(struct sk_buff *skb,
					   struct dst_entry *dst)
{
	__skb_dst_set_noref(skb, dst, true);
}

/**
 * skb_dst_is_noref - Test if skb dst isn't refcounted
 * @skb: buffer
 */
static inline bool skb_dst_is_noref(const struct sk_buff *skb)
{
	return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb);
}

static inline struct rtable *skb_rtable(const struct sk_buff *skb)
{
	return (struct rtable *)skb_dst(skb);
}

extern void kfree_skb(struct sk_buff *skb);
extern void kfree_skb_list(struct sk_buff *segs);
extern void skb_tx_error(struct sk_buff *skb);
extern void consume_skb(struct sk_buff *skb);
extern void	       __kfree_skb(struct sk_buff *skb);
extern struct kmem_cache *skbuff_head_cache;

extern void kfree_skb_partial(struct sk_buff *skb, bool head_stolen);
extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
			     bool *fragstolen, int *delta_truesize);

extern struct sk_buff *__alloc_skb(unsigned int size,
				   gfp_t priority, int flags, int node);
extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
					gfp_t priority)
{
	return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
}

static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
					       gfp_t priority)
{
	return __alloc_skb(size, priority, SKB_ALLOC_FCLONE, NUMA_NO_NODE);
}

extern struct sk_buff *__alloc_skb_head(gfp_t priority, int node);
static inline struct sk_buff *alloc_skb_head(gfp_t priority)
{
	return __alloc_skb_head(priority, -1);
}

extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
extern int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
extern struct sk_buff *skb_clone(struct sk_buff *skb,
				 gfp_t priority);
extern struct sk_buff *skb_copy(const struct sk_buff *skb,
				gfp_t priority);
extern struct sk_buff *__pskb_copy(struct sk_buff *skb,
				   int headroom, gfp_t gfp_mask);

extern int	       pskb_expand_head(struct sk_buff *skb,
					int nhead, int ntail,
					gfp_t gfp_mask);
extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
					    unsigned int headroom);
extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				       int newheadroom, int newtailroom,
				       gfp_t priority);
extern int	       skb_to_sgvec(struct sk_buff *skb,
				    struct scatterlist *sg, int offset,
				    int len);
extern int	       skb_cow_data(struct sk_buff *skb, int tailbits,
				    struct sk_buff **trailer);
extern int	       skb_pad(struct sk_buff *skb, int pad);
#define dev_kfree_skb(a)	consume_skb(a)

extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
			int getfrag(void *from, char *to, int offset,
			int len, int odd, struct sk_buff *skb),
			void *from, int length);

struct skb_seq_state {
	__u32		lower_offset;
	__u32		upper_offset;
	__u32		frag_idx;
	__u32		stepped_offset;
	struct sk_buff	*root_skb;
	struct sk_buff	*cur_skb;
	__u8		*frag_data;
};

extern void	      skb_prepare_seq_read(struct sk_buff *skb,
					   unsigned int from, unsigned int to,
					   struct skb_seq_state *st);
extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
				   struct skb_seq_state *st);
extern void	      skb_abort_seq_read(struct skb_seq_state *st);

extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
				    unsigned int to, struct ts_config *config,
				    struct ts_state *state);

extern void __skb_get_rxhash(struct sk_buff *skb);
static inline __u32 skb_get_rxhash(struct sk_buff *skb)
{
	if (!skb->l4_rxhash)
		__skb_get_rxhash(skb);

	return skb->rxhash;
}

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end;
}
#else
static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
{
	return skb->end;
}

static inline unsigned int skb_end_offset(const struct sk_buff *skb)
{
	return skb->end - skb->head;
}
#endif

/* Internal */
#define skb_shinfo(SKB)	((struct skb_shared_info *)(skb_end_pointer(SKB)))

static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
{
	return &skb_shinfo(skb)->hwtstamps;
}

/**
 *	skb_queue_empty - check if a queue is empty
 *	@list: queue head
 *
 *	Returns true if the queue is empty, false otherwise.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_last - check if skb is the last entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the last buffer on the list.
 */
static inline bool skb_queue_is_last(const struct sk_buff_head *list,
				     const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)list;
}

/**
 *	skb_queue_is_first - check if skb is the first entry in the queue
 *	@list: queue head
 *	@skb: buffer
 *
 *	Returns true if @skb is the first buffer on the list.
 */
static inline bool skb_queue_is_first(const struct sk_buff_head *list,
				      const struct sk_buff *skb)
{
	return skb->prev == (struct sk_buff *)list;
}

/**
 *	skb_queue_next - return the next packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the next packet in @list after @skb.  It is only valid to
 *	call this if skb_queue_is_last() evaluates to false.
 */
static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_last(list, skb));
	return skb->next;
}

/**
 *	skb_queue_prev - return the prev packet in the queue
 *	@list: queue head
 *	@skb: current buffer
 *
 *	Return the prev packet in @list before @skb.  It is only valid to
 *	call this if skb_queue_is_first() evaluates to false.
 */
static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
					     const struct sk_buff *skb)
{
	/* This BUG_ON may seem severe, but if we just return then we
	 * are going to dereference garbage.
	 */
	BUG_ON(skb_queue_is_first(list, skb));
	return skb->prev;
}

/**
 *	skb_get - reference buffer
 *	@skb: buffer to reference
 *
 *	Makes another reference to a socket buffer and returns a pointer
 *	to the buffer.
 */
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
	atomic_inc(&skb->users);
	return skb;
}

/*
 * If users == 1, we are the only owner and can avoid redundant
 * atomic changes.
 */

/**
 *	skb_cloned - is the buffer a clone
 *	@skb: buffer to check
 *
 *	Returns true if the buffer was generated with skb_clone() and is
 *	one of multiple shared copies of the buffer. Cloned buffers are
 *	shared data so must not be written to under normal circumstances.
 */
static inline int skb_cloned(const struct sk_buff *skb)
{
	return skb->cloned &&
	       (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
}

static inline int skb_unclone(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);

	if (skb_cloned(skb))
		return pskb_expand_head(skb, 0, 0, pri);

	return 0;
}

/**
 *	skb_header_cloned - is the header a clone
 *	@skb: buffer to check
 *
 *	Returns true if modifying the header part of the buffer requires
 *	the data to be copied.
 */
static inline int skb_header_cloned(const struct sk_buff *skb)
{
	int dataref;

	if (!skb->cloned)
		return 0;

	dataref = atomic_read(&skb_shinfo(skb)->dataref);
	dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
	return dataref != 1;
}

/**
 *	skb_header_release - release reference to header
 *	@skb: buffer to operate on
 *
 *	Drop a reference to the header part of the buffer.  This is done
 *	by acquiring a payload reference.  You must not read from the header
 *	part of skb->data after this.
 */
static inline void skb_header_release(struct sk_buff *skb)
{
	BUG_ON(skb->nohdr);
	skb->nohdr = 1;
	atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
}

/**
 *	skb_shared - is the buffer shared
 *	@skb: buffer to check
 *
 *	Returns true if more than one person has a reference to this
 *	buffer.
 */
static inline int skb_shared(const struct sk_buff *skb)
{
	return atomic_read(&skb->users) != 1;
}

/**
 *	skb_share_check - check if buffer is shared and if so clone it
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the buffer is shared the buffer is cloned and the old copy
 *	drops a reference. A new clone with a single reference is returned.
 *	If the buffer is not shared the original buffer is returned. When
 *	being called from interrupt context or with spinlocks held, @pri
 *	must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, pri);

		if (likely(nskb))
			consume_skb(skb);
		else
			kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

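/*
 * Editor's illustrative sketch (not part of the original header):
 * typical use in a receive handler. On return we either own the only
 * reference or got NULL on allocation failure.
 */
static inline struct sk_buff *example_rx_take_ownership(struct sk_buff *skb)
{
	return skb_share_check(skb, GFP_ATOMIC);
}
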
/*
 *	Copy shared buffers into a new sk_buff. We effectively do COW on
 *	packets to handle cases where we have a local reader as well as a
 *	forwarding path, and a couple of other messy ones. The normal one
 *	is tcpdumping a packet that's being forwarded.
 */

/**
 *	skb_unshare - make a copy of a shared buffer
 *	@skb: buffer to check
 *	@pri: priority for memory allocation
 *
 *	If the socket buffer is a clone then this function creates a new
 *	copy of the data, drops a reference count on the old copy and returns
 *	the new copy with the reference count at 1. If the buffer is not a clone
 *	the original buffer is returned. When called with a spinlock held or
 *	from interrupt state @pri must be %GFP_ATOMIC.
 *
 *	%NULL is returned on a memory allocation failure.
 */
static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
					  gfp_t pri)
{
	might_sleep_if(pri & __GFP_WAIT);
	if (skb_cloned(skb)) {
		struct sk_buff *nskb = skb_copy(skb, pri);
		kfree_skb(skb);	/* Free our shared copy */
		skb = nskb;
	}
	return skb;
}

/**
 *	skb_peek - peek at the head of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the head element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->next;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_peek_next - peek skb following the given one from a queue
 *	@skb: skb to start from
 *	@list_: list to peek at
 *
 *	Returns %NULL when the end of the list is met or a pointer to the
 *	next element. The reference count is not incremented and the
 *	reference is therefore volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
		const struct sk_buff_head *list_)
{
	struct sk_buff *next = skb->next;

	if (next == (struct sk_buff *)list_)
		next = NULL;
	return next;
}

/**
 *	skb_peek_tail - peek at the tail of an &sk_buff_head
 *	@list_: list to peek at
 *
 *	Peek an &sk_buff. Unlike most other operations you _MUST_
 *	be careful with this one. A peek leaves the buffer on the
 *	list and someone else may run off with it. You must hold
 *	the appropriate locks or have a private queue to do this.
 *
 *	Returns %NULL for an empty list or a pointer to the tail element.
 *	The reference count is not incremented and the reference is therefore
 *	volatile. Use with caution.
 */
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
	struct sk_buff *skb = list_->prev;

	if (skb == (struct sk_buff *)list_)
		skb = NULL;
	return skb;
}

/**
 *	skb_queue_len	- get queue length
 *	@list_: list to measure
 *
 *	Return the length of an &sk_buff queue.
 */
static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
{
	return list_->qlen;
}

/**
 *	__skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
 *	@list: queue to initialize
 *
 *	This initializes only the list and queue length aspects of
 *	an sk_buff_head object.  This allows the list aspects of an
 *	sk_buff_head to be initialized without reinitializing things like
 *	the spinlock.  It can also be used for on-stack sk_buff_head
 *	objects where the spinlock is known to not be used.
 */
static inline void __skb_queue_head_init(struct sk_buff_head *list)
{
	list->prev = list->next = (struct sk_buff *)list;
	list->qlen = 0;
}

/*
 * This function creates a split-out lock class for each invocation;
 * this is needed for now since a whole lot of users of the skb-queue
 * infrastructure in drivers have different locking usage (in hardirq)
 * than the networking core (in softirq only). In the long run either the
 * network layer or drivers should gain annotations to consolidate the
 * main types of usage into 3 classes.
 */
static inline void skb_queue_head_init(struct sk_buff_head *list)
{
	spin_lock_init(&list->lock);
	__skb_queue_head_init(list);
}

static inline void skb_queue_head_init_class(struct sk_buff_head *list,
		struct lock_class_key *class)
{
	skb_queue_head_init(list);
	lockdep_set_class(&list->lock, class);
}

/*
 *	Insert an sk_buff on a list.
 *
 *	The "__skb_xxxx()" functions are the non-atomic ones that
 *	can only be called with interrupts disabled.
 */
extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
static inline void __skb_insert(struct sk_buff *newsk,
				struct sk_buff *prev, struct sk_buff *next,
				struct sk_buff_head *list)
{
	newsk->next = next;
	newsk->prev = prev;
	next->prev  = prev->next = newsk;
	list->qlen++;
}

static inline void __skb_queue_splice(const struct sk_buff_head *list,
				      struct sk_buff *prev,
				      struct sk_buff *next)
{
	struct sk_buff *first = list->next;
	struct sk_buff *last = list->prev;

	first->prev = prev;
	prev->next = first;

	last->next = next;
	next->prev = last;
}

/**
 *	skb_queue_splice - join two skb lists, this is designed for stacks
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice(const struct sk_buff_head *list,
				    struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_init(struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, (struct sk_buff *) head, head->next);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	skb_queue_splice_tail - join two skb lists, each list being a queue
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 */
static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
					 struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
	}
}

/**
 *	skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
 *	@list: the new list to add
 *	@head: the place to add it in the first list
 *
 *	Each of the lists is a queue.
 *	The list at @list is reinitialised
 */
static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
					      struct sk_buff_head *head)
{
	if (!skb_queue_empty(list)) {
		__skb_queue_splice(list, head->prev, (struct sk_buff *) head);
		head->qlen += list->qlen;
		__skb_queue_head_init(list);
	}
}

/**
 *	__skb_queue_after - queue a buffer after a given buffer in a list
 *	@list: list to use
 *	@prev: place after this buffer
 *	@newsk: buffer to queue
 *
 *	Queue a buffer in the middle of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
static inline void __skb_queue_after(struct sk_buff_head *list,
				     struct sk_buff *prev,
				     struct sk_buff *newsk)
{
	__skb_insert(newsk, prev, prev->next, list);
}

extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
		       struct sk_buff_head *list);

static inline void __skb_queue_before(struct sk_buff_head *list,
				      struct sk_buff *next,
				      struct sk_buff *newsk)
{
	__skb_insert(newsk, next->prev, next, list);
}

/**
 *	__skb_queue_head - queue a buffer at the list head
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the start of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_head(struct sk_buff_head *list,
				    struct sk_buff *newsk)
{
	__skb_queue_after(list, (struct sk_buff *)list, newsk);
}

/**
 *	__skb_queue_tail - queue a buffer at the list tail
 *	@list: list to use
 *	@newsk: buffer to queue
 *
 *	Queue a buffer at the end of a list. This function takes no locks
 *	and you must therefore hold required locks before calling it.
 *
 *	A buffer cannot be placed on two lists at the same time.
 */
extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
static inline void __skb_queue_tail(struct sk_buff_head *list,
				   struct sk_buff *newsk)
{
	__skb_queue_before(list, (struct sk_buff *)list, newsk);
}

/*
 * Remove an sk_buff from a list. _Must_ be called atomically, and with
 * the list known.
 */
extern void	   skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	struct sk_buff *next, *prev;

	list->qlen--;
	next	   = skb->next;
	prev	   = skb->prev;
	skb->next  = skb->prev = NULL;
	next->prev = prev;
	prev->next = next;
}

/**
 *	__skb_dequeue - remove from the head of the queue
 *	@list: list to dequeue from
 *
 *	Remove the head of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The head item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

/**
 *	__skb_dequeue_tail - remove from the tail of the queue
 *	@list: list to dequeue from
 *
 *	Remove the tail of the list. This function does not take any locks
 *	so must be used with appropriate locks held only. The tail item is
 *	returned or %NULL if the list is empty.
 */
extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
{
	struct sk_buff *skb = skb_peek_tail(list);
	if (skb)
		__skb_unlink(skb, list);
	return skb;
}

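/*
 * Editor's illustrative sketch (not part of the original header): a
 * trivial FIFO built from the unlocked primitives above; the caller
 * supplies its own locking, since the __skb_* variants take none.
 */
static inline struct sk_buff *example_fifo_rotate(struct sk_buff_head *q,
						  struct sk_buff *newsk)
{
	__skb_queue_tail(q, newsk);
	return __skb_dequeue(q);	/* oldest element, never NULL here */
}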

static inline bool skb_is_nonlinear(const struct sk_buff *skb)
{
	/* data_len is the number of bytes held in frags/frag_list */
	return skb->data_len;
}

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	/* bytes in the linear part of the buffer */
	return skb->len - skb->data_len;
}

static inline int skb_pagelen(const struct sk_buff *skb)
{
	int i, len = 0;

	for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
		len += skb_frag_size(&skb_shinfo(skb)->frags[i]);
	return len + skb_headlen(skb);
}

/**
 * __skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * Initialises the @i'th fragment of @skb to point to @size bytes at
 * offset @off within @page.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void __skb_fill_page_desc(struct sk_buff *skb, int i,
					struct page *page, int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	/*
	 * Propagate page->pfmemalloc to the skb if we can. The problem is
	 * that not all callers have unique ownership of the page. If
	 * pfmemalloc is set, we check the mapping as a mapping implies
	 * page->index is set (index and pfmemalloc share space).
	 * If it's a valid mapping, we cannot use page->pfmemalloc but we
	 * do not lose pfmemalloc information as the pages would not be
	 * allocated using __GFP_MEMALLOC.
	 */
	frag->page.p		  = page;
	frag->page_offset	  = off;
	skb_frag_size_set(frag, size);

	page = compound_head(page);
	if (page->pfmemalloc && !page->mapping)
		skb->pfmemalloc	= true;
}

/**
 * skb_fill_page_desc - initialise a paged fragment in an skb
 * @skb: buffer containing fragment to be initialised
 * @i: paged fragment index to initialise
 * @page: the page to use for this fragment
 * @off: the offset to the data within @page
 * @size: the length of the data
 *
 * As per __skb_fill_page_desc() -- initialises the @i'th fragment of
 * @skb to point to @size bytes at offset @off within @page. In
 * addition updates @skb such that @i is the last fragment.
 *
 * Does not take any additional reference on the fragment.
 */
static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
				      struct page *page, int off, int size)
{
	__skb_fill_page_desc(skb, i, page, off, size);
	skb_shinfo(skb)->nr_frags = i + 1;
}

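/*
 * Editor's illustrative sketch (hypothetical helper, not part of the
 * original header): attaching one page as the first fragment. Note the
 * length and truesize bookkeeping the descriptor helpers leave to the
 * caller; skb_add_rx_frag() below does all of this in one call.
 */
static inline void example_attach_page(struct sk_buff *skb,
				       struct page *page, unsigned int size)
{
	skb_fill_page_desc(skb, 0, page, 0, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += PAGE_SIZE;	/* charge the whole page */
}
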
extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			    int off, int size, unsigned int truesize);

#define SKB_PAGE_ASSERT(skb) 	BUG_ON(skb_shinfo(skb)->nr_frags)
#define SKB_FRAG_ASSERT(skb) 	BUG_ON(skb_has_frag_list(skb))
#define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))

#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->head + skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data - skb->head;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb_reset_tail_pointer(skb);
	skb->tail += offset;
}
#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
{
	return skb->tail;
}

static inline void skb_reset_tail_pointer(struct sk_buff *skb)
{
	skb->tail = skb->data;
}

static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
{
	skb->tail = skb->data + offset;
}

#endif /* NET_SKBUFF_DATA_USES_OFFSET */

/*
 *	Add data to an sk_buff
 */
extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
{
	unsigned char *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len  += len;
	return tmp;
}

extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len  += len;
	return skb->data;
}

extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
{
	skb->len -= len;
	BUG_ON(skb->len < skb->data_len);
	return skb->data += len;
}

static inline unsigned char *skb_pull_inline(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
}

extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);

static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
{
	if (len > skb_headlen(skb) &&
	    !__pskb_pull_tail(skb, len - skb_headlen(skb)))
		return NULL;
	skb->len -= len;
	return skb->data += len;
}

static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
{
	return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}

static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
	if (likely(len <= skb_headlen(skb)))
		return 1;
	if (unlikely(len > skb->len))
		return 0;
	return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}

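/*
 * Editor's illustrative sketch (not part of the original header): the
 * canonical parse pattern -- make sure the first @hlen bytes are in the
 * linear area before dereferencing them, then advance past the header.
 */
static inline bool example_pull_header(struct sk_buff *skb, unsigned int hlen)
{
	if (!pskb_may_pull(skb, hlen))
		return false;	/* packet shorter than the header */
	__skb_pull(skb, hlen);
	return true;
}
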
/**
 *	skb_headroom - bytes at buffer head
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the head of an &sk_buff.
 */
static inline unsigned int skb_headroom(const struct sk_buff *skb)
{
	return skb->data - skb->head;
}

/**
 *	skb_tailroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff.
 */
static inline int skb_tailroom(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
}

/**
 *	skb_availroom - bytes at buffer end
 *	@skb: buffer to check
 *
 *	Return the number of bytes of free space at the tail of an sk_buff
 *	allocated by sk_stream_alloc().
 */
static inline int skb_availroom(const struct sk_buff *skb)
{
	if (skb_is_nonlinear(skb))
		return 0;

	return skb->end - skb->tail - skb->reserved_tailroom;
}

/**
 *	skb_reserve - adjust headroom
 *	@skb: buffer to alter
 *	@len: bytes to move
 *
 *	Increase the headroom of an empty &sk_buff by reducing the tail
 *	room. This is only allowed for an empty buffer.
 */
static inline void skb_reserve(struct sk_buff *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

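/*
 * Editor's illustrative sketch (not part of the original header): the
 * usual RX allocation pattern. NET_IP_ALIGN (from <linux/netdevice.h>)
 * is assumed here; reserving it while the buffer is still empty aligns
 * the IP header once the Ethernet header is pulled.
 */
static inline struct sk_buff *example_alloc_rx(unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len + NET_IP_ALIGN, GFP_ATOMIC);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}
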
static inline void skb_reset_inner_headers(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->mac_header;
	skb->inner_network_header = skb->network_header;
	skb->inner_transport_header = skb->transport_header;
}

static inline void skb_reset_mac_len(struct sk_buff *skb)
{
	skb->mac_len = skb->network_header - skb->mac_header;
}

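/*
 * Editor's illustrative sketch (not part of the original header):
 * typical receive-side marking, using the reset helpers declared just
 * below. After this, skb_reset_mac_len() records mac_len as the gap
 * between the two header offsets.
 */
static inline void example_mark_rx_headers(struct sk_buff *skb,
					   unsigned int mac_hlen)
{
	skb_reset_mac_header(skb);	/* link-layer header at skb->data */
	__skb_pull(skb, mac_hlen);
	skb_reset_network_header(skb);	/* network header now at skb->data */
	skb_reset_mac_len(skb);
}
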
#ifdef NET_SKBUFF_DATA_USES_OFFSET
static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->head + skb->inner_transport_header;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data - skb->head;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						   const int offset)
{
	skb_reset_inner_transport_header(skb);
	skb->inner_transport_header += offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data - skb->head;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb_reset_inner_network_header(skb);
	skb->inner_network_header += offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data - skb->head;
}

static inline void skb_set_inner_mac_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_inner_mac_header(skb);
	skb->inner_mac_header += offset;
}

static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
{
	return skb->transport_header != ~0U;
}

static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
{
	return skb->head + skb->transport_header;
}

static inline void skb_reset_transport_header(struct sk_buff *skb)
{
	skb->transport_header = skb->data - skb->head;
}

static inline void skb_set_transport_header(struct sk_buff *skb,
					    const int offset)
{
	skb_reset_transport_header(skb);
	skb->transport_header += offset;
}

static inline unsigned char *skb_network_header(const struct sk_buff *skb)
{
	return skb->head + skb->network_header;
}

static inline void skb_reset_network_header(struct sk_buff *skb)
{
	skb->network_header = skb->data - skb->head;
}

static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
{
	skb_reset_network_header(skb);
	skb->network_header += offset;
}

static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
{
	return skb->head + skb->mac_header;
}

static inline int skb_mac_header_was_set(const struct sk_buff *skb)
{
	return skb->mac_header != ~0U;
}

static inline void skb_reset_mac_header(struct sk_buff *skb)
{
	skb->mac_header = skb->data - skb->head;
}

static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
{
	skb_reset_mac_header(skb);
	skb->mac_header += offset;
}

#else /* NET_SKBUFF_DATA_USES_OFFSET */
static inline unsigned char *skb_inner_transport_header(const struct sk_buff
							*skb)
{
	return skb->inner_transport_header;
}

static inline void skb_reset_inner_transport_header(struct sk_buff *skb)
{
	skb->inner_transport_header = skb->data;
}

static inline void skb_set_inner_transport_header(struct sk_buff *skb,
						   const int offset)
{
	skb->inner_transport_header = skb->data + offset;
}

static inline unsigned char *skb_inner_network_header(const struct sk_buff *skb)
{
	return skb->inner_network_header;
}

static inline void skb_reset_inner_network_header(struct sk_buff *skb)
{
	skb->inner_network_header = skb->data;
}

static inline void skb_set_inner_network_header(struct sk_buff *skb,
						const int offset)
{
	skb->inner_network_header = skb->data + offset;
}

static inline unsigned char *skb_inner_mac_header(const struct sk_buff *skb)
{
	return skb->inner_mac_header;
}

static inline void skb_reset_inner_mac_header(struct sk_buff *skb)
{
	skb->inner_mac_header = skb->data;
}
1682 {
1683 	skb->inner_mac_header = skb->data;
1684 }
1685 
skb_set_inner_mac_header(struct sk_buff * skb,const int offset)1686 static inline void skb_set_inner_mac_header(struct sk_buff *skb,
1687 						const int offset)
1688 {
1689 	skb->inner_mac_header = skb->data + offset;
1690 }
skb_transport_header_was_set(const struct sk_buff * skb)1691 static inline bool skb_transport_header_was_set(const struct sk_buff *skb)
1692 {
1693 	return skb->transport_header != NULL;
1694 }
1695 
skb_transport_header(const struct sk_buff * skb)1696 static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1697 {
1698 	return skb->transport_header;
1699 }
1700 
skb_reset_transport_header(struct sk_buff * skb)1701 static inline void skb_reset_transport_header(struct sk_buff *skb)
1702 {
1703 	skb->transport_header = skb->data;
1704 }
1705 
skb_set_transport_header(struct sk_buff * skb,const int offset)1706 static inline void skb_set_transport_header(struct sk_buff *skb,
1707 					    const int offset)
1708 {
1709 	skb->transport_header = skb->data + offset;
1710 }
1711 
skb_network_header(const struct sk_buff * skb)1712 static inline unsigned char *skb_network_header(const struct sk_buff *skb)
1713 {
1714 	return skb->network_header;
1715 }
1716 
skb_reset_network_header(struct sk_buff * skb)1717 static inline void skb_reset_network_header(struct sk_buff *skb)
1718 {
1719 	skb->network_header = skb->data;
1720 }
1721 
skb_set_network_header(struct sk_buff * skb,const int offset)1722 static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
1723 {
1724 	skb->network_header = skb->data + offset;
1725 }
1726 
skb_mac_header(const struct sk_buff * skb)1727 static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
1728 {
1729 	return skb->mac_header;
1730 }
1731 
skb_mac_header_was_set(const struct sk_buff * skb)1732 static inline int skb_mac_header_was_set(const struct sk_buff *skb)
1733 {
1734 	return skb->mac_header != NULL;
1735 }
1736 
skb_reset_mac_header(struct sk_buff * skb)1737 static inline void skb_reset_mac_header(struct sk_buff *skb)
1738 {
1739 	skb->mac_header = skb->data;
1740 }
1741 
skb_set_mac_header(struct sk_buff * skb,const int offset)1742 static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
1743 {
1744 	skb->mac_header = skb->data + offset;
1745 }
1746 #endif /* NET_SKBUFF_DATA_USES_OFFSET */
1747 
skb_probe_transport_header(struct sk_buff * skb,const int offset_hint)1748 static inline void skb_probe_transport_header(struct sk_buff *skb,
1749 					      const int offset_hint)
1750 {
1751 	struct flow_keys keys;
1752 
1753 	if (skb_transport_header_was_set(skb))
1754 		return;
1755 	else if (skb_flow_dissect(skb, &keys))
1756 		skb_set_transport_header(skb, keys.thoff);
1757 	else
1758 		skb_set_transport_header(skb, offset_hint);
1759 }
1760 
skb_mac_header_rebuild(struct sk_buff * skb)1761 static inline void skb_mac_header_rebuild(struct sk_buff *skb)
1762 {
1763 	if (skb_mac_header_was_set(skb)) {
1764 		const unsigned char *old_mac = skb_mac_header(skb);
1765 
1766 		skb_set_mac_header(skb, -skb->mac_len);
1767 		memmove(skb_mac_header(skb), old_mac, skb->mac_len);
1768 	}
1769 }
1770 
skb_checksum_start_offset(const struct sk_buff * skb)1771 static inline int skb_checksum_start_offset(const struct sk_buff *skb)
1772 {
1773 	return skb->csum_start - skb_headroom(skb);
1774 }
1775 
skb_transport_offset(const struct sk_buff * skb)1776 static inline int skb_transport_offset(const struct sk_buff *skb)
1777 {
1778 	return skb_transport_header(skb) - skb->data;
1779 }
1780 
skb_network_header_len(const struct sk_buff * skb)1781 static inline u32 skb_network_header_len(const struct sk_buff *skb)
1782 {
1783 	return skb->transport_header - skb->network_header;
1784 }
1785 
skb_inner_network_header_len(const struct sk_buff * skb)1786 static inline u32 skb_inner_network_header_len(const struct sk_buff *skb)
1787 {
1788 	return skb->inner_transport_header - skb->inner_network_header;
1789 }
1790 
skb_network_offset(const struct sk_buff * skb)1791 static inline int skb_network_offset(const struct sk_buff *skb)
1792 {
1793 	return skb_network_header(skb) - skb->data;
1794 }
1795 
skb_inner_network_offset(const struct sk_buff * skb)1796 static inline int skb_inner_network_offset(const struct sk_buff *skb)
1797 {
1798 	return skb_inner_network_header(skb) - skb->data;
1799 }
1800 
pskb_network_may_pull(struct sk_buff * skb,unsigned int len)1801 static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
1802 {
1803 	return pskb_may_pull(skb, skb_network_offset(skb) + len);
1804 }
1805 
/*
 * CPUs often take a performance hit when accessing unaligned memory
 * locations. The actual performance hit varies, it can be small if the
 * hardware handles it or large if we have to take an exception and fix it
 * in software.
 *
 * Since an ethernet header is 14 bytes, network drivers often end up with
 * the IP header at an unaligned offset. The IP header can be aligned by
 * shifting the start of the packet by 2 bytes. Drivers should do this
 * with:
 *
 * skb_reserve(skb, NET_IP_ALIGN);
 *
 * The downside to this alignment of the IP header is that the DMA is now
 * unaligned. On some architectures the cost of an unaligned DMA is high
 * and this cost outweighs the gains made by aligning the IP header.
 *
 * Since this trade-off varies between architectures, we allow NET_IP_ALIGN
 * to be overridden.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN	2
#endif
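
/* Illustrative sketch (not part of the original header): an RX path
 * honouring NET_IP_ALIGN. dev and bufsz are assumptions for
 * illustration; netdev_alloc_skb_ip_align(), defined later in this
 * header, wraps exactly this pattern.
 *
 *	skb = netdev_alloc_skb(dev, bufsz + NET_IP_ALIGN);
 *	if (skb)
 *		skb_reserve(skb, NET_IP_ALIGN);
 */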

/*
 * The networking layer reserves some headroom in skb data (via
 * dev_alloc_skb). This is used to avoid having to reallocate skb data when
 * the header has to grow. In the default case, if the header has to grow
 * 32 bytes or less we avoid the reallocation.
 *
 * Unfortunately this headroom changes the DMA alignment of the resulting
 * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
 * on some architectures. An architecture can override this value,
 * perhaps setting it to a cacheline in size (since that will maintain
 * cacheline alignment of the DMA). It must be a power of 2.
 *
 * Various parts of the networking layer expect at least 32 bytes of
 * headroom, so you should not reduce this.
 *
 * Using max(32, L1_CACHE_BYTES) makes sense (especially with RPS)
 * to reduce the average number of cache lines per packet.
 * get_rps_cpus(), for example, only accesses one 64-byte-aligned block:
 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
 */
#ifndef NET_SKB_PAD
#define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
#endif

extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);

static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (unlikely(skb_is_nonlinear(skb))) {
		WARN_ON(1);
		return;
	}
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

extern void skb_trim(struct sk_buff *skb, unsigned int len);

static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->data_len)
		return ___pskb_trim(skb, len);
	__skb_trim(skb, len);
	return 0;
}

static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
{
	return (len < skb->len) ? __pskb_trim(skb, len) : 0;
}

/**
 *	pskb_trim_unique - remove end from a paged unique (not cloned) buffer
 *	@skb: buffer to alter
 *	@len: new length
 *
 *	This is identical to pskb_trim except that the caller knows that
 *	the skb is not cloned so we should never get an error due to out-
 *	of-memory.
 */
static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
{
	int err = pskb_trim(skb, len);
	BUG_ON(err);
}

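/* Illustrative sketch (not part of the original header): dropping
 * trailing bytes (e.g. link-layer padding) from a received frame.
 * datalen is an assumption for illustration only.
 *
 *	if (pskb_trim(skb, datalen))
 *		goto drop;
 */
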
/**
 *	skb_orphan - orphan a buffer
 *	@skb: buffer to orphan
 *
 *	If a buffer currently has an owner then we call the owner's
 *	destructor function and make the @skb unowned. The buffer continues
 *	to exist but is no longer charged to its former owner.
 */
static inline void skb_orphan(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);
	skb->destructor = NULL;
	skb->sk		= NULL;
}

/**
 *	skb_orphan_frags - orphan the frags contained in a buffer
 *	@skb: buffer to orphan frags from
 *	@gfp_mask: allocation mask for replacement pages
 *
 *	For each frag in the SKB which needs a destructor (i.e. has an
 *	owner) create a copy of that frag and release the original
 *	page by calling the destructor.
 */
static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
	if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)))
		return 0;
	return skb_copy_ubufs(skb, gfp_mask);
}

/**
 *	__skb_queue_purge - empty a list
 *	@list: list to empty
 *
 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
 *	the list and one reference dropped. This function does not take the
 *	list lock and the caller must hold the relevant locks to use it.
 */
extern void skb_queue_purge(struct sk_buff_head *list);
static inline void __skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = __skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}

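/* Illustrative sketch (not part of the original header): a private
 * queue being set up and later emptied. Use skb_queue_purge() (which
 * takes the queue lock) unless you already hold the relevant locks.
 *
 *	struct sk_buff_head q;
 *
 *	skb_queue_head_init(&q);
 *	...
 *	skb_queue_purge(&q);
 */
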
#define NETDEV_FRAG_PAGE_MAX_ORDER get_order(32768)
#define NETDEV_FRAG_PAGE_MAX_SIZE  (PAGE_SIZE << NETDEV_FRAG_PAGE_MAX_ORDER)
#define NETDEV_PAGECNT_MAX_BIAS	   NETDEV_FRAG_PAGE_MAX_SIZE

extern void *netdev_alloc_frag(unsigned int fragsz);

extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
					  unsigned int length,
					  gfp_t gfp_mask);

/**
 *	netdev_alloc_skb - allocate an skbuff for rx on a specific device
 *	@dev: network device to receive on
 *	@length: length to allocate
 *
 *	Allocate a new &sk_buff and assign it a usage count of one. The
 *	buffer has unspecified headroom built in. Users should allocate
 *	the headroom they think they need without accounting for the
 *	built in space. The built in space is used for optimisations.
 *
 *	%NULL is returned if there is no free memory. Although this function
 *	allocates memory it can be called from an interrupt.
 */
static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
					       unsigned int length)
{
	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}

/* legacy helper around __netdev_alloc_skb() */
static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
					      gfp_t gfp_mask)
{
	return __netdev_alloc_skb(NULL, length, gfp_mask);
}

/* legacy helper around netdev_alloc_skb() */
static inline struct sk_buff *dev_alloc_skb(unsigned int length)
{
	return netdev_alloc_skb(NULL, length);
}


static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length, gfp_t gfp)
{
	struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);

	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);
	return skb;
}

static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
		unsigned int length)
{
	return __netdev_alloc_skb_ip_align(dev, length, GFP_ATOMIC);
}

/**
 *	__skb_alloc_pages - allocate pages for ps-rx on a skb and preserve pfmemalloc data
 *	@gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
 *	@skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
 *	@order: size of the allocation
 *
 * 	Allocate a new page.
 *
 * 	%NULL is returned if there is no free memory.
 */
static inline struct page *__skb_alloc_pages(gfp_t gfp_mask,
					      struct sk_buff *skb,
					      unsigned int order)
{
	struct page *page;

	gfp_mask |= __GFP_COLD;

	if (!(gfp_mask & __GFP_NOMEMALLOC))
		gfp_mask |= __GFP_MEMALLOC;

	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
	if (skb && page && page->pfmemalloc)
		skb->pfmemalloc = true;

	return page;
}

/**
 *	__skb_alloc_page - allocate a page for ps-rx for a given skb and preserve pfmemalloc data
 *	@gfp_mask: alloc_pages_node mask. Set __GFP_NOMEMALLOC if not for network packet RX
 *	@skb: skb to set pfmemalloc on if __GFP_MEMALLOC is used
 *
 * 	Allocate a new page.
 *
 * 	%NULL is returned if there is no free memory.
 */
static inline struct page *__skb_alloc_page(gfp_t gfp_mask,
					     struct sk_buff *skb)
{
	return __skb_alloc_pages(gfp_mask, skb, 0);
}

/**
 *	skb_propagate_pfmemalloc - Propagate pfmemalloc if skb is allocated after RX page
 *	@page: The page that was allocated from __skb_alloc_page
 *	@skb: The skb that may need pfmemalloc set
 */
static inline void skb_propagate_pfmemalloc(struct page *page,
					     struct sk_buff *skb)
{
	if (page && page->pfmemalloc)
		skb->pfmemalloc = true;
}

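/* Illustrative sketch (not part of the original header): an RX driver
 * that allocates the receive page first and the skb afterwards must
 * propagate the pfmemalloc state from page to skb. dev and hdr_len
 * are assumptions for illustration only.
 *
 *	page = __skb_alloc_page(GFP_ATOMIC, NULL);
 *	...
 *	skb = netdev_alloc_skb_ip_align(dev, hdr_len);
 *	if (skb)
 *		skb_propagate_pfmemalloc(page, skb);
 */
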
/**
 * skb_frag_page - retrieve the page referred to by a paged fragment
 * @frag: the paged fragment
 *
 * Returns the &struct page associated with @frag.
 */
static inline struct page *skb_frag_page(const skb_frag_t *frag)
{
	return frag->page.p;
}

/**
 * __skb_frag_ref - take an additional reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Takes an additional reference on the paged fragment @frag.
 */
static inline void __skb_frag_ref(skb_frag_t *frag)
{
	get_page(skb_frag_page(frag));
}

/**
 * skb_frag_ref - take an additional reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset.
 *
 * Takes an additional reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
	__skb_frag_ref(&skb_shinfo(skb)->frags[f]);
}

/**
 * __skb_frag_unref - release a reference on a paged fragment.
 * @frag: the paged fragment
 *
 * Releases a reference on the paged fragment @frag.
 */
static inline void __skb_frag_unref(skb_frag_t *frag)
{
	put_page(skb_frag_page(frag));
}

/**
 * skb_frag_unref - release a reference on a paged fragment of an skb.
 * @skb: the buffer
 * @f: the fragment offset
 *
 * Releases a reference on the @f'th paged fragment of @skb.
 */
static inline void skb_frag_unref(struct sk_buff *skb, int f)
{
	__skb_frag_unref(&skb_shinfo(skb)->frags[f]);
}

/**
 * skb_frag_address - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. The page must already
 * be mapped.
 */
static inline void *skb_frag_address(const skb_frag_t *frag)
{
	return page_address(skb_frag_page(frag)) + frag->page_offset;
}

/**
 * skb_frag_address_safe - gets the address of the data contained in a paged fragment
 * @frag: the paged fragment buffer
 *
 * Returns the address of the data within @frag. Checks that the page
 * is mapped and returns %NULL otherwise.
 */
static inline void *skb_frag_address_safe(const skb_frag_t *frag)
{
	void *ptr = page_address(skb_frag_page(frag));
	if (unlikely(!ptr))
		return NULL;

	return ptr + frag->page_offset;
}

/**
 * __skb_frag_set_page - sets the page contained in a paged fragment
 * @frag: the paged fragment
 * @page: the page to set
 *
 * Sets the fragment @frag to contain @page.
 */
static inline void __skb_frag_set_page(skb_frag_t *frag, struct page *page)
{
	frag->page.p = page;
}

/**
 * skb_frag_set_page - sets the page contained in a paged fragment of an skb
 * @skb: the buffer
 * @f: the fragment offset
 * @page: the page to set
 *
 * Sets the @f'th fragment of @skb to contain @page.
 */
static inline void skb_frag_set_page(struct sk_buff *skb, int f,
				     struct page *page)
{
	__skb_frag_set_page(&skb_shinfo(skb)->frags[f], page);
}

/**
 * skb_frag_dma_map - maps a paged fragment via the DMA API
 * @dev: the device to map the fragment to
 * @frag: the paged fragment to map
 * @offset: the offset within the fragment (starting at the
 *          fragment's own offset)
 * @size: the number of bytes to map
 * @dir: the direction of the mapping (%PCI_DMA_*)
 *
 * Maps the page associated with @frag to @dev.
 */
static inline dma_addr_t skb_frag_dma_map(struct device *dev,
					  const skb_frag_t *frag,
					  size_t offset, size_t size,
					  enum dma_data_direction dir)
{
	return dma_map_page(dev, skb_frag_page(frag),
			    frag->page_offset + offset, size, dir);
}

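/* Illustrative sketch (not part of the original header): a TX path
 * mapping every paged fragment of an skb for DMA. Descriptor setup
 * and the unmap label are assumptions for illustration only.
 *
 *	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 *		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 *		dma_addr_t addr;
 *
 *		addr = skb_frag_dma_map(dev, frag, 0,
 *					skb_frag_size(frag), DMA_TO_DEVICE);
 *		if (dma_mapping_error(dev, addr))
 *			goto unmap;
 *	}
 */
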
static inline struct sk_buff *pskb_copy(struct sk_buff *skb,
					gfp_t gfp_mask)
{
	return __pskb_copy(skb, skb_headroom(skb), gfp_mask);
}

/**
 *	skb_clone_writable - is the header of a clone writable
 *	@skb: buffer to check
 *	@len: length up to which to write
 *
 *	Returns true if modifying the header part of the cloned buffer
 *	does not require the data to be copied.
 */
static inline int skb_clone_writable(const struct sk_buff *skb, unsigned int len)
{
	return !skb_header_cloned(skb) &&
	       skb_headroom(skb) + len <= skb->hdr_len;
}

static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
			    int cloned)
{
	int delta = 0;

	if (headroom > skb_headroom(skb))
		delta = headroom - skb_headroom(skb);

	if (delta || cloned)
		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
					GFP_ATOMIC);
	return 0;
}

/**
 *	skb_cow - copy header of skb when it is required
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	If the skb passed lacks sufficient headroom or its data part
 *	is shared, data is reallocated. If reallocation fails, an error
 *	is returned and original skb is not changed.
 *
 *	The result is skb with writable area skb->head...skb->tail
 *	and at least @headroom of space at head.
 */
static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_cloned(skb));
}

/**
 *	skb_cow_head - skb_cow but only making the head writable
 *	@skb: buffer to cow
 *	@headroom: needed headroom
 *
 *	This function is identical to skb_cow except that we replace the
 *	skb_cloned check by skb_header_cloned.  It should be used when
 *	you only need to push on some header and do not need to modify
 *	the data.
 */
static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
{
	return __skb_cow(skb, headroom, skb_header_cloned(skb));
}

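/* Illustrative sketch (not part of the original header): making room
 * before pushing an extra header, e.g. a VLAN tag. VLAN_HLEN and
 * struct vlan_ethhdr come from <linux/if_vlan.h>; the drop label is
 * an assumption for illustration.
 *
 *	if (skb_cow_head(skb, VLAN_HLEN))
 *		goto drop;
 *	veth = (struct vlan_ethhdr *)skb_push(skb, VLAN_HLEN);
 */
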
/**
 *	skb_padto	- pad an skbuff up to a minimal size
 *	@skb: buffer to pad
 *	@len: minimal length
 *
 *	Pads up a buffer to ensure the trailing bytes exist and are
 *	blanked. If the buffer already contains sufficient data it
 *	is untouched. Otherwise it is extended. Returns zero on
 *	success. The skb is freed on error.
 */

static inline int skb_padto(struct sk_buff *skb, unsigned int len)
{
	unsigned int size = skb->len;
	if (likely(size >= len))
		return 0;
	return skb_pad(skb, len - size);
}

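/* Illustrative sketch (not part of the original header): hardware that
 * cannot pad short frames itself typically pads to the Ethernet
 * minimum in its xmit routine. ETH_ZLEN comes from <linux/if_ether.h>;
 * note the skb is already freed when skb_padto() fails.
 *
 *	if (skb_padto(skb, ETH_ZLEN))
 *		return NETDEV_TX_OK;
 */
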
static inline int skb_add_data(struct sk_buff *skb,
			       char __user *from, int copy)
{
	const int off = skb->len;

	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
							    copy, 0, &err);
		if (!err) {
			skb->csum = csum_block_add(skb->csum, csum, off);
			return 0;
		}
	} else if (!copy_from_user(skb_put(skb, copy), from, copy))
		return 0;

	__skb_trim(skb, off);
	return -EFAULT;
}

static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return false;
}

static inline int __skb_linearize(struct sk_buff *skb)
{
	return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
}

/**
 *	skb_linearize - convert paged skb to linear one
 *	@skb: buffer to linearize
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
}

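/* Illustrative sketch (not part of the original header): code that
 * wants to scan the whole packet through skb->data must make the
 * buffer linear first, since paged data is not directly addressable.
 * parse() is hypothetical.
 *
 *	if (skb_linearize(skb))
 *		goto drop;
 *	parse(skb->data, skb->len);
 */
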
/**
 * skb_has_shared_frag - can any frag be overwritten
 * @skb: buffer to test
 *
 * Return true if the skb has at least one frag that might be modified
 * by an external entity (as in vmsplice()/sendfile())
 */
static inline bool skb_has_shared_frag(const struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) &&
	       skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
}

/**
 *	skb_linearize_cow - make sure skb is linear and writable
 *	@skb: buffer to process
 *
 *	If there is no free memory -ENOMEM is returned, otherwise zero
 *	is returned and the old skb data released.
 */
static inline int skb_linearize_cow(struct sk_buff *skb)
{
	return skb_is_nonlinear(skb) || skb_cloned(skb) ?
	       __skb_linearize(skb) : 0;
}

/**
 *	skb_postpull_rcsum - update checksum for received skb after pull
 *	@skb: buffer to update
 *	@start: start of data before pull
 *	@len: length of data pulled
 *
 *	After doing a pull on a received packet, you need to call this to
 *	update the CHECKSUM_COMPLETE checksum, or set ip_summed to
 *	CHECKSUM_NONE so that it can be recomputed from scratch.
 */

static inline void skb_postpull_rcsum(struct sk_buff *skb,
				      const void *start, unsigned int len)
{
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
}

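/* Illustrative sketch (not part of the original header): stripping an
 * encapsulation header while keeping a CHECKSUM_COMPLETE value valid.
 * hdr_len is an assumption for illustration; skb_pull_rcsum() below
 * combines the two steps.
 *
 *	start = skb->data;
 *	__skb_pull(skb, hdr_len);
 *	skb_postpull_rcsum(skb, start, hdr_len);
 */
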
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);

/**
 *	pskb_trim_rcsum - trim received skb and update checksum
 *	@skb: buffer to trim
 *	@len: new length
 *
 *	This is exactly the same as pskb_trim except that it ensures the
 *	checksum of received packets is still valid after the operation.
 */

static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
{
	if (likely(len >= skb->len))
		return 0;
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
	return __pskb_trim(skb, len);
}

#define skb_queue_walk(queue, skb) \
		for (skb = (queue)->next;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->next)

#define skb_queue_walk_safe(queue, skb, tmp)					\
		for (skb = (queue)->next, tmp = skb->next;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_walk_from(queue, skb)						\
		for (; skb != (struct sk_buff *)(queue);			\
		     skb = skb->next)

#define skb_queue_walk_from_safe(queue, skb, tmp)				\
		for (tmp = skb->next;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->next)

#define skb_queue_reverse_walk(queue, skb) \
		for (skb = (queue)->prev;					\
		     skb != (struct sk_buff *)(queue);				\
		     skb = skb->prev)

#define skb_queue_reverse_walk_safe(queue, skb, tmp)				\
		for (skb = (queue)->prev, tmp = skb->prev;			\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

#define skb_queue_reverse_walk_from_safe(queue, skb, tmp)			\
		for (tmp = skb->prev;						\
		     skb != (struct sk_buff *)(queue);				\
		     skb = tmp, tmp = skb->prev)

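/* Illustrative sketch (not part of the original header): the _safe
 * variants must be used when the walk unlinks buffers, since skb->next
 * is no longer valid once the skb leaves the list. match() is
 * hypothetical; the caller is assumed to hold the queue lock.
 *
 *	skb_queue_walk_safe(&q, skb, tmp) {
 *		if (match(skb)) {
 *			__skb_unlink(skb, &q);
 *			kfree_skb(skb);
 *		}
 *	}
 */
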
static inline bool skb_has_frag_list(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->frag_list != NULL;
}

static inline void skb_frag_list_init(struct sk_buff *skb)
{
	skb_shinfo(skb)->frag_list = NULL;
}

static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
{
	frag->next = skb_shinfo(skb)->frag_list;
	skb_shinfo(skb)->frag_list = frag;
}

#define skb_walk_frags(skb, iter)	\
	for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)

extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
					   int *peeked, int *off, int *err);
extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
					 int noblock, int *err);
extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
				     struct poll_table_struct *wait);
extern int	       skb_copy_datagram_iovec(const struct sk_buff *from,
					       int offset, struct iovec *to,
					       int size);
extern int	       skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
							int hlen,
							struct iovec *iov);
extern int	       skb_copy_datagram_from_iovec(struct sk_buff *skb,
						    int offset,
						    const struct iovec *from,
						    int from_offset,
						    int len);
extern int	       skb_copy_datagram_const_iovec(const struct sk_buff *from,
						     int offset,
						     const struct iovec *to,
						     int to_offset,
						     int size);
extern void	       skb_free_datagram(struct sock *sk, struct sk_buff *skb);
extern void	       skb_free_datagram_locked(struct sock *sk,
						struct sk_buff *skb);
extern int	       skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
					 unsigned int flags);
extern __wsum	       skb_checksum(const struct sk_buff *skb, int offset,
				    int len, __wsum csum);
extern int	       skb_copy_bits(const struct sk_buff *skb, int offset,
				     void *to, int len);
extern int	       skb_store_bits(struct sk_buff *skb, int offset,
				      const void *from, int len);
extern __wsum	       skb_copy_and_csum_bits(const struct sk_buff *skb,
					      int offset, u8 *to, int len,
					      __wsum csum);
extern int             skb_splice_bits(struct sk_buff *skb,
						unsigned int offset,
						struct pipe_inode_info *pipe,
						unsigned int len,
						unsigned int flags);
extern void	       skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
extern void	       skb_split(struct sk_buff *skb,
				 struct sk_buff *skb1, const u32 len);
extern int	       skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
				 int shiftlen);

extern struct sk_buff *skb_segment(struct sk_buff *skb,
				   netdev_features_t features);

static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
				       int len, void *buffer)
{
	int hlen = skb_headlen(skb);

	if (hlen - offset >= len)
		return skb->data + offset;

	if (skb_copy_bits(skb, offset, buffer, len) < 0)
		return NULL;

	return buffer;
}

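/* Illustrative sketch (not part of the original header): the canonical
 * skb_header_pointer() pattern uses an on-stack backing buffer, so the
 * header is usable whether it was linear or had to be copied. The
 * drop label is an assumption for illustration.
 *
 *	struct tcphdr _tcph;
 *	const struct tcphdr *th;
 *
 *	th = skb_header_pointer(skb, skb_transport_offset(skb),
 *				sizeof(_tcph), &_tcph);
 *	if (!th)
 *		goto drop;
 */
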
static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
					     void *to,
					     const unsigned int len)
{
	memcpy(to, skb->data, len);
}

static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
						    const int offset, void *to,
						    const unsigned int len)
{
	memcpy(to, skb->data + offset, len);
}

static inline void skb_copy_to_linear_data(struct sk_buff *skb,
					   const void *from,
					   const unsigned int len)
{
	memcpy(skb->data, from, len);
}

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	memcpy(skb->data + offset, from, len);
}

extern void skb_init(void);

static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
{
	return skb->tstamp;
}

/**
 *	skb_get_timestamp - get timestamp from a skb
 *	@skb: skb to get stamp from
 *	@stamp: pointer to struct timeval to store stamp in
 *
 *	Timestamps are stored in the skb as offsets to a base timestamp.
 *	This function converts the offset back to a struct timeval and stores
 *	it in stamp.
 */
static inline void skb_get_timestamp(const struct sk_buff *skb,
				     struct timeval *stamp)
{
	*stamp = ktime_to_timeval(skb->tstamp);
}

static inline void skb_get_timestampns(const struct sk_buff *skb,
				       struct timespec *stamp)
{
	*stamp = ktime_to_timespec(skb->tstamp);
}

static inline void __net_timestamp(struct sk_buff *skb)
{
	skb->tstamp = ktime_get_real();
}

static inline ktime_t net_timedelta(ktime_t t)
{
	return ktime_sub(ktime_get_real(), t);
}

static inline ktime_t net_invalid_timestamp(void)
{
	return ktime_set(0, 0);
}

extern void skb_timestamping_init(void);

#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING

extern void skb_clone_tx_timestamp(struct sk_buff *skb);
extern bool skb_defer_rx_timestamp(struct sk_buff *skb);

#else /* CONFIG_NETWORK_PHY_TIMESTAMPING */

static inline void skb_clone_tx_timestamp(struct sk_buff *skb)
{
}

static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
{
	return false;
}

#endif /* !CONFIG_NETWORK_PHY_TIMESTAMPING */

/**
 * skb_complete_tx_timestamp() - deliver cloned skb with tx timestamps
 *
 * PHY drivers may accept clones of transmitted packets for
 * timestamping via their phy_driver.txtstamp method. These drivers
 * must call this function to return the skb back to the stack, with
 * or without a timestamp.
 *
 * @skb: clone of the original outgoing packet
 * @hwtstamps: hardware time stamps, may be NULL if not available
 *
 */
void skb_complete_tx_timestamp(struct sk_buff *skb,
			       struct skb_shared_hwtstamps *hwtstamps);

/**
 * skb_tstamp_tx - queue clone of skb with send time stamps
 * @orig_skb:	the original outgoing packet
 * @hwtstamps:	hardware time stamps, may be NULL if not available
 *
 * If the skb has a socket associated, then this function clones the
 * skb (thus sharing the actual data and optional structures), stores
 * the optional hardware time stamping information (if non NULL) or
 * generates a software time stamp (otherwise), then queues the clone
 * to the error queue of the socket.  Errors are silently ignored.
 */
extern void skb_tstamp_tx(struct sk_buff *orig_skb,
			struct skb_shared_hwtstamps *hwtstamps);

static inline void sw_tx_timestamp(struct sk_buff *skb)
{
	if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP &&
	    !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
		skb_tstamp_tx(skb, NULL);
}

/**
 * skb_tx_timestamp() - Driver hook for transmit timestamping
 *
 * Ethernet MAC Drivers should call this function in their hard_xmit()
 * function immediately before giving the sk_buff to the MAC hardware.
 *
 * @skb: A socket buffer.
 */
static inline void skb_tx_timestamp(struct sk_buff *skb)
{
	skb_clone_tx_timestamp(skb);
	sw_tx_timestamp(skb);
}

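/* Illustrative sketch (not part of the original header): the hook sits
 * at the end of a driver's xmit routine, right before the descriptor
 * is handed to the hardware. foo_xmit and the elided steps are
 * assumptions for illustration only.
 *
 *	static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
 *	{
 *		...map buffers, fill descriptor...
 *		skb_tx_timestamp(skb);
 *		...ring doorbell...
 *		return NETDEV_TX_OK;
 *	}
 */
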
/**
 * skb_complete_wifi_ack - deliver skb with wifi status
 *
 * @skb: the original outgoing packet
 * @acked: ack status
 *
 */
void skb_complete_wifi_ack(struct sk_buff *skb, bool acked);

extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
extern __sum16 __skb_checksum_complete(struct sk_buff *skb);

static inline int skb_csum_unnecessary(const struct sk_buff *skb)
{
	return skb->ip_summed & CHECKSUM_UNNECESSARY;
}

/**
 *	skb_checksum_complete - Calculate checksum of an entire packet
 *	@skb: packet to process
 *
 *	This function calculates the checksum over the entire packet plus
 *	the value of skb->csum.  The latter can be used to supply the
 *	checksum of a pseudo header as used by TCP/UDP.  It returns the
 *	checksum.
 *
 *	For protocols that contain complete checksums such as ICMP/TCP/UDP,
 *	this function can be used to verify the checksum on received
 *	packets.  In that case the function should return zero if the
 *	checksum is correct.  In particular, this function will return zero
 *	if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
 *	hardware has already verified the correctness of the checksum.
 */
static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
{
	return skb_csum_unnecessary(skb) ?
	       0 : __skb_checksum_complete(skb);
}

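/* Illustrative sketch (not part of the original header): a receive
 * path for a protocol whose checksum covers the whole packet (e.g.
 * ICMP, which has no pseudo header) can validate it in one call. The
 * csum_error label is an assumption for illustration.
 *
 *	if (skb_checksum_complete(skb))
 *		goto csum_error;
 */
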
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
	if (nfct && atomic_dec_and_test(&nfct->use))
		nf_conntrack_destroy(nfct);
}
static inline void nf_conntrack_get(struct nf_conntrack *nfct)
{
	if (nfct)
		atomic_inc(&nfct->use);
}
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
{
	if (skb)
		atomic_inc(&skb->users);
}
static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
{
	if (skb)
		kfree_skb(skb);
}
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
		kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
	if (nf_bridge)
		atomic_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(skb->nfct);
	skb->nfct = NULL;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(skb->nfct_reasm);
	skb->nfct_reasm = NULL;
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(skb->nf_bridge);
	skb->nf_bridge = NULL;
#endif
}

static inline void nf_reset_trace(struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	skb->nf_trace = 0;
#endif
}

/* Note: This doesn't put any conntrack and bridge info in dst. */
static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	dst->nfct = src->nfct;
	nf_conntrack_get(src->nfct);
	dst->nfctinfo = src->nfctinfo;
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	dst->nfct_reasm = src->nfct_reasm;
	nf_conntrack_get_reasm(src->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	dst->nf_bridge  = src->nf_bridge;
	nf_bridge_get(src->nf_bridge);
#endif
}

static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	nf_conntrack_put(dst->nfct);
#endif
#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
	nf_conntrack_put_reasm(dst->nfct_reasm);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(dst->nf_bridge);
#endif
	__nf_copy(dst, src);
}

#ifdef CONFIG_NETWORK_SECMARK
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{
	to->secmark = from->secmark;
}

static inline void skb_init_secmark(struct sk_buff *skb)
{
	skb->secmark = 0;
}
#else
static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
{ }

static inline void skb_init_secmark(struct sk_buff *skb)
{ }
#endif

static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
{
	skb->queue_mapping = queue_mapping;
}

static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
{
	return skb->queue_mapping;
}

static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
{
	to->queue_mapping = from->queue_mapping;
}

static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
{
	skb->queue_mapping = rx_queue + 1;
}

static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
{
	return skb->queue_mapping - 1;
}

static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
{
	return skb->queue_mapping != 0;
}

extern u16 __skb_tx_hash(const struct net_device *dev,
			 const struct sk_buff *skb,
			 unsigned int num_tx_queues);

#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return skb->sp;
}
#else
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
	return NULL;
}
#endif

/* Keeps track of mac header offset relative to skb->head.
 * It is useful for TSO of tunneling protocols, e.g. GRE.
 * For a non-tunnel skb it points to skb_mac_header() and for a
 * tunnel skb it points to the outer mac header. */
struct skb_gso_cb {
	int mac_offset;
};
#define SKB_GSO_CB(skb) ((struct skb_gso_cb *)(skb)->cb)

static inline int skb_tnl_header_len(const struct sk_buff *inner_skb)
{
	return (skb_mac_header(inner_skb) - inner_skb->head) -
		SKB_GSO_CB(inner_skb)->mac_offset;
}

static inline int gso_pskb_expand_head(struct sk_buff *skb, int extra)
{
	int new_headroom, headroom;
	int ret;

	headroom = skb_headroom(skb);
	ret = pskb_expand_head(skb, extra, 0, GFP_ATOMIC);
	if (ret)
		return ret;

	new_headroom = skb_headroom(skb);
	SKB_GSO_CB(skb)->mac_offset += (new_headroom - headroom);
	return 0;
}

static inline bool skb_is_gso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

static inline bool skb_is_gso_v6(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
}

extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set. */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);
		return true;
	}
	return false;
}

static inline void skb_forward_csum(struct sk_buff *skb)
{
	/* Unfortunately we don't support this one.  Any brave souls? */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		skb->ip_summed = CHECKSUM_NONE;
}

/**
 * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE
 * @skb: skb to check
 *
 * fresh skbs have their ip_summed set to CHECKSUM_NONE.
 * Instead of forcing ip_summed to CHECKSUM_NONE, we can
 * use this helper, to document places where we make this assertion.
 */
static inline void skb_checksum_none_assert(const struct sk_buff *skb)
{
#ifdef DEBUG
	BUG_ON(skb->ip_summed != CHECKSUM_NONE);
#endif
}

bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);

u32 __skb_get_poff(const struct sk_buff *skb);

/**
 * skb_head_is_locked - Determine if the skb->head is locked down
 * @skb: skb to check
 *
 * The head on skbs built around a head frag can be removed if they are
 * not cloned.  This function returns true if the skb head is locked down
 * due to either being allocated via kmalloc, or by being a clone with
 * multiple references to the head.
 */
static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
	return !skb->head_frag || skb_cloned(skb);
}
#endif	/* __KERNEL__ */
#endif	/* _LINUX_SKBUFF_H */