
Lines matching refs: skb

95 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,  in skb_panic()  argument
99 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
100 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
101 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
105 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_over_panic() argument
107 skb_panic(skb, sz, addr, __func__); in skb_over_panic()
110 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_under_panic() argument
112 skb_panic(skb, sz, addr, __func__); in skb_under_panic()
160 struct sk_buff *skb; in __alloc_skb_head() local
163 skb = kmem_cache_alloc_node(skbuff_head_cache, in __alloc_skb_head()
165 if (!skb) in __alloc_skb_head()
173 memset(skb, 0, offsetof(struct sk_buff, tail)); in __alloc_skb_head()
174 skb->head = NULL; in __alloc_skb_head()
175 skb->truesize = sizeof(struct sk_buff); in __alloc_skb_head()
176 atomic_set(&skb->users, 1); in __alloc_skb_head()
178 skb->mac_header = (typeof(skb->mac_header))~0U; in __alloc_skb_head()
180 return skb; in __alloc_skb_head()
205 struct sk_buff *skb; in __alloc_skb() local
216 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); in __alloc_skb()
217 if (!skb) in __alloc_skb()
219 prefetchw(skb); in __alloc_skb()
243 memset(skb, 0, offsetof(struct sk_buff, tail)); in __alloc_skb()
245 skb->truesize = SKB_TRUESIZE(size); in __alloc_skb()
246 skb->pfmemalloc = pfmemalloc; in __alloc_skb()
247 atomic_set(&skb->users, 1); in __alloc_skb()
248 skb->head = data; in __alloc_skb()
249 skb->data = data; in __alloc_skb()
250 skb_reset_tail_pointer(skb); in __alloc_skb()
251 skb->end = skb->tail + size; in __alloc_skb()
252 skb->mac_header = (typeof(skb->mac_header))~0U; in __alloc_skb()
253 skb->transport_header = (typeof(skb->transport_header))~0U; in __alloc_skb()
256 shinfo = skb_shinfo(skb); in __alloc_skb()
264 fclones = container_of(skb, struct sk_buff_fclones, skb1); in __alloc_skb()
267 skb->fclone = SKB_FCLONE_ORIG; in __alloc_skb()
274 return skb; in __alloc_skb()
276 kmem_cache_free(cache, skb); in __alloc_skb()
277 skb = NULL; in __alloc_skb()
304 struct sk_buff *skb; in __build_skb() local
307 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); in __build_skb()
308 if (!skb) in __build_skb()
313 memset(skb, 0, offsetof(struct sk_buff, tail)); in __build_skb()
314 skb->truesize = SKB_TRUESIZE(size); in __build_skb()
315 atomic_set(&skb->users, 1); in __build_skb()
316 skb->head = data; in __build_skb()
317 skb->data = data; in __build_skb()
318 skb_reset_tail_pointer(skb); in __build_skb()
319 skb->end = skb->tail + size; in __build_skb()
320 skb->mac_header = (typeof(skb->mac_header))~0U; in __build_skb()
321 skb->transport_header = (typeof(skb->transport_header))~0U; in __build_skb()
324 shinfo = skb_shinfo(skb); in __build_skb()
329 return skb; in __build_skb()
339 struct sk_buff *skb = __build_skb(data, frag_size); in build_skb() local
341 if (skb && frag_size) { in build_skb()
342 skb->head_frag = 1; in build_skb()
344 skb->pfmemalloc = 1; in build_skb()
346 return skb; in build_skb()
445 struct sk_buff *skb = NULL; in __netdev_alloc_skb() local
458 skb = build_skb(data, fragsz); in __netdev_alloc_skb()
459 if (unlikely(!skb)) in __netdev_alloc_skb()
463 skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, in __netdev_alloc_skb()
466 if (likely(skb)) { in __netdev_alloc_skb()
467 skb_reserve(skb, NET_SKB_PAD); in __netdev_alloc_skb()
468 skb->dev = dev; in __netdev_alloc_skb()
470 return skb; in __netdev_alloc_skb()
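
A hedged sketch of the usual driver receive pattern built on netdev_alloc_skb(), the wrapper around the __netdev_alloc_skb() lines shown above: allocate, copy the frame, classify it, hand it to the stack. The names hw_buf and frame_len are hypothetical driver-side variables.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Illustrative RX handler, assuming the frame is already in hw_buf. */
static int example_rx(struct net_device *dev, const void *hw_buf,
		      unsigned int frame_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, frame_len);

	if (unlikely(!skb))
		return -ENOMEM;
	memcpy(skb_put(skb, frame_len), hw_buf, frame_len); /* fill linear area */
	skb->protocol = eth_type_trans(skb, dev);	/* also sets skb->dev */
	return netif_rx(skb);
}
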
474 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, in skb_add_rx_frag() argument
477 skb_fill_page_desc(skb, i, page, off, size); in skb_add_rx_frag()
478 skb->len += size; in skb_add_rx_frag()
479 skb->data_len += size; in skb_add_rx_frag()
480 skb->truesize += truesize; in skb_add_rx_frag()
484 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, in skb_coalesce_rx_frag() argument
487 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_coalesce_rx_frag()
490 skb->len += size; in skb_coalesce_rx_frag()
491 skb->data_len += size; in skb_coalesce_rx_frag()
492 skb->truesize += truesize; in skb_coalesce_rx_frag()
502 static inline void skb_drop_fraglist(struct sk_buff *skb) in skb_drop_fraglist() argument
504 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
507 static void skb_clone_fraglist(struct sk_buff *skb) in skb_clone_fraglist() argument
511 skb_walk_frags(skb, list) in skb_clone_fraglist()
515 static void skb_free_head(struct sk_buff *skb) in skb_free_head() argument
517 if (skb->head_frag) in skb_free_head()
518 put_page(virt_to_head_page(skb->head)); in skb_free_head()
520 kfree(skb->head); in skb_free_head()
523 static void skb_release_data(struct sk_buff *skb) in skb_release_data() argument
525 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_release_data()
528 if (skb->cloned && in skb_release_data()
529 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, in skb_release_data()
551 skb_free_head(skb); in skb_release_data()
557 static void kfree_skbmem(struct sk_buff *skb) in kfree_skbmem() argument
561 switch (skb->fclone) { in kfree_skbmem()
563 kmem_cache_free(skbuff_head_cache, skb); in kfree_skbmem()
567 fclones = container_of(skb, struct sk_buff_fclones, skb1); in kfree_skbmem()
573 fclones = container_of(skb, struct sk_buff_fclones, skb2); in kfree_skbmem()
578 skb->fclone = SKB_FCLONE_FREE; in kfree_skbmem()
586 static void skb_release_head_state(struct sk_buff *skb) in skb_release_head_state() argument
588 skb_dst_drop(skb); in skb_release_head_state()
590 secpath_put(skb->sp); in skb_release_head_state()
592 if (skb->destructor) { in skb_release_head_state()
594 skb->destructor(skb); in skb_release_head_state()
597 nf_conntrack_put(skb->nfct); in skb_release_head_state()
600 nf_bridge_put(skb->nf_bridge); in skb_release_head_state()
604 skb->tc_index = 0; in skb_release_head_state()
606 skb->tc_verd = 0; in skb_release_head_state()
612 static void skb_release_all(struct sk_buff *skb) in skb_release_all() argument
614 skb_release_head_state(skb); in skb_release_all()
615 if (likely(skb->head)) in skb_release_all()
616 skb_release_data(skb); in skb_release_all()
628 void __kfree_skb(struct sk_buff *skb) in __kfree_skb() argument
630 skb_release_all(skb); in __kfree_skb()
631 kfree_skbmem(skb); in __kfree_skb()
642 void kfree_skb(struct sk_buff *skb) in kfree_skb() argument
644 if (unlikely(!skb)) in kfree_skb()
646 if (likely(atomic_read(&skb->users) == 1)) in kfree_skb()
648 else if (likely(!atomic_dec_and_test(&skb->users))) in kfree_skb()
650 trace_kfree_skb(skb, __builtin_return_address(0)); in kfree_skb()
651 __kfree_skb(skb); in kfree_skb()
673 void skb_tx_error(struct sk_buff *skb) in skb_tx_error() argument
675 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { in skb_tx_error()
678 uarg = skb_shinfo(skb)->destructor_arg; in skb_tx_error()
681 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; in skb_tx_error()
694 void consume_skb(struct sk_buff *skb) in consume_skb() argument
696 if (unlikely(!skb)) in consume_skb()
698 if (likely(atomic_read(&skb->users) == 1)) in consume_skb()
700 else if (likely(!atomic_dec_and_test(&skb->users))) in consume_skb()
702 trace_consume_skb(skb); in consume_skb()
703 __kfree_skb(skb); in consume_skb()
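
A hedged aside on the two free paths matched above: kfree_skb() is meant for packets being dropped (it fires the kfree_skb tracepoint), while consume_skb() is for packets that completed normally, e.g. in a TX-completion handler. A minimal sketch:

#include <linux/skbuff.h>

/* Illustrative completion helper; transmitted_ok is a hypothetical flag. */
static void example_tx_complete(struct sk_buff *skb, bool transmitted_ok)
{
	if (transmitted_ok)
		consume_skb(skb);	/* normal completion, not counted as a drop */
	else
		kfree_skb(skb);		/* shows up in drop tracing */
}
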
768 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) in __skb_clone() argument
770 #define C(x) n->x = skb->x in __skb_clone()
774 __copy_skb_header(n, skb); in __skb_clone()
779 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; in __skb_clone()
791 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
792 skb->cloned = 1; in __skb_clone()
830 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) in skb_copy_ubufs() argument
833 int num_frags = skb_shinfo(skb)->nr_frags; in skb_copy_ubufs()
835 struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg; in skb_copy_ubufs()
839 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_ubufs()
860 skb_frag_unref(skb, i); in skb_copy_ubufs()
866 __skb_fill_page_desc(skb, i, head, 0, in skb_copy_ubufs()
867 skb_shinfo(skb)->frags[i].size); in skb_copy_ubufs()
871 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; in skb_copy_ubufs()
890 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) in skb_clone() argument
892 struct sk_buff_fclones *fclones = container_of(skb, in skb_clone()
897 if (skb_orphan_frags(skb, gfp_mask)) in skb_clone()
900 if (skb->fclone == SKB_FCLONE_ORIG && in skb_clone()
905 if (skb_pfmemalloc(skb)) in skb_clone()
916 return __skb_clone(n, skb); in skb_clone()
920 static void skb_headers_offset_update(struct sk_buff *skb, int off) in skb_headers_offset_update() argument
923 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_headers_offset_update()
924 skb->csum_start += off; in skb_headers_offset_update()
926 skb->transport_header += off; in skb_headers_offset_update()
927 skb->network_header += off; in skb_headers_offset_update()
928 if (skb_mac_header_was_set(skb)) in skb_headers_offset_update()
929 skb->mac_header += off; in skb_headers_offset_update()
930 skb->inner_transport_header += off; in skb_headers_offset_update()
931 skb->inner_network_header += off; in skb_headers_offset_update()
932 skb->inner_mac_header += off; in skb_headers_offset_update()
944 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) in skb_alloc_rx_flag() argument
946 if (skb_pfmemalloc(skb)) in skb_alloc_rx_flag()
968 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) in skb_copy() argument
970 int headerlen = skb_headroom(skb); in skb_copy()
971 unsigned int size = skb_end_offset(skb) + skb->data_len; in skb_copy()
973 skb_alloc_rx_flag(skb), NUMA_NO_NODE); in skb_copy()
981 skb_put(n, skb->len); in skb_copy()
983 if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)) in skb_copy()
986 copy_skb_header(n, skb); in skb_copy()
1008 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, in __pskb_copy_fclone() argument
1011 unsigned int size = skb_headlen(skb) + headroom; in __pskb_copy_fclone()
1012 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); in __pskb_copy_fclone()
1021 skb_put(n, skb_headlen(skb)); in __pskb_copy_fclone()
1023 skb_copy_from_linear_data(skb, n->data, n->len); in __pskb_copy_fclone()
1025 n->truesize += skb->data_len; in __pskb_copy_fclone()
1026 n->data_len = skb->data_len; in __pskb_copy_fclone()
1027 n->len = skb->len; in __pskb_copy_fclone()
1029 if (skb_shinfo(skb)->nr_frags) { in __pskb_copy_fclone()
1032 if (skb_orphan_frags(skb, gfp_mask)) { in __pskb_copy_fclone()
1037 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_copy_fclone()
1038 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; in __pskb_copy_fclone()
1039 skb_frag_ref(skb, i); in __pskb_copy_fclone()
1044 if (skb_has_frag_list(skb)) { in __pskb_copy_fclone()
1045 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; in __pskb_copy_fclone()
1049 copy_skb_header(n, skb); in __pskb_copy_fclone()
1071 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, in pskb_expand_head() argument
1076 int size = nhead + skb_end_offset(skb) + ntail; in pskb_expand_head()
1081 if (skb_shared(skb)) in pskb_expand_head()
1086 if (skb_pfmemalloc(skb)) in pskb_expand_head()
1097 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); in pskb_expand_head()
1100 skb_shinfo(skb), in pskb_expand_head()
1101 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); in pskb_expand_head()
1108 if (skb_cloned(skb)) { in pskb_expand_head()
1110 if (skb_orphan_frags(skb, gfp_mask)) in pskb_expand_head()
1112 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_expand_head()
1113 skb_frag_ref(skb, i); in pskb_expand_head()
1115 if (skb_has_frag_list(skb)) in pskb_expand_head()
1116 skb_clone_fraglist(skb); in pskb_expand_head()
1118 skb_release_data(skb); in pskb_expand_head()
1120 skb_free_head(skb); in pskb_expand_head()
1122 off = (data + nhead) - skb->head; in pskb_expand_head()
1124 skb->head = data; in pskb_expand_head()
1125 skb->head_frag = 0; in pskb_expand_head()
1126 skb->data += off; in pskb_expand_head()
1128 skb->end = size; in pskb_expand_head()
1131 skb->end = skb->head + size; in pskb_expand_head()
1133 skb->tail += off; in pskb_expand_head()
1134 skb_headers_offset_update(skb, nhead); in pskb_expand_head()
1135 skb->cloned = 0; in pskb_expand_head()
1136 skb->hdr_len = 0; in pskb_expand_head()
1137 skb->nohdr = 0; in pskb_expand_head()
1138 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_expand_head()
1150 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) in skb_realloc_headroom() argument
1153 int delta = headroom - skb_headroom(skb); in skb_realloc_headroom()
1156 skb2 = pskb_copy(skb, GFP_ATOMIC); in skb_realloc_headroom()
1158 skb2 = skb_clone(skb, GFP_ATOMIC); in skb_realloc_headroom()
1187 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, in skb_copy_expand() argument
1194 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, in skb_copy_expand()
1195 gfp_mask, skb_alloc_rx_flag(skb), in skb_copy_expand()
1197 int oldheadroom = skb_headroom(skb); in skb_copy_expand()
1206 skb_put(n, skb->len); in skb_copy_expand()
1216 if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, in skb_copy_expand()
1217 skb->len + head_copy_len)) in skb_copy_expand()
1220 copy_skb_header(n, skb); in skb_copy_expand()
1240 int skb_pad(struct sk_buff *skb, int pad) in skb_pad() argument
1246 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { in skb_pad()
1247 memset(skb->data+skb->len, 0, pad); in skb_pad()
1251 ntail = skb->data_len + pad - (skb->end - skb->tail); in skb_pad()
1252 if (likely(skb_cloned(skb) || ntail > 0)) { in skb_pad()
1253 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); in skb_pad()
1261 err = skb_linearize(skb); in skb_pad()
1265 memset(skb->data + skb->len, 0, pad); in skb_pad()
1269 kfree_skb(skb); in skb_pad()
1287 unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) in pskb_put() argument
1289 if (tail != skb) { in pskb_put()
1290 skb->data_len += len; in pskb_put()
1291 skb->len += len; in pskb_put()
1306 unsigned char *skb_put(struct sk_buff *skb, unsigned int len) in skb_put() argument
1308 unsigned char *tmp = skb_tail_pointer(skb); in skb_put()
1309 SKB_LINEAR_ASSERT(skb); in skb_put()
1310 skb->tail += len; in skb_put()
1311 skb->len += len; in skb_put()
1312 if (unlikely(skb->tail > skb->end)) in skb_put()
1313 skb_over_panic(skb, len, __builtin_return_address(0)); in skb_put()
1327 unsigned char *skb_push(struct sk_buff *skb, unsigned int len) in skb_push() argument
1329 skb->data -= len; in skb_push()
1330 skb->len += len; in skb_push()
1331 if (unlikely(skb->data<skb->head)) in skb_push()
1332 skb_under_panic(skb, len, __builtin_return_address(0)); in skb_push()
1333 return skb->data; in skb_push()
1347 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) in skb_pull() argument
1349 return skb_pull_inline(skb, len); in skb_pull()
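
A hedged sketch of what the helpers above do to the linear data area: skb_put() extends the tail (appends payload), skb_push() extends toward the head (prepends a header), and skb_pull() consumes bytes from the head. struct example_hdr and the 0x88B5 EtherType (local experimental range) are purely illustrative, and the caller is assumed to have reserved enough headroom.

#include <linux/skbuff.h>
#include <linux/string.h>

struct example_hdr {
	__be16 type;
	__be16 len;
};

/* Illustrative: append a payload, then prepend a small header in front of it. */
static void example_build(struct sk_buff *skb, const void *payload,
			  unsigned int plen)
{
	struct example_hdr *hdr;

	memcpy(skb_put(skb, plen), payload, plen);		  /* grow at the tail */
	hdr = (struct example_hdr *)skb_push(skb, sizeof(*hdr)); /* grow at the head */
	hdr->type = htons(0x88B5);
	hdr->len  = htons(plen);
}
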
1362 void skb_trim(struct sk_buff *skb, unsigned int len) in skb_trim() argument
1364 if (skb->len > len) in skb_trim()
1365 __skb_trim(skb, len); in skb_trim()
1372 int ___pskb_trim(struct sk_buff *skb, unsigned int len) in ___pskb_trim() argument
1376 int offset = skb_headlen(skb); in ___pskb_trim()
1377 int nfrags = skb_shinfo(skb)->nr_frags; in ___pskb_trim()
1381 if (skb_cloned(skb) && in ___pskb_trim()
1382 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) in ___pskb_trim()
1390 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
1397 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
1400 skb_shinfo(skb)->nr_frags = i; in ___pskb_trim()
1403 skb_frag_unref(skb, i); in ___pskb_trim()
1405 if (skb_has_frag_list(skb)) in ___pskb_trim()
1406 skb_drop_fraglist(skb); in ___pskb_trim()
1410 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); in ___pskb_trim()
1442 if (len > skb_headlen(skb)) { in ___pskb_trim()
1443 skb->data_len -= skb->len - len; in ___pskb_trim()
1444 skb->len = len; in ___pskb_trim()
1446 skb->len = len; in ___pskb_trim()
1447 skb->data_len = 0; in ___pskb_trim()
1448 skb_set_tail_pointer(skb, len); in ___pskb_trim()
1480 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) in __pskb_pull_tail() argument
1486 int i, k, eat = (skb->tail + delta) - skb->end; in __pskb_pull_tail()
1488 if (eat > 0 || skb_cloned(skb)) { in __pskb_pull_tail()
1489 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, in __pskb_pull_tail()
1494 if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) in __pskb_pull_tail()
1500 if (!skb_has_frag_list(skb)) in __pskb_pull_tail()
1505 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
1506 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
1521 struct sk_buff *list = skb_shinfo(skb)->frag_list; in __pskb_pull_tail()
1557 while ((list = skb_shinfo(skb)->frag_list) != insp) { in __pskb_pull_tail()
1558 skb_shinfo(skb)->frag_list = list->next; in __pskb_pull_tail()
1564 skb_shinfo(skb)->frag_list = clone; in __pskb_pull_tail()
1572 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
1573 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
1576 skb_frag_unref(skb, i); in __pskb_pull_tail()
1579 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; in __pskb_pull_tail()
1581 skb_shinfo(skb)->frags[k].page_offset += eat; in __pskb_pull_tail()
1582 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); in __pskb_pull_tail()
1588 skb_shinfo(skb)->nr_frags = k; in __pskb_pull_tail()
1590 skb->tail += delta; in __pskb_pull_tail()
1591 skb->data_len -= delta; in __pskb_pull_tail()
1593 return skb_tail_pointer(skb); in __pskb_pull_tail()
1612 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) in skb_copy_bits() argument
1614 int start = skb_headlen(skb); in skb_copy_bits()
1618 if (offset > (int)skb->len - len) in skb_copy_bits()
1625 skb_copy_from_linear_data_offset(skb, offset, to, copy); in skb_copy_bits()
1632 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_bits()
1634 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_bits()
1659 skb_walk_frags(skb, frag_iter) { in skb_copy_bits()
1793 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, in __skb_splice_bits() argument
1804 if (__splice_segment(virt_to_page(skb->data), in __skb_splice_bits()
1805 (unsigned long) skb->data & (PAGE_SIZE - 1), in __skb_splice_bits()
1806 skb_headlen(skb), in __skb_splice_bits()
1808 skb_head_is_locked(skb), in __skb_splice_bits()
1815 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { in __skb_splice_bits()
1816 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; in __skb_splice_bits()
1833 int skb_splice_bits(struct sk_buff *skb, unsigned int offset, in skb_splice_bits() argument
1848 struct sock *sk = skb->sk; in skb_splice_bits()
1855 if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) in skb_splice_bits()
1863 skb_walk_frags(skb, frag_iter) { in skb_splice_bits()
1901 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) in skb_store_bits() argument
1903 int start = skb_headlen(skb); in skb_store_bits()
1907 if (offset > (int)skb->len - len) in skb_store_bits()
1913 skb_copy_to_linear_data_offset(skb, offset, from, copy); in skb_store_bits()
1920 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_store_bits()
1921 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_store_bits()
1946 skb_walk_frags(skb, frag_iter) { in skb_store_bits()
1974 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, in __skb_checksum() argument
1977 int start = skb_headlen(skb); in __skb_checksum()
1986 csum = ops->update(skb->data + offset, copy, csum); in __skb_checksum()
1993 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_checksum()
1995 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_checksum()
2019 skb_walk_frags(skb, frag_iter) { in __skb_checksum()
2045 __wsum skb_checksum(const struct sk_buff *skb, int offset, in skb_checksum() argument
2053 return __skb_checksum(skb, offset, len, csum, &ops); in skb_checksum()
2059 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, in skb_copy_and_csum_bits() argument
2062 int start = skb_headlen(skb); in skb_copy_and_csum_bits()
2071 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
2080 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_and_csum_bits()
2085 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_copy_and_csum_bits()
2089 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_copy_and_csum_bits()
2109 skb_walk_frags(skb, frag_iter) { in skb_copy_and_csum_bits()
2234 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) in skb_copy_and_csum_dev() argument
2239 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_copy_and_csum_dev()
2240 csstart = skb_checksum_start_offset(skb); in skb_copy_and_csum_dev()
2242 csstart = skb_headlen(skb); in skb_copy_and_csum_dev()
2244 BUG_ON(csstart > skb_headlen(skb)); in skb_copy_and_csum_dev()
2246 skb_copy_from_linear_data(skb, to, csstart); in skb_copy_and_csum_dev()
2249 if (csstart != skb->len) in skb_copy_and_csum_dev()
2250 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, in skb_copy_and_csum_dev()
2251 skb->len - csstart, 0); in skb_copy_and_csum_dev()
2253 if (skb->ip_summed == CHECKSUM_PARTIAL) { in skb_copy_and_csum_dev()
2254 long csstuff = csstart + skb->csum_offset; in skb_copy_and_csum_dev()
2312 struct sk_buff *skb; in skb_queue_purge() local
2313 while ((skb = skb_dequeue(list)) != NULL) in skb_queue_purge()
2314 kfree_skb(skb); in skb_queue_purge()
2370 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) in skb_unlink() argument
2375 __skb_unlink(skb, list); in skb_unlink()
2422 static inline void skb_split_inside_header(struct sk_buff *skb, in skb_split_inside_header() argument
2428 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), in skb_split_inside_header()
2431 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in skb_split_inside_header()
2432 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; in skb_split_inside_header()
2434 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; in skb_split_inside_header()
2435 skb_shinfo(skb)->nr_frags = 0; in skb_split_inside_header()
2436 skb1->data_len = skb->data_len; in skb_split_inside_header()
2438 skb->data_len = 0; in skb_split_inside_header()
2439 skb->len = len; in skb_split_inside_header()
2440 skb_set_tail_pointer(skb, len); in skb_split_inside_header()
2443 static inline void skb_split_no_header(struct sk_buff *skb, in skb_split_no_header() argument
2448 const int nfrags = skb_shinfo(skb)->nr_frags; in skb_split_no_header()
2450 skb_shinfo(skb)->nr_frags = 0; in skb_split_no_header()
2451 skb1->len = skb1->data_len = skb->len - len; in skb_split_no_header()
2452 skb->len = len; in skb_split_no_header()
2453 skb->data_len = len - pos; in skb_split_no_header()
2456 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_split_no_header()
2459 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; in skb_split_no_header()
2470 skb_frag_ref(skb, i); in skb_split_no_header()
2473 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); in skb_split_no_header()
2474 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
2478 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
2490 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) in skb_split() argument
2492 int pos = skb_headlen(skb); in skb_split()
2494 skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; in skb_split()
2496 skb_split_inside_header(skb, skb1, len, pos); in skb_split()
2498 skb_split_no_header(skb, skb1, len, pos); in skb_split()
2506 static int skb_prepare_for_shift(struct sk_buff *skb) in skb_prepare_for_shift() argument
2508 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_prepare_for_shift()
2529 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) in skb_shift() argument
2534 BUG_ON(shiftlen > skb->len); in skb_shift()
2535 BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ in skb_shift()
2540 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
2554 if (skb_prepare_for_shift(skb) || in skb_shift()
2559 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
2573 if ((shiftlen == skb->len) && in skb_shift()
2574 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
2577 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) in skb_shift()
2580 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { in skb_shift()
2584 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
2612 fragfrom = &skb_shinfo(skb)->frags[0]; in skb_shift()
2621 while (from < skb_shinfo(skb)->nr_frags) in skb_shift()
2622 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; in skb_shift()
2623 skb_shinfo(skb)->nr_frags = to; in skb_shift()
2625 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); in skb_shift()
2632 skb->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
2635 skb->len -= shiftlen; in skb_shift()
2636 skb->data_len -= shiftlen; in skb_shift()
2637 skb->truesize -= shiftlen; in skb_shift()
2655 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, in skb_prepare_seq_read() argument
2660 st->root_skb = st->cur_skb = skb; in skb_prepare_seq_read()
2799 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, in skb_find_text() argument
2808 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); in skb_find_text()
2826 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, in skb_append_datato_frags() argument
2828 int len, int odd, struct sk_buff *skb), in skb_append_datato_frags() argument
2831 int frg_cnt = skb_shinfo(skb)->nr_frags; in skb_append_datato_frags()
2849 offset, copy, 0, skb); in skb_append_datato_frags()
2854 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset, in skb_append_datato_frags()
2860 skb->truesize += copy; in skb_append_datato_frags()
2862 skb->len += copy; in skb_append_datato_frags()
2863 skb->data_len += copy; in skb_append_datato_frags()
2884 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) in skb_pull_rcsum() argument
2886 unsigned char *data = skb->data; in skb_pull_rcsum()
2888 BUG_ON(len > skb->len); in skb_pull_rcsum()
2889 __skb_pull(skb, len); in skb_pull_rcsum()
2890 skb_postpull_rcsum(skb, data, len); in skb_pull_rcsum()
2891 return skb->data; in skb_pull_rcsum()
3120 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) in skb_gro_receive() argument
3122 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); in skb_gro_receive()
3123 unsigned int offset = skb_gro_offset(skb); in skb_gro_receive()
3124 unsigned int headlen = skb_headlen(skb); in skb_gro_receive()
3126 unsigned int len = skb_gro_len(skb); in skb_gro_receive()
3159 delta_truesize = skb->truesize - in skb_gro_receive()
3160 SKB_TRUESIZE(skb_end_offset(skb)); in skb_gro_receive()
3162 skb->truesize -= skb->data_len; in skb_gro_receive()
3163 skb->len -= skb->data_len; in skb_gro_receive()
3164 skb->data_len = 0; in skb_gro_receive()
3166 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; in skb_gro_receive()
3168 } else if (skb->head_frag) { in skb_gro_receive()
3171 struct page *page = virt_to_head_page(skb->head); in skb_gro_receive()
3178 first_offset = skb->data - in skb_gro_receive()
3191 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); in skb_gro_receive()
3192 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; in skb_gro_receive()
3239 delta_truesize = skb->truesize; in skb_gro_receive()
3245 skb->data_len -= eat; in skb_gro_receive()
3246 skb->len -= eat; in skb_gro_receive()
3250 __skb_pull(skb, offset); in skb_gro_receive()
3253 skb_shinfo(p)->frag_list = skb; in skb_gro_receive()
3255 NAPI_GRO_CB(p)->last->next = skb; in skb_gro_receive()
3256 NAPI_GRO_CB(p)->last = skb; in skb_gro_receive()
3257 __skb_header_release(skb); in skb_gro_receive()
3270 NAPI_GRO_CB(skb)->same_flow = 1; in skb_gro_receive()
3299 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in __skb_to_sgvec() argument
3301 int start = skb_headlen(skb); in __skb_to_sgvec()
3309 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
3316 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_to_sgvec()
3321 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_to_sgvec()
3323 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_to_sgvec()
3337 skb_walk_frags(skb, frag_iter) { in __skb_to_sgvec()
3377 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, in skb_to_sgvec_nomark() argument
3380 return __skb_to_sgvec(skb, sg, offset, len); in skb_to_sgvec_nomark()
3384 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in skb_to_sgvec() argument
3386 int nsg = __skb_to_sgvec(skb, sg, offset, len); in skb_to_sgvec()
3411 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) in skb_cow_data() argument
3421 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && in skb_cow_data()
3422 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) in skb_cow_data()
3426 if (!skb_has_frag_list(skb)) { in skb_cow_data()
3432 if (skb_tailroom(skb) < tailbits && in skb_cow_data()
3433 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) in skb_cow_data()
3437 *trailer = skb; in skb_cow_data()
3444 skb_p = &skb_shinfo(skb)->frag_list; in skb_cow_data()
3504 static void sock_rmem_free(struct sk_buff *skb) in sock_rmem_free() argument
3506 struct sock *sk = skb->sk; in sock_rmem_free()
3508 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in sock_rmem_free()
3514 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_err_skb() argument
3516 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in sock_queue_err_skb()
3520 skb_orphan(skb); in sock_queue_err_skb()
3521 skb->sk = sk; in sock_queue_err_skb()
3522 skb->destructor = sock_rmem_free; in sock_queue_err_skb()
3523 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in sock_queue_err_skb()
3526 skb_dst_force(skb); in sock_queue_err_skb()
3528 skb_queue_tail(&sk->sk_error_queue, skb); in sock_queue_err_skb()
3538 struct sk_buff *skb, *skb_next; in sock_dequeue_err_skb() local
3543 skb = __skb_dequeue(q); in sock_dequeue_err_skb()
3544 if (skb && (skb_next = skb_peek(q))) in sock_dequeue_err_skb()
3552 return skb; in sock_dequeue_err_skb()
3569 struct sk_buff *skb_clone_sk(struct sk_buff *skb) in skb_clone_sk() argument
3571 struct sock *sk = skb->sk; in skb_clone_sk()
3577 clone = skb_clone(skb, GFP_ATOMIC); in skb_clone_sk()
3590 static void __skb_complete_tx_timestamp(struct sk_buff *skb, in __skb_complete_tx_timestamp() argument
3597 serr = SKB_EXT_ERR(skb); in __skb_complete_tx_timestamp()
3603 serr->ee.ee_data = skb_shinfo(skb)->tskey; in __skb_complete_tx_timestamp()
3609 err = sock_queue_err_skb(sk, skb); in __skb_complete_tx_timestamp()
3612 kfree_skb(skb); in __skb_complete_tx_timestamp()
3615 void skb_complete_tx_timestamp(struct sk_buff *skb, in skb_complete_tx_timestamp() argument
3618 struct sock *sk = skb->sk; in skb_complete_tx_timestamp()
3624 *skb_hwtstamps(skb) = *hwtstamps; in skb_complete_tx_timestamp()
3625 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND); in skb_complete_tx_timestamp()
3635 struct sk_buff *skb; in __skb_tstamp_tx() local
3645 skb = skb_clone(orig_skb, GFP_ATOMIC); in __skb_tstamp_tx()
3646 if (!skb) in __skb_tstamp_tx()
3649 __skb_complete_tx_timestamp(skb, sk, tstype); in __skb_tstamp_tx()
3661 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) in skb_complete_wifi_ack() argument
3663 struct sock *sk = skb->sk; in skb_complete_wifi_ack()
3667 skb->wifi_acked_valid = 1; in skb_complete_wifi_ack()
3668 skb->wifi_acked = acked; in skb_complete_wifi_ack()
3670 serr = SKB_EXT_ERR(skb); in skb_complete_wifi_ack()
3679 err = sock_queue_err_skb(sk, skb); in skb_complete_wifi_ack()
3683 kfree_skb(skb); in skb_complete_wifi_ack()
3700 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) in skb_partial_csum_set() argument
3702 if (unlikely(start > skb_headlen(skb)) || in skb_partial_csum_set()
3703 unlikely((int)start + off > skb_headlen(skb) - 2)) { in skb_partial_csum_set()
3705 start, off, skb_headlen(skb)); in skb_partial_csum_set()
3708 skb->ip_summed = CHECKSUM_PARTIAL; in skb_partial_csum_set()
3709 skb->csum_start = skb_headroom(skb) + start; in skb_partial_csum_set()
3710 skb->csum_offset = off; in skb_partial_csum_set()
3711 skb_set_transport_header(skb, start); in skb_partial_csum_set()
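
A hedged example of how a virtual or offload-capable receive path might use skb_partial_csum_set() as listed above: mark the packet CHECKSUM_PARTIAL so the remainder of the checksum is completed later. csum_start and csum_offset would normally come from device or guest metadata; here they are plain parameters.

#include <linux/errno.h>
#include <linux/skbuff.h>

/* Illustrative wrapper; fails if the offsets fall outside the linear header. */
static int example_set_partial_csum(struct sk_buff *skb,
				    u16 csum_start, u16 csum_offset)
{
	if (!skb_partial_csum_set(skb, csum_start, csum_offset))
		return -EINVAL;
	return 0;
}
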
3716 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, in skb_maybe_pull_tail() argument
3719 if (skb_headlen(skb) >= len) in skb_maybe_pull_tail()
3725 if (max > skb->len) in skb_maybe_pull_tail()
3726 max = skb->len; in skb_maybe_pull_tail()
3728 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) in skb_maybe_pull_tail()
3731 if (skb_headlen(skb) < len) in skb_maybe_pull_tail()
3739 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, in skb_checksum_setup_ip() argument
3747 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), in skb_checksum_setup_ip()
3749 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
3753 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; in skb_checksum_setup_ip()
3756 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), in skb_checksum_setup_ip()
3758 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
3762 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; in skb_checksum_setup_ip()
3773 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv4() argument
3782 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv4()
3788 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) in skb_checksum_setup_ipv4()
3791 off = ip_hdrlen(skb); in skb_checksum_setup_ipv4()
3798 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); in skb_checksum_setup_ipv4()
3803 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in skb_checksum_setup_ipv4()
3804 ip_hdr(skb)->daddr, in skb_checksum_setup_ipv4()
3805 skb->len - off, in skb_checksum_setup_ipv4()
3806 ip_hdr(skb)->protocol, 0); in skb_checksum_setup_ipv4()
3818 #define OPT_HDR(type, skb, off) \ argument
3819 (type *)(skb_network_header(skb) + (off))
3821 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv6() argument
3836 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); in skb_checksum_setup_ipv6()
3840 nexthdr = ipv6_hdr(skb)->nexthdr; in skb_checksum_setup_ipv6()
3842 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); in skb_checksum_setup_ipv6()
3850 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
3857 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); in skb_checksum_setup_ipv6()
3865 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
3872 hp = OPT_HDR(struct ip_auth_hdr, skb, off); in skb_checksum_setup_ipv6()
3880 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
3887 hp = OPT_HDR(struct frag_hdr, skb, off); in skb_checksum_setup_ipv6()
3907 csum = skb_checksum_setup_ip(skb, nexthdr, off); in skb_checksum_setup_ipv6()
3912 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in skb_checksum_setup_ipv6()
3913 &ipv6_hdr(skb)->daddr, in skb_checksum_setup_ipv6()
3914 skb->len - off, nexthdr, 0); in skb_checksum_setup_ipv6()
3926 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) in skb_checksum_setup() argument
3930 switch (skb->protocol) { in skb_checksum_setup()
3932 err = skb_checksum_setup_ipv4(skb, recalculate); in skb_checksum_setup()
3936 err = skb_checksum_setup_ipv6(skb, recalculate); in skb_checksum_setup()
3948 void __skb_warn_lro_forwarding(const struct sk_buff *skb) in __skb_warn_lro_forwarding() argument
3951 skb->dev->name); in __skb_warn_lro_forwarding()
3955 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) in kfree_skb_partial() argument
3958 skb_release_head_state(skb); in kfree_skb_partial()
3959 kmem_cache_free(skbuff_head_cache, skb); in kfree_skb_partial()
3961 __kfree_skb(skb); in kfree_skb_partial()
4058 void skb_scrub_packet(struct sk_buff *skb, bool xnet) in skb_scrub_packet() argument
4060 skb->tstamp.tv64 = 0; in skb_scrub_packet()
4061 skb->pkt_type = PACKET_HOST; in skb_scrub_packet()
4062 skb->skb_iif = 0; in skb_scrub_packet()
4063 skb->ignore_df = 0; in skb_scrub_packet()
4064 skb_dst_drop(skb); in skb_scrub_packet()
4065 secpath_reset(skb); in skb_scrub_packet()
4066 nf_reset(skb); in skb_scrub_packet()
4067 nf_reset_trace(skb); in skb_scrub_packet()
4072 ipvs_reset(skb); in skb_scrub_packet()
4073 skb_orphan(skb); in skb_scrub_packet()
4074 skb->mark = 0; in skb_scrub_packet()
4088 unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) in skb_gso_transport_seglen() argument
4090 const struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_gso_transport_seglen()
4093 if (skb->encapsulation) { in skb_gso_transport_seglen()
4094 thlen = skb_inner_transport_header(skb) - in skb_gso_transport_seglen()
4095 skb_transport_header(skb); in skb_gso_transport_seglen()
4098 thlen += inner_tcp_hdrlen(skb); in skb_gso_transport_seglen()
4100 thlen = tcp_hdrlen(skb); in skb_gso_transport_seglen()
4110 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) in skb_reorder_vlan_header() argument
4112 if (skb_cow(skb, skb_headroom(skb)) < 0) { in skb_reorder_vlan_header()
4113 kfree_skb(skb); in skb_reorder_vlan_header()
4117 memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN, in skb_reorder_vlan_header()
4119 skb->mac_header += VLAN_HLEN; in skb_reorder_vlan_header()
4120 return skb; in skb_reorder_vlan_header()
4123 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) in skb_vlan_untag() argument
4128 if (unlikely(vlan_tx_tag_present(skb))) { in skb_vlan_untag()
4130 return skb; in skb_vlan_untag()
4133 skb = skb_share_check(skb, GFP_ATOMIC); in skb_vlan_untag()
4134 if (unlikely(!skb)) in skb_vlan_untag()
4137 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) in skb_vlan_untag()
4140 vhdr = (struct vlan_hdr *)skb->data; in skb_vlan_untag()
4142 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); in skb_vlan_untag()
4144 skb_pull_rcsum(skb, VLAN_HLEN); in skb_vlan_untag()
4145 vlan_set_encap_proto(skb, vhdr); in skb_vlan_untag()
4147 skb = skb_reorder_vlan_header(skb); in skb_vlan_untag()
4148 if (unlikely(!skb)) in skb_vlan_untag()
4151 skb_reset_network_header(skb); in skb_vlan_untag()
4152 skb_reset_transport_header(skb); in skb_vlan_untag()
4153 skb_reset_mac_len(skb); in skb_vlan_untag()
4155 return skb; in skb_vlan_untag()
4158 kfree_skb(skb); in skb_vlan_untag()
4182 struct sk_buff *skb; in alloc_skb_with_frags() local
4199 skb = alloc_skb(header_len, gfp_head); in alloc_skb_with_frags()
4200 if (!skb) in alloc_skb_with_frags()
4203 skb->truesize += npages << PAGE_SHIFT; in alloc_skb_with_frags()
4229 skb_fill_page_desc(skb, i, page, 0, chunk); in alloc_skb_with_frags()
4233 return skb; in alloc_skb_with_frags()
4236 kfree_skb(skb); in alloc_skb_with_frags()