
Lines matching references to "skb" (net/core/skbuff.c, Linux kernel)

106 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,  in skb_panic()  argument
110 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
111 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
112 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
116 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_over_panic() argument
118 skb_panic(skb, sz, addr, __func__); in skb_over_panic()
121 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_under_panic() argument
123 skb_panic(skb, sz, addr, __func__); in skb_under_panic()
176 struct sk_buff *skb; in napi_skb_cache_get() local
186 skb = nc->skb_cache[--nc->skb_count]; in napi_skb_cache_get()
187 kasan_unpoison_object_data(skbuff_head_cache, skb); in napi_skb_cache_get()
189 return skb; in napi_skb_cache_get()
193 static void __build_skb_around(struct sk_buff *skb, void *data, in __build_skb_around() argument
202 skb->truesize = SKB_TRUESIZE(size); in __build_skb_around()
203 refcount_set(&skb->users, 1); in __build_skb_around()
204 skb->head = data; in __build_skb_around()
205 skb->data = data; in __build_skb_around()
206 skb_reset_tail_pointer(skb); in __build_skb_around()
207 skb_set_end_offset(skb, size); in __build_skb_around()
208 skb->mac_header = (typeof(skb->mac_header))~0U; in __build_skb_around()
209 skb->transport_header = (typeof(skb->transport_header))~0U; in __build_skb_around()
212 shinfo = skb_shinfo(skb); in __build_skb_around()
216 skb_set_kcov_handle(skb, kcov_common_handle()); in __build_skb_around()
240 struct sk_buff *skb; in __build_skb() local
242 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); in __build_skb()
243 if (unlikely(!skb)) in __build_skb()
246 memset(skb, 0, offsetof(struct sk_buff, tail)); in __build_skb()
247 __build_skb_around(skb, data, frag_size); in __build_skb()
249 return skb; in __build_skb()
259 struct sk_buff *skb = __build_skb(data, frag_size); in build_skb() local
261 if (skb && frag_size) { in build_skb()
262 skb->head_frag = 1; in build_skb()
264 skb->pfmemalloc = 1; in build_skb()
266 return skb; in build_skb()
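
Note: a minimal sketch (not from this file) of how a driver receive path might hand an already-filled buffer to build_skb(). "rx_wrap", "buf", "headroom", "pkt_len" and BUF_SIZE are illustrative names; BUF_SIZE is assumed to include room for struct skb_shared_info at the end of the buffer, as build_skb() expects.

    /* Hypothetical RX path: wrap a buffer the device has already filled,
     * where the frame starts "headroom" bytes into the buffer. */
    static struct sk_buff *rx_wrap(void *buf, unsigned int headroom,
                                   unsigned int pkt_len)
    {
            struct sk_buff *skb = build_skb(buf, BUF_SIZE);

            if (unlikely(!skb))
                    return NULL;            /* caller still owns buf */

            skb_reserve(skb, headroom);     /* skip to where the frame starts */
            skb_put(skb, pkt_len);          /* expose the received bytes */
            return skb;
    }
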
276 struct sk_buff *build_skb_around(struct sk_buff *skb, in build_skb_around() argument
279 if (unlikely(!skb)) in build_skb_around()
282 __build_skb_around(skb, data, frag_size); in build_skb_around()
285 skb->head_frag = 1; in build_skb_around()
287 skb->pfmemalloc = 1; in build_skb_around()
289 return skb; in build_skb_around()
305 struct sk_buff *skb; in __napi_build_skb() local
307 skb = napi_skb_cache_get(); in __napi_build_skb()
308 if (unlikely(!skb)) in __napi_build_skb()
311 memset(skb, 0, offsetof(struct sk_buff, tail)); in __napi_build_skb()
312 __build_skb_around(skb, data, frag_size); in __napi_build_skb()
314 return skb; in __napi_build_skb()
329 struct sk_buff *skb = __napi_build_skb(data, frag_size); in napi_build_skb() local
331 if (likely(skb) && frag_size) { in napi_build_skb()
332 skb->head_frag = 1; in napi_build_skb()
333 skb_propagate_pfmemalloc(virt_to_head_page(data), skb); in napi_build_skb()
336 return skb; in napi_build_skb()
401 struct sk_buff *skb; in __alloc_skb() local
414 skb = napi_skb_cache_get(); in __alloc_skb()
416 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node); in __alloc_skb()
417 if (unlikely(!skb)) in __alloc_skb()
419 prefetchw(skb); in __alloc_skb()
443 memset(skb, 0, offsetof(struct sk_buff, tail)); in __alloc_skb()
444 __build_skb_around(skb, data, 0); in __alloc_skb()
445 skb->pfmemalloc = pfmemalloc; in __alloc_skb()
450 fclones = container_of(skb, struct sk_buff_fclones, skb1); in __alloc_skb()
452 skb->fclone = SKB_FCLONE_ORIG; in __alloc_skb()
458 return skb; in __alloc_skb()
461 kmem_cache_free(cache, skb); in __alloc_skb()
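
Note: a sketch of the common allocation pattern built on alloc_skb() (illustrative only; "build_tx_skb", "hlen", "dlen" and "payload" are hypothetical): reserve headroom first, then append the payload, so protocol headers can later be skb_push()ed in front without reallocating.

    static struct sk_buff *build_tx_skb(const void *payload, unsigned int dlen,
                                        unsigned int hlen)
    {
            struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_KERNEL);

            if (!skb)
                    return NULL;

            skb_reserve(skb, hlen);            /* headroom for headers          */
            skb_put_data(skb, payload, dlen);  /* copy payload after skb->data  */
            return skb;                        /* headers get pushed later      */
    }
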
483 struct sk_buff *skb; in __netdev_alloc_skb() local
495 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); in __netdev_alloc_skb()
496 if (!skb) in __netdev_alloc_skb()
522 skb = __build_skb(data, len); in __netdev_alloc_skb()
523 if (unlikely(!skb)) { in __netdev_alloc_skb()
529 skb->pfmemalloc = 1; in __netdev_alloc_skb()
530 skb->head_frag = 1; in __netdev_alloc_skb()
533 skb_reserve(skb, NET_SKB_PAD); in __netdev_alloc_skb()
534 skb->dev = dev; in __netdev_alloc_skb()
537 return skb; in __netdev_alloc_skb()
558 struct sk_buff *skb; in __napi_alloc_skb() local
569 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI, in __napi_alloc_skb()
571 if (!skb) in __napi_alloc_skb()
587 skb = __napi_build_skb(data, len); in __napi_alloc_skb()
588 if (unlikely(!skb)) { in __napi_alloc_skb()
594 skb->pfmemalloc = 1; in __napi_alloc_skb()
595 skb->head_frag = 1; in __napi_alloc_skb()
598 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); in __napi_alloc_skb()
599 skb->dev = napi->dev; in __napi_alloc_skb()
602 return skb; in __napi_alloc_skb()
606 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, in skb_add_rx_frag() argument
609 skb_fill_page_desc(skb, i, page, off, size); in skb_add_rx_frag()
610 skb->len += size; in skb_add_rx_frag()
611 skb->data_len += size; in skb_add_rx_frag()
612 skb->truesize += truesize; in skb_add_rx_frag()
616 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, in skb_coalesce_rx_frag() argument
619 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_coalesce_rx_frag()
622 skb->len += size; in skb_coalesce_rx_frag()
623 skb->data_len += size; in skb_coalesce_rx_frag()
624 skb->truesize += truesize; in skb_coalesce_rx_frag()
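
Note: an illustrative call showing how a paged receive path might attach a page with skb_add_rx_frag(); "page", "offset" and "frag_len" are placeholders, and the last argument charges the full buffer size (here a whole page) to skb->truesize.

    skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
                    frag_len, PAGE_SIZE);   /* len, data_len and truesize all updated */
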
634 static inline void skb_drop_fraglist(struct sk_buff *skb) in skb_drop_fraglist() argument
636 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
639 static void skb_clone_fraglist(struct sk_buff *skb) in skb_clone_fraglist() argument
643 skb_walk_frags(skb, list) in skb_clone_fraglist()
647 static void skb_free_head(struct sk_buff *skb) in skb_free_head() argument
649 unsigned char *head = skb->head; in skb_free_head()
651 if (skb->head_frag) { in skb_free_head()
652 if (skb_pp_recycle(skb, head)) in skb_free_head()
660 static void skb_release_data(struct sk_buff *skb) in skb_release_data() argument
662 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_release_data()
665 if (skb->cloned && in skb_release_data()
666 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, in skb_release_data()
670 skb_zcopy_clear(skb, true); in skb_release_data()
673 __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle); in skb_release_data()
678 skb_free_head(skb); in skb_release_data()
689 skb->pp_recycle = 0; in skb_release_data()
695 static void kfree_skbmem(struct sk_buff *skb) in kfree_skbmem() argument
699 switch (skb->fclone) { in kfree_skbmem()
701 kmem_cache_free(skbuff_head_cache, skb); in kfree_skbmem()
705 fclones = container_of(skb, struct sk_buff_fclones, skb1); in kfree_skbmem()
716 fclones = container_of(skb, struct sk_buff_fclones, skb2); in kfree_skbmem()
725 void skb_release_head_state(struct sk_buff *skb) in skb_release_head_state() argument
727 skb_dst_drop(skb); in skb_release_head_state()
728 if (skb->destructor) { in skb_release_head_state()
730 skb->destructor(skb); in skb_release_head_state()
733 nf_conntrack_put(skb_nfct(skb)); in skb_release_head_state()
735 skb_ext_put(skb); in skb_release_head_state()
739 static void skb_release_all(struct sk_buff *skb) in skb_release_all() argument
741 skb_release_head_state(skb); in skb_release_all()
742 if (likely(skb->head)) in skb_release_all()
743 skb_release_data(skb); in skb_release_all()
755 void __kfree_skb(struct sk_buff *skb) in __kfree_skb() argument
757 skb_release_all(skb); in __kfree_skb()
758 kfree_skbmem(skb); in __kfree_skb()
771 void kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) in kfree_skb_reason() argument
773 if (!skb_unref(skb)) in kfree_skb_reason()
776 trace_android_vh_kfree_skb(skb); in kfree_skb_reason()
777 trace_kfree_skb(skb, __builtin_return_address(0), reason); in kfree_skb_reason()
778 __kfree_skb(skb); in kfree_skb_reason()
786 void kfree_skb(struct sk_buff *skb) in kfree_skb() argument
792 kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); in kfree_skb()
813 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) in skb_dump() argument
815 struct skb_shared_info *sh = skb_shinfo(skb); in skb_dump()
816 struct net_device *dev = skb->dev; in skb_dump()
817 struct sock *sk = skb->sk; in skb_dump()
824 len = skb->len; in skb_dump()
826 len = min_t(int, skb->len, MAX_HEADER + 128); in skb_dump()
828 headroom = skb_headroom(skb); in skb_dump()
829 tailroom = skb_tailroom(skb); in skb_dump()
831 has_mac = skb_mac_header_was_set(skb); in skb_dump()
832 has_trans = skb_transport_header_was_set(skb); in skb_dump()
839 level, skb->len, headroom, skb_headlen(skb), tailroom, in skb_dump()
840 has_mac ? skb->mac_header : -1, in skb_dump()
841 has_mac ? skb_mac_header_len(skb) : -1, in skb_dump()
842 skb->network_header, in skb_dump()
843 has_trans ? skb_network_header_len(skb) : -1, in skb_dump()
844 has_trans ? skb->transport_header : -1, in skb_dump()
847 skb->csum, skb->ip_summed, skb->csum_complete_sw, in skb_dump()
848 skb->csum_valid, skb->csum_level, in skb_dump()
849 skb->hash, skb->sw_hash, skb->l4_hash, in skb_dump()
850 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif); in skb_dump()
861 16, 1, skb->head, headroom, false); in skb_dump()
863 seg_len = min_t(int, skb_headlen(skb), len); in skb_dump()
866 16, 1, skb->data, seg_len, false); in skb_dump()
871 16, 1, skb_tail_pointer(skb), tailroom, false); in skb_dump()
873 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { in skb_dump()
874 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_dump()
894 if (full_pkt && skb_has_frag_list(skb)) { in skb_dump()
896 skb_walk_frags(skb, list_skb) in skb_dump()
909 void skb_tx_error(struct sk_buff *skb) in skb_tx_error() argument
911 skb_zcopy_clear(skb, true); in skb_tx_error()
924 void consume_skb(struct sk_buff *skb) in consume_skb() argument
926 if (!skb_unref(skb)) in consume_skb()
929 trace_consume_skb(skb); in consume_skb()
930 __kfree_skb(skb); in consume_skb()
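
Note: kfree_skb()/kfree_skb_reason() are for genuine drops (they fire the kfree_skb tracepoint), while consume_skb() is for packets that simply finished their lifetime. A hedged sketch of the usual split:

    if (unlikely(err)) {
            kfree_skb_reason(skb, SKB_DROP_REASON_NOT_SPECIFIED); /* counted as a drop */
            return err;
    }
    consume_skb(skb);   /* not a drop: the packet was delivered/transmitted */
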
942 void __consume_stateless_skb(struct sk_buff *skb) in __consume_stateless_skb() argument
944 trace_consume_skb(skb); in __consume_stateless_skb()
945 skb_release_data(skb); in __consume_stateless_skb()
946 kfree_skbmem(skb); in __consume_stateless_skb()
949 static void napi_skb_cache_put(struct sk_buff *skb) in napi_skb_cache_put() argument
954 kasan_poison_object_data(skbuff_head_cache, skb); in napi_skb_cache_put()
955 nc->skb_cache[nc->skb_count++] = skb; in napi_skb_cache_put()
968 void __kfree_skb_defer(struct sk_buff *skb) in __kfree_skb_defer() argument
970 skb_release_all(skb); in __kfree_skb_defer()
971 napi_skb_cache_put(skb); in __kfree_skb_defer()
974 void napi_skb_free_stolen_head(struct sk_buff *skb) in napi_skb_free_stolen_head() argument
976 if (unlikely(skb->slow_gro)) { in napi_skb_free_stolen_head()
977 nf_reset_ct(skb); in napi_skb_free_stolen_head()
978 skb_dst_drop(skb); in napi_skb_free_stolen_head()
979 skb_ext_put(skb); in napi_skb_free_stolen_head()
980 skb_orphan(skb); in napi_skb_free_stolen_head()
981 skb->slow_gro = 0; in napi_skb_free_stolen_head()
983 napi_skb_cache_put(skb); in napi_skb_free_stolen_head()
986 void napi_consume_skb(struct sk_buff *skb, int budget) in napi_consume_skb() argument
990 dev_consume_skb_any(skb); in napi_consume_skb()
996 if (!skb_unref(skb)) in napi_consume_skb()
1000 trace_consume_skb(skb); in napi_consume_skb()
1003 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in napi_consume_skb()
1004 __kfree_skb(skb); in napi_consume_skb()
1008 skb_release_all(skb); in napi_consume_skb()
1009 napi_skb_cache_put(skb); in napi_consume_skb()
1080 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) in __skb_clone() argument
1082 #define C(x) n->x = skb->x in __skb_clone()
1086 __copy_skb_header(n, skb); in __skb_clone()
1091 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; in __skb_clone()
1106 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
1107 skb->cloned = 1; in __skb_clone()
1198 struct sk_buff *skb; in msg_zerocopy_alloc() local
1202 skb = sock_omalloc(sk, 0, GFP_KERNEL); in msg_zerocopy_alloc()
1203 if (!skb) in msg_zerocopy_alloc()
1206 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); in msg_zerocopy_alloc()
1207 uarg = (void *)skb->cb; in msg_zerocopy_alloc()
1211 kfree_skb(skb); in msg_zerocopy_alloc()
1277 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) in skb_zerocopy_notify_extend() argument
1279 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); in skb_zerocopy_notify_extend()
1299 struct sk_buff *tail, *skb = skb_from_uarg(uarg); in __msg_zerocopy_callback() local
1301 struct sock *sk = skb->sk; in __msg_zerocopy_callback()
1321 serr = SKB_EXT_ERR(skb); in __msg_zerocopy_callback()
1335 __skb_queue_tail(q, skb); in __msg_zerocopy_callback()
1336 skb = NULL; in __msg_zerocopy_callback()
1343 consume_skb(skb); in __msg_zerocopy_callback()
1347 void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg, in msg_zerocopy_callback() argument
1369 int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len) in skb_zerocopy_iter_dgram() argument
1371 return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_dgram()
1375 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, in skb_zerocopy_iter_stream() argument
1379 struct ubuf_info *orig_uarg = skb_zcopy(skb); in skb_zerocopy_iter_stream()
1381 int err, orig_len = skb->len; in skb_zerocopy_iter_stream()
1389 err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_stream()
1390 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { in skb_zerocopy_iter_stream()
1391 struct sock *save_sk = skb->sk; in skb_zerocopy_iter_stream()
1395 skb->sk = sk; in skb_zerocopy_iter_stream()
1396 ___pskb_trim(skb, orig_len); in skb_zerocopy_iter_stream()
1397 skb->sk = save_sk; in skb_zerocopy_iter_stream()
1401 skb_zcopy_set(skb, uarg, NULL); in skb_zerocopy_iter_stream()
1402 return skb->len - orig_len; in skb_zerocopy_iter_stream()
1441 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) in skb_copy_ubufs() argument
1443 int num_frags = skb_shinfo(skb)->nr_frags; in skb_copy_ubufs()
1448 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) in skb_copy_ubufs()
1454 new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; in skb_copy_ubufs()
1472 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_ubufs()
1499 skb_frag_unref(skb, i); in skb_copy_ubufs()
1503 __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE); in skb_copy_ubufs()
1506 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); in skb_copy_ubufs()
1507 skb_shinfo(skb)->nr_frags = new_frags; in skb_copy_ubufs()
1510 skb_zcopy_clear(skb, false); in skb_copy_ubufs()
1529 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) in skb_clone() argument
1531 struct sk_buff_fclones *fclones = container_of(skb, in skb_clone()
1536 if (skb_orphan_frags(skb, gfp_mask)) in skb_clone()
1539 if (skb->fclone == SKB_FCLONE_ORIG && in skb_clone()
1544 if (skb_pfmemalloc(skb)) in skb_clone()
1554 return __skb_clone(n, skb); in skb_clone()
1558 void skb_headers_offset_update(struct sk_buff *skb, int off) in skb_headers_offset_update() argument
1561 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_headers_offset_update()
1562 skb->csum_start += off; in skb_headers_offset_update()
1564 skb->transport_header += off; in skb_headers_offset_update()
1565 skb->network_header += off; in skb_headers_offset_update()
1566 if (skb_mac_header_was_set(skb)) in skb_headers_offset_update()
1567 skb->mac_header += off; in skb_headers_offset_update()
1568 skb->inner_transport_header += off; in skb_headers_offset_update()
1569 skb->inner_network_header += off; in skb_headers_offset_update()
1570 skb->inner_mac_header += off; in skb_headers_offset_update()
1584 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) in skb_alloc_rx_flag() argument
1586 if (skb_pfmemalloc(skb)) in skb_alloc_rx_flag()
1608 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) in skb_copy() argument
1610 int headerlen = skb_headroom(skb); in skb_copy()
1611 unsigned int size = skb_end_offset(skb) + skb->data_len; in skb_copy()
1613 skb_alloc_rx_flag(skb), NUMA_NO_NODE); in skb_copy()
1621 skb_put(n, skb->len); in skb_copy()
1623 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); in skb_copy()
1625 skb_copy_header(n, skb); in skb_copy()
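
Note: an illustrative contrast between the two copy flavours above: skb_clone() creates a second header that shares the original payload (read-only use), while skb_copy() duplicates header and data so the result may be modified. Either call may return NULL.

    struct sk_buff *ro = skb_clone(skb, GFP_ATOMIC);  /* shares the payload with skb */
    struct sk_buff *rw = skb_copy(skb, GFP_ATOMIC);   /* private, writable copy      */
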
1647 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, in __pskb_copy_fclone() argument
1650 unsigned int size = skb_headlen(skb) + headroom; in __pskb_copy_fclone()
1651 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); in __pskb_copy_fclone()
1660 skb_put(n, skb_headlen(skb)); in __pskb_copy_fclone()
1662 skb_copy_from_linear_data(skb, n->data, n->len); in __pskb_copy_fclone()
1664 n->truesize += skb->data_len; in __pskb_copy_fclone()
1665 n->data_len = skb->data_len; in __pskb_copy_fclone()
1666 n->len = skb->len; in __pskb_copy_fclone()
1668 if (skb_shinfo(skb)->nr_frags) { in __pskb_copy_fclone()
1671 if (skb_orphan_frags(skb, gfp_mask) || in __pskb_copy_fclone()
1672 skb_zerocopy_clone(n, skb, gfp_mask)) { in __pskb_copy_fclone()
1677 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_copy_fclone()
1678 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; in __pskb_copy_fclone()
1679 skb_frag_ref(skb, i); in __pskb_copy_fclone()
1684 if (skb_has_frag_list(skb)) { in __pskb_copy_fclone()
1685 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; in __pskb_copy_fclone()
1689 skb_copy_header(n, skb); in __pskb_copy_fclone()
1711 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, in pskb_expand_head() argument
1714 int i, osize = skb_end_offset(skb); in pskb_expand_head()
1721 BUG_ON(skb_shared(skb)); in pskb_expand_head()
1725 if (skb_pfmemalloc(skb)) in pskb_expand_head()
1736 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); in pskb_expand_head()
1739 skb_shinfo(skb), in pskb_expand_head()
1740 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); in pskb_expand_head()
1747 if (skb_cloned(skb)) { in pskb_expand_head()
1748 if (skb_orphan_frags(skb, gfp_mask)) in pskb_expand_head()
1750 if (skb_zcopy(skb)) in pskb_expand_head()
1751 refcount_inc(&skb_uarg(skb)->refcnt); in pskb_expand_head()
1752 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_expand_head()
1753 skb_frag_ref(skb, i); in pskb_expand_head()
1755 if (skb_has_frag_list(skb)) in pskb_expand_head()
1756 skb_clone_fraglist(skb); in pskb_expand_head()
1758 skb_release_data(skb); in pskb_expand_head()
1760 skb_free_head(skb); in pskb_expand_head()
1762 off = (data + nhead) - skb->head; in pskb_expand_head()
1764 skb->head = data; in pskb_expand_head()
1765 skb->head_frag = 0; in pskb_expand_head()
1766 skb->data += off; in pskb_expand_head()
1768 skb_set_end_offset(skb, size); in pskb_expand_head()
1772 skb->tail += off; in pskb_expand_head()
1773 skb_headers_offset_update(skb, nhead); in pskb_expand_head()
1774 skb->cloned = 0; in pskb_expand_head()
1775 skb->hdr_len = 0; in pskb_expand_head()
1776 skb->nohdr = 0; in pskb_expand_head()
1777 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_expand_head()
1779 skb_metadata_clear(skb); in pskb_expand_head()
1785 if (!skb->sk || skb->destructor == sock_edemux) in pskb_expand_head()
1786 skb->truesize += size - osize; in pskb_expand_head()
1799 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) in skb_realloc_headroom() argument
1802 int delta = headroom - skb_headroom(skb); in skb_realloc_headroom()
1805 skb2 = pskb_copy(skb, GFP_ATOMIC); in skb_realloc_headroom()
1807 skb2 = skb_clone(skb, GFP_ATOMIC); in skb_realloc_headroom()
1818 int __skb_unclone_keeptruesize(struct sk_buff *skb, gfp_t pri) in __skb_unclone_keeptruesize() argument
1824 saved_end_offset = skb_end_offset(skb); in __skb_unclone_keeptruesize()
1825 saved_truesize = skb->truesize; in __skb_unclone_keeptruesize()
1827 res = pskb_expand_head(skb, 0, 0, pri); in __skb_unclone_keeptruesize()
1831 skb->truesize = saved_truesize; in __skb_unclone_keeptruesize()
1833 if (likely(skb_end_offset(skb) == saved_end_offset)) in __skb_unclone_keeptruesize()
1836 shinfo = skb_shinfo(skb); in __skb_unclone_keeptruesize()
1841 memmove(skb->head + saved_end_offset, in __skb_unclone_keeptruesize()
1845 skb_set_end_offset(skb, saved_end_offset); in __skb_unclone_keeptruesize()
1862 struct sk_buff *skb_expand_head(struct sk_buff *skb, unsigned int headroom) in skb_expand_head() argument
1864 int delta = headroom - skb_headroom(skb); in skb_expand_head()
1865 int osize = skb_end_offset(skb); in skb_expand_head()
1866 struct sock *sk = skb->sk; in skb_expand_head()
1870 return skb; in skb_expand_head()
1874 if (skb_shared(skb) || !is_skb_wmem(skb)) { in skb_expand_head()
1875 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); in skb_expand_head()
1882 consume_skb(skb); in skb_expand_head()
1883 skb = nskb; in skb_expand_head()
1885 if (pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) in skb_expand_head()
1888 if (sk && is_skb_wmem(skb)) { in skb_expand_head()
1889 delta = skb_end_offset(skb) - osize; in skb_expand_head()
1891 skb->truesize += delta; in skb_expand_head()
1893 return skb; in skb_expand_head()
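
Note: a minimal sketch of the usual caller pattern around skb_expand_head(): make sure enough headroom exists before pushing another header. "needed" is a placeholder; on failure skb_expand_head() has already freed the skb, so the caller must not touch it again.

    if (skb_headroom(skb) < needed) {
            skb = skb_expand_head(skb, needed);  /* may return a different skb  */
            if (!skb)
                    return -ENOMEM;              /* original skb already freed  */
    }
    skb_push(skb, needed);                       /* headroom is now guaranteed  */
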
1896 kfree_skb(skb); in skb_expand_head()
1919 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, in skb_copy_expand() argument
1926 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, in skb_copy_expand()
1927 gfp_mask, skb_alloc_rx_flag(skb), in skb_copy_expand()
1929 int oldheadroom = skb_headroom(skb); in skb_copy_expand()
1938 skb_put(n, skb->len); in skb_copy_expand()
1948 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, in skb_copy_expand()
1949 skb->len + head_copy_len)); in skb_copy_expand()
1951 skb_copy_header(n, skb); in skb_copy_expand()
1973 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) in __skb_pad() argument
1979 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { in __skb_pad()
1980 memset(skb->data+skb->len, 0, pad); in __skb_pad()
1984 ntail = skb->data_len + pad - (skb->end - skb->tail); in __skb_pad()
1985 if (likely(skb_cloned(skb) || ntail > 0)) { in __skb_pad()
1986 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); in __skb_pad()
1994 err = skb_linearize(skb); in __skb_pad()
1998 memset(skb->data + skb->len, 0, pad); in __skb_pad()
2003 kfree_skb(skb); in __skb_pad()
2021 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) in pskb_put() argument
2023 if (tail != skb) { in pskb_put()
2024 skb->data_len += len; in pskb_put()
2025 skb->len += len; in pskb_put()
2040 void *skb_put(struct sk_buff *skb, unsigned int len) in skb_put() argument
2042 void *tmp = skb_tail_pointer(skb); in skb_put()
2043 SKB_LINEAR_ASSERT(skb); in skb_put()
2044 skb->tail += len; in skb_put()
2045 skb->len += len; in skb_put()
2046 if (unlikely(skb->tail > skb->end)) in skb_put()
2047 skb_over_panic(skb, len, __builtin_return_address(0)); in skb_put()
2061 void *skb_push(struct sk_buff *skb, unsigned int len) in skb_push() argument
2063 skb->data -= len; in skb_push()
2064 skb->len += len; in skb_push()
2065 if (unlikely(skb->data < skb->head)) in skb_push()
2066 skb_under_panic(skb, len, __builtin_return_address(0)); in skb_push()
2067 return skb->data; in skb_push()
2081 void *skb_pull(struct sk_buff *skb, unsigned int len) in skb_pull() argument
2083 return skb_pull_inline(skb, len); in skb_pull()
2096 void skb_trim(struct sk_buff *skb, unsigned int len) in skb_trim() argument
2098 if (skb->len > len) in skb_trim()
2099 __skb_trim(skb, len); in skb_trim()
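
Note: skb_put(), skb_push(), skb_pull() and skb_trim() only move skb->data/skb->tail within the linear buffer (head <= data <= tail <= end must hold). An illustrative sequence; "payload_len" and "hdr_len" are placeholders, and skb_reserve() is only legal while the skb is still empty:

    skb_reserve(skb, 64);          /* data += 64, tail += 64: create headroom  */
    skb_put(skb, payload_len);     /* tail += payload_len: append payload      */
    skb_push(skb, hdr_len);        /* data -= hdr_len: prepend a header        */
    skb_pull(skb, hdr_len);        /* data += hdr_len: strip that header again */
    skb_trim(skb, payload_len);    /* drop anything beyond payload_len         */
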
2106 int ___pskb_trim(struct sk_buff *skb, unsigned int len) in ___pskb_trim() argument
2110 int offset = skb_headlen(skb); in ___pskb_trim()
2111 int nfrags = skb_shinfo(skb)->nr_frags; in ___pskb_trim()
2115 if (skb_cloned(skb) && in ___pskb_trim()
2116 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) in ___pskb_trim()
2124 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
2131 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
2134 skb_shinfo(skb)->nr_frags = i; in ___pskb_trim()
2137 skb_frag_unref(skb, i); in ___pskb_trim()
2139 if (skb_has_frag_list(skb)) in ___pskb_trim()
2140 skb_drop_fraglist(skb); in ___pskb_trim()
2144 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); in ___pskb_trim()
2176 if (len > skb_headlen(skb)) { in ___pskb_trim()
2177 skb->data_len -= skb->len - len; in ___pskb_trim()
2178 skb->len = len; in ___pskb_trim()
2180 skb->len = len; in ___pskb_trim()
2181 skb->data_len = 0; in ___pskb_trim()
2182 skb_set_tail_pointer(skb, len); in ___pskb_trim()
2185 if (!skb->sk || skb->destructor == sock_edemux) in ___pskb_trim()
2186 skb_condense(skb); in ___pskb_trim()
2193 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) in pskb_trim_rcsum_slow() argument
2195 if (skb->ip_summed == CHECKSUM_COMPLETE) { in pskb_trim_rcsum_slow()
2196 int delta = skb->len - len; in pskb_trim_rcsum_slow()
2198 skb->csum = csum_block_sub(skb->csum, in pskb_trim_rcsum_slow()
2199 skb_checksum(skb, len, delta, 0), in pskb_trim_rcsum_slow()
2201 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in pskb_trim_rcsum_slow()
2202 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len; in pskb_trim_rcsum_slow()
2203 int offset = skb_checksum_start_offset(skb) + skb->csum_offset; in pskb_trim_rcsum_slow()
2208 return __pskb_trim(skb, len); in pskb_trim_rcsum_slow()
2237 void *__pskb_pull_tail(struct sk_buff *skb, int delta) in __pskb_pull_tail() argument
2243 int i, k, eat = (skb->tail + delta) - skb->end; in __pskb_pull_tail()
2245 if (eat > 0 || skb_cloned(skb)) { in __pskb_pull_tail()
2246 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, in __pskb_pull_tail()
2251 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), in __pskb_pull_tail()
2252 skb_tail_pointer(skb), delta)); in __pskb_pull_tail()
2257 if (!skb_has_frag_list(skb)) in __pskb_pull_tail()
2262 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2263 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2278 struct sk_buff *list = skb_shinfo(skb)->frag_list; in __pskb_pull_tail()
2290 if (skb_is_gso(skb) && !list->head_frag && in __pskb_pull_tail()
2292 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; in __pskb_pull_tail()
2315 while ((list = skb_shinfo(skb)->frag_list) != insp) { in __pskb_pull_tail()
2316 skb_shinfo(skb)->frag_list = list->next; in __pskb_pull_tail()
2322 skb_shinfo(skb)->frag_list = clone; in __pskb_pull_tail()
2330 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2331 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2334 skb_frag_unref(skb, i); in __pskb_pull_tail()
2337 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; in __pskb_pull_tail()
2339 *frag = skb_shinfo(skb)->frags[i]; in __pskb_pull_tail()
2350 skb_shinfo(skb)->nr_frags = k; in __pskb_pull_tail()
2353 skb->tail += delta; in __pskb_pull_tail()
2354 skb->data_len -= delta; in __pskb_pull_tail()
2356 if (!skb->data_len) in __pskb_pull_tail()
2357 skb_zcopy_clear(skb, false); in __pskb_pull_tail()
2359 return skb_tail_pointer(skb); in __pskb_pull_tail()
2378 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) in skb_copy_bits() argument
2380 int start = skb_headlen(skb); in skb_copy_bits()
2384 if (offset > (int)skb->len - len) in skb_copy_bits()
2391 skb_copy_from_linear_data_offset(skb, offset, to, copy); in skb_copy_bits()
2398 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_bits()
2400 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_bits()
2429 skb_walk_frags(skb, frag_iter) { in skb_copy_bits()
2563 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, in __skb_splice_bits() argument
2575 if (__splice_segment(virt_to_page(skb->data), in __skb_splice_bits()
2576 (unsigned long) skb->data & (PAGE_SIZE - 1), in __skb_splice_bits()
2577 skb_headlen(skb), in __skb_splice_bits()
2579 skb_head_is_locked(skb), in __skb_splice_bits()
2586 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { in __skb_splice_bits()
2587 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; in __skb_splice_bits()
2595 skb_walk_frags(skb, iter) { in __skb_splice_bits()
2615 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, in skb_splice_bits() argument
2630 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); in skb_splice_bits()
2663 static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, in __skb_send_sock() argument
2667 struct sk_buff *head = skb; in __skb_send_sock()
2674 while (offset < skb_headlen(skb) && len) { in __skb_send_sock()
2678 slen = min_t(int, len, skb_headlen(skb) - offset); in __skb_send_sock()
2679 kv.iov_base = skb->data + offset; in __skb_send_sock()
2698 offset -= skb_headlen(skb); in __skb_send_sock()
2701 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
2702 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
2710 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in __skb_send_sock()
2711 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in __skb_send_sock()
2735 if (skb == head) { in __skb_send_sock()
2736 if (skb_has_frag_list(skb)) { in __skb_send_sock()
2737 skb = skb_shinfo(skb)->frag_list; in __skb_send_sock()
2740 } else if (skb->next) { in __skb_send_sock()
2741 skb = skb->next; in __skb_send_sock()
2754 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, in skb_send_sock_locked() argument
2757 return __skb_send_sock(sk, skb, offset, len, kernel_sendmsg_locked, in skb_send_sock_locked()
2763 int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len) in skb_send_sock() argument
2765 return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked, in skb_send_sock()
2781 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) in skb_store_bits() argument
2783 int start = skb_headlen(skb); in skb_store_bits()
2787 if (offset > (int)skb->len - len) in skb_store_bits()
2793 skb_copy_to_linear_data_offset(skb, offset, from, copy); in skb_store_bits()
2800 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_store_bits()
2801 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_store_bits()
2831 skb_walk_frags(skb, frag_iter) { in skb_store_bits()
2859 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, in __skb_checksum() argument
2862 int start = skb_headlen(skb); in __skb_checksum()
2872 skb->data + offset, copy, csum); in __skb_checksum()
2879 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_checksum()
2881 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_checksum()
2916 skb_walk_frags(skb, frag_iter) { in __skb_checksum()
2943 __wsum skb_checksum(const struct sk_buff *skb, int offset, in skb_checksum() argument
2951 return __skb_checksum(skb, offset, len, csum, &ops); in skb_checksum()
2957 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, in skb_copy_and_csum_bits() argument
2960 int start = skb_headlen(skb); in skb_copy_and_csum_bits()
2970 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
2979 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_and_csum_bits()
2984 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_copy_and_csum_bits()
2986 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_copy_and_csum_bits()
3015 skb_walk_frags(skb, frag_iter) { in skb_copy_and_csum_bits()
3042 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) in __skb_checksum_complete_head() argument
3046 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); in __skb_checksum_complete_head()
3049 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete_head()
3050 !skb->csum_complete_sw) in __skb_checksum_complete_head()
3051 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete_head()
3053 if (!skb_shared(skb)) in __skb_checksum_complete_head()
3054 skb->csum_valid = !sum; in __skb_checksum_complete_head()
3068 __sum16 __skb_checksum_complete(struct sk_buff *skb) in __skb_checksum_complete() argument
3073 csum = skb_checksum(skb, 0, skb->len, 0); in __skb_checksum_complete()
3075 sum = csum_fold(csum_add(skb->csum, csum)); in __skb_checksum_complete()
3084 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete()
3085 !skb->csum_complete_sw) in __skb_checksum_complete()
3086 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete()
3089 if (!skb_shared(skb)) { in __skb_checksum_complete()
3091 skb->csum = csum; in __skb_checksum_complete()
3092 skb->ip_summed = CHECKSUM_COMPLETE; in __skb_checksum_complete()
3093 skb->csum_complete_sw = 1; in __skb_checksum_complete()
3094 skb->csum_valid = !sum; in __skb_checksum_complete()
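
Note: receive-side protocol code usually reaches __skb_checksum_complete() through the skb_checksum_complete() helper; a hedged sketch of that check (the drop handling is illustrative):

    if (skb_checksum_complete(skb)) {   /* nonzero: checksum did not verify */
            kfree_skb(skb);
            return 0;
    }
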
3233 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) in skb_copy_and_csum_dev() argument
3238 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_copy_and_csum_dev()
3239 csstart = skb_checksum_start_offset(skb); in skb_copy_and_csum_dev()
3241 csstart = skb_headlen(skb); in skb_copy_and_csum_dev()
3243 BUG_ON(csstart > skb_headlen(skb)); in skb_copy_and_csum_dev()
3245 skb_copy_from_linear_data(skb, to, csstart); in skb_copy_and_csum_dev()
3248 if (csstart != skb->len) in skb_copy_and_csum_dev()
3249 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, in skb_copy_and_csum_dev()
3250 skb->len - csstart); in skb_copy_and_csum_dev()
3252 if (skb->ip_summed == CHECKSUM_PARTIAL) { in skb_copy_and_csum_dev()
3253 long csstuff = csstart + skb->csum_offset; in skb_copy_and_csum_dev()
3311 struct sk_buff *skb; in skb_queue_purge() local
3312 while ((skb = skb_dequeue(list)) != NULL) in skb_queue_purge()
3313 kfree_skb(skb); in skb_queue_purge()
3333 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); in skb_rbtree_purge() local
3336 rb_erase(&skb->rbnode, root); in skb_rbtree_purge()
3337 sum += skb->truesize; in skb_rbtree_purge()
3338 kfree_skb(skb); in skb_rbtree_purge()
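
Note: skb_queue_purge() drains an sk_buff_head and frees every queued skb. A minimal, illustrative round trip through the queue API:

    struct sk_buff_head q;

    skb_queue_head_init(&q);       /* empty list plus spinlock               */
    skb_queue_tail(&q, skb);       /* enqueue at the tail                    */
    skb = skb_dequeue(&q);         /* dequeue from the head, NULL when empty */
    skb_queue_purge(&q);           /* free whatever is still queued          */
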
3395 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) in skb_unlink() argument
3400 __skb_unlink(skb, list); in skb_unlink()
3425 static inline void skb_split_inside_header(struct sk_buff *skb, in skb_split_inside_header() argument
3431 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), in skb_split_inside_header()
3434 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in skb_split_inside_header()
3435 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; in skb_split_inside_header()
3437 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; in skb_split_inside_header()
3438 skb_shinfo(skb)->nr_frags = 0; in skb_split_inside_header()
3439 skb1->data_len = skb->data_len; in skb_split_inside_header()
3441 skb->data_len = 0; in skb_split_inside_header()
3442 skb->len = len; in skb_split_inside_header()
3443 skb_set_tail_pointer(skb, len); in skb_split_inside_header()
3446 static inline void skb_split_no_header(struct sk_buff *skb, in skb_split_no_header() argument
3451 const int nfrags = skb_shinfo(skb)->nr_frags; in skb_split_no_header()
3453 skb_shinfo(skb)->nr_frags = 0; in skb_split_no_header()
3454 skb1->len = skb1->data_len = skb->len - len; in skb_split_no_header()
3455 skb->len = len; in skb_split_no_header()
3456 skb->data_len = len - pos; in skb_split_no_header()
3459 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_split_no_header()
3462 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; in skb_split_no_header()
3473 skb_frag_ref(skb, i); in skb_split_no_header()
3476 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); in skb_split_no_header()
3477 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
3481 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
3493 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) in skb_split() argument
3495 int pos = skb_headlen(skb); in skb_split()
3497 skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG; in skb_split()
3498 skb_zerocopy_clone(skb1, skb, 0); in skb_split()
3500 skb_split_inside_header(skb, skb1, len, pos); in skb_split()
3502 skb_split_no_header(skb, skb1, len, pos); in skb_split()
3510 static int skb_prepare_for_shift(struct sk_buff *skb) in skb_prepare_for_shift() argument
3512 return skb_unclone_keeptruesize(skb, GFP_ATOMIC); in skb_prepare_for_shift()
3533 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) in skb_shift() argument
3538 BUG_ON(shiftlen > skb->len); in skb_shift()
3540 if (skb_headlen(skb)) in skb_shift()
3542 if (skb_zcopy(tgt) || skb_zcopy(skb)) in skb_shift()
3548 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
3562 if (skb_prepare_for_shift(skb) || in skb_shift()
3567 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
3581 if ((shiftlen == skb->len) && in skb_shift()
3582 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
3585 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) in skb_shift()
3588 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { in skb_shift()
3592 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
3620 fragfrom = &skb_shinfo(skb)->frags[0]; in skb_shift()
3624 __skb_frag_unref(fragfrom, skb->pp_recycle); in skb_shift()
3629 while (from < skb_shinfo(skb)->nr_frags) in skb_shift()
3630 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; in skb_shift()
3631 skb_shinfo(skb)->nr_frags = to; in skb_shift()
3633 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); in skb_shift()
3640 skb->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
3643 skb->len -= shiftlen; in skb_shift()
3644 skb->data_len -= shiftlen; in skb_shift()
3645 skb->truesize -= shiftlen; in skb_shift()
3663 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, in skb_prepare_seq_read() argument
3668 st->root_skb = st->cur_skb = skb; in skb_prepare_seq_read()
3824 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, in skb_find_text() argument
3835 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); in skb_find_text()
3842 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, in skb_append_pagefrags() argument
3845 int i = skb_shinfo(skb)->nr_frags; in skb_append_pagefrags()
3847 if (skb_can_coalesce(skb, i, page, offset)) { in skb_append_pagefrags()
3848 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); in skb_append_pagefrags()
3851 skb_fill_page_desc_noacc(skb, i, page, offset, size); in skb_append_pagefrags()
3871 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) in skb_pull_rcsum() argument
3873 unsigned char *data = skb->data; in skb_pull_rcsum()
3875 BUG_ON(len > skb->len); in skb_pull_rcsum()
3876 __skb_pull(skb, len); in skb_pull_rcsum()
3877 skb_postpull_rcsum(skb, data, len); in skb_pull_rcsum()
3878 return skb->data; in skb_pull_rcsum()
3895 struct sk_buff *skb_segment_list(struct sk_buff *skb, in skb_segment_list() argument
3899 struct sk_buff *list_skb = skb_shinfo(skb)->frag_list; in skb_segment_list()
3900 unsigned int tnl_hlen = skb_tnl_header_len(skb); in skb_segment_list()
3907 skb_push(skb, -skb_network_offset(skb) + offset); in skb_segment_list()
3910 err = skb_unclone(skb, GFP_ATOMIC); in skb_segment_list()
3914 skb_shinfo(skb)->frag_list = NULL; in skb_segment_list()
3934 skb->next = nskb; in skb_segment_list()
3950 len_diff = skb_network_header_len(nskb) - skb_network_header_len(skb); in skb_segment_list()
3951 __copy_skb_header(nskb, skb); in skb_segment_list()
3953 skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb)); in skb_segment_list()
3955 skb_copy_from_linear_data_offset(skb, -tnl_hlen, in skb_segment_list()
3964 skb->truesize = skb->truesize - delta_truesize; in skb_segment_list()
3965 skb->data_len = skb->data_len - delta_len; in skb_segment_list()
3966 skb->len = skb->len - delta_len; in skb_segment_list()
3968 skb_gso_reset(skb); in skb_segment_list()
3970 skb->prev = tail; in skb_segment_list()
3972 if (skb_needs_linearize(skb, features) && in skb_segment_list()
3973 __skb_linearize(skb)) in skb_segment_list()
3976 skb_get(skb); in skb_segment_list()
3978 return skb; in skb_segment_list()
3981 kfree_skb_list(skb->next); in skb_segment_list()
3982 skb->next = NULL; in skb_segment_list()
3987 int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb) in skb_gro_receive_list() argument
3989 if (unlikely(p->len + skb->len >= 65536)) in skb_gro_receive_list()
3993 skb_shinfo(p)->frag_list = skb; in skb_gro_receive_list()
3995 NAPI_GRO_CB(p)->last->next = skb; in skb_gro_receive_list()
3997 skb_pull(skb, skb_gro_offset(skb)); in skb_gro_receive_list()
3999 NAPI_GRO_CB(p)->last = skb; in skb_gro_receive_list()
4001 p->data_len += skb->len; in skb_gro_receive_list()
4004 skb->destructor = NULL; in skb_gro_receive_list()
4005 p->truesize += skb->truesize; in skb_gro_receive_list()
4006 p->len += skb->len; in skb_gro_receive_list()
4008 NAPI_GRO_CB(skb)->same_flow = 1; in skb_gro_receive_list()
4373 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) in skb_gro_receive() argument
4375 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); in skb_gro_receive()
4376 unsigned int offset = skb_gro_offset(skb); in skb_gro_receive()
4377 unsigned int headlen = skb_headlen(skb); in skb_gro_receive()
4378 unsigned int len = skb_gro_len(skb); in skb_gro_receive()
4383 if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) in skb_gro_receive()
4412 new_truesize = SKB_TRUESIZE(skb_end_offset(skb)); in skb_gro_receive()
4413 delta_truesize = skb->truesize - new_truesize; in skb_gro_receive()
4415 skb->truesize = new_truesize; in skb_gro_receive()
4416 skb->len -= skb->data_len; in skb_gro_receive()
4417 skb->data_len = 0; in skb_gro_receive()
4419 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; in skb_gro_receive()
4421 } else if (skb->head_frag) { in skb_gro_receive()
4424 struct page *page = virt_to_head_page(skb->head); in skb_gro_receive()
4431 first_offset = skb->data - in skb_gro_receive()
4445 delta_truesize = skb->truesize - new_truesize; in skb_gro_receive()
4446 skb->truesize = new_truesize; in skb_gro_receive()
4447 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; in skb_gro_receive()
4453 skb->destructor = NULL; in skb_gro_receive()
4454 delta_truesize = skb->truesize; in skb_gro_receive()
4460 skb->data_len -= eat; in skb_gro_receive()
4461 skb->len -= eat; in skb_gro_receive()
4465 __skb_pull(skb, offset); in skb_gro_receive()
4468 skb_shinfo(p)->frag_list = skb; in skb_gro_receive()
4470 NAPI_GRO_CB(p)->last->next = skb; in skb_gro_receive()
4471 NAPI_GRO_CB(p)->last = skb; in skb_gro_receive()
4472 __skb_header_release(skb); in skb_gro_receive()
4485 NAPI_GRO_CB(skb)->same_flow = 1; in skb_gro_receive()
4559 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, in __skb_to_sgvec() argument
4562 int start = skb_headlen(skb); in __skb_to_sgvec()
4573 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
4580 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_to_sgvec()
4585 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_to_sgvec()
4587 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_to_sgvec()
4603 skb_walk_frags(skb, frag_iter) { in __skb_to_sgvec()
4642 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in skb_to_sgvec() argument
4644 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec()
4674 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, in skb_to_sgvec_nomark() argument
4677 return __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec_nomark()
4700 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) in skb_cow_data() argument
4710 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && in skb_cow_data()
4711 !__pskb_pull_tail(skb, __skb_pagelen(skb))) in skb_cow_data()
4715 if (!skb_has_frag_list(skb)) { in skb_cow_data()
4721 if (skb_tailroom(skb) < tailbits && in skb_cow_data()
4722 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) in skb_cow_data()
4726 *trailer = skb; in skb_cow_data()
4733 skb_p = &skb_shinfo(skb)->frag_list; in skb_cow_data()
4793 static void sock_rmem_free(struct sk_buff *skb) in sock_rmem_free() argument
4795 struct sock *sk = skb->sk; in sock_rmem_free()
4797 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in sock_rmem_free()
4800 static void skb_set_err_queue(struct sk_buff *skb) in skb_set_err_queue() argument
4805 skb->pkt_type = PACKET_OUTGOING; in skb_set_err_queue()
4812 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_err_skb() argument
4814 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in sock_queue_err_skb()
4818 skb_orphan(skb); in sock_queue_err_skb()
4819 skb->sk = sk; in sock_queue_err_skb()
4820 skb->destructor = sock_rmem_free; in sock_queue_err_skb()
4821 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in sock_queue_err_skb()
4822 skb_set_err_queue(skb); in sock_queue_err_skb()
4825 skb_dst_force(skb); in sock_queue_err_skb()
4827 skb_queue_tail(&sk->sk_error_queue, skb); in sock_queue_err_skb()
4834 static bool is_icmp_err_skb(const struct sk_buff *skb) in is_icmp_err_skb() argument
4836 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || in is_icmp_err_skb()
4837 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); in is_icmp_err_skb()
4843 struct sk_buff *skb, *skb_next = NULL; in sock_dequeue_err_skb() local
4848 skb = __skb_dequeue(q); in sock_dequeue_err_skb()
4849 if (skb && (skb_next = skb_peek(q))) { in sock_dequeue_err_skb()
4856 if (is_icmp_err_skb(skb) && !icmp_next) in sock_dequeue_err_skb()
4862 return skb; in sock_dequeue_err_skb()
4879 struct sk_buff *skb_clone_sk(struct sk_buff *skb) in skb_clone_sk() argument
4881 struct sock *sk = skb->sk; in skb_clone_sk()
4887 clone = skb_clone(skb, GFP_ATOMIC); in skb_clone_sk()
4900 static void __skb_complete_tx_timestamp(struct sk_buff *skb, in __skb_complete_tx_timestamp() argument
4908 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); in __skb_complete_tx_timestamp()
4910 serr = SKB_EXT_ERR(skb); in __skb_complete_tx_timestamp()
4916 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; in __skb_complete_tx_timestamp()
4918 serr->ee.ee_data = skb_shinfo(skb)->tskey; in __skb_complete_tx_timestamp()
4924 err = sock_queue_err_skb(sk, skb); in __skb_complete_tx_timestamp()
4927 kfree_skb(skb); in __skb_complete_tx_timestamp()
4944 void skb_complete_tx_timestamp(struct sk_buff *skb, in skb_complete_tx_timestamp() argument
4947 struct sock *sk = skb->sk; in skb_complete_tx_timestamp()
4956 *skb_hwtstamps(skb) = *hwtstamps; in skb_complete_tx_timestamp()
4957 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); in skb_complete_tx_timestamp()
4963 kfree_skb(skb); in skb_complete_tx_timestamp()
4972 struct sk_buff *skb; in __skb_tstamp_tx() local
4991 skb = tcp_get_timestamping_opt_stats(sk, orig_skb, in __skb_tstamp_tx()
4996 skb = alloc_skb(0, GFP_ATOMIC); in __skb_tstamp_tx()
4998 skb = skb_clone(orig_skb, GFP_ATOMIC); in __skb_tstamp_tx()
5000 if (skb_orphan_frags_rx(skb, GFP_ATOMIC)) { in __skb_tstamp_tx()
5001 kfree_skb(skb); in __skb_tstamp_tx()
5005 if (!skb) in __skb_tstamp_tx()
5009 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & in __skb_tstamp_tx()
5011 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; in __skb_tstamp_tx()
5015 *skb_hwtstamps(skb) = *hwtstamps; in __skb_tstamp_tx()
5017 skb->tstamp = ktime_get_real(); in __skb_tstamp_tx()
5019 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); in __skb_tstamp_tx()
5031 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) in skb_complete_wifi_ack() argument
5033 struct sock *sk = skb->sk; in skb_complete_wifi_ack()
5037 skb->wifi_acked_valid = 1; in skb_complete_wifi_ack()
5038 skb->wifi_acked = acked; in skb_complete_wifi_ack()
5040 serr = SKB_EXT_ERR(skb); in skb_complete_wifi_ack()
5049 err = sock_queue_err_skb(sk, skb); in skb_complete_wifi_ack()
5053 kfree_skb(skb); in skb_complete_wifi_ack()
5069 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) in skb_partial_csum_set() argument
5072 u32 csum_start = skb_headroom(skb) + (u32)start; in skb_partial_csum_set()
5074 if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { in skb_partial_csum_set()
5076 start, off, skb_headroom(skb), skb_headlen(skb)); in skb_partial_csum_set()
5079 skb->ip_summed = CHECKSUM_PARTIAL; in skb_partial_csum_set()
5080 skb->csum_start = csum_start; in skb_partial_csum_set()
5081 skb->csum_offset = off; in skb_partial_csum_set()
5082 skb_set_transport_header(skb, start); in skb_partial_csum_set()
5087 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, in skb_maybe_pull_tail() argument
5090 if (skb_headlen(skb) >= len) in skb_maybe_pull_tail()
5096 if (max > skb->len) in skb_maybe_pull_tail()
5097 max = skb->len; in skb_maybe_pull_tail()
5099 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) in skb_maybe_pull_tail()
5102 if (skb_headlen(skb) < len) in skb_maybe_pull_tail()
5110 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, in skb_checksum_setup_ip() argument
5118 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), in skb_checksum_setup_ip()
5120 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
5124 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; in skb_checksum_setup_ip()
5127 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), in skb_checksum_setup_ip()
5129 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
5133 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; in skb_checksum_setup_ip()
5144 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv4() argument
5153 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv4()
5159 if (ip_is_fragment(ip_hdr(skb))) in skb_checksum_setup_ipv4()
5162 off = ip_hdrlen(skb); in skb_checksum_setup_ipv4()
5169 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); in skb_checksum_setup_ipv4()
5174 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in skb_checksum_setup_ipv4()
5175 ip_hdr(skb)->daddr, in skb_checksum_setup_ipv4()
5176 skb->len - off, in skb_checksum_setup_ipv4()
5177 ip_hdr(skb)->protocol, 0); in skb_checksum_setup_ipv4()
5189 #define OPT_HDR(type, skb, off) \ argument
5190 (type *)(skb_network_header(skb) + (off))
5192 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv6() argument
5207 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); in skb_checksum_setup_ipv6()
5211 nexthdr = ipv6_hdr(skb)->nexthdr; in skb_checksum_setup_ipv6()
5213 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); in skb_checksum_setup_ipv6()
5221 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5228 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); in skb_checksum_setup_ipv6()
5236 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5243 hp = OPT_HDR(struct ip_auth_hdr, skb, off); in skb_checksum_setup_ipv6()
5251 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
5258 hp = OPT_HDR(struct frag_hdr, skb, off); in skb_checksum_setup_ipv6()
5278 csum = skb_checksum_setup_ip(skb, nexthdr, off); in skb_checksum_setup_ipv6()
5283 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in skb_checksum_setup_ipv6()
5284 &ipv6_hdr(skb)->daddr, in skb_checksum_setup_ipv6()
5285 skb->len - off, nexthdr, 0); in skb_checksum_setup_ipv6()
5297 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) in skb_checksum_setup() argument
5301 switch (skb->protocol) { in skb_checksum_setup()
5303 err = skb_checksum_setup_ipv4(skb, recalculate); in skb_checksum_setup()
5307 err = skb_checksum_setup_ipv6(skb, recalculate); in skb_checksum_setup()
5332 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, in skb_checksum_maybe_trim() argument
5336 unsigned int len = skb_transport_offset(skb) + transport_len; in skb_checksum_maybe_trim()
5339 if (skb->len < len) in skb_checksum_maybe_trim()
5341 else if (skb->len == len) in skb_checksum_maybe_trim()
5342 return skb; in skb_checksum_maybe_trim()
5344 skb_chk = skb_clone(skb, GFP_ATOMIC); in skb_checksum_maybe_trim()
5372 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, in skb_checksum_trimmed() argument
5374 __sum16(*skb_chkf)(struct sk_buff *skb)) in skb_checksum_trimmed() argument
5377 unsigned int offset = skb_transport_offset(skb); in skb_checksum_trimmed()
5380 skb_chk = skb_checksum_maybe_trim(skb, transport_len); in skb_checksum_trimmed()
5397 if (skb_chk && skb_chk != skb) in skb_checksum_trimmed()
5405 void __skb_warn_lro_forwarding(const struct sk_buff *skb) in __skb_warn_lro_forwarding() argument
5408 skb->dev->name); in __skb_warn_lro_forwarding()
5412 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) in kfree_skb_partial() argument
5415 skb_release_head_state(skb); in kfree_skb_partial()
5416 kmem_cache_free(skbuff_head_cache, skb); in kfree_skb_partial()
5418 __kfree_skb(skb); in kfree_skb_partial()
5534 void skb_scrub_packet(struct sk_buff *skb, bool xnet) in skb_scrub_packet() argument
5536 skb->pkt_type = PACKET_HOST; in skb_scrub_packet()
5537 skb->skb_iif = 0; in skb_scrub_packet()
5538 skb->ignore_df = 0; in skb_scrub_packet()
5539 skb_dst_drop(skb); in skb_scrub_packet()
5540 skb_ext_reset(skb); in skb_scrub_packet()
5541 nf_reset_ct(skb); in skb_scrub_packet()
5542 nf_reset_trace(skb); in skb_scrub_packet()
5545 skb->offload_fwd_mark = 0; in skb_scrub_packet()
5546 skb->offload_l3_fwd_mark = 0; in skb_scrub_packet()
5552 ipvs_reset(skb); in skb_scrub_packet()
5553 skb->mark = 0; in skb_scrub_packet()
5554 skb->tstamp = 0; in skb_scrub_packet()
5568 static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) in skb_gso_transport_seglen() argument
5570 const struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_gso_transport_seglen()
5573 if (skb->encapsulation) { in skb_gso_transport_seglen()
5574 thlen = skb_inner_transport_header(skb) - in skb_gso_transport_seglen()
5575 skb_transport_header(skb); in skb_gso_transport_seglen()
5578 thlen += inner_tcp_hdrlen(skb); in skb_gso_transport_seglen()
5580 thlen = tcp_hdrlen(skb); in skb_gso_transport_seglen()
5581 } else if (unlikely(skb_is_gso_sctp(skb))) { in skb_gso_transport_seglen()
5603 static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) in skb_gso_network_seglen() argument
5605 unsigned int hdr_len = skb_transport_header(skb) - in skb_gso_network_seglen()
5606 skb_network_header(skb); in skb_gso_network_seglen()
5608 return hdr_len + skb_gso_transport_seglen(skb); in skb_gso_network_seglen()
5620 static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) in skb_gso_mac_seglen() argument
5622 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); in skb_gso_mac_seglen()
5624 return hdr_len + skb_gso_transport_seglen(skb); in skb_gso_mac_seglen()
5648 static inline bool skb_gso_size_check(const struct sk_buff *skb, in skb_gso_size_check() argument
5651 const struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_gso_size_check()
5660 skb_walk_frags(skb, iter) { in skb_gso_size_check()
5678 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) in skb_gso_validate_network_len() argument
5680 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); in skb_gso_validate_network_len()
5693 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) in skb_gso_validate_mac_len() argument
5695 return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); in skb_gso_validate_mac_len()
5699 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) in skb_reorder_vlan_header() argument
5704 if (skb_cow(skb, skb_headroom(skb)) < 0) { in skb_reorder_vlan_header()
5705 kfree_skb(skb); in skb_reorder_vlan_header()
5709 mac_len = skb->data - skb_mac_header(skb); in skb_reorder_vlan_header()
5711 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), in skb_reorder_vlan_header()
5715 meta_len = skb_metadata_len(skb); in skb_reorder_vlan_header()
5717 meta = skb_metadata_end(skb) - meta_len; in skb_reorder_vlan_header()
5721 skb->mac_header += VLAN_HLEN; in skb_reorder_vlan_header()
5722 return skb; in skb_reorder_vlan_header()
5725 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) in skb_vlan_untag() argument
5730 if (unlikely(skb_vlan_tag_present(skb))) { in skb_vlan_untag()
5732 return skb; in skb_vlan_untag()
5735 skb = skb_share_check(skb, GFP_ATOMIC); in skb_vlan_untag()
5736 if (unlikely(!skb)) in skb_vlan_untag()
5739 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short)))) in skb_vlan_untag()
5742 vhdr = (struct vlan_hdr *)skb->data; in skb_vlan_untag()
5744 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); in skb_vlan_untag()
5746 skb_pull_rcsum(skb, VLAN_HLEN); in skb_vlan_untag()
5747 vlan_set_encap_proto(skb, vhdr); in skb_vlan_untag()
5749 skb = skb_reorder_vlan_header(skb); in skb_vlan_untag()
5750 if (unlikely(!skb)) in skb_vlan_untag()
5753 skb_reset_network_header(skb); in skb_vlan_untag()
5754 if (!skb_transport_header_was_set(skb)) in skb_vlan_untag()
5755 skb_reset_transport_header(skb); in skb_vlan_untag()
5756 skb_reset_mac_len(skb); in skb_vlan_untag()
5758 return skb; in skb_vlan_untag()
5761 kfree_skb(skb); in skb_vlan_untag()
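
Illustrative usage: skb_vlan_untag() moves an in-band 802.1Q tag into skb->vlan_tci, as hardware acceleration would have done, and may consume the skb (it returns NULL on failure and may return a different skb after skb_share_check()). Sketch only; rx_normalize_vlan() is hypothetical.

	#include <linux/skbuff.h>
	#include <linux/if_vlan.h>

	/* Sketch: normalise a received frame so later code can rely on
	 * skb_vlan_tag_present() instead of parsing the header itself. */
	static struct sk_buff *rx_normalize_vlan(struct sk_buff *skb)
	{
		if (eth_type_vlan(skb->protocol))
			skb = skb_vlan_untag(skb);	/* may return NULL */
		return skb;
	}
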
5766 int skb_ensure_writable(struct sk_buff *skb, int write_len) in skb_ensure_writable() argument
5768 if (!pskb_may_pull(skb, write_len)) in skb_ensure_writable()
5771 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) in skb_ensure_writable()
5774 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_ensure_writable()
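
Illustrative usage: skb_ensure_writable() pulls write_len bytes into the linear area and unclones the head if needed, so the caller may then modify those bytes in place. A hedged sketch that rewrites the IPv4 DS field; set_ip_tos() is a made-up helper.

	#include <linux/skbuff.h>
	#include <linux/ip.h>
	#include <net/dsfield.h>

	/* Sketch: make the IPv4 header writable before mangling it. */
	static int set_ip_tos(struct sk_buff *skb, u8 tos)
	{
		int err = skb_ensure_writable(skb, skb_network_offset(skb) +
					      sizeof(struct iphdr));
		if (err)
			return err;

		/* updates the header checksum as well */
		ipv4_change_dsfield(ip_hdr(skb), 0, tos);
		return 0;
	}
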
5781 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) in __skb_vlan_pop() argument
5784 int offset = skb->data - skb_mac_header(skb); in __skb_vlan_pop()
5793 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); in __skb_vlan_pop()
5797 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in __skb_vlan_pop()
5799 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); in __skb_vlan_pop()
5802 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); in __skb_vlan_pop()
5803 __skb_pull(skb, VLAN_HLEN); in __skb_vlan_pop()
5805 vlan_set_encap_proto(skb, vhdr); in __skb_vlan_pop()
5806 skb->mac_header += VLAN_HLEN; in __skb_vlan_pop()
5808 if (skb_network_offset(skb) < ETH_HLEN) in __skb_vlan_pop()
5809 skb_set_network_header(skb, ETH_HLEN); in __skb_vlan_pop()
5811 skb_reset_mac_len(skb); in __skb_vlan_pop()
5820 int skb_vlan_pop(struct sk_buff *skb) in skb_vlan_pop() argument
5826 if (likely(skb_vlan_tag_present(skb))) { in skb_vlan_pop()
5827 __vlan_hwaccel_clear_tag(skb); in skb_vlan_pop()
5829 if (unlikely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
5832 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
5837 if (likely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
5840 vlan_proto = skb->protocol; in skb_vlan_pop()
5841 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
5845 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_pop()
5853 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) in skb_vlan_push() argument
5855 if (skb_vlan_tag_present(skb)) { in skb_vlan_push()
5856 int offset = skb->data - skb_mac_header(skb); in skb_vlan_push()
5865 err = __vlan_insert_tag(skb, skb->vlan_proto, in skb_vlan_push()
5866 skb_vlan_tag_get(skb)); in skb_vlan_push()
5870 skb->protocol = skb->vlan_proto; in skb_vlan_push()
5871 skb->mac_len += VLAN_HLEN; in skb_vlan_push()
5873 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in skb_vlan_push()
5875 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_push()
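
Illustrative usage: skb_vlan_pop() removes the outermost tag whether it is accelerated or in-band, and skb_vlan_push() adds one, first pushing any existing accelerated tag into the payload. A hedged VLAN-translation sketch; vlan_translate() and VID 100 are illustrative only.

	#include <linux/skbuff.h>
	#include <linux/if_vlan.h>

	/* Sketch: replace whatever VLAN the frame carries with VID 100. */
	static int vlan_translate(struct sk_buff *skb)
	{
		int err = skb_vlan_pop(skb);

		if (err)
			return err;
		return skb_vlan_push(skb, htons(ETH_P_8021Q), 100);
	}
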
5892 int skb_eth_pop(struct sk_buff *skb) in skb_eth_pop() argument
5894 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) || in skb_eth_pop()
5895 skb_network_offset(skb) < ETH_HLEN) in skb_eth_pop()
5898 skb_pull_rcsum(skb, ETH_HLEN); in skb_eth_pop()
5899 skb_reset_mac_header(skb); in skb_eth_pop()
5900 skb_reset_mac_len(skb); in skb_eth_pop()
5919 int skb_eth_push(struct sk_buff *skb, const unsigned char *dst, in skb_eth_push() argument
5925 if (skb_network_offset(skb) || skb_vlan_tag_present(skb)) in skb_eth_push()
5928 err = skb_cow_head(skb, sizeof(*eth)); in skb_eth_push()
5932 skb_push(skb, sizeof(*eth)); in skb_eth_push()
5933 skb_reset_mac_header(skb); in skb_eth_push()
5934 skb_reset_mac_len(skb); in skb_eth_push()
5936 eth = eth_hdr(skb); in skb_eth_push()
5939 eth->h_proto = skb->protocol; in skb_eth_push()
5941 skb_postpush_rcsum(skb, eth, sizeof(*eth)); in skb_eth_push()
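
Illustrative usage: skb_eth_pop() and skb_eth_push() convert between Ethernet-framed and bare L3 packets, e.g. at an L3 tunnel port. A thin hedged sketch; the wrapper names and the dst/src addresses passed by the caller are assumptions.

	#include <linux/skbuff.h>

	/* Sketch: strip the Ethernet header before handing a packet to an L3
	 * tunnel, or rebuild one on the way out. */
	static int decap_for_l3_tunnel(struct sk_buff *skb)
	{
		return skb_eth_pop(skb);
	}

	static int encap_from_l3_tunnel(struct sk_buff *skb,
					const unsigned char *dst,
					const unsigned char *src)
	{
		return skb_eth_push(skb, dst, src);
	}
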
5948 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, in skb_mod_eth_type() argument
5951 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mod_eth_type()
5954 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mod_eth_type()
5975 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, in skb_mpls_push() argument
5985 if (skb->encapsulation) in skb_mpls_push()
5988 err = skb_cow_head(skb, MPLS_HLEN); in skb_mpls_push()
5992 if (!skb->inner_protocol) { in skb_mpls_push()
5993 skb_set_inner_network_header(skb, skb_network_offset(skb)); in skb_mpls_push()
5994 skb_set_inner_protocol(skb, skb->protocol); in skb_mpls_push()
5997 skb_push(skb, MPLS_HLEN); in skb_mpls_push()
5998 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), in skb_mpls_push()
6000 skb_reset_mac_header(skb); in skb_mpls_push()
6001 skb_set_network_header(skb, mac_len); in skb_mpls_push()
6002 skb_reset_mac_len(skb); in skb_mpls_push()
6004 lse = mpls_hdr(skb); in skb_mpls_push()
6006 skb_postpush_rcsum(skb, lse, MPLS_HLEN); in skb_mpls_push()
6009 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); in skb_mpls_push()
6010 skb->protocol = mpls_proto; in skb_mpls_push()
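
Illustrative usage: skb_mpls_push() opens room in front of the network header, writes the label stack entry and, for Ethernet frames, rewrites h_proto via skb_mod_eth_type(). A hedged sketch building one LSE with the UAPI shift macros; push_one_label() and the label/TTL values are illustrative.

	#include <linux/skbuff.h>
	#include <linux/if_ether.h>
	#include <linux/mpls.h>

	/* Sketch: push label 16, TC 0, bottom-of-stack, TTL 64 on an Ethernet frame. */
	static int push_one_label(struct sk_buff *skb)
	{
		u32 lse = (16 << MPLS_LS_LABEL_SHIFT) |
			  (1 << MPLS_LS_S_SHIFT) |
			  (64 << MPLS_LS_TTL_SHIFT);

		return skb_mpls_push(skb, cpu_to_be32(lse), htons(ETH_P_MPLS_UC),
				     skb->mac_len, true);
	}
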
6028 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, in skb_mpls_pop() argument
6033 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_pop()
6036 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); in skb_mpls_pop()
6040 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); in skb_mpls_pop()
6041 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), in skb_mpls_pop()
6044 __skb_pull(skb, MPLS_HLEN); in skb_mpls_pop()
6045 skb_reset_mac_header(skb); in skb_mpls_pop()
6046 skb_set_network_header(skb, mac_len); in skb_mpls_pop()
6052 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); in skb_mpls_pop()
6053 skb_mod_eth_type(skb, hdr, next_proto); in skb_mpls_pop()
6055 skb->protocol = next_proto; in skb_mpls_pop()
6071 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) in skb_mpls_update_lse() argument
6075 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_update_lse()
6078 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); in skb_mpls_update_lse()
6082 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mpls_update_lse()
6083 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; in skb_mpls_update_lse()
6085 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mpls_update_lse()
6088 mpls_hdr(skb)->label_stack_entry = mpls_lse; in skb_mpls_update_lse()
6103 int skb_mpls_dec_ttl(struct sk_buff *skb) in skb_mpls_dec_ttl() argument
6108 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_dec_ttl()
6111 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) in skb_mpls_dec_ttl()
6114 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); in skb_mpls_dec_ttl()
6122 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); in skb_mpls_dec_ttl()
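
Illustrative usage: skb_mpls_pop() reverses the push and restores next_proto, while skb_mpls_dec_ttl() rewrites only the TTL bits through skb_mpls_update_lse(). A hedged sketch; mpls_to_ip() is a made-up helper and assumes the popped label is bottom-of-stack over IPv4.

	#include <linux/skbuff.h>
	#include <linux/if_ether.h>

	/* Sketch: decrement the MPLS TTL; when forwarding to a non-MPLS next hop,
	 * pop the label and expose the IPv4 payload. */
	static int mpls_to_ip(struct sk_buff *skb, bool pop_label)
	{
		int err = skb_mpls_dec_ttl(skb);

		if (err)
			return err;
		if (pop_label)
			err = skb_mpls_pop(skb, htons(ETH_P_IP), skb->mac_len, true);
		return err;
	}
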
6145 struct sk_buff *skb; in alloc_skb_with_frags() local
6157 skb = alloc_skb(header_len, gfp_mask); in alloc_skb_with_frags()
6158 if (!skb) in alloc_skb_with_frags()
6161 skb->truesize += npages << PAGE_SHIFT; in alloc_skb_with_frags()
6186 skb_fill_page_desc(skb, i, page, 0, chunk); in alloc_skb_with_frags()
6190 return skb; in alloc_skb_with_frags()
6193 kfree_skb(skb); in alloc_skb_with_frags()
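
Illustrative usage: alloc_skb_with_frags() allocates header_len bytes of linear space plus up to data_len bytes spread over page fragments of at most order max_page_order; on failure *errcode reports why. A hedged sketch in the spirit of sock_alloc_send_pskb() callers; make_big_skb() is hypothetical.

	#include <linux/err.h>
	#include <linux/mm.h>
	#include <linux/skbuff.h>

	/* Sketch: build a mostly-paged skb for a large datagram. */
	static struct sk_buff *make_big_skb(size_t hdr, size_t payload, gfp_t gfp)
	{
		struct sk_buff *skb;
		int err;

		skb = alloc_skb_with_frags(hdr, payload, PAGE_ALLOC_COSTLY_ORDER,
					   &err, gfp);
		if (!skb)
			return ERR_PTR(err);

		/* The helper fills the frags but leaves the length accounting
		 * to the caller. */
		skb_put(skb, hdr);
		skb->data_len = payload;
		skb->len += payload;
		return skb;
	}
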
6199 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, in pskb_carve_inside_header() argument
6203 int size = skb_end_offset(skb); in pskb_carve_inside_header()
6209 if (skb_pfmemalloc(skb)) in pskb_carve_inside_header()
6220 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); in pskb_carve_inside_header()
6221 skb->len -= off; in pskb_carve_inside_header()
6224 skb_shinfo(skb), in pskb_carve_inside_header()
6226 frags[skb_shinfo(skb)->nr_frags])); in pskb_carve_inside_header()
6227 if (skb_cloned(skb)) { in pskb_carve_inside_header()
6229 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_header()
6233 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_carve_inside_header()
6234 skb_frag_ref(skb, i); in pskb_carve_inside_header()
6235 if (skb_has_frag_list(skb)) in pskb_carve_inside_header()
6236 skb_clone_fraglist(skb); in pskb_carve_inside_header()
6237 skb_release_data(skb); in pskb_carve_inside_header()
6242 skb_free_head(skb); in pskb_carve_inside_header()
6245 skb->head = data; in pskb_carve_inside_header()
6246 skb->data = data; in pskb_carve_inside_header()
6247 skb->head_frag = 0; in pskb_carve_inside_header()
6248 skb_set_end_offset(skb, size); in pskb_carve_inside_header()
6249 skb_set_tail_pointer(skb, skb_headlen(skb)); in pskb_carve_inside_header()
6250 skb_headers_offset_update(skb, 0); in pskb_carve_inside_header()
6251 skb->cloned = 0; in pskb_carve_inside_header()
6252 skb->hdr_len = 0; in pskb_carve_inside_header()
6253 skb->nohdr = 0; in pskb_carve_inside_header()
6254 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_header()
6259 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
6264 static int pskb_carve_frag_list(struct sk_buff *skb, in pskb_carve_frag_list() argument
6318 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, in pskb_carve_inside_nonlinear() argument
6322 int size = skb_end_offset(skb); in pskb_carve_inside_nonlinear()
6324 const int nfrags = skb_shinfo(skb)->nr_frags; in pskb_carve_inside_nonlinear()
6329 if (skb_pfmemalloc(skb)) in pskb_carve_inside_nonlinear()
6340 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0])); in pskb_carve_inside_nonlinear()
6341 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_nonlinear()
6347 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); in pskb_carve_inside_nonlinear()
6350 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; in pskb_carve_inside_nonlinear()
6364 skb_frag_ref(skb, i); in pskb_carve_inside_nonlinear()
6370 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
6371 skb_clone_fraglist(skb); in pskb_carve_inside_nonlinear()
6374 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) { in pskb_carve_inside_nonlinear()
6376 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
6377 kfree_skb_list(skb_shinfo(skb)->frag_list); in pskb_carve_inside_nonlinear()
6381 skb_release_data(skb); in pskb_carve_inside_nonlinear()
6383 skb->head = data; in pskb_carve_inside_nonlinear()
6384 skb->head_frag = 0; in pskb_carve_inside_nonlinear()
6385 skb->data = data; in pskb_carve_inside_nonlinear()
6386 skb_set_end_offset(skb, size); in pskb_carve_inside_nonlinear()
6387 skb_reset_tail_pointer(skb); in pskb_carve_inside_nonlinear()
6388 skb_headers_offset_update(skb, 0); in pskb_carve_inside_nonlinear()
6389 skb->cloned = 0; in pskb_carve_inside_nonlinear()
6390 skb->hdr_len = 0; in pskb_carve_inside_nonlinear()
6391 skb->nohdr = 0; in pskb_carve_inside_nonlinear()
6392 skb->len -= off; in pskb_carve_inside_nonlinear()
6393 skb->data_len = skb->len; in pskb_carve_inside_nonlinear()
6394 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_nonlinear()
6399 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) in pskb_carve() argument
6401 int headlen = skb_headlen(skb); in pskb_carve()
6404 return pskb_carve_inside_header(skb, len, headlen, gfp); in pskb_carve()
6406 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); in pskb_carve()
6412 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, in pskb_extract() argument
6415 struct sk_buff *clone = skb_clone(skb, gfp); in pskb_extract()
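
Illustrative usage: pskb_carve() trims everything before off from a clone, so pskb_extract() yields a new skb covering [off, off + to_copy) without copying fragment payloads. Sketch only; extract_record() is a made-up name.

	#include <linux/skbuff.h>

	/* Sketch: peel a record out of a larger buffer without a payload memcpy. */
	static struct sk_buff *extract_record(struct sk_buff *skb, int off, int len)
	{
		struct sk_buff *rec = pskb_extract(skb, off, len, GFP_ATOMIC);

		/* "rec" is independent of "skb"; both must eventually be freed. */
		return rec;
	}
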
6441 void skb_condense(struct sk_buff *skb) in skb_condense() argument
6443 if (skb->data_len) { in skb_condense()
6444 if (skb->data_len > skb->end - skb->tail || in skb_condense()
6445 skb_cloned(skb)) in skb_condense()
6449 __pskb_pull_tail(skb, skb->data_len); in skb_condense()
6458 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); in skb_condense()
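
Illustrative usage: skb_condense() pulls small amounts of paged data back into the tail room and then lowers truesize, which matters when a packet may sit on a receive queue for a long time. A hedged sketch loosely modelled on the TCP input path; queue_cheaply() is hypothetical and assumes the caller holds the queue lock.

	#include <linux/skbuff.h>
	#include <net/sock.h>

	/* Sketch: shrink per-packet memory accounting before queueing to a socket. */
	static void queue_cheaply(struct sock *sk, struct sk_buff *skb)
	{
		skb_condense(skb);
		skb_set_owner_r(skb, sk);	/* charge the (possibly smaller) truesize */
		__skb_queue_tail(&sk->sk_receive_queue, skb);
	}
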
6526 void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id, in __skb_ext_set() argument
6531 skb_ext_put(skb); in __skb_ext_set()
6535 skb->extensions = ext; in __skb_ext_set()
6536 skb->active_extensions = 1 << id; in __skb_ext_set()
6554 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) in skb_ext_add() argument
6559 if (skb->active_extensions) { in skb_ext_add()
6560 old = skb->extensions; in skb_ext_add()
6562 new = skb_ext_maybe_cow(old, skb->active_extensions); in skb_ext_add()
6582 skb->slow_gro = 1; in skb_ext_add()
6583 skb->extensions = new; in skb_ext_add()
6584 skb->active_extensions |= 1 << id; in skb_ext_add()
6599 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) in __skb_ext_del() argument
6601 struct skb_ext *ext = skb->extensions; in __skb_ext_del()
6603 skb->active_extensions &= ~(1 << id); in __skb_ext_del()
6604 if (skb->active_extensions == 0) { in __skb_ext_del()
6605 skb->extensions = NULL; in __skb_ext_del()
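
Illustrative usage: skb_ext_add() allocates (or copies-on-write) the extension area and marks the id active; __skb_ext_del() clears it again, releasing the area once no ids remain. A hedged sketch using the tc extension; it assumes a kernel built with CONFIG_NET_TC_SKB_EXT, and the helper names are made up.

	#include <linux/errno.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>

	/* Sketch: stash a tc chain index on the skb and read it back later. */
	static int remember_chain(struct sk_buff *skb, u32 chain)
	{
		struct tc_skb_ext *ext = skb_ext_add(skb, TC_SKB_EXT);

		if (!ext)
			return -ENOMEM;
		memset(ext, 0, sizeof(*ext));
		ext->chain = chain;
		return 0;
	}

	static u32 recall_chain(struct sk_buff *skb)
	{
		struct tc_skb_ext *ext = skb_ext_find(skb, TC_SKB_EXT);

		return ext ? ext->chain : 0;
	}
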