Lines Matching refs:skb
101 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, in skb_panic() argument
105 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
106 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
107 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
111 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_over_panic() argument
113 skb_panic(skb, sz, addr, __func__); in skb_over_panic()
116 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_under_panic() argument
118 skb_panic(skb, sz, addr, __func__); in skb_under_panic()
186 struct sk_buff *skb; in __alloc_skb() local
197 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); in __alloc_skb()
198 if (!skb) in __alloc_skb()
200 prefetchw(skb); in __alloc_skb()
224 memset(skb, 0, offsetof(struct sk_buff, tail)); in __alloc_skb()
226 skb->truesize = SKB_TRUESIZE(size); in __alloc_skb()
227 skb->pfmemalloc = pfmemalloc; in __alloc_skb()
228 refcount_set(&skb->users, 1); in __alloc_skb()
229 skb->head = data; in __alloc_skb()
230 skb->data = data; in __alloc_skb()
231 skb_reset_tail_pointer(skb); in __alloc_skb()
232 skb->end = skb->tail + size; in __alloc_skb()
233 skb->mac_header = (typeof(skb->mac_header))~0U; in __alloc_skb()
234 skb->transport_header = (typeof(skb->transport_header))~0U; in __alloc_skb()
237 shinfo = skb_shinfo(skb); in __alloc_skb()
244 fclones = container_of(skb, struct sk_buff_fclones, skb1); in __alloc_skb()
246 skb->fclone = SKB_FCLONE_ORIG; in __alloc_skb()
252 return skb; in __alloc_skb()
254 kmem_cache_free(cache, skb); in __alloc_skb()
255 skb = NULL; in __alloc_skb()
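The lines above are the allocation core of __alloc_skb(): the sk_buff head comes from a kmem_cache, truesize and the reference count are initialised, head/data/tail/end point at the new data buffer, and the header offsets are marked unset. A minimal caller-side sketch of that lifecycle via the alloc_skb() wrapper follows; the 16-byte headroom and the example_* name are illustrative, not taken from this file.

#include <linux/skbuff.h>
#include <linux/errno.h>
#include <linux/string.h>

/* Sketch: allocate an skb, reserve headroom, append a payload, free it. */
static int example_alloc_and_fill(const void *payload, unsigned int len)
{
        struct sk_buff *skb;

        skb = alloc_skb(len + 16, GFP_KERNEL);   /* thin wrapper around __alloc_skb() */
        if (!skb)
                return -ENOMEM;

        skb_reserve(skb, 16);                    /* move data/tail forward: headroom */
        memcpy(skb_put(skb, len), payload, len); /* advance tail and skb->len, then copy */

        kfree_skb(skb);                          /* drop the single reference */
        return 0;
}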
261 static struct sk_buff *__build_skb_around(struct sk_buff *skb, in __build_skb_around() argument
270 skb->truesize = SKB_TRUESIZE(size); in __build_skb_around()
271 refcount_set(&skb->users, 1); in __build_skb_around()
272 skb->head = data; in __build_skb_around()
273 skb->data = data; in __build_skb_around()
274 skb_reset_tail_pointer(skb); in __build_skb_around()
275 skb->end = skb->tail + size; in __build_skb_around()
276 skb->mac_header = (typeof(skb->mac_header))~0U; in __build_skb_around()
277 skb->transport_header = (typeof(skb->transport_header))~0U; in __build_skb_around()
280 shinfo = skb_shinfo(skb); in __build_skb_around()
284 return skb; in __build_skb_around()
308 struct sk_buff *skb; in __build_skb() local
310 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); in __build_skb()
311 if (unlikely(!skb)) in __build_skb()
314 memset(skb, 0, offsetof(struct sk_buff, tail)); in __build_skb()
316 return __build_skb_around(skb, data, frag_size); in __build_skb()
326 struct sk_buff *skb = __build_skb(data, frag_size); in build_skb() local
328 if (skb && frag_size) { in build_skb()
329 skb->head_frag = 1; in build_skb()
331 skb->pfmemalloc = 1; in build_skb()
333 return skb; in build_skb()
343 struct sk_buff *build_skb_around(struct sk_buff *skb, in build_skb_around() argument
346 if (unlikely(!skb)) in build_skb_around()
349 skb = __build_skb_around(skb, data, frag_size); in build_skb_around()
351 if (skb && frag_size) { in build_skb_around()
352 skb->head_frag = 1; in build_skb_around()
354 skb->pfmemalloc = 1; in build_skb_around()
356 return skb; in build_skb_around()
428 struct sk_buff *skb; in __netdev_alloc_skb() local
436 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); in __netdev_alloc_skb()
437 if (!skb) in __netdev_alloc_skb()
463 skb = __build_skb(data, len); in __netdev_alloc_skb()
464 if (unlikely(!skb)) { in __netdev_alloc_skb()
471 skb->pfmemalloc = 1; in __netdev_alloc_skb()
472 skb->head_frag = 1; in __netdev_alloc_skb()
475 skb_reserve(skb, NET_SKB_PAD); in __netdev_alloc_skb()
476 skb->dev = dev; in __netdev_alloc_skb()
479 return skb; in __netdev_alloc_skb()
500 struct sk_buff *skb; in __napi_alloc_skb() local
507 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE); in __napi_alloc_skb()
508 if (!skb) in __napi_alloc_skb()
523 skb = __build_skb(data, len); in __napi_alloc_skb()
524 if (unlikely(!skb)) { in __napi_alloc_skb()
531 skb->pfmemalloc = 1; in __napi_alloc_skb()
532 skb->head_frag = 1; in __napi_alloc_skb()
535 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); in __napi_alloc_skb()
536 skb->dev = napi->dev; in __napi_alloc_skb()
539 return skb; in __napi_alloc_skb()
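__netdev_alloc_skb() and __napi_alloc_skb() above either fall back to __alloc_skb() or build the skb around a page-fragment buffer, and reserve NET_SKB_PAD (plus NET_IP_ALIGN for the NAPI variant) before returning. A hedged sketch of how a driver RX path typically consumes the NAPI helper; the example_* name and the hw_buf/hw_len descriptor fields are placeholders.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

/* Sketch: copy one received frame into a freshly allocated skb and pass it up. */
static void example_rx_one(struct napi_struct *napi, struct net_device *dev,
                           const void *hw_buf, unsigned int hw_len)
{
        struct sk_buff *skb;

        skb = napi_alloc_skb(napi, hw_len);      /* headroom already reserved */
        if (unlikely(!skb)) {
                dev->stats.rx_dropped++;
                return;
        }

        skb_put_data(skb, hw_buf, hw_len);       /* append the frame bytes */
        skb->protocol = eth_type_trans(skb, dev);
        napi_gro_receive(napi, skb);
}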
543 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, in skb_add_rx_frag() argument
546 skb_fill_page_desc(skb, i, page, off, size); in skb_add_rx_frag()
547 skb->len += size; in skb_add_rx_frag()
548 skb->data_len += size; in skb_add_rx_frag()
549 skb->truesize += truesize; in skb_add_rx_frag()
553 void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size, in skb_coalesce_rx_frag() argument
556 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_coalesce_rx_frag()
559 skb->len += size; in skb_coalesce_rx_frag()
560 skb->data_len += size; in skb_coalesce_rx_frag()
561 skb->truesize += truesize; in skb_coalesce_rx_frag()
571 static inline void skb_drop_fraglist(struct sk_buff *skb) in skb_drop_fraglist() argument
573 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
576 static void skb_clone_fraglist(struct sk_buff *skb) in skb_clone_fraglist() argument
580 skb_walk_frags(skb, list) in skb_clone_fraglist()
584 static void skb_free_head(struct sk_buff *skb) in skb_free_head() argument
586 unsigned char *head = skb->head; in skb_free_head()
588 if (skb->head_frag) in skb_free_head()
594 static void skb_release_data(struct sk_buff *skb) in skb_release_data() argument
596 struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_release_data()
599 if (skb->cloned && in skb_release_data()
600 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, in skb_release_data()
610 skb_zcopy_clear(skb, true); in skb_release_data()
611 skb_free_head(skb); in skb_release_data()
617 static void kfree_skbmem(struct sk_buff *skb) in kfree_skbmem() argument
621 switch (skb->fclone) { in kfree_skbmem()
623 kmem_cache_free(skbuff_head_cache, skb); in kfree_skbmem()
627 fclones = container_of(skb, struct sk_buff_fclones, skb1); in kfree_skbmem()
638 fclones = container_of(skb, struct sk_buff_fclones, skb2); in kfree_skbmem()
647 void skb_release_head_state(struct sk_buff *skb) in skb_release_head_state() argument
649 skb_dst_drop(skb); in skb_release_head_state()
650 if (skb->destructor) { in skb_release_head_state()
652 skb->destructor(skb); in skb_release_head_state()
655 nf_conntrack_put(skb_nfct(skb)); in skb_release_head_state()
657 skb_ext_put(skb); in skb_release_head_state()
661 static void skb_release_all(struct sk_buff *skb) in skb_release_all() argument
663 skb_release_head_state(skb); in skb_release_all()
664 if (likely(skb->head)) in skb_release_all()
665 skb_release_data(skb); in skb_release_all()
677 void __kfree_skb(struct sk_buff *skb) in __kfree_skb() argument
679 skb_release_all(skb); in __kfree_skb()
680 kfree_skbmem(skb); in __kfree_skb()
691 void kfree_skb(struct sk_buff *skb) in kfree_skb() argument
693 if (!skb_unref(skb)) in kfree_skb()
696 trace_kfree_skb(skb, __builtin_return_address(0)); in kfree_skb()
697 __kfree_skb(skb); in kfree_skb()
718 void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt) in skb_dump() argument
721 struct skb_shared_info *sh = skb_shinfo(skb); in skb_dump()
722 struct net_device *dev = skb->dev; in skb_dump()
723 struct sock *sk = skb->sk; in skb_dump()
733 len = skb->len; in skb_dump()
735 len = min_t(int, skb->len, MAX_HEADER + 128); in skb_dump()
737 headroom = skb_headroom(skb); in skb_dump()
738 tailroom = skb_tailroom(skb); in skb_dump()
740 has_mac = skb_mac_header_was_set(skb); in skb_dump()
741 has_trans = skb_transport_header_was_set(skb); in skb_dump()
748 level, skb->len, headroom, skb_headlen(skb), tailroom, in skb_dump()
749 has_mac ? skb->mac_header : -1, in skb_dump()
750 has_mac ? skb_mac_header_len(skb) : -1, in skb_dump()
751 skb->network_header, in skb_dump()
752 has_trans ? skb_network_header_len(skb) : -1, in skb_dump()
753 has_trans ? skb->transport_header : -1, in skb_dump()
756 skb->csum, skb->ip_summed, skb->csum_complete_sw, in skb_dump()
757 skb->csum_valid, skb->csum_level, in skb_dump()
758 skb->hash, skb->sw_hash, skb->l4_hash, in skb_dump()
759 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif); in skb_dump()
770 16, 1, skb->head, headroom, false); in skb_dump()
772 seg_len = min_t(int, skb_headlen(skb), len); in skb_dump()
775 16, 1, skb->data, seg_len, false); in skb_dump()
780 16, 1, skb_tail_pointer(skb), tailroom, false); in skb_dump()
782 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) { in skb_dump()
783 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_dump()
803 if (full_pkt && skb_has_frag_list(skb)) { in skb_dump()
805 skb_walk_frags(skb, list_skb) in skb_dump()
818 void skb_tx_error(struct sk_buff *skb) in skb_tx_error() argument
820 skb_zcopy_clear(skb, true); in skb_tx_error()
832 void consume_skb(struct sk_buff *skb) in consume_skb() argument
834 if (!skb_unref(skb)) in consume_skb()
837 trace_consume_skb(skb); in consume_skb()
838 __kfree_skb(skb); in consume_skb()
849 void __consume_stateless_skb(struct sk_buff *skb) in __consume_stateless_skb() argument
851 trace_consume_skb(skb); in __consume_stateless_skb()
852 skb_release_data(skb); in __consume_stateless_skb()
853 kfree_skbmem(skb); in __consume_stateless_skb()
868 static inline void _kfree_skb_defer(struct sk_buff *skb) in _kfree_skb_defer() argument
873 skb_release_all(skb); in _kfree_skb_defer()
876 nc->skb_cache[nc->skb_count++] = skb; in _kfree_skb_defer()
880 prefetchw(skb); in _kfree_skb_defer()
890 void __kfree_skb_defer(struct sk_buff *skb) in __kfree_skb_defer() argument
892 _kfree_skb_defer(skb); in __kfree_skb_defer()
895 void napi_consume_skb(struct sk_buff *skb, int budget) in napi_consume_skb() argument
897 if (unlikely(!skb)) in napi_consume_skb()
902 dev_consume_skb_any(skb); in napi_consume_skb()
906 if (!skb_unref(skb)) in napi_consume_skb()
910 trace_consume_skb(skb); in napi_consume_skb()
913 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) { in napi_consume_skb()
914 __kfree_skb(skb); in napi_consume_skb()
918 _kfree_skb_defer(skb); in napi_consume_skb()
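kfree_skb(), consume_skb() and napi_consume_skb() above all converge on __kfree_skb() once skb_unref() drops the last reference; the differences are which tracepoint fires (dropped vs. normally consumed) and, for the NAPI variant, the per-CPU deferral cache. A small illustrative sketch of picking the matching free routine; the example_* helper is hypothetical.

#include <linux/skbuff.h>

/* Sketch: free an skb through the API matching why it goes away, so drop
 * monitors hooked on trace_kfree_skb() only see real drops.
 */
static void example_finish_skb(struct sk_buff *skb, bool delivered)
{
        if (delivered)
                consume_skb(skb);       /* normal end of life */
        else
                kfree_skb(skb);         /* counted as a drop */
}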
981 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) in __skb_clone() argument
983 #define C(x) n->x = skb->x in __skb_clone()
987 __copy_skb_header(n, skb); in __skb_clone()
992 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; in __skb_clone()
1006 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
1007 skb->cloned = 1; in __skb_clone()
1098 struct sk_buff *skb; in sock_zerocopy_alloc() local
1102 skb = sock_omalloc(sk, 0, GFP_KERNEL); in sock_zerocopy_alloc()
1103 if (!skb) in sock_zerocopy_alloc()
1106 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb)); in sock_zerocopy_alloc()
1107 uarg = (void *)skb->cb; in sock_zerocopy_alloc()
1111 kfree_skb(skb); in sock_zerocopy_alloc()
1176 static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len) in skb_zerocopy_notify_extend() argument
1178 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb); in skb_zerocopy_notify_extend()
1198 struct sk_buff *tail, *skb = skb_from_uarg(uarg); in sock_zerocopy_callback() local
1200 struct sock *sk = skb->sk; in sock_zerocopy_callback()
1218 serr = SKB_EXT_ERR(skb); in sock_zerocopy_callback()
1232 __skb_queue_tail(q, skb); in sock_zerocopy_callback()
1233 skb = NULL; in sock_zerocopy_callback()
1240 consume_skb(skb); in sock_zerocopy_callback()
1270 int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len) in skb_zerocopy_iter_dgram() argument
1272 return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_dgram()
1276 int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, in skb_zerocopy_iter_stream() argument
1280 struct ubuf_info *orig_uarg = skb_zcopy(skb); in skb_zerocopy_iter_stream()
1282 int err, orig_len = skb->len; in skb_zerocopy_iter_stream()
1290 err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len); in skb_zerocopy_iter_stream()
1291 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { in skb_zerocopy_iter_stream()
1292 struct sock *save_sk = skb->sk; in skb_zerocopy_iter_stream()
1296 skb->sk = sk; in skb_zerocopy_iter_stream()
1297 ___pskb_trim(skb, orig_len); in skb_zerocopy_iter_stream()
1298 skb->sk = save_sk; in skb_zerocopy_iter_stream()
1302 skb_zcopy_set(skb, uarg, NULL); in skb_zerocopy_iter_stream()
1303 return skb->len - orig_len; in skb_zerocopy_iter_stream()
1342 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) in skb_copy_ubufs() argument
1344 int num_frags = skb_shinfo(skb)->nr_frags; in skb_copy_ubufs()
1349 if (skb_shared(skb) || skb_unclone(skb, gfp_mask)) in skb_copy_ubufs()
1355 new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT; in skb_copy_ubufs()
1373 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_ubufs()
1400 skb_frag_unref(skb, i); in skb_copy_ubufs()
1404 __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE); in skb_copy_ubufs()
1407 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off); in skb_copy_ubufs()
1408 skb_shinfo(skb)->nr_frags = new_frags; in skb_copy_ubufs()
1411 skb_zcopy_clear(skb, false); in skb_copy_ubufs()
1430 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) in skb_clone() argument
1432 struct sk_buff_fclones *fclones = container_of(skb, in skb_clone()
1437 if (skb_orphan_frags(skb, gfp_mask)) in skb_clone()
1440 if (skb->fclone == SKB_FCLONE_ORIG && in skb_clone()
1445 if (skb_pfmemalloc(skb)) in skb_clone()
1455 return __skb_clone(n, skb); in skb_clone()
1459 void skb_headers_offset_update(struct sk_buff *skb, int off) in skb_headers_offset_update() argument
1462 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_headers_offset_update()
1463 skb->csum_start += off; in skb_headers_offset_update()
1465 skb->transport_header += off; in skb_headers_offset_update()
1466 skb->network_header += off; in skb_headers_offset_update()
1467 if (skb_mac_header_was_set(skb)) in skb_headers_offset_update()
1468 skb->mac_header += off; in skb_headers_offset_update()
1469 skb->inner_transport_header += off; in skb_headers_offset_update()
1470 skb->inner_network_header += off; in skb_headers_offset_update()
1471 skb->inner_mac_header += off; in skb_headers_offset_update()
1485 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) in skb_alloc_rx_flag() argument
1487 if (skb_pfmemalloc(skb)) in skb_alloc_rx_flag()
1509 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) in skb_copy() argument
1511 int headerlen = skb_headroom(skb); in skb_copy()
1512 unsigned int size = skb_end_offset(skb) + skb->data_len; in skb_copy()
1514 skb_alloc_rx_flag(skb), NUMA_NO_NODE); in skb_copy()
1522 skb_put(n, skb->len); in skb_copy()
1524 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)); in skb_copy()
1526 skb_copy_header(n, skb); in skb_copy()
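skb_clone() above duplicates only the struct sk_buff and bumps the shared-info dataref (the payload stays shared and the original is marked cloned), whereas skb_copy() allocates a fresh head and copies headroom plus data so the result is privately writable. A hedged sketch of choosing between them; the example_* wrapper is illustrative only.

#include <linux/skbuff.h>

/* Sketch: take a private, writable copy when the payload must be edited,
 * otherwise a cheap clone that shares the data buffer is enough.
 */
static struct sk_buff *example_dup(struct sk_buff *skb, bool need_writable)
{
        if (need_writable)
                return skb_copy(skb, GFP_ATOMIC);       /* new head, data copied */

        return skb_clone(skb, GFP_ATOMIC);              /* shares data, dataref++ */
}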
1548 struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom, in __pskb_copy_fclone() argument
1551 unsigned int size = skb_headlen(skb) + headroom; in __pskb_copy_fclone()
1552 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0); in __pskb_copy_fclone()
1561 skb_put(n, skb_headlen(skb)); in __pskb_copy_fclone()
1563 skb_copy_from_linear_data(skb, n->data, n->len); in __pskb_copy_fclone()
1565 n->truesize += skb->data_len; in __pskb_copy_fclone()
1566 n->data_len = skb->data_len; in __pskb_copy_fclone()
1567 n->len = skb->len; in __pskb_copy_fclone()
1569 if (skb_shinfo(skb)->nr_frags) { in __pskb_copy_fclone()
1572 if (skb_orphan_frags(skb, gfp_mask) || in __pskb_copy_fclone()
1573 skb_zerocopy_clone(n, skb, gfp_mask)) { in __pskb_copy_fclone()
1578 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_copy_fclone()
1579 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; in __pskb_copy_fclone()
1580 skb_frag_ref(skb, i); in __pskb_copy_fclone()
1585 if (skb_has_frag_list(skb)) { in __pskb_copy_fclone()
1586 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; in __pskb_copy_fclone()
1590 skb_copy_header(n, skb); in __pskb_copy_fclone()
1612 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, in pskb_expand_head() argument
1615 int i, osize = skb_end_offset(skb); in pskb_expand_head()
1622 BUG_ON(skb_shared(skb)); in pskb_expand_head()
1626 if (skb_pfmemalloc(skb)) in pskb_expand_head()
1637 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); in pskb_expand_head()
1640 skb_shinfo(skb), in pskb_expand_head()
1641 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); in pskb_expand_head()
1648 if (skb_cloned(skb)) { in pskb_expand_head()
1649 if (skb_orphan_frags(skb, gfp_mask)) in pskb_expand_head()
1651 if (skb_zcopy(skb)) in pskb_expand_head()
1652 refcount_inc(&skb_uarg(skb)->refcnt); in pskb_expand_head()
1653 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_expand_head()
1654 skb_frag_ref(skb, i); in pskb_expand_head()
1656 if (skb_has_frag_list(skb)) in pskb_expand_head()
1657 skb_clone_fraglist(skb); in pskb_expand_head()
1659 skb_release_data(skb); in pskb_expand_head()
1661 skb_free_head(skb); in pskb_expand_head()
1663 off = (data + nhead) - skb->head; in pskb_expand_head()
1665 skb->head = data; in pskb_expand_head()
1666 skb->head_frag = 0; in pskb_expand_head()
1667 skb->data += off; in pskb_expand_head()
1669 skb->end = size; in pskb_expand_head()
1672 skb->end = skb->head + size; in pskb_expand_head()
1674 skb->tail += off; in pskb_expand_head()
1675 skb_headers_offset_update(skb, nhead); in pskb_expand_head()
1676 skb->cloned = 0; in pskb_expand_head()
1677 skb->hdr_len = 0; in pskb_expand_head()
1678 skb->nohdr = 0; in pskb_expand_head()
1679 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_expand_head()
1681 skb_metadata_clear(skb); in pskb_expand_head()
1687 if (!skb->sk || skb->destructor == sock_edemux) in pskb_expand_head()
1688 skb->truesize += size - osize; in pskb_expand_head()
1701 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) in skb_realloc_headroom() argument
1704 int delta = headroom - skb_headroom(skb); in skb_realloc_headroom()
1707 skb2 = pskb_copy(skb, GFP_ATOMIC); in skb_realloc_headroom()
1709 skb2 = skb_clone(skb, GFP_ATOMIC); in skb_realloc_headroom()
1738 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, in skb_copy_expand() argument
1745 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, in skb_copy_expand()
1746 gfp_mask, skb_alloc_rx_flag(skb), in skb_copy_expand()
1748 int oldheadroom = skb_headroom(skb); in skb_copy_expand()
1757 skb_put(n, skb->len); in skb_copy_expand()
1767 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, in skb_copy_expand()
1768 skb->len + head_copy_len)); in skb_copy_expand()
1770 skb_copy_header(n, skb); in skb_copy_expand()
1792 int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error) in __skb_pad() argument
1798 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { in __skb_pad()
1799 memset(skb->data+skb->len, 0, pad); in __skb_pad()
1803 ntail = skb->data_len + pad - (skb->end - skb->tail); in __skb_pad()
1804 if (likely(skb_cloned(skb) || ntail > 0)) { in __skb_pad()
1805 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); in __skb_pad()
1813 err = skb_linearize(skb); in __skb_pad()
1817 memset(skb->data + skb->len, 0, pad); in __skb_pad()
1822 kfree_skb(skb); in __skb_pad()
1840 void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) in pskb_put() argument
1842 if (tail != skb) { in pskb_put()
1843 skb->data_len += len; in pskb_put()
1844 skb->len += len; in pskb_put()
1859 void *skb_put(struct sk_buff *skb, unsigned int len) in skb_put() argument
1861 void *tmp = skb_tail_pointer(skb); in skb_put()
1862 SKB_LINEAR_ASSERT(skb); in skb_put()
1863 skb->tail += len; in skb_put()
1864 skb->len += len; in skb_put()
1865 if (unlikely(skb->tail > skb->end)) in skb_put()
1866 skb_over_panic(skb, len, __builtin_return_address(0)); in skb_put()
1880 void *skb_push(struct sk_buff *skb, unsigned int len) in skb_push() argument
1882 skb->data -= len; in skb_push()
1883 skb->len += len; in skb_push()
1884 if (unlikely(skb->data < skb->head)) in skb_push()
1885 skb_under_panic(skb, len, __builtin_return_address(0)); in skb_push()
1886 return skb->data; in skb_push()
1900 void *skb_pull(struct sk_buff *skb, unsigned int len) in skb_pull() argument
1902 return skb_pull_inline(skb, len); in skb_pull()
1915 void skb_trim(struct sk_buff *skb, unsigned int len) in skb_trim() argument
1917 if (skb->len > len) in skb_trim()
1918 __skb_trim(skb, len); in skb_trim()
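skb_put(), skb_push(), skb_pull() and skb_trim() above only move the data/tail pointers and adjust skb->len, calling skb_over_panic()/skb_under_panic() if the linear buffer would be overrun. A short sketch of the usual headroom dance when composing a frame; the example_* name and the Ethernet-header layout are assumptions for illustration.

#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <linux/string.h>

/* Sketch: reserve room for the link-layer header, write the payload with
 * skb_put(), then prepend the header with skb_push().
 */
static struct sk_buff *example_build_frame(const struct ethhdr *eth,
                                           const void *payload, unsigned int len)
{
        struct sk_buff *skb;

        skb = alloc_skb(ETH_HLEN + len, GFP_KERNEL);
        if (!skb)
                return NULL;

        skb_reserve(skb, ETH_HLEN);                     /* headroom for the header */
        memcpy(skb_put(skb, len), payload, len);        /* tail += len, len += len */
        memcpy(skb_push(skb, ETH_HLEN), eth, ETH_HLEN); /* data -= ETH_HLEN, len += ETH_HLEN */

        return skb;
}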
1925 int ___pskb_trim(struct sk_buff *skb, unsigned int len) in ___pskb_trim() argument
1929 int offset = skb_headlen(skb); in ___pskb_trim()
1930 int nfrags = skb_shinfo(skb)->nr_frags; in ___pskb_trim()
1934 if (skb_cloned(skb) && in ___pskb_trim()
1935 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) in ___pskb_trim()
1943 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
1950 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
1953 skb_shinfo(skb)->nr_frags = i; in ___pskb_trim()
1956 skb_frag_unref(skb, i); in ___pskb_trim()
1958 if (skb_has_frag_list(skb)) in ___pskb_trim()
1959 skb_drop_fraglist(skb); in ___pskb_trim()
1963 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); in ___pskb_trim()
1995 if (len > skb_headlen(skb)) { in ___pskb_trim()
1996 skb->data_len -= skb->len - len; in ___pskb_trim()
1997 skb->len = len; in ___pskb_trim()
1999 skb->len = len; in ___pskb_trim()
2000 skb->data_len = 0; in ___pskb_trim()
2001 skb_set_tail_pointer(skb, len); in ___pskb_trim()
2004 if (!skb->sk || skb->destructor == sock_edemux) in ___pskb_trim()
2005 skb_condense(skb); in ___pskb_trim()
2012 int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len) in pskb_trim_rcsum_slow() argument
2014 if (skb->ip_summed == CHECKSUM_COMPLETE) { in pskb_trim_rcsum_slow()
2015 int delta = skb->len - len; in pskb_trim_rcsum_slow()
2017 skb->csum = csum_block_sub(skb->csum, in pskb_trim_rcsum_slow()
2018 skb_checksum(skb, len, delta, 0), in pskb_trim_rcsum_slow()
2021 return __pskb_trim(skb, len); in pskb_trim_rcsum_slow()
2050 void *__pskb_pull_tail(struct sk_buff *skb, int delta) in __pskb_pull_tail() argument
2056 int i, k, eat = (skb->tail + delta) - skb->end; in __pskb_pull_tail()
2058 if (eat > 0 || skb_cloned(skb)) { in __pskb_pull_tail()
2059 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, in __pskb_pull_tail()
2064 BUG_ON(skb_copy_bits(skb, skb_headlen(skb), in __pskb_pull_tail()
2065 skb_tail_pointer(skb), delta)); in __pskb_pull_tail()
2070 if (!skb_has_frag_list(skb)) in __pskb_pull_tail()
2075 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2076 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2091 struct sk_buff *list = skb_shinfo(skb)->frag_list; in __pskb_pull_tail()
2125 while ((list = skb_shinfo(skb)->frag_list) != insp) { in __pskb_pull_tail()
2126 skb_shinfo(skb)->frag_list = list->next; in __pskb_pull_tail()
2132 skb_shinfo(skb)->frag_list = clone; in __pskb_pull_tail()
2140 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
2141 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
2144 skb_frag_unref(skb, i); in __pskb_pull_tail()
2147 skb_frag_t *frag = &skb_shinfo(skb)->frags[k]; in __pskb_pull_tail()
2149 *frag = skb_shinfo(skb)->frags[i]; in __pskb_pull_tail()
2160 skb_shinfo(skb)->nr_frags = k; in __pskb_pull_tail()
2163 skb->tail += delta; in __pskb_pull_tail()
2164 skb->data_len -= delta; in __pskb_pull_tail()
2166 if (!skb->data_len) in __pskb_pull_tail()
2167 skb_zcopy_clear(skb, false); in __pskb_pull_tail()
2169 return skb_tail_pointer(skb); in __pskb_pull_tail()
2188 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) in skb_copy_bits() argument
2190 int start = skb_headlen(skb); in skb_copy_bits()
2194 if (offset > (int)skb->len - len) in skb_copy_bits()
2201 skb_copy_from_linear_data_offset(skb, offset, to, copy); in skb_copy_bits()
2208 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_bits()
2210 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_bits()
2239 skb_walk_frags(skb, frag_iter) { in skb_copy_bits()
2373 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, in __skb_splice_bits() argument
2385 if (__splice_segment(virt_to_page(skb->data), in __skb_splice_bits()
2386 (unsigned long) skb->data & (PAGE_SIZE - 1), in __skb_splice_bits()
2387 skb_headlen(skb), in __skb_splice_bits()
2389 skb_head_is_locked(skb), in __skb_splice_bits()
2396 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { in __skb_splice_bits()
2397 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; in __skb_splice_bits()
2405 skb_walk_frags(skb, iter) { in __skb_splice_bits()
2425 int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset, in skb_splice_bits() argument
2440 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk); in skb_splice_bits()
2450 int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset, in skb_send_sock_locked() argument
2454 struct sk_buff *head = skb; in skb_send_sock_locked()
2461 while (offset < skb_headlen(skb) && len) { in skb_send_sock_locked()
2465 slen = min_t(int, len, skb_headlen(skb) - offset); in skb_send_sock_locked()
2466 kv.iov_base = skb->data + offset; in skb_send_sock_locked()
2484 offset -= skb_headlen(skb); in skb_send_sock_locked()
2487 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in skb_send_sock_locked()
2488 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in skb_send_sock_locked()
2496 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) { in skb_send_sock_locked()
2497 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx]; in skb_send_sock_locked()
2519 if (skb == head) { in skb_send_sock_locked()
2520 if (skb_has_frag_list(skb)) { in skb_send_sock_locked()
2521 skb = skb_shinfo(skb)->frag_list; in skb_send_sock_locked()
2524 } else if (skb->next) { in skb_send_sock_locked()
2525 skb = skb->next; in skb_send_sock_locked()
2550 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) in skb_store_bits() argument
2552 int start = skb_headlen(skb); in skb_store_bits()
2556 if (offset > (int)skb->len - len) in skb_store_bits()
2562 skb_copy_to_linear_data_offset(skb, offset, from, copy); in skb_store_bits()
2569 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_store_bits()
2570 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_store_bits()
2600 skb_walk_frags(skb, frag_iter) { in skb_store_bits()
2628 __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len, in __skb_checksum() argument
2631 int start = skb_headlen(skb); in __skb_checksum()
2641 skb->data + offset, copy, csum); in __skb_checksum()
2648 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_checksum()
2650 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_checksum()
2685 skb_walk_frags(skb, frag_iter) { in __skb_checksum()
2712 __wsum skb_checksum(const struct sk_buff *skb, int offset, in skb_checksum() argument
2720 return __skb_checksum(skb, offset, len, csum, &ops); in skb_checksum()
2726 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, in skb_copy_and_csum_bits() argument
2729 int start = skb_headlen(skb); in skb_copy_and_csum_bits()
2738 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
2747 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_and_csum_bits()
2752 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_copy_and_csum_bits()
2754 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_copy_and_csum_bits()
2783 skb_walk_frags(skb, frag_iter) { in skb_copy_and_csum_bits()
2810 __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) in __skb_checksum_complete_head() argument
2814 sum = csum_fold(skb_checksum(skb, 0, len, skb->csum)); in __skb_checksum_complete_head()
2817 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete_head()
2818 !skb->csum_complete_sw) in __skb_checksum_complete_head()
2819 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete_head()
2821 if (!skb_shared(skb)) in __skb_checksum_complete_head()
2822 skb->csum_valid = !sum; in __skb_checksum_complete_head()
2836 __sum16 __skb_checksum_complete(struct sk_buff *skb) in __skb_checksum_complete() argument
2841 csum = skb_checksum(skb, 0, skb->len, 0); in __skb_checksum_complete()
2843 sum = csum_fold(csum_add(skb->csum, csum)); in __skb_checksum_complete()
2852 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && in __skb_checksum_complete()
2853 !skb->csum_complete_sw) in __skb_checksum_complete()
2854 netdev_rx_csum_fault(skb->dev, skb); in __skb_checksum_complete()
2857 if (!skb_shared(skb)) { in __skb_checksum_complete()
2859 skb->csum = csum; in __skb_checksum_complete()
2860 skb->ip_summed = CHECKSUM_COMPLETE; in __skb_checksum_complete()
2861 skb->csum_complete_sw = 1; in __skb_checksum_complete()
2862 skb->csum_valid = !sum; in __skb_checksum_complete()
2998 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) in skb_copy_and_csum_dev() argument
3003 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_copy_and_csum_dev()
3004 csstart = skb_checksum_start_offset(skb); in skb_copy_and_csum_dev()
3006 csstart = skb_headlen(skb); in skb_copy_and_csum_dev()
3008 BUG_ON(csstart > skb_headlen(skb)); in skb_copy_and_csum_dev()
3010 skb_copy_from_linear_data(skb, to, csstart); in skb_copy_and_csum_dev()
3013 if (csstart != skb->len) in skb_copy_and_csum_dev()
3014 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, in skb_copy_and_csum_dev()
3015 skb->len - csstart, 0); in skb_copy_and_csum_dev()
3017 if (skb->ip_summed == CHECKSUM_PARTIAL) { in skb_copy_and_csum_dev()
3018 long csstuff = csstart + skb->csum_offset; in skb_copy_and_csum_dev()
3076 struct sk_buff *skb; in skb_queue_purge() local
3077 while ((skb = skb_dequeue(list)) != NULL) in skb_queue_purge()
3078 kfree_skb(skb); in skb_queue_purge()
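skb_queue_purge() above simply dequeues and kfree_skb()s every entry left on an sk_buff_head. A hedged sketch of the queue API it tears down; the example_* name is illustrative and the spinlocked (non-__) variants are used.

#include <linux/skbuff.h>

/* Sketch: producer/consumer use of an sk_buff_head, drained on teardown. */
static void example_queue_usage(struct sk_buff *skb)
{
        struct sk_buff_head q;
        struct sk_buff *next;

        skb_queue_head_init(&q);        /* init list pointers and spinlock */
        skb_queue_tail(&q, skb);        /* enqueue at the tail */

        next = skb_dequeue(&q);         /* NULL when the queue is empty */
        if (next)
                consume_skb(next);

        skb_queue_purge(&q);            /* free anything still queued */
}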
3098 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode); in skb_rbtree_purge() local
3101 rb_erase(&skb->rbnode, root); in skb_rbtree_purge()
3102 sum += skb->truesize; in skb_rbtree_purge()
3103 kfree_skb(skb); in skb_rbtree_purge()
3160 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) in skb_unlink() argument
3165 __skb_unlink(skb, list); in skb_unlink()
3190 static inline void skb_split_inside_header(struct sk_buff *skb, in skb_split_inside_header() argument
3196 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), in skb_split_inside_header()
3199 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in skb_split_inside_header()
3200 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; in skb_split_inside_header()
3202 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; in skb_split_inside_header()
3203 skb_shinfo(skb)->nr_frags = 0; in skb_split_inside_header()
3204 skb1->data_len = skb->data_len; in skb_split_inside_header()
3206 skb->data_len = 0; in skb_split_inside_header()
3207 skb->len = len; in skb_split_inside_header()
3208 skb_set_tail_pointer(skb, len); in skb_split_inside_header()
3211 static inline void skb_split_no_header(struct sk_buff *skb, in skb_split_no_header() argument
3216 const int nfrags = skb_shinfo(skb)->nr_frags; in skb_split_no_header()
3218 skb_shinfo(skb)->nr_frags = 0; in skb_split_no_header()
3219 skb1->len = skb1->data_len = skb->len - len; in skb_split_no_header()
3220 skb->len = len; in skb_split_no_header()
3221 skb->data_len = len - pos; in skb_split_no_header()
3224 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_split_no_header()
3227 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; in skb_split_no_header()
3238 skb_frag_ref(skb, i); in skb_split_no_header()
3241 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); in skb_split_no_header()
3242 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
3246 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
3258 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) in skb_split() argument
3260 int pos = skb_headlen(skb); in skb_split()
3262 skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags & in skb_split()
3264 skb_zerocopy_clone(skb1, skb, 0); in skb_split()
3266 skb_split_inside_header(skb, skb1, len, pos); in skb_split()
3268 skb_split_no_header(skb, skb1, len, pos); in skb_split()
3276 static int skb_prepare_for_shift(struct sk_buff *skb) in skb_prepare_for_shift() argument
3278 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_prepare_for_shift()
3299 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) in skb_shift() argument
3304 BUG_ON(shiftlen > skb->len); in skb_shift()
3306 if (skb_headlen(skb)) in skb_shift()
3308 if (skb_zcopy(tgt) || skb_zcopy(skb)) in skb_shift()
3314 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
3328 if (skb_prepare_for_shift(skb) || in skb_shift()
3333 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
3347 if ((shiftlen == skb->len) && in skb_shift()
3348 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
3351 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) in skb_shift()
3354 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { in skb_shift()
3358 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
3386 fragfrom = &skb_shinfo(skb)->frags[0]; in skb_shift()
3395 while (from < skb_shinfo(skb)->nr_frags) in skb_shift()
3396 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; in skb_shift()
3397 skb_shinfo(skb)->nr_frags = to; in skb_shift()
3399 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); in skb_shift()
3406 skb->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
3409 skb->len -= shiftlen; in skb_shift()
3410 skb->data_len -= shiftlen; in skb_shift()
3411 skb->truesize -= shiftlen; in skb_shift()
3429 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, in skb_prepare_seq_read() argument
3434 st->root_skb = st->cur_skb = skb; in skb_prepare_seq_read()
3572 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, in skb_find_text() argument
3581 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state)); in skb_find_text()
3588 int skb_append_pagefrags(struct sk_buff *skb, struct page *page, in skb_append_pagefrags() argument
3591 int i = skb_shinfo(skb)->nr_frags; in skb_append_pagefrags()
3593 if (skb_can_coalesce(skb, i, page, offset)) { in skb_append_pagefrags()
3594 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size); in skb_append_pagefrags()
3597 skb_fill_page_desc(skb, i, page, offset, size); in skb_append_pagefrags()
3617 void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) in skb_pull_rcsum() argument
3619 unsigned char *data = skb->data; in skb_pull_rcsum()
3621 BUG_ON(len > skb->len); in skb_pull_rcsum()
3622 __skb_pull(skb, len); in skb_pull_rcsum()
3623 skb_postpull_rcsum(skb, data, len); in skb_pull_rcsum()
3624 return skb->data; in skb_pull_rcsum()
3986 int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb) in skb_gro_receive() argument
3988 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb); in skb_gro_receive()
3989 unsigned int offset = skb_gro_offset(skb); in skb_gro_receive()
3990 unsigned int headlen = skb_headlen(skb); in skb_gro_receive()
3991 unsigned int len = skb_gro_len(skb); in skb_gro_receive()
3995 if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush)) in skb_gro_receive()
4024 delta_truesize = skb->truesize - in skb_gro_receive()
4025 SKB_TRUESIZE(skb_end_offset(skb)); in skb_gro_receive()
4027 skb->truesize -= skb->data_len; in skb_gro_receive()
4028 skb->len -= skb->data_len; in skb_gro_receive()
4029 skb->data_len = 0; in skb_gro_receive()
4031 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; in skb_gro_receive()
4033 } else if (skb->head_frag) { in skb_gro_receive()
4036 struct page *page = virt_to_head_page(skb->head); in skb_gro_receive()
4043 first_offset = skb->data - in skb_gro_receive()
4056 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); in skb_gro_receive()
4057 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; in skb_gro_receive()
4062 delta_truesize = skb->truesize; in skb_gro_receive()
4068 skb->data_len -= eat; in skb_gro_receive()
4069 skb->len -= eat; in skb_gro_receive()
4073 __skb_pull(skb, offset); in skb_gro_receive()
4076 skb_shinfo(p)->frag_list = skb; in skb_gro_receive()
4078 NAPI_GRO_CB(p)->last->next = skb; in skb_gro_receive()
4079 NAPI_GRO_CB(p)->last = skb; in skb_gro_receive()
4080 __skb_header_release(skb); in skb_gro_receive()
4093 NAPI_GRO_CB(skb)->same_flow = 1; in skb_gro_receive()
4162 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len, in __skb_to_sgvec() argument
4165 int start = skb_headlen(skb); in __skb_to_sgvec()
4176 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
4183 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_to_sgvec()
4188 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_to_sgvec()
4190 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_to_sgvec()
4206 skb_walk_frags(skb, frag_iter) { in __skb_to_sgvec()
4245 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in skb_to_sgvec() argument
4247 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec()
4277 int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg, in skb_to_sgvec_nomark() argument
4280 return __skb_to_sgvec(skb, sg, offset, len, 0); in skb_to_sgvec_nomark()
4303 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) in skb_cow_data() argument
4313 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && in skb_cow_data()
4314 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) in skb_cow_data()
4318 if (!skb_has_frag_list(skb)) { in skb_cow_data()
4324 if (skb_tailroom(skb) < tailbits && in skb_cow_data()
4325 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) in skb_cow_data()
4329 *trailer = skb; in skb_cow_data()
4336 skb_p = &skb_shinfo(skb)->frag_list; in skb_cow_data()
4396 static void sock_rmem_free(struct sk_buff *skb) in sock_rmem_free() argument
4398 struct sock *sk = skb->sk; in sock_rmem_free()
4400 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in sock_rmem_free()
4403 static void skb_set_err_queue(struct sk_buff *skb) in skb_set_err_queue() argument
4408 skb->pkt_type = PACKET_OUTGOING; in skb_set_err_queue()
4415 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_err_skb() argument
4417 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in sock_queue_err_skb()
4421 skb_orphan(skb); in sock_queue_err_skb()
4422 skb->sk = sk; in sock_queue_err_skb()
4423 skb->destructor = sock_rmem_free; in sock_queue_err_skb()
4424 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in sock_queue_err_skb()
4425 skb_set_err_queue(skb); in sock_queue_err_skb()
4428 skb_dst_force(skb); in sock_queue_err_skb()
4430 skb_queue_tail(&sk->sk_error_queue, skb); in sock_queue_err_skb()
4437 static bool is_icmp_err_skb(const struct sk_buff *skb) in is_icmp_err_skb() argument
4439 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP || in is_icmp_err_skb()
4440 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6); in is_icmp_err_skb()
4446 struct sk_buff *skb, *skb_next = NULL; in sock_dequeue_err_skb() local
4451 skb = __skb_dequeue(q); in sock_dequeue_err_skb()
4452 if (skb && (skb_next = skb_peek(q))) { in sock_dequeue_err_skb()
4459 if (is_icmp_err_skb(skb) && !icmp_next) in sock_dequeue_err_skb()
4465 return skb; in sock_dequeue_err_skb()
4482 struct sk_buff *skb_clone_sk(struct sk_buff *skb) in skb_clone_sk() argument
4484 struct sock *sk = skb->sk; in skb_clone_sk()
4490 clone = skb_clone(skb, GFP_ATOMIC); in skb_clone_sk()
4503 static void __skb_complete_tx_timestamp(struct sk_buff *skb, in __skb_complete_tx_timestamp() argument
4511 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb)); in __skb_complete_tx_timestamp()
4513 serr = SKB_EXT_ERR(skb); in __skb_complete_tx_timestamp()
4519 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0; in __skb_complete_tx_timestamp()
4521 serr->ee.ee_data = skb_shinfo(skb)->tskey; in __skb_complete_tx_timestamp()
4527 err = sock_queue_err_skb(sk, skb); in __skb_complete_tx_timestamp()
4530 kfree_skb(skb); in __skb_complete_tx_timestamp()
4547 void skb_complete_tx_timestamp(struct sk_buff *skb, in skb_complete_tx_timestamp() argument
4550 struct sock *sk = skb->sk; in skb_complete_tx_timestamp()
4559 *skb_hwtstamps(skb) = *hwtstamps; in skb_complete_tx_timestamp()
4560 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); in skb_complete_tx_timestamp()
4566 kfree_skb(skb); in skb_complete_tx_timestamp()
4574 struct sk_buff *skb; in __skb_tstamp_tx() local
4593 skb = tcp_get_timestamping_opt_stats(sk); in __skb_tstamp_tx()
4597 skb = alloc_skb(0, GFP_ATOMIC); in __skb_tstamp_tx()
4599 skb = skb_clone(orig_skb, GFP_ATOMIC); in __skb_tstamp_tx()
4601 if (!skb) in __skb_tstamp_tx()
4605 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags & in __skb_tstamp_tx()
4607 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey; in __skb_tstamp_tx()
4611 *skb_hwtstamps(skb) = *hwtstamps; in __skb_tstamp_tx()
4613 skb->tstamp = ktime_get_real(); in __skb_tstamp_tx()
4615 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats); in __skb_tstamp_tx()
4627 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) in skb_complete_wifi_ack() argument
4629 struct sock *sk = skb->sk; in skb_complete_wifi_ack()
4633 skb->wifi_acked_valid = 1; in skb_complete_wifi_ack()
4634 skb->wifi_acked = acked; in skb_complete_wifi_ack()
4636 serr = SKB_EXT_ERR(skb); in skb_complete_wifi_ack()
4645 err = sock_queue_err_skb(sk, skb); in skb_complete_wifi_ack()
4649 kfree_skb(skb); in skb_complete_wifi_ack()
4665 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) in skb_partial_csum_set() argument
4668 u32 csum_start = skb_headroom(skb) + (u32)start; in skb_partial_csum_set()
4670 if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) { in skb_partial_csum_set()
4672 start, off, skb_headroom(skb), skb_headlen(skb)); in skb_partial_csum_set()
4675 skb->ip_summed = CHECKSUM_PARTIAL; in skb_partial_csum_set()
4676 skb->csum_start = csum_start; in skb_partial_csum_set()
4677 skb->csum_offset = off; in skb_partial_csum_set()
4678 skb_set_transport_header(skb, start); in skb_partial_csum_set()
4683 static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len, in skb_maybe_pull_tail() argument
4686 if (skb_headlen(skb) >= len) in skb_maybe_pull_tail()
4692 if (max > skb->len) in skb_maybe_pull_tail()
4693 max = skb->len; in skb_maybe_pull_tail()
4695 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL) in skb_maybe_pull_tail()
4698 if (skb_headlen(skb) < len) in skb_maybe_pull_tail()
4706 static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb, in skb_checksum_setup_ip() argument
4714 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr), in skb_checksum_setup_ip()
4716 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
4720 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check; in skb_checksum_setup_ip()
4723 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr), in skb_checksum_setup_ip()
4725 if (!err && !skb_partial_csum_set(skb, off, in skb_checksum_setup_ip()
4729 return err ? ERR_PTR(err) : &udp_hdr(skb)->check; in skb_checksum_setup_ip()
4740 static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv4() argument
4749 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv4()
4755 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF)) in skb_checksum_setup_ipv4()
4758 off = ip_hdrlen(skb); in skb_checksum_setup_ipv4()
4765 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off); in skb_checksum_setup_ipv4()
4770 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, in skb_checksum_setup_ipv4()
4771 ip_hdr(skb)->daddr, in skb_checksum_setup_ipv4()
4772 skb->len - off, in skb_checksum_setup_ipv4()
4773 ip_hdr(skb)->protocol, 0); in skb_checksum_setup_ipv4()
4785 #define OPT_HDR(type, skb, off) \ argument
4786 (type *)(skb_network_header(skb) + (off))
4788 static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate) in skb_checksum_setup_ipv6() argument
4803 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN); in skb_checksum_setup_ipv6()
4807 nexthdr = ipv6_hdr(skb)->nexthdr; in skb_checksum_setup_ipv6()
4809 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len); in skb_checksum_setup_ipv6()
4817 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
4824 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off); in skb_checksum_setup_ipv6()
4832 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
4839 hp = OPT_HDR(struct ip_auth_hdr, skb, off); in skb_checksum_setup_ipv6()
4847 err = skb_maybe_pull_tail(skb, in skb_checksum_setup_ipv6()
4854 hp = OPT_HDR(struct frag_hdr, skb, off); in skb_checksum_setup_ipv6()
4874 csum = skb_checksum_setup_ip(skb, nexthdr, off); in skb_checksum_setup_ipv6()
4879 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, in skb_checksum_setup_ipv6()
4880 &ipv6_hdr(skb)->daddr, in skb_checksum_setup_ipv6()
4881 skb->len - off, nexthdr, 0); in skb_checksum_setup_ipv6()
4893 int skb_checksum_setup(struct sk_buff *skb, bool recalculate) in skb_checksum_setup() argument
4897 switch (skb->protocol) { in skb_checksum_setup()
4899 err = skb_checksum_setup_ipv4(skb, recalculate); in skb_checksum_setup()
4903 err = skb_checksum_setup_ipv6(skb, recalculate); in skb_checksum_setup()
4928 static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb, in skb_checksum_maybe_trim() argument
4932 unsigned int len = skb_transport_offset(skb) + transport_len; in skb_checksum_maybe_trim()
4935 if (skb->len < len) in skb_checksum_maybe_trim()
4937 else if (skb->len == len) in skb_checksum_maybe_trim()
4938 return skb; in skb_checksum_maybe_trim()
4940 skb_chk = skb_clone(skb, GFP_ATOMIC); in skb_checksum_maybe_trim()
4968 struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb, in skb_checksum_trimmed() argument
4970 __sum16(*skb_chkf)(struct sk_buff *skb)) in skb_checksum_trimmed() argument
4973 unsigned int offset = skb_transport_offset(skb); in skb_checksum_trimmed()
4976 skb_chk = skb_checksum_maybe_trim(skb, transport_len); in skb_checksum_trimmed()
4993 if (skb_chk && skb_chk != skb) in skb_checksum_trimmed()
5001 void __skb_warn_lro_forwarding(const struct sk_buff *skb) in __skb_warn_lro_forwarding() argument
5004 skb->dev->name); in __skb_warn_lro_forwarding()
5008 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) in kfree_skb_partial() argument
5011 skb_release_head_state(skb); in kfree_skb_partial()
5012 kmem_cache_free(skbuff_head_cache, skb); in kfree_skb_partial()
5014 __kfree_skb(skb); in kfree_skb_partial()
5116 void skb_scrub_packet(struct sk_buff *skb, bool xnet) in skb_scrub_packet() argument
5118 skb->pkt_type = PACKET_HOST; in skb_scrub_packet()
5119 skb->skb_iif = 0; in skb_scrub_packet()
5120 skb->ignore_df = 0; in skb_scrub_packet()
5121 skb_dst_drop(skb); in skb_scrub_packet()
5122 skb_ext_reset(skb); in skb_scrub_packet()
5123 nf_reset_ct(skb); in skb_scrub_packet()
5124 nf_reset_trace(skb); in skb_scrub_packet()
5127 skb->offload_fwd_mark = 0; in skb_scrub_packet()
5128 skb->offload_l3_fwd_mark = 0; in skb_scrub_packet()
5134 ipvs_reset(skb); in skb_scrub_packet()
5135 skb->mark = 0; in skb_scrub_packet()
5136 skb->tstamp = 0; in skb_scrub_packet()
5150 static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) in skb_gso_transport_seglen() argument
5152 const struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_gso_transport_seglen()
5155 if (skb->encapsulation) { in skb_gso_transport_seglen()
5156 thlen = skb_inner_transport_header(skb) - in skb_gso_transport_seglen()
5157 skb_transport_header(skb); in skb_gso_transport_seglen()
5160 thlen += inner_tcp_hdrlen(skb); in skb_gso_transport_seglen()
5162 thlen = tcp_hdrlen(skb); in skb_gso_transport_seglen()
5163 } else if (unlikely(skb_is_gso_sctp(skb))) { in skb_gso_transport_seglen()
5185 static unsigned int skb_gso_network_seglen(const struct sk_buff *skb) in skb_gso_network_seglen() argument
5187 unsigned int hdr_len = skb_transport_header(skb) - in skb_gso_network_seglen()
5188 skb_network_header(skb); in skb_gso_network_seglen()
5190 return hdr_len + skb_gso_transport_seglen(skb); in skb_gso_network_seglen()
5202 static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) in skb_gso_mac_seglen() argument
5204 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); in skb_gso_mac_seglen()
5206 return hdr_len + skb_gso_transport_seglen(skb); in skb_gso_mac_seglen()
5230 static inline bool skb_gso_size_check(const struct sk_buff *skb, in skb_gso_size_check() argument
5233 const struct skb_shared_info *shinfo = skb_shinfo(skb); in skb_gso_size_check()
5242 skb_walk_frags(skb, iter) { in skb_gso_size_check()
5260 bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu) in skb_gso_validate_network_len() argument
5262 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu); in skb_gso_validate_network_len()
5275 bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len) in skb_gso_validate_mac_len() argument
5277 return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len); in skb_gso_validate_mac_len()
5281 static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb) in skb_reorder_vlan_header() argument
5286 if (skb_cow(skb, skb_headroom(skb)) < 0) { in skb_reorder_vlan_header()
5287 kfree_skb(skb); in skb_reorder_vlan_header()
5291 mac_len = skb->data - skb_mac_header(skb); in skb_reorder_vlan_header()
5293 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb), in skb_reorder_vlan_header()
5297 meta_len = skb_metadata_len(skb); in skb_reorder_vlan_header()
5299 meta = skb_metadata_end(skb) - meta_len; in skb_reorder_vlan_header()
5303 skb->mac_header += VLAN_HLEN; in skb_reorder_vlan_header()
5304 return skb; in skb_reorder_vlan_header()
5307 struct sk_buff *skb_vlan_untag(struct sk_buff *skb) in skb_vlan_untag() argument
5312 if (unlikely(skb_vlan_tag_present(skb))) { in skb_vlan_untag()
5314 return skb; in skb_vlan_untag()
5317 skb = skb_share_check(skb, GFP_ATOMIC); in skb_vlan_untag()
5318 if (unlikely(!skb)) in skb_vlan_untag()
5321 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) in skb_vlan_untag()
5324 vhdr = (struct vlan_hdr *)skb->data; in skb_vlan_untag()
5326 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci); in skb_vlan_untag()
5328 skb_pull_rcsum(skb, VLAN_HLEN); in skb_vlan_untag()
5329 vlan_set_encap_proto(skb, vhdr); in skb_vlan_untag()
5331 skb = skb_reorder_vlan_header(skb); in skb_vlan_untag()
5332 if (unlikely(!skb)) in skb_vlan_untag()
5335 skb_reset_network_header(skb); in skb_vlan_untag()
5336 skb_reset_transport_header(skb); in skb_vlan_untag()
5337 skb_reset_mac_len(skb); in skb_vlan_untag()
5339 return skb; in skb_vlan_untag()
5342 kfree_skb(skb); in skb_vlan_untag()
5347 int skb_ensure_writable(struct sk_buff *skb, int write_len) in skb_ensure_writable() argument
5349 if (!pskb_may_pull(skb, write_len)) in skb_ensure_writable()
5352 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len)) in skb_ensure_writable()
5355 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_ensure_writable()
5362 int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) in __skb_vlan_pop() argument
5365 int offset = skb->data - skb_mac_header(skb); in __skb_vlan_pop()
5374 err = skb_ensure_writable(skb, VLAN_ETH_HLEN); in __skb_vlan_pop()
5378 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in __skb_vlan_pop()
5380 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); in __skb_vlan_pop()
5383 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); in __skb_vlan_pop()
5384 __skb_pull(skb, VLAN_HLEN); in __skb_vlan_pop()
5386 vlan_set_encap_proto(skb, vhdr); in __skb_vlan_pop()
5387 skb->mac_header += VLAN_HLEN; in __skb_vlan_pop()
5389 if (skb_network_offset(skb) < ETH_HLEN) in __skb_vlan_pop()
5390 skb_set_network_header(skb, ETH_HLEN); in __skb_vlan_pop()
5392 skb_reset_mac_len(skb); in __skb_vlan_pop()
5401 int skb_vlan_pop(struct sk_buff *skb) in skb_vlan_pop() argument
5407 if (likely(skb_vlan_tag_present(skb))) { in skb_vlan_pop()
5408 __vlan_hwaccel_clear_tag(skb); in skb_vlan_pop()
5410 if (unlikely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
5413 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
5418 if (likely(!eth_type_vlan(skb->protocol))) in skb_vlan_pop()
5421 vlan_proto = skb->protocol; in skb_vlan_pop()
5422 err = __skb_vlan_pop(skb, &vlan_tci); in skb_vlan_pop()
5426 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_pop()
5434 int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci) in skb_vlan_push() argument
5436 if (skb_vlan_tag_present(skb)) { in skb_vlan_push()
5437 int offset = skb->data - skb_mac_header(skb); in skb_vlan_push()
5446 err = __vlan_insert_tag(skb, skb->vlan_proto, in skb_vlan_push()
5447 skb_vlan_tag_get(skb)); in skb_vlan_push()
5451 skb->protocol = skb->vlan_proto; in skb_vlan_push()
5452 skb->mac_len += VLAN_HLEN; in skb_vlan_push()
5454 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); in skb_vlan_push()
5456 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci); in skb_vlan_push()
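skb_vlan_pop() and skb_vlan_push() above move an 802.1Q tag between the packet data and the skb's hardware-accelerated tag fields, fixing up mac_header, mac_len, protocol and the rcsum as they go. A hedged sketch of re-tagging a forwarded frame, roughly what an OVS/tc-style action would do; the example_* wrapper and the VID parameter are illustrative.

#include <linux/skbuff.h>
#include <linux/if_vlan.h>

/* Sketch: strip whatever VLAN tag is present, then tag with a new VID. */
static int example_retag(struct sk_buff *skb, u16 new_vid)
{
        int err;

        err = skb_vlan_pop(skb);        /* clears the accel tag or pops one from the payload */
        if (err)
                return err;

        return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
}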
5462 static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr, in skb_mod_eth_type() argument
5465 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mod_eth_type()
5468 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mod_eth_type()
5486 int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto, in skb_mpls_push() argument
5496 if (skb->encapsulation) in skb_mpls_push()
5499 err = skb_cow_head(skb, MPLS_HLEN); in skb_mpls_push()
5503 if (!skb->inner_protocol) { in skb_mpls_push()
5504 skb_set_inner_network_header(skb, mac_len); in skb_mpls_push()
5505 skb_set_inner_protocol(skb, skb->protocol); in skb_mpls_push()
5508 skb_push(skb, MPLS_HLEN); in skb_mpls_push()
5509 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb), in skb_mpls_push()
5511 skb_reset_mac_header(skb); in skb_mpls_push()
5512 skb_set_network_header(skb, mac_len); in skb_mpls_push()
5514 lse = mpls_hdr(skb); in skb_mpls_push()
5516 skb_postpush_rcsum(skb, lse, MPLS_HLEN); in skb_mpls_push()
5519 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto); in skb_mpls_push()
5520 skb->protocol = mpls_proto; in skb_mpls_push()
5538 int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len, in skb_mpls_pop() argument
5543 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_pop()
5546 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN); in skb_mpls_pop()
5550 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN); in skb_mpls_pop()
5551 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb), in skb_mpls_pop()
5554 __skb_pull(skb, MPLS_HLEN); in skb_mpls_pop()
5555 skb_reset_mac_header(skb); in skb_mpls_pop()
5556 skb_set_network_header(skb, mac_len); in skb_mpls_pop()
5562 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN); in skb_mpls_pop()
5563 skb_mod_eth_type(skb, hdr, next_proto); in skb_mpls_pop()
5565 skb->protocol = next_proto; in skb_mpls_pop()
5581 int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse) in skb_mpls_update_lse() argument
5585 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_update_lse()
5588 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN); in skb_mpls_update_lse()
5592 if (skb->ip_summed == CHECKSUM_COMPLETE) { in skb_mpls_update_lse()
5593 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse }; in skb_mpls_update_lse()
5595 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum); in skb_mpls_update_lse()
5598 mpls_hdr(skb)->label_stack_entry = mpls_lse; in skb_mpls_update_lse()
5613 int skb_mpls_dec_ttl(struct sk_buff *skb) in skb_mpls_dec_ttl() argument
5618 if (unlikely(!eth_p_mpls(skb->protocol))) in skb_mpls_dec_ttl()
5621 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry); in skb_mpls_dec_ttl()
5629 return skb_mpls_update_lse(skb, cpu_to_be32(lse)); in skb_mpls_dec_ttl()
5652 struct sk_buff *skb; in alloc_skb_with_frags() local
5664 skb = alloc_skb(header_len, gfp_mask); in alloc_skb_with_frags()
5665 if (!skb) in alloc_skb_with_frags()
5668 skb->truesize += npages << PAGE_SHIFT; in alloc_skb_with_frags()
5693 skb_fill_page_desc(skb, i, page, 0, chunk); in alloc_skb_with_frags()
5697 return skb; in alloc_skb_with_frags()
5700 kfree_skb(skb); in alloc_skb_with_frags()
5706 static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off, in pskb_carve_inside_header() argument
5710 int size = skb_end_offset(skb); in pskb_carve_inside_header()
5716 if (skb_pfmemalloc(skb)) in pskb_carve_inside_header()
5727 skb_copy_from_linear_data_offset(skb, off, data, new_hlen); in pskb_carve_inside_header()
5728 skb->len -= off; in pskb_carve_inside_header()
5731 skb_shinfo(skb), in pskb_carve_inside_header()
5733 frags[skb_shinfo(skb)->nr_frags])); in pskb_carve_inside_header()
5734 if (skb_cloned(skb)) { in pskb_carve_inside_header()
5736 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_header()
5740 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_carve_inside_header()
5741 skb_frag_ref(skb, i); in pskb_carve_inside_header()
5742 if (skb_has_frag_list(skb)) in pskb_carve_inside_header()
5743 skb_clone_fraglist(skb); in pskb_carve_inside_header()
5744 skb_release_data(skb); in pskb_carve_inside_header()
5749 skb_free_head(skb); in pskb_carve_inside_header()
5752 skb->head = data; in pskb_carve_inside_header()
5753 skb->data = data; in pskb_carve_inside_header()
5754 skb->head_frag = 0; in pskb_carve_inside_header()
5756 skb->end = size; in pskb_carve_inside_header()
5758 skb->end = skb->head + size; in pskb_carve_inside_header()
5760 skb_set_tail_pointer(skb, skb_headlen(skb)); in pskb_carve_inside_header()
5761 skb_headers_offset_update(skb, 0); in pskb_carve_inside_header()
5762 skb->cloned = 0; in pskb_carve_inside_header()
5763 skb->hdr_len = 0; in pskb_carve_inside_header()
5764 skb->nohdr = 0; in pskb_carve_inside_header()
5765 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_header()
5770 static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
5775 static int pskb_carve_frag_list(struct sk_buff *skb, in pskb_carve_frag_list() argument
5829 static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off, in pskb_carve_inside_nonlinear() argument
5833 int size = skb_end_offset(skb); in pskb_carve_inside_nonlinear()
5835 const int nfrags = skb_shinfo(skb)->nr_frags; in pskb_carve_inside_nonlinear()
5840 if (skb_pfmemalloc(skb)) in pskb_carve_inside_nonlinear()
5851 skb_shinfo(skb), offsetof(struct skb_shared_info, in pskb_carve_inside_nonlinear()
5852 frags[skb_shinfo(skb)->nr_frags])); in pskb_carve_inside_nonlinear()
5853 if (skb_orphan_frags(skb, gfp_mask)) { in pskb_carve_inside_nonlinear()
5859 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]); in pskb_carve_inside_nonlinear()
5862 shinfo->frags[k] = skb_shinfo(skb)->frags[i]; in pskb_carve_inside_nonlinear()
5876 skb_frag_ref(skb, i); in pskb_carve_inside_nonlinear()
5882 if (skb_has_frag_list(skb)) in pskb_carve_inside_nonlinear()
5883 skb_clone_fraglist(skb); in pskb_carve_inside_nonlinear()
5887 pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask); in pskb_carve_inside_nonlinear()
5889 skb_release_data(skb); in pskb_carve_inside_nonlinear()
5891 skb->head = data; in pskb_carve_inside_nonlinear()
5892 skb->head_frag = 0; in pskb_carve_inside_nonlinear()
5893 skb->data = data; in pskb_carve_inside_nonlinear()
5895 skb->end = size; in pskb_carve_inside_nonlinear()
5897 skb->end = skb->head + size; in pskb_carve_inside_nonlinear()
5899 skb_reset_tail_pointer(skb); in pskb_carve_inside_nonlinear()
5900 skb_headers_offset_update(skb, 0); in pskb_carve_inside_nonlinear()
5901 skb->cloned = 0; in pskb_carve_inside_nonlinear()
5902 skb->hdr_len = 0; in pskb_carve_inside_nonlinear()
5903 skb->nohdr = 0; in pskb_carve_inside_nonlinear()
5904 skb->len -= off; in pskb_carve_inside_nonlinear()
5905 skb->data_len = skb->len; in pskb_carve_inside_nonlinear()
5906 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_carve_inside_nonlinear()
5911 static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp) in pskb_carve() argument
5913 int headlen = skb_headlen(skb); in pskb_carve()
5916 return pskb_carve_inside_header(skb, len, headlen, gfp); in pskb_carve()
5918 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp); in pskb_carve()
5924 struct sk_buff *pskb_extract(struct sk_buff *skb, int off, in pskb_extract() argument
5927 struct sk_buff *clone = skb_clone(skb, gfp); in pskb_extract()
5953 void skb_condense(struct sk_buff *skb) in skb_condense() argument
5955 if (skb->data_len) { in skb_condense()
5956 if (skb->data_len > skb->end - skb->tail || in skb_condense()
5957 skb_cloned(skb)) in skb_condense()
5961 __pskb_pull_tail(skb, skb->data_len); in skb_condense()
5970 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); in skb_condense()
6033 void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id) in skb_ext_add() argument
6038 if (skb->active_extensions) { in skb_ext_add()
6039 old = skb->extensions; in skb_ext_add()
6041 new = skb_ext_maybe_cow(old, skb->active_extensions); in skb_ext_add()
6061 skb->extensions = new; in skb_ext_add()
6062 skb->active_extensions |= 1 << id; in skb_ext_add()
6077 void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id) in __skb_ext_del() argument
6079 struct skb_ext *ext = skb->extensions; in __skb_ext_del()
6081 skb->active_extensions &= ~(1 << id); in __skb_ext_del()
6082 if (skb->active_extensions == 0) { in __skb_ext_del()
6083 skb->extensions = NULL; in __skb_ext_del()