Lines Matching refs:skb (net/core/skbuff.c; a short usage sketch follows the listing)
119 static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr, in skb_panic() argument
123 msg, addr, skb->len, sz, skb->head, skb->data, in skb_panic()
124 (unsigned long)skb->tail, (unsigned long)skb->end, in skb_panic()
125 skb->dev ? skb->dev->name : "<NULL>"); in skb_panic()
129 static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_over_panic() argument
131 skb_panic(skb, sz, addr, __func__); in skb_over_panic()
134 static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr) in skb_under_panic() argument
136 skb_panic(skb, sz, addr, __func__); in skb_under_panic()
184 struct sk_buff *skb; in __alloc_skb_head() local
187 skb = kmem_cache_alloc_node(skbuff_head_cache, in __alloc_skb_head()
189 if (!skb) in __alloc_skb_head()
197 memset(skb, 0, offsetof(struct sk_buff, tail)); in __alloc_skb_head()
198 skb->head = NULL; in __alloc_skb_head()
199 skb->truesize = sizeof(struct sk_buff); in __alloc_skb_head()
200 atomic_set(&skb->users, 1); in __alloc_skb_head()
203 skb->mac_header = ~0U; in __alloc_skb_head()
206 return skb; in __alloc_skb_head()
231 struct sk_buff *skb; in __alloc_skb() local
242 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node); in __alloc_skb()
243 if (!skb) in __alloc_skb()
245 prefetchw(skb); in __alloc_skb()
269 memset(skb, 0, offsetof(struct sk_buff, tail)); in __alloc_skb()
271 skb->truesize = SKB_TRUESIZE(size); in __alloc_skb()
272 skb->pfmemalloc = pfmemalloc; in __alloc_skb()
273 atomic_set(&skb->users, 1); in __alloc_skb()
274 skb->head = data; in __alloc_skb()
275 skb->data = data; in __alloc_skb()
276 skb_reset_tail_pointer(skb); in __alloc_skb()
277 skb->end = skb->tail + size; in __alloc_skb()
279 skb->mac_header = ~0U; in __alloc_skb()
280 skb->transport_header = ~0U; in __alloc_skb()
284 shinfo = skb_shinfo(skb); in __alloc_skb()
290 struct sk_buff *child = skb + 1; in __alloc_skb()
295 skb->fclone = SKB_FCLONE_ORIG; in __alloc_skb()
302 return skb; in __alloc_skb()
304 kmem_cache_free(cache, skb); in __alloc_skb()
305 skb = NULL; in __alloc_skb()
330 struct sk_buff *skb; in build_skb() local
333 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC); in build_skb()
334 if (!skb) in build_skb()
339 memset(skb, 0, offsetof(struct sk_buff, tail)); in build_skb()
340 skb->truesize = SKB_TRUESIZE(size); in build_skb()
341 skb->head_frag = frag_size != 0; in build_skb()
342 atomic_set(&skb->users, 1); in build_skb()
343 skb->head = data; in build_skb()
344 skb->data = data; in build_skb()
345 skb_reset_tail_pointer(skb); in build_skb()
346 skb->end = skb->tail + size; in build_skb()
348 skb->mac_header = ~0U; in build_skb()
349 skb->transport_header = ~0U; in build_skb()
353 shinfo = skb_shinfo(skb); in build_skb()
358 return skb; in build_skb()
445 struct sk_buff *skb = NULL; in __netdev_alloc_skb() local
458 skb = build_skb(data, fragsz); in __netdev_alloc_skb()
459 if (unlikely(!skb)) in __netdev_alloc_skb()
463 skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, in __netdev_alloc_skb()
466 if (likely(skb)) { in __netdev_alloc_skb()
467 skb_reserve(skb, NET_SKB_PAD); in __netdev_alloc_skb()
468 skb->dev = dev; in __netdev_alloc_skb()
470 return skb; in __netdev_alloc_skb()
474 void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off, in skb_add_rx_frag() argument
477 skb_fill_page_desc(skb, i, page, off, size); in skb_add_rx_frag()
478 skb->len += size; in skb_add_rx_frag()
479 skb->data_len += size; in skb_add_rx_frag()
480 skb->truesize += truesize; in skb_add_rx_frag()
490 static inline void skb_drop_fraglist(struct sk_buff *skb) in skb_drop_fraglist() argument
492 skb_drop_list(&skb_shinfo(skb)->frag_list); in skb_drop_fraglist()
495 static void skb_clone_fraglist(struct sk_buff *skb) in skb_clone_fraglist() argument
499 skb_walk_frags(skb, list) in skb_clone_fraglist()
503 static void skb_free_head(struct sk_buff *skb) in skb_free_head() argument
505 if (skb->head_frag) in skb_free_head()
506 put_page(virt_to_head_page(skb->head)); in skb_free_head()
508 kfree(skb->head); in skb_free_head()
511 static void skb_release_data(struct sk_buff *skb) in skb_release_data() argument
513 if (!skb->cloned || in skb_release_data()
514 !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1, in skb_release_data()
515 &skb_shinfo(skb)->dataref)) { in skb_release_data()
516 if (skb_shinfo(skb)->nr_frags) { in skb_release_data()
518 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in skb_release_data()
519 skb_frag_unref(skb, i); in skb_release_data()
526 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { in skb_release_data()
529 uarg = skb_shinfo(skb)->destructor_arg; in skb_release_data()
534 if (skb_has_frag_list(skb)) in skb_release_data()
535 skb_drop_fraglist(skb); in skb_release_data()
537 skb_free_head(skb); in skb_release_data()
544 static void kfree_skbmem(struct sk_buff *skb) in kfree_skbmem() argument
549 switch (skb->fclone) { in kfree_skbmem()
551 kmem_cache_free(skbuff_head_cache, skb); in kfree_skbmem()
555 fclone_ref = (atomic_t *) (skb + 2); in kfree_skbmem()
557 kmem_cache_free(skbuff_fclone_cache, skb); in kfree_skbmem()
561 fclone_ref = (atomic_t *) (skb + 1); in kfree_skbmem()
562 other = skb - 1; in kfree_skbmem()
567 skb->fclone = SKB_FCLONE_UNAVAILABLE; in kfree_skbmem()
575 static void skb_release_head_state(struct sk_buff *skb) in skb_release_head_state() argument
577 skb_dst_drop(skb); in skb_release_head_state()
579 secpath_put(skb->sp); in skb_release_head_state()
581 if (skb->destructor) { in skb_release_head_state()
583 skb->destructor(skb); in skb_release_head_state()
586 nf_conntrack_put(skb->nfct); in skb_release_head_state()
589 nf_conntrack_put_reasm(skb->nfct_reasm); in skb_release_head_state()
592 nf_bridge_put(skb->nf_bridge); in skb_release_head_state()
596 skb->tc_index = 0; in skb_release_head_state()
598 skb->tc_verd = 0; in skb_release_head_state()
604 static void skb_release_all(struct sk_buff *skb) in skb_release_all() argument
606 skb_release_head_state(skb); in skb_release_all()
607 if (likely(skb->head)) in skb_release_all()
608 skb_release_data(skb); in skb_release_all()
620 void __kfree_skb(struct sk_buff *skb) in __kfree_skb() argument
622 skb_release_all(skb); in __kfree_skb()
623 kfree_skbmem(skb); in __kfree_skb()
634 void kfree_skb(struct sk_buff *skb) in kfree_skb() argument
636 if (unlikely(!skb)) in kfree_skb()
638 if (likely(atomic_read(&skb->users) == 1)) in kfree_skb()
640 else if (likely(!atomic_dec_and_test(&skb->users))) in kfree_skb()
642 trace_kfree_skb(skb, __builtin_return_address(0)); in kfree_skb()
643 __kfree_skb(skb); in kfree_skb()
665 void skb_tx_error(struct sk_buff *skb) in skb_tx_error() argument
667 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) { in skb_tx_error()
670 uarg = skb_shinfo(skb)->destructor_arg; in skb_tx_error()
673 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; in skb_tx_error()
686 void consume_skb(struct sk_buff *skb) in consume_skb() argument
688 if (unlikely(!skb)) in consume_skb()
690 if (likely(atomic_read(&skb->users) == 1)) in consume_skb()
692 else if (likely(!atomic_dec_and_test(&skb->users))) in consume_skb()
694 trace_consume_skb(skb); in consume_skb()
695 __kfree_skb(skb); in consume_skb()
752 static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) in __skb_clone() argument
754 #define C(x) n->x = skb->x in __skb_clone()
758 __copy_skb_header(n, skb); in __skb_clone()
763 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; in __skb_clone()
775 atomic_inc(&(skb_shinfo(skb)->dataref)); in __skb_clone()
776 skb->cloned = 1; in __skb_clone()
814 int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask) in skb_copy_ubufs() argument
817 int num_frags = skb_shinfo(skb)->nr_frags; in skb_copy_ubufs()
819 struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg; in skb_copy_ubufs()
823 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_ubufs()
844 skb_frag_unref(skb, i); in skb_copy_ubufs()
850 __skb_fill_page_desc(skb, i, head, 0, in skb_copy_ubufs()
851 skb_shinfo(skb)->frags[i].size); in skb_copy_ubufs()
855 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY; in skb_copy_ubufs()
874 struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) in skb_clone() argument
878 if (skb_orphan_frags(skb, gfp_mask)) in skb_clone()
881 n = skb + 1; in skb_clone()
882 if (skb->fclone == SKB_FCLONE_ORIG && in skb_clone()
888 if (skb_pfmemalloc(skb)) in skb_clone()
900 return __skb_clone(n, skb); in skb_clone()
904 static void skb_headers_offset_update(struct sk_buff *skb, int off) in skb_headers_offset_update() argument
907 skb->transport_header += off; in skb_headers_offset_update()
908 skb->network_header += off; in skb_headers_offset_update()
909 if (skb_mac_header_was_set(skb)) in skb_headers_offset_update()
910 skb->mac_header += off; in skb_headers_offset_update()
911 skb->inner_transport_header += off; in skb_headers_offset_update()
912 skb->inner_network_header += off; in skb_headers_offset_update()
913 skb->inner_mac_header += off; in skb_headers_offset_update()
935 static inline int skb_alloc_rx_flag(const struct sk_buff *skb) in skb_alloc_rx_flag() argument
937 if (skb_pfmemalloc(skb)) in skb_alloc_rx_flag()
959 struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) in skb_copy() argument
961 int headerlen = skb_headroom(skb); in skb_copy()
962 unsigned int size = skb_end_offset(skb) + skb->data_len; in skb_copy()
964 skb_alloc_rx_flag(skb), NUMA_NO_NODE); in skb_copy()
972 skb_put(n, skb->len); in skb_copy()
974 if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len)) in skb_copy()
977 copy_skb_header(n, skb); in skb_copy()
996 struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask) in __pskb_copy() argument
998 unsigned int size = skb_headlen(skb) + headroom; in __pskb_copy()
1000 skb_alloc_rx_flag(skb), NUMA_NO_NODE); in __pskb_copy()
1008 skb_put(n, skb_headlen(skb)); in __pskb_copy()
1010 skb_copy_from_linear_data(skb, n->data, n->len); in __pskb_copy()
1012 n->truesize += skb->data_len; in __pskb_copy()
1013 n->data_len = skb->data_len; in __pskb_copy()
1014 n->len = skb->len; in __pskb_copy()
1016 if (skb_shinfo(skb)->nr_frags) { in __pskb_copy()
1019 if (skb_orphan_frags(skb, gfp_mask)) { in __pskb_copy()
1024 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_copy()
1025 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i]; in __pskb_copy()
1026 skb_frag_ref(skb, i); in __pskb_copy()
1031 if (skb_has_frag_list(skb)) { in __pskb_copy()
1032 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list; in __pskb_copy()
1036 copy_skb_header(n, skb); in __pskb_copy()
1058 int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, in pskb_expand_head() argument
1063 int size = nhead + skb_end_offset(skb) + ntail; in pskb_expand_head()
1068 if (skb_shared(skb)) in pskb_expand_head()
1073 if (skb_pfmemalloc(skb)) in pskb_expand_head()
1084 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head); in pskb_expand_head()
1087 skb_shinfo(skb), in pskb_expand_head()
1088 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags])); in pskb_expand_head()
1095 if (skb_cloned(skb)) { in pskb_expand_head()
1097 if (skb_orphan_frags(skb, gfp_mask)) in pskb_expand_head()
1099 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in pskb_expand_head()
1100 skb_frag_ref(skb, i); in pskb_expand_head()
1102 if (skb_has_frag_list(skb)) in pskb_expand_head()
1103 skb_clone_fraglist(skb); in pskb_expand_head()
1105 skb_release_data(skb); in pskb_expand_head()
1107 skb_free_head(skb); in pskb_expand_head()
1109 off = (data + nhead) - skb->head; in pskb_expand_head()
1111 skb->head = data; in pskb_expand_head()
1112 skb->head_frag = 0; in pskb_expand_head()
1113 skb->data += off; in pskb_expand_head()
1115 skb->end = size; in pskb_expand_head()
1118 skb->end = skb->head + size; in pskb_expand_head()
1120 skb->tail += off; in pskb_expand_head()
1121 skb_headers_offset_update(skb, off); in pskb_expand_head()
1123 if (skb->ip_summed == CHECKSUM_PARTIAL) in pskb_expand_head()
1124 skb->csum_start += nhead; in pskb_expand_head()
1125 skb->cloned = 0; in pskb_expand_head()
1126 skb->hdr_len = 0; in pskb_expand_head()
1127 skb->nohdr = 0; in pskb_expand_head()
1128 atomic_set(&skb_shinfo(skb)->dataref, 1); in pskb_expand_head()
1140 struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) in skb_realloc_headroom() argument
1143 int delta = headroom - skb_headroom(skb); in skb_realloc_headroom()
1146 skb2 = pskb_copy(skb, GFP_ATOMIC); in skb_realloc_headroom()
1148 skb2 = skb_clone(skb, GFP_ATOMIC); in skb_realloc_headroom()
1177 struct sk_buff *skb_copy_expand(const struct sk_buff *skb, in skb_copy_expand() argument
1184 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom, in skb_copy_expand()
1185 gfp_mask, skb_alloc_rx_flag(skb), in skb_copy_expand()
1187 int oldheadroom = skb_headroom(skb); in skb_copy_expand()
1197 skb_put(n, skb->len); in skb_copy_expand()
1207 if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off, in skb_copy_expand()
1208 skb->len + head_copy_len)) in skb_copy_expand()
1211 copy_skb_header(n, skb); in skb_copy_expand()
1236 int skb_pad(struct sk_buff *skb, int pad) in skb_pad() argument
1242 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) { in skb_pad()
1243 memset(skb->data+skb->len, 0, pad); in skb_pad()
1247 ntail = skb->data_len + pad - (skb->end - skb->tail); in skb_pad()
1248 if (likely(skb_cloned(skb) || ntail > 0)) { in skb_pad()
1249 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); in skb_pad()
1257 err = skb_linearize(skb); in skb_pad()
1261 memset(skb->data + skb->len, 0, pad); in skb_pad()
1265 kfree_skb(skb); in skb_pad()
1279 unsigned char *skb_put(struct sk_buff *skb, unsigned int len) in skb_put() argument
1281 unsigned char *tmp = skb_tail_pointer(skb); in skb_put()
1282 SKB_LINEAR_ASSERT(skb); in skb_put()
1283 skb->tail += len; in skb_put()
1284 skb->len += len; in skb_put()
1285 if (unlikely(skb->tail > skb->end)) in skb_put()
1286 skb_over_panic(skb, len, __builtin_return_address(0)); in skb_put()
1300 unsigned char *skb_push(struct sk_buff *skb, unsigned int len) in skb_push() argument
1302 skb->data -= len; in skb_push()
1303 skb->len += len; in skb_push()
1304 if (unlikely(skb->data<skb->head)) in skb_push()
1305 skb_under_panic(skb, len, __builtin_return_address(0)); in skb_push()
1306 return skb->data; in skb_push()
1320 unsigned char *skb_pull(struct sk_buff *skb, unsigned int len) in skb_pull() argument
1322 return skb_pull_inline(skb, len); in skb_pull()
1335 void skb_trim(struct sk_buff *skb, unsigned int len) in skb_trim() argument
1337 if (skb->len > len) in skb_trim()
1338 __skb_trim(skb, len); in skb_trim()
1345 int ___pskb_trim(struct sk_buff *skb, unsigned int len) in ___pskb_trim() argument
1349 int offset = skb_headlen(skb); in ___pskb_trim()
1350 int nfrags = skb_shinfo(skb)->nr_frags; in ___pskb_trim()
1354 if (skb_cloned(skb) && in ___pskb_trim()
1355 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))) in ___pskb_trim()
1363 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); in ___pskb_trim()
1370 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); in ___pskb_trim()
1373 skb_shinfo(skb)->nr_frags = i; in ___pskb_trim()
1376 skb_frag_unref(skb, i); in ___pskb_trim()
1378 if (skb_has_frag_list(skb)) in ___pskb_trim()
1379 skb_drop_fraglist(skb); in ___pskb_trim()
1383 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp); in ___pskb_trim()
1415 if (len > skb_headlen(skb)) { in ___pskb_trim()
1416 skb->data_len -= skb->len - len; in ___pskb_trim()
1417 skb->len = len; in ___pskb_trim()
1419 skb->len = len; in ___pskb_trim()
1420 skb->data_len = 0; in ___pskb_trim()
1421 skb_set_tail_pointer(skb, len); in ___pskb_trim()
1453 unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta) in __pskb_pull_tail() argument
1459 int i, k, eat = (skb->tail + delta) - skb->end; in __pskb_pull_tail()
1461 if (eat > 0 || skb_cloned(skb)) { in __pskb_pull_tail()
1462 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, in __pskb_pull_tail()
1467 if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) in __pskb_pull_tail()
1473 if (!skb_has_frag_list(skb)) in __pskb_pull_tail()
1478 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
1479 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
1494 struct sk_buff *list = skb_shinfo(skb)->frag_list; in __pskb_pull_tail()
1530 while ((list = skb_shinfo(skb)->frag_list) != insp) { in __pskb_pull_tail()
1531 skb_shinfo(skb)->frag_list = list->next; in __pskb_pull_tail()
1537 skb_shinfo(skb)->frag_list = clone; in __pskb_pull_tail()
1545 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_pull_tail()
1546 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_pull_tail()
1549 skb_frag_unref(skb, i); in __pskb_pull_tail()
1552 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; in __pskb_pull_tail()
1554 skb_shinfo(skb)->frags[k].page_offset += eat; in __pskb_pull_tail()
1555 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); in __pskb_pull_tail()
1561 skb_shinfo(skb)->nr_frags = k; in __pskb_pull_tail()
1563 skb->tail += delta; in __pskb_pull_tail()
1564 skb->data_len -= delta; in __pskb_pull_tail()
1566 return skb_tail_pointer(skb); in __pskb_pull_tail()
1585 int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) in skb_copy_bits() argument
1587 int start = skb_headlen(skb); in skb_copy_bits()
1591 if (offset > (int)skb->len - len) in skb_copy_bits()
1598 skb_copy_from_linear_data_offset(skb, offset, to, copy); in skb_copy_bits()
1605 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_bits()
1607 skb_frag_t *f = &skb_shinfo(skb)->frags[i]; in skb_copy_bits()
1632 skb_walk_frags(skb, frag_iter) { in skb_copy_bits()
1766 static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe, in __skb_splice_bits() argument
1777 if (__splice_segment(virt_to_page(skb->data), in __skb_splice_bits()
1778 (unsigned long) skb->data & (PAGE_SIZE - 1), in __skb_splice_bits()
1779 skb_headlen(skb), in __skb_splice_bits()
1781 skb_head_is_locked(skb), in __skb_splice_bits()
1788 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) { in __skb_splice_bits()
1789 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; in __skb_splice_bits()
1806 int skb_splice_bits(struct sk_buff *skb, unsigned int offset, in skb_splice_bits() argument
1821 struct sock *sk = skb->sk; in skb_splice_bits()
1828 if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk)) in skb_splice_bits()
1836 skb_walk_frags(skb, frag_iter) { in skb_splice_bits()
1874 int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) in skb_store_bits() argument
1876 int start = skb_headlen(skb); in skb_store_bits()
1880 if (offset > (int)skb->len - len) in skb_store_bits()
1886 skb_copy_to_linear_data_offset(skb, offset, from, copy); in skb_store_bits()
1893 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_store_bits()
1894 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_store_bits()
1919 skb_walk_frags(skb, frag_iter) { in skb_store_bits()
1948 __wsum skb_checksum(const struct sk_buff *skb, int offset, in skb_checksum() argument
1951 int start = skb_headlen(skb); in skb_checksum()
1960 csum = csum_partial(skb->data + offset, copy, csum); in skb_checksum()
1967 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_checksum()
1969 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_checksum()
1993 skb_walk_frags(skb, frag_iter) { in skb_checksum()
2021 __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, in skb_copy_and_csum_bits() argument
2024 int start = skb_headlen(skb); in skb_copy_and_csum_bits()
2033 csum = csum_partial_copy_nocheck(skb->data + offset, to, in skb_copy_and_csum_bits()
2042 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in skb_copy_and_csum_bits()
2047 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_copy_and_csum_bits()
2051 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in skb_copy_and_csum_bits()
2071 skb_walk_frags(skb, frag_iter) { in skb_copy_and_csum_bits()
2098 void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) in skb_copy_and_csum_dev() argument
2103 if (skb->ip_summed == CHECKSUM_PARTIAL) in skb_copy_and_csum_dev()
2104 csstart = skb_checksum_start_offset(skb); in skb_copy_and_csum_dev()
2106 csstart = skb_headlen(skb); in skb_copy_and_csum_dev()
2108 BUG_ON(csstart > skb_headlen(skb)); in skb_copy_and_csum_dev()
2110 skb_copy_from_linear_data(skb, to, csstart); in skb_copy_and_csum_dev()
2113 if (csstart != skb->len) in skb_copy_and_csum_dev()
2114 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart, in skb_copy_and_csum_dev()
2115 skb->len - csstart, 0); in skb_copy_and_csum_dev()
2117 if (skb->ip_summed == CHECKSUM_PARTIAL) { in skb_copy_and_csum_dev()
2118 long csstuff = csstart + skb->csum_offset; in skb_copy_and_csum_dev()
2176 struct sk_buff *skb; in skb_queue_purge() local
2177 while ((skb = skb_dequeue(list)) != NULL) in skb_queue_purge()
2178 kfree_skb(skb); in skb_queue_purge()
2234 void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) in skb_unlink() argument
2239 __skb_unlink(skb, list); in skb_unlink()
2286 static inline void skb_split_inside_header(struct sk_buff *skb, in skb_split_inside_header() argument
2292 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len), in skb_split_inside_header()
2295 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) in skb_split_inside_header()
2296 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i]; in skb_split_inside_header()
2298 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags; in skb_split_inside_header()
2299 skb_shinfo(skb)->nr_frags = 0; in skb_split_inside_header()
2300 skb1->data_len = skb->data_len; in skb_split_inside_header()
2302 skb->data_len = 0; in skb_split_inside_header()
2303 skb->len = len; in skb_split_inside_header()
2304 skb_set_tail_pointer(skb, len); in skb_split_inside_header()
2307 static inline void skb_split_no_header(struct sk_buff *skb, in skb_split_no_header() argument
2312 const int nfrags = skb_shinfo(skb)->nr_frags; in skb_split_no_header()
2314 skb_shinfo(skb)->nr_frags = 0; in skb_split_no_header()
2315 skb1->len = skb1->data_len = skb->len - len; in skb_split_no_header()
2316 skb->len = len; in skb_split_no_header()
2317 skb->data_len = len - pos; in skb_split_no_header()
2320 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in skb_split_no_header()
2323 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; in skb_split_no_header()
2334 skb_frag_ref(skb, i); in skb_split_no_header()
2337 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); in skb_split_no_header()
2338 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
2342 skb_shinfo(skb)->nr_frags++; in skb_split_no_header()
2354 void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len) in skb_split() argument
2356 int pos = skb_headlen(skb); in skb_split()
2358 skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; in skb_split()
2360 skb_split_inside_header(skb, skb1, len, pos); in skb_split()
2362 skb_split_no_header(skb, skb1, len, pos); in skb_split()
2370 static int skb_prepare_for_shift(struct sk_buff *skb) in skb_prepare_for_shift() argument
2372 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC); in skb_prepare_for_shift()
2393 int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) in skb_shift() argument
2398 BUG_ON(shiftlen > skb->len); in skb_shift()
2399 BUG_ON(skb_headlen(skb)); /* Would corrupt stream */ in skb_shift()
2404 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
2418 if (skb_prepare_for_shift(skb) || in skb_shift()
2423 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
2437 if ((shiftlen == skb->len) && in skb_shift()
2438 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to)) in skb_shift()
2441 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt)) in skb_shift()
2444 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) { in skb_shift()
2448 fragfrom = &skb_shinfo(skb)->frags[from]; in skb_shift()
2476 fragfrom = &skb_shinfo(skb)->frags[0]; in skb_shift()
2485 while (from < skb_shinfo(skb)->nr_frags) in skb_shift()
2486 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++]; in skb_shift()
2487 skb_shinfo(skb)->nr_frags = to; in skb_shift()
2489 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags); in skb_shift()
2496 skb->ip_summed = CHECKSUM_PARTIAL; in skb_shift()
2499 skb->len -= shiftlen; in skb_shift()
2500 skb->data_len -= shiftlen; in skb_shift()
2501 skb->truesize -= shiftlen; in skb_shift()
2519 void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, in skb_prepare_seq_read() argument
2524 st->root_skb = st->cur_skb = skb; in skb_prepare_seq_read()
2658 unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, in skb_find_text() argument
2667 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state)); in skb_find_text()
2685 int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, in skb_append_datato_frags() argument
2687 int len, int odd, struct sk_buff *skb), in skb_append_datato_frags() argument
2690 int frg_cnt = skb_shinfo(skb)->nr_frags; in skb_append_datato_frags()
2708 offset, copy, 0, skb); in skb_append_datato_frags()
2713 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset, in skb_append_datato_frags()
2719 skb->truesize += copy; in skb_append_datato_frags()
2721 skb->len += copy; in skb_append_datato_frags()
2722 skb->data_len += copy; in skb_append_datato_frags()
2743 unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len) in skb_pull_rcsum() argument
2745 BUG_ON(len > skb->len); in skb_pull_rcsum()
2746 skb->len -= len; in skb_pull_rcsum()
2747 BUG_ON(skb->len < skb->data_len); in skb_pull_rcsum()
2748 skb_postpull_rcsum(skb, skb->data, len); in skb_pull_rcsum()
2749 return skb->data += len; in skb_pull_rcsum()
2762 struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) in skb_segment() argument
2766 struct sk_buff *fskb = skb_shinfo(skb)->frag_list; in skb_segment()
2767 unsigned int mss = skb_shinfo(skb)->gso_size; in skb_segment()
2768 unsigned int doffset = skb->data - skb_mac_header(skb); in skb_segment()
2770 unsigned int tnl_hlen = skb_tnl_header_len(skb); in skb_segment()
2776 int nfrags = skb_shinfo(skb)->nr_frags; in skb_segment()
2781 proto = skb_network_protocol(skb); in skb_segment()
2786 __skb_push(skb, doffset); in skb_segment()
2787 headroom = skb_headroom(skb); in skb_segment()
2788 pos = skb_headlen(skb); in skb_segment()
2796 len = skb->len - offset; in skb_segment()
2800 hsize = skb_headlen(skb) - offset; in skb_segment()
2827 GFP_ATOMIC, skb_alloc_rx_flag(skb), in skb_segment()
2843 __copy_skb_header(nskb, skb); in skb_segment()
2844 nskb->mac_len = skb->mac_len; in skb_segment()
2851 skb_set_network_header(nskb, skb->mac_len); in skb_segment()
2853 skb_network_header_len(skb)); in skb_segment()
2855 skb_copy_from_linear_data_offset(skb, -tnl_hlen, in skb_segment()
2859 if (fskb != skb_shinfo(skb)->frag_list) in skb_segment()
2864 nskb->csum = skb_copy_and_csum_bits(skb, offset, in skb_segment()
2872 skb_copy_from_linear_data_offset(skb, offset, in skb_segment()
2875 skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; in skb_segment()
2878 *frag = skb_shinfo(skb)->frags[i]; in skb_segment()
2929 } while ((offset += len) < skb->len); in skb_segment()
2934 while ((skb = segs)) { in skb_segment()
2935 segs = skb->next; in skb_segment()
2936 kfree_skb(skb); in skb_segment()
2942 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb) in skb_gro_receive() argument
2946 struct skb_shared_info *skbinfo = skb_shinfo(skb); in skb_gro_receive()
2949 unsigned int len = skb_gro_len(skb); in skb_gro_receive()
2950 unsigned int offset = skb_gro_offset(skb); in skb_gro_receive()
2951 unsigned int headlen = skb_headlen(skb); in skb_gro_receive()
2983 delta_truesize = skb->truesize - in skb_gro_receive()
2984 SKB_TRUESIZE(skb_end_offset(skb)); in skb_gro_receive()
2986 skb->truesize -= skb->data_len; in skb_gro_receive()
2987 skb->len -= skb->data_len; in skb_gro_receive()
2988 skb->data_len = 0; in skb_gro_receive()
2990 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE; in skb_gro_receive()
2992 } else if (skb->head_frag) { in skb_gro_receive()
2995 struct page *page = virt_to_head_page(skb->head); in skb_gro_receive()
3002 first_offset = skb->data - in skb_gro_receive()
3015 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff)); in skb_gro_receive()
3016 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD; in skb_gro_receive()
3057 delta_truesize = skb->truesize; in skb_gro_receive()
3063 skb->data_len -= eat; in skb_gro_receive()
3064 skb->len -= eat; in skb_gro_receive()
3068 __skb_pull(skb, offset); in skb_gro_receive()
3070 NAPI_GRO_CB(p)->last->next = skb; in skb_gro_receive()
3071 NAPI_GRO_CB(p)->last = skb; in skb_gro_receive()
3072 skb_header_release(skb); in skb_gro_receive()
3080 NAPI_GRO_CB(skb)->same_flow = 1; in skb_gro_receive()
3111 __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in __skb_to_sgvec() argument
3113 int start = skb_headlen(skb); in __skb_to_sgvec()
3121 sg_set_buf(sg, skb->data + offset, copy); in __skb_to_sgvec()
3128 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __skb_to_sgvec()
3133 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); in __skb_to_sgvec()
3135 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in __skb_to_sgvec()
3149 skb_walk_frags(skb, frag_iter) { in __skb_to_sgvec()
3170 int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) in skb_to_sgvec() argument
3172 int nsg = __skb_to_sgvec(skb, sg, offset, len); in skb_to_sgvec()
3197 int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer) in skb_cow_data() argument
3207 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) && in skb_cow_data()
3208 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL) in skb_cow_data()
3212 if (!skb_has_frag_list(skb)) { in skb_cow_data()
3218 if (skb_tailroom(skb) < tailbits && in skb_cow_data()
3219 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC)) in skb_cow_data()
3223 *trailer = skb; in skb_cow_data()
3230 skb_p = &skb_shinfo(skb)->frag_list; in skb_cow_data()
3290 static void sock_rmem_free(struct sk_buff *skb) in sock_rmem_free() argument
3292 struct sock *sk = skb->sk; in sock_rmem_free()
3294 atomic_sub(skb->truesize, &sk->sk_rmem_alloc); in sock_rmem_free()
3300 int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb) in sock_queue_err_skb() argument
3302 int len = skb->len; in sock_queue_err_skb()
3304 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= in sock_queue_err_skb()
3308 skb_orphan(skb); in sock_queue_err_skb()
3309 skb->sk = sk; in sock_queue_err_skb()
3310 skb->destructor = sock_rmem_free; in sock_queue_err_skb()
3311 atomic_add(skb->truesize, &sk->sk_rmem_alloc); in sock_queue_err_skb()
3314 skb_dst_force(skb); in sock_queue_err_skb()
3316 skb_queue_tail(&sk->sk_error_queue, skb); in sock_queue_err_skb()
3328 struct sk_buff *skb; in skb_tstamp_tx() local
3346 skb = skb_clone(orig_skb, GFP_ATOMIC); in skb_tstamp_tx()
3347 if (!skb) in skb_tstamp_tx()
3350 serr = SKB_EXT_ERR(skb); in skb_tstamp_tx()
3355 err = sock_queue_err_skb(sk, skb); in skb_tstamp_tx()
3358 kfree_skb(skb); in skb_tstamp_tx()
3362 void skb_complete_wifi_ack(struct sk_buff *skb, bool acked) in skb_complete_wifi_ack() argument
3364 struct sock *sk = skb->sk; in skb_complete_wifi_ack()
3368 skb->wifi_acked_valid = 1; in skb_complete_wifi_ack()
3369 skb->wifi_acked = acked; in skb_complete_wifi_ack()
3371 serr = SKB_EXT_ERR(skb); in skb_complete_wifi_ack()
3376 err = sock_queue_err_skb(sk, skb); in skb_complete_wifi_ack()
3378 kfree_skb(skb); in skb_complete_wifi_ack()
3395 bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off) in skb_partial_csum_set() argument
3397 if (unlikely(start > skb_headlen(skb)) || in skb_partial_csum_set()
3398 unlikely((int)start + off > skb_headlen(skb) - 2)) { in skb_partial_csum_set()
3400 start, off, skb_headlen(skb)); in skb_partial_csum_set()
3403 skb->ip_summed = CHECKSUM_PARTIAL; in skb_partial_csum_set()
3404 skb->csum_start = skb_headroom(skb) + start; in skb_partial_csum_set()
3405 skb->csum_offset = off; in skb_partial_csum_set()
3406 skb_set_transport_header(skb, start); in skb_partial_csum_set()
3411 void __skb_warn_lro_forwarding(const struct sk_buff *skb) in __skb_warn_lro_forwarding() argument
3414 skb->dev->name); in __skb_warn_lro_forwarding()
3418 void kfree_skb_partial(struct sk_buff *skb, bool head_stolen) in kfree_skb_partial() argument
3421 skb_release_head_state(skb); in kfree_skb_partial()
3422 kmem_cache_free(skbuff_head_cache, skb); in kfree_skb_partial()
3424 __kfree_skb(skb); in kfree_skb_partial()
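
The listing above indexes every skb reference in the buffer-management helpers: the allocation paths (__alloc_skb, build_skb, __netdev_alloc_skb), the pointer-moving helpers (skb_put, skb_push, skb_pull, skb_trim), the copy/clone/expand paths, and the release paths. As a quick orientation, here is a minimal sketch, not taken from the indexed file, of how the core helpers are normally combined: alloc_skb() sizes the linear buffer, skb_reserve() opens headroom, skb_put() appends payload at the tail, skb_push() prepends a header into that headroom, and kfree_skb() releases the reference. The function name build_example_skb and the 16/64-byte sizes are purely illustrative, and the snippet assumes an ordinary kernel-module context.

#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/string.h>

static struct sk_buff *build_example_skb(void)
{
	struct sk_buff *skb;
	unsigned char *payload, *hdr;

	/* Linear area sized for headroom + a 16-byte header + 64 bytes of payload. */
	skb = alloc_skb(NET_SKB_PAD + 16 + 64, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, NET_SKB_PAD + 16);	/* open headroom; skb->len stays 0 */

	payload = skb_put(skb, 64);		/* advance tail, grow skb->len */
	memset(payload, 0, 64);

	hdr = skb_push(skb, 16);		/* move data back into the headroom */
	memset(hdr, 0, 16);

	return skb;				/* caller drops it with kfree_skb(skb) */
}

Note the split between the two release entry points indexed above: kfree_skb() (line 634) frees the buffer and fires the drop tracepoint, while consume_skb() (line 686) performs the same free but is traced as a normal, non-error release, which is why drivers call it after successful transmission.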