Lines matching refs:skb in net/ipv4/ip_output.c (identifier cross-reference: each entry shows the source line number, the matching line, and the enclosing function)
86 ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
98 int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) in __ip_local_out() argument
100 struct iphdr *iph = ip_hdr(skb); in __ip_local_out()
102 iph->tot_len = htons(skb->len); in __ip_local_out()
108 skb = l3mdev_ip_out(sk, skb); in __ip_local_out()
109 if (unlikely(!skb)) in __ip_local_out()
112 skb->protocol = htons(ETH_P_IP); in __ip_local_out()
115 net, sk, skb, NULL, skb_dst(skb)->dev, in __ip_local_out()
119 int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb) in ip_local_out() argument
123 err = __ip_local_out(net, sk, skb); in ip_local_out()
125 err = dst_output(net, sk, skb); in ip_local_out()
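ip_local_out() is the hand-off point protocols use once an skb carries a complete IPv4 header and a route: __ip_local_out() (line 98) fixes up iph->tot_len, recomputes the header checksum via ip_send_check(), and runs the LOCAL_OUT netfilter hook, after which dst_output() (line 125) dispatches to the route's output method. A minimal caller sketch, assuming kernel context (<net/ip.h>, <net/sock.h>); the name xmit_one is illustrative, not from the tree:

static int xmit_one(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        /* skb_dst(skb) must already hold the route and the IPv4 header must
         * be built; __ip_local_out() fills in tot_len and redoes the header
         * checksum. */
        int err = ip_local_out(net, sk, skb);

        /* dst_output() may return positive congestion codes (NET_XMIT_CN);
         * net_xmit_eval() maps those to 0 for callers that only care about
         * hard errors. */
        return net_xmit_eval(err);
}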
144 int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk, in ip_build_and_send_pkt() argument
148 struct rtable *rt = skb_rtable(skb); in ip_build_and_send_pkt()
153 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0)); in ip_build_and_send_pkt()
154 skb_reset_network_header(skb); in ip_build_and_send_pkt()
155 iph = ip_hdr(skb); in ip_build_and_send_pkt()
173 ip_options_build(skb, &opt->opt, daddr, rt, 0); in ip_build_and_send_pkt()
176 skb->priority = sk->sk_priority; in ip_build_and_send_pkt()
177 if (!skb->mark) in ip_build_and_send_pkt()
178 skb->mark = sk->sk_mark; in ip_build_and_send_pkt()
181 return ip_local_out(net, skb->sk, skb); in ip_build_and_send_pkt()
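ip_build_and_send_pkt() serves callers that hand over an skb with no IP header yet (the TCP/DCCP SYN-ACK path): it pushes the IPv4 header in front of the payload, applies any IP options, copies sk_priority/sk_mark, and transmits via ip_local_out() (line 181). A hedged caller sketch; the trailing parameters (saddr, daddr, an ip_options_rcu pointer) are assumed from contemporary kernels since the listing truncates the prototype, and my_send_ctl is an illustrative name:

static int my_send_ctl(struct sk_buff *skb, const struct sock *sk,
                       __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
        /* the route must already be attached: skb_rtable(skb) is consulted
         * for the DF bit and the TTL */
        return net_xmit_eval(ip_build_and_send_pkt(skb, sk, saddr, daddr, opt));
}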
185 static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb) in ip_finish_output2() argument
187 struct dst_entry *dst = skb_dst(skb); in ip_finish_output2()
195 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len); in ip_finish_output2()
197 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len); in ip_finish_output2()
200 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { in ip_finish_output2()
203 skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev)); in ip_finish_output2()
205 kfree_skb(skb); in ip_finish_output2()
208 if (skb->sk) in ip_finish_output2()
209 skb_set_owner_w(skb2, skb->sk); in ip_finish_output2()
210 consume_skb(skb); in ip_finish_output2()
211 skb = skb2; in ip_finish_output2()
215 int res = lwtunnel_xmit(skb); in ip_finish_output2()
222 neigh = ip_neigh_for_gw(rt, skb, &is_v6gw); in ip_finish_output2()
226 sock_confirm_neigh(skb, neigh); in ip_finish_output2()
228 res = neigh_output(neigh, skb, is_v6gw); in ip_finish_output2()
236 kfree_skb(skb); in ip_finish_output2()
241 struct sk_buff *skb, unsigned int mtu) in ip_finish_output_gso() argument
249 if (skb_gso_validate_network_len(skb, mtu)) in ip_finish_output_gso()
250 return ip_finish_output2(net, sk, skb); in ip_finish_output_gso()
265 features = netif_skb_features(skb); in ip_finish_output_gso()
266 BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); in ip_finish_output_gso()
267 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); in ip_finish_output_gso()
269 kfree_skb(skb); in ip_finish_output_gso()
273 consume_skb(skb); in ip_finish_output_gso()
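These lines are the software-GSO fallback in ip_finish_output_gso(): when the segments of a GSO skb would exceed the route MTU at the network layer (typically forwarded GRO traffic), the skb is segmented in software and every segment is pushed through the fragmentation/output path on its own. A simplified sketch of that pattern (statistics and the IPCB-size BUILD_BUG_ON omitted; ip_fragment and ip_finish_output2 are the static helpers listed elsewhere in this file):

static int sw_gso_xmit(struct net *net, struct sock *sk, struct sk_buff *skb,
                       unsigned int mtu, netdev_features_t features)
{
        struct sk_buff *segs;
        int ret = 0;

        /* segment in software, then release the original GSO skb */
        segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
        if (IS_ERR_OR_NULL(segs)) {
                kfree_skb(skb);
                return -ENOMEM;
        }
        consume_skb(skb);

        while (segs) {
                struct sk_buff *next = segs->next;
                int err;

                segs->next = NULL;
                /* each segment may still need IP fragmentation */
                err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);
                if (err && !ret)
                        ret = err;
                segs = next;
        }
        return ret;
}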
290 static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) in __ip_finish_output() argument
296 if (skb_dst(skb)->xfrm) { in __ip_finish_output()
297 IPCB(skb)->flags |= IPSKB_REROUTED; in __ip_finish_output()
298 return dst_output(net, sk, skb); in __ip_finish_output()
301 mtu = ip_skb_dst_mtu(sk, skb); in __ip_finish_output()
302 if (skb_is_gso(skb)) in __ip_finish_output()
303 return ip_finish_output_gso(net, sk, skb, mtu); in __ip_finish_output()
305 if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU)) in __ip_finish_output()
306 return ip_fragment(net, sk, skb, mtu, ip_finish_output2); in __ip_finish_output()
308 return ip_finish_output2(net, sk, skb); in __ip_finish_output()
311 static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb) in ip_finish_output() argument
315 ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb); in ip_finish_output()
318 return __ip_finish_output(net, sk, skb); in ip_finish_output()
320 return __ip_finish_output(net, sk, skb) ? : ret; in ip_finish_output()
322 kfree_skb(skb); in ip_finish_output()
328 struct sk_buff *skb) in ip_mc_finish_output() argument
334 ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb); in ip_mc_finish_output()
342 kfree_skb(skb); in ip_mc_finish_output()
350 new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb)); in ip_mc_finish_output()
353 skb_dst_drop(skb); in ip_mc_finish_output()
354 skb_dst_set(skb, &new_rt->dst); in ip_mc_finish_output()
357 err = dev_loopback_xmit(net, sk, skb); in ip_mc_finish_output()
361 int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb) in ip_mc_output() argument
363 struct rtable *rt = skb_rtable(skb); in ip_mc_output()
369 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len); in ip_mc_output()
371 skb->dev = dev; in ip_mc_output()
372 skb->protocol = htons(ETH_P_IP); in ip_mc_output()
391 !(IPCB(skb)->flags & IPSKB_FORWARDED)) in ip_mc_output()
394 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); in ip_mc_output()
403 if (ip_hdr(skb)->ttl == 0) { in ip_mc_output()
404 kfree_skb(skb); in ip_mc_output()
410 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC); in ip_mc_output()
418 net, sk, skb, NULL, skb->dev, in ip_mc_output()
420 !(IPCB(skb)->flags & IPSKB_REROUTED)); in ip_mc_output()
423 int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb) in ip_output() argument
425 struct net_device *dev = skb_dst(skb)->dev; in ip_output()
427 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len); in ip_output()
429 skb->dev = dev; in ip_output()
430 skb->protocol = htons(ETH_P_IP); in ip_output()
433 net, sk, skb, NULL, dev, in ip_output()
435 !(IPCB(skb)->flags & IPSKB_REROUTED)); in ip_output()
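ip_output() is not called directly by the protocols: the IPv4 routing code installs it as the dst_entry's output method, and the dst_output() call seen at line 125 dispatches to it (or to ip_mc_output() or an xfrm output handler) through that pointer. Roughly, as in include/net/dst.h (newer kernels wrap the same dispatch in INDIRECT_CALL annotations):

static inline int dst_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        return skb_dst(skb)->output(net, sk, skb);
}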
453 int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, in __ip_queue_xmit() argument
470 rt = skb_rtable(skb); in __ip_queue_xmit()
499 skb_dst_set_noref(skb, &rt->dst); in __ip_queue_xmit()
506 skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0)); in __ip_queue_xmit()
507 skb_reset_network_header(skb); in __ip_queue_xmit()
508 iph = ip_hdr(skb); in __ip_queue_xmit()
510 if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df) in __ip_queue_xmit()
522 ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0); in __ip_queue_xmit()
525 ip_select_ident_segs(net, skb, sk, in __ip_queue_xmit()
526 skb_shinfo(skb)->gso_segs ?: 1); in __ip_queue_xmit()
529 skb->priority = sk->sk_priority; in __ip_queue_xmit()
530 skb->mark = sk->sk_mark; in __ip_queue_xmit()
532 res = ip_local_out(net, sk, skb); in __ip_queue_xmit()
539 kfree_skb(skb); in __ip_queue_xmit()
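__ip_queue_xmit() is the connected-socket transmit path (TCP and friends): it revalidates or rebuilds the cached route, pushes the IPv4 header in front of the transport header, picks an IP ID, and calls ip_local_out() (line 532). The exported ip_queue_xmit() wrapper just supplies the socket's TOS; roughly:

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
        /* callers that need a TOS other than the socket's use
         * __ip_queue_xmit() directly */
        return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
}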
568 static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, in ip_fragment() argument
572 struct iphdr *iph = ip_hdr(skb); in ip_fragment()
575 return ip_do_fragment(net, sk, skb, output); in ip_fragment()
577 if (unlikely(!skb->ignore_df || in ip_fragment()
578 (IPCB(skb)->frag_max_size && in ip_fragment()
579 IPCB(skb)->frag_max_size > mtu))) { in ip_fragment()
581 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, in ip_fragment()
583 kfree_skb(skb); in ip_fragment()
587 return ip_do_fragment(net, sk, skb, output); in ip_fragment()
590 void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph, in ip_fraglist_init() argument
593 unsigned int first_len = skb_pagelen(skb); in ip_fraglist_init()
595 iter->frag = skb_shinfo(skb)->frag_list; in ip_fraglist_init()
596 skb_frag_list_init(skb); in ip_fraglist_init()
602 skb->data_len = first_len - skb_headlen(skb); in ip_fraglist_init()
603 skb->len = first_len; in ip_fraglist_init()
610 static void ip_fraglist_ipcb_prepare(struct sk_buff *skb, in ip_fraglist_ipcb_prepare() argument
616 IPCB(to)->flags = IPCB(skb)->flags; in ip_fraglist_ipcb_prepare()
622 void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter) in ip_fraglist_prepare() argument
637 ip_copy_metadata(frag, skb); in ip_fraglist_prepare()
638 iter->offset += skb->len - hlen; in ip_fraglist_prepare()
647 void ip_frag_init(struct sk_buff *skb, unsigned int hlen, in ip_frag_init() argument
651 struct iphdr *iph = ip_hdr(skb); in ip_frag_init()
658 state->left = skb->len - hlen; /* Space per frame */ in ip_frag_init()
682 struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state) in ip_frag_next() argument
707 ip_copy_metadata(skb2, skb); in ip_frag_next()
718 if (skb->sk) in ip_frag_next()
719 skb_set_owner_w(skb2, skb->sk); in ip_frag_next()
725 skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen); in ip_frag_next()
730 if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len)) in ip_frag_next()
766 int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, in ip_do_fragment() argument
771 struct rtable *rt = skb_rtable(skb); in ip_do_fragment()
774 ktime_t tstamp = skb->tstamp; in ip_do_fragment()
779 if (skb->ip_summed == CHECKSUM_PARTIAL && in ip_do_fragment()
780 (err = skb_checksum_help(skb))) in ip_do_fragment()
787 iph = ip_hdr(skb); in ip_do_fragment()
789 mtu = ip_skb_dst_mtu(sk, skb); in ip_do_fragment()
790 if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu) in ip_do_fragment()
791 mtu = IPCB(skb)->frag_max_size; in ip_do_fragment()
799 IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE; in ip_do_fragment()
809 if (skb_has_frag_list(skb)) { in ip_do_fragment()
811 unsigned int first_len = skb_pagelen(skb); in ip_do_fragment()
816 skb_cloned(skb) || in ip_do_fragment()
817 skb_headroom(skb) < ll_rs) in ip_do_fragment()
820 skb_walk_frags(skb, frag) { in ip_do_fragment()
832 if (skb->sk) { in ip_do_fragment()
833 frag->sk = skb->sk; in ip_do_fragment()
836 skb->truesize -= frag->truesize; in ip_do_fragment()
840 ip_fraglist_init(skb, iph, hlen, &iter); in ip_do_fragment()
846 ip_fraglist_ipcb_prepare(skb, &iter); in ip_do_fragment()
847 ip_fraglist_prepare(skb, &iter); in ip_do_fragment()
850 skb->tstamp = tstamp; in ip_do_fragment()
851 err = output(net, sk, skb); in ip_do_fragment()
858 skb = ip_fraglist_next(&iter); in ip_do_fragment()
872 skb_walk_frags(skb, frag2) { in ip_do_fragment()
877 skb->truesize += frag2->truesize; in ip_do_fragment()
886 ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU, in ip_do_fragment()
896 skb2 = ip_frag_next(skb, &state); in ip_do_fragment()
901 ip_frag_ipcb(skb, skb2, first_frag, &state); in ip_do_fragment()
913 consume_skb(skb); in ip_do_fragment()
918 kfree_skb(skb); in ip_do_fragment()
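ip_do_fragment() drives fragmentation through a caller-supplied output callback, invoking it once per fragment: the fast path reuses an existing frag_list via ip_fraglist_init()/ip_fraglist_prepare(), the slow path slices the payload with ip_frag_init()/ip_frag_next(). A hypothetical caller with its own per-fragment routine (the my_* names are illustrative; a real callback would hand each fragment to neighbour resolution or a device, as ip_finish_output2() does):

static int my_frag_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        /* called once per fragment, each with its own IPv4 header,
         * fragment offset and MF bit already set */
        pr_debug("fragment len=%u\n", skb->len);
        kfree_skb(skb);          /* stub: just discard */
        return 0;
}

static int my_fragment(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        /* honours IPCB(skb)->frag_max_size and the route MTU internally */
        return ip_do_fragment(net, sk, skb, my_frag_output);
}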
925 ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb) in ip_generic_getfrag() argument
929 if (skb->ip_summed == CHECKSUM_PARTIAL) { in ip_generic_getfrag()
936 skb->csum = csum_block_add(skb->csum, csum, odd); in ip_generic_getfrag()
959 int len, int odd, struct sk_buff *skb), in __ip_append_data() argument
965 struct sk_buff *skb; in __ip_append_data() local
981 skb = skb_peek_tail(queue); in __ip_append_data()
983 exthdrlen = !skb ? rt->dst.header_len : 0; in __ip_append_data()
1015 uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb)); in __ip_append_data()
1018 extra_uref = !skb_zcopy(skb); /* only ref on new uarg */ in __ip_append_data()
1024 skb_zcopy_set(skb, uarg, &extra_uref); in __ip_append_data()
1037 if (!skb) in __ip_append_data()
1042 copy = mtu - skb->len; in __ip_append_data()
1044 copy = maxfraglen - skb->len; in __ip_append_data()
1054 skb_prev = skb; in __ip_append_data()
1091 skb = sock_alloc_send_skb(sk, in __ip_append_data()
1095 skb = NULL; in __ip_append_data()
1098 skb = alloc_skb(alloclen + hh_len + 15, in __ip_append_data()
1100 if (unlikely(!skb)) in __ip_append_data()
1103 if (!skb) in __ip_append_data()
1109 skb->ip_summed = csummode; in __ip_append_data()
1110 skb->csum = 0; in __ip_append_data()
1111 skb_reserve(skb, hh_len); in __ip_append_data()
1116 data = skb_put(skb, fraglen + exthdrlen - pagedlen); in __ip_append_data()
1117 skb_set_network_header(skb, exthdrlen); in __ip_append_data()
1118 skb->transport_header = (skb->network_header + in __ip_append_data()
1123 skb->csum = skb_copy_and_csum_bits( in __ip_append_data()
1127 skb->csum); in __ip_append_data()
1133 if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) { in __ip_append_data()
1135 kfree_skb(skb); in __ip_append_data()
1146 skb_shinfo(skb)->tx_flags = cork->tx_flags; in __ip_append_data()
1148 skb_shinfo(skb)->tskey = tskey; in __ip_append_data()
1150 skb_zcopy_set(skb, uarg, &extra_uref); in __ip_append_data()
1153 skb_set_dst_pending_confirm(skb, 1); in __ip_append_data()
1158 if (!skb->destructor) { in __ip_append_data()
1159 skb->destructor = sock_wfree; in __ip_append_data()
1160 skb->sk = sk; in __ip_append_data()
1161 wmem_alloc_delta += skb->truesize; in __ip_append_data()
1163 __skb_queue_tail(queue, skb); in __ip_append_data()
1171 skb_tailroom(skb) >= copy) { in __ip_append_data()
1174 off = skb->len; in __ip_append_data()
1175 if (getfrag(from, skb_put(skb, copy), in __ip_append_data()
1176 offset, copy, off, skb) < 0) { in __ip_append_data()
1177 __skb_trim(skb, off); in __ip_append_data()
1182 int i = skb_shinfo(skb)->nr_frags; in __ip_append_data()
1188 if (!skb_can_coalesce(skb, i, pfrag->page, in __ip_append_data()
1194 __skb_fill_page_desc(skb, i, pfrag->page, in __ip_append_data()
1196 skb_shinfo(skb)->nr_frags = ++i; in __ip_append_data()
1202 offset, copy, skb->len, skb) < 0) in __ip_append_data()
1206 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); in __ip_append_data()
1207 skb->len += copy; in __ip_append_data()
1208 skb->data_len += copy; in __ip_append_data()
1209 skb->truesize += copy; in __ip_append_data()
1212 err = skb_zerocopy_iter_dgram(skb, from, copy); in __ip_append_data()
1298 int odd, struct sk_buff *skb), in ip_append_data() argument
1326 struct sk_buff *skb; in ip_append_page() local
1366 skb = skb_peek_tail(&sk->sk_write_queue); in ip_append_page()
1367 if (!skb) in ip_append_page()
1374 len = mtu - skb->len; in ip_append_page()
1376 len = maxfraglen - skb->len; in ip_append_page()
1382 skb_prev = skb; in ip_append_page()
1386 skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation); in ip_append_page()
1387 if (unlikely(!skb)) { in ip_append_page()
1395 skb->ip_summed = CHECKSUM_NONE; in ip_append_page()
1396 skb->csum = 0; in ip_append_page()
1397 skb_reserve(skb, hh_len); in ip_append_page()
1402 skb_put(skb, fragheaderlen + fraggap); in ip_append_page()
1403 skb_reset_network_header(skb); in ip_append_page()
1404 skb->transport_header = (skb->network_header + in ip_append_page()
1407 skb->csum = skb_copy_and_csum_bits(skb_prev, in ip_append_page()
1409 skb_transport_header(skb), in ip_append_page()
1412 skb->csum); in ip_append_page()
1419 __skb_queue_tail(&sk->sk_write_queue, skb); in ip_append_page()
1426 if (skb_append_pagefrags(skb, page, offset, len)) { in ip_append_page()
1431 if (skb->ip_summed == CHECKSUM_NONE) { in ip_append_page()
1434 skb->csum = csum_block_add(skb->csum, csum, skb->len); in ip_append_page()
1437 skb->len += len; in ip_append_page()
1438 skb->data_len += len; in ip_append_page()
1439 skb->truesize += len; in ip_append_page()
1470 struct sk_buff *skb, *tmp_skb; in __ip_make_skb() local
1480 skb = __skb_dequeue(queue); in __ip_make_skb()
1481 if (!skb) in __ip_make_skb()
1483 tail_skb = &(skb_shinfo(skb)->frag_list); in __ip_make_skb()
1486 if (skb->data < skb_network_header(skb)) in __ip_make_skb()
1487 __skb_pull(skb, skb_network_offset(skb)); in __ip_make_skb()
1489 __skb_pull(tmp_skb, skb_network_header_len(skb)); in __ip_make_skb()
1492 skb->len += tmp_skb->len; in __ip_make_skb()
1493 skb->data_len += tmp_skb->len; in __ip_make_skb()
1494 skb->truesize += tmp_skb->truesize; in __ip_make_skb()
1503 skb->ignore_df = ip_sk_ignore_df(sk); in __ip_make_skb()
1510 (skb->len <= dst_mtu(&rt->dst) && in __ip_make_skb()
1524 iph = ip_hdr(skb); in __ip_make_skb()
1532 ip_select_ident(net, skb, sk); in __ip_make_skb()
1536 ip_options_build(skb, opt, cork->addr, rt, 0); in __ip_make_skb()
1539 skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority; in __ip_make_skb()
1540 skb->mark = cork->mark; in __ip_make_skb()
1541 skb->tstamp = cork->transmit_time; in __ip_make_skb()
1547 skb_dst_set(skb, &rt->dst); in __ip_make_skb()
1551 skb_transport_header(skb))->type); in __ip_make_skb()
1555 return skb; in __ip_make_skb()
1558 int ip_send_skb(struct net *net, struct sk_buff *skb) in ip_send_skb() argument
1562 err = ip_local_out(net, skb->sk, skb); in ip_send_skb()
1575 struct sk_buff *skb; in ip_push_pending_frames() local
1577 skb = ip_finish_skb(sk, fl4); in ip_push_pending_frames()
1578 if (!skb) in ip_push_pending_frames()
1582 return ip_send_skb(sock_net(sk), skb); in ip_push_pending_frames()
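ip_append_data() and ip_push_pending_frames() form the datagram ("corked") transmit model: __ip_append_data() (lines 959-1214) queues the payload as MTU-sized skbs on the socket write queue, and ip_push_pending_frames() collapses the queue into one packet chain via __ip_make_skb() and sends it through ip_send_skb()/ip_local_out(). A hedged sketch loosely modelled on the UDP path; my_sendmsg_tail is an illustrative name, and ipc, fl4 and the route are assumed to be set up the way udp_sendmsg() does:

static int my_sendmsg_tail(struct sock *sk, struct msghdr *msg, size_t len,
                           struct ipcm_cookie *ipc, struct rtable **rtp,
                           struct flowi4 *fl4)
{
        int err;

        lock_sock(sk);
        /* transhdrlen is 0 here (no transport header reserved); UDP passes
         * sizeof(struct udphdr) and fills the header in before pushing */
        err = ip_append_data(sk, fl4, ip_generic_getfrag, msg, len, 0,
                             ipc, rtp, msg->msg_flags);
        if (err)
                ip_flush_pending_frames(sk);
        else if (!(msg->msg_flags & MSG_MORE))
                err = ip_push_pending_frames(sk, fl4);
        release_sock(sk);
        return err;
}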
1592 struct sk_buff *skb; in __ip_flush_pending_frames() local
1594 while ((skb = __skb_dequeue_tail(queue)) != NULL) in __ip_flush_pending_frames()
1595 kfree_skb(skb); in __ip_flush_pending_frames()
1608 int len, int odd, struct sk_buff *skb), in ip_make_skb() argument
1643 int len, int odd, struct sk_buff *skb) in ip_reply_glue_bits() argument
1648 skb->csum = csum_block_add(skb->csum, csum, odd); in ip_reply_glue_bits()
1656 void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, in ip_send_unicast_reply() argument
1665 struct rtable *rt = skb_rtable(skb); in ip_send_unicast_reply()
1671 if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt)) in ip_send_unicast_reply()
1686 if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) in ip_send_unicast_reply()
1687 oif = skb->skb_iif; in ip_send_unicast_reply()
1690 IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark, in ip_send_unicast_reply()
1692 RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol, in ip_send_unicast_reply()
1695 tcp_hdr(skb)->source, tcp_hdr(skb)->dest, in ip_send_unicast_reply()
1697 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4)); in ip_send_unicast_reply()
1704 sk->sk_protocol = ip_hdr(skb)->protocol; in ip_send_unicast_reply()