
Lines Matching +full:foo +full:- +full:queue in net/ipv4/ip_output.c (the Linux IPv4 output path)

1 // SPDX-License-Identifier: GPL-2.0-only
39 * silently drop skb instead of failing with -EPERM.
79 #include <linux/bpf-cgroup.h>
94 iph->check = 0; in ip_send_check()
95 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); in ip_send_check()
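ip_send_check() zeroes the check field and recomputes it with ip_fast_csum(), which takes the header length in 32-bit words (iph->ihl). A minimal portable sketch of the same RFC 1071 computation, for illustration only; the kernel's version is per-architecture optimized:

    #include <stdint.h>

    /* Illustrative stand-in for ip_fast_csum(): one's-complement sum over
     * the IPv4 header. 'ihl' is the header length in 32-bit words, and the
     * check field must already be zeroed, as ip_send_check() does above. */
    static uint16_t ip_header_checksum(const void *hdr, unsigned int ihl)
    {
            const uint16_t *p = hdr;
            uint32_t sum = 0;
            unsigned int i;

            for (i = 0; i < ihl * 2; i++)   /* two 16-bit words per ihl unit */
                    sum += p[i];
            while (sum >> 16)               /* fold the end-around carries */
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;          /* one's complement of the sum */
    }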
103 iph->tot_len = htons(skb->len); in __ip_local_out()
113 skb->protocol = htons(ETH_P_IP); in __ip_local_out()
116 net, sk, skb, NULL, skb_dst(skb)->dev, in __ip_local_out()
134 int ttl = inet->uc_ttl; in ip_select_ttl()
155 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0)); in ip_build_and_send_pkt()
158 iph->version = 4; in ip_build_and_send_pkt()
159 iph->ihl = 5; in ip_build_and_send_pkt()
160 iph->tos = tos; in ip_build_and_send_pkt()
161 iph->ttl = ip_select_ttl(inet, &rt->dst); in ip_build_and_send_pkt()
162 iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr); in ip_build_and_send_pkt()
163 iph->saddr = saddr; in ip_build_and_send_pkt()
164 iph->protocol = sk->sk_protocol; in ip_build_and_send_pkt()
166 if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) { in ip_build_and_send_pkt()
167 iph->frag_off = htons(IP_DF); in ip_build_and_send_pkt()
168 iph->id = 0; in ip_build_and_send_pkt()
170 iph->frag_off = 0; in ip_build_and_send_pkt()
174 if (sk->sk_protocol == IPPROTO_TCP) in ip_build_and_send_pkt()
175 iph->id = (__force __be16)prandom_u32(); in ip_build_and_send_pkt()
180 if (opt && opt->opt.optlen) { in ip_build_and_send_pkt()
181 iph->ihl += opt->opt.optlen>>2; in ip_build_and_send_pkt()
182 ip_options_build(skb, &opt->opt, daddr, rt, 0); in ip_build_and_send_pkt()
185 skb->priority = sk->sk_priority; in ip_build_and_send_pkt()
186 if (!skb->mark) in ip_build_and_send_pkt()
187 skb->mark = sk->sk_mark; in ip_build_and_send_pkt()
190 return ip_local_out(net, skb->sk, skb); in ip_build_and_send_pkt()
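For a connected socket's first packet, ip_build_and_send_pkt() fills the IPv4 header field by field, as excerpted above. A hedged userspace mirror of the same layout; build_ipv4_header() is a hypothetical helper, and IP_DF is defined locally because the fragment-flag macros live in the kernel-internal net/ip.h:

    #include <linux/ip.h>       /* struct iphdr */
    #include <arpa/inet.h>      /* htons */
    #include <stdint.h>
    #include <string.h>

    #ifndef IP_DF
    #define IP_DF 0x4000        /* don't-fragment flag, as in net/ip.h */
    #endif

    /* 'buf' needs at least 20 bytes; addresses are network byte order. */
    static void build_ipv4_header(void *buf, uint32_t saddr, uint32_t daddr,
                                  uint8_t protocol, uint16_t payload_len,
                                  uint8_t ttl)
    {
            struct iphdr *iph = buf;

            memset(iph, 0, sizeof(*iph));
            iph->version  = 4;
            iph->ihl      = 5;              /* no options: 5 * 4 = 20 bytes */
            iph->tot_len  = htons(sizeof(*iph) + payload_len);
            iph->frag_off = htons(IP_DF);   /* the small-packet case above */
            iph->id       = 0;              /* with DF set, id may stay 0 */
            iph->ttl      = ttl;
            iph->protocol = protocol;
            iph->saddr    = saddr;
            iph->daddr    = daddr;
            /* iph->check stays 0 here; see the checksum sketch above */
    }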
198 struct net_device *dev = dst->dev; in ip_finish_output2()
203 if (rt->rt_type == RTN_MULTICAST) { in ip_finish_output2()
204 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len); in ip_finish_output2()
205 } else if (rt->rt_type == RTN_BROADCAST) in ip_finish_output2()
206 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len); in ip_finish_output2()
209 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) { in ip_finish_output2()
215 return -ENOMEM; in ip_finish_output2()
217 if (skb->sk) in ip_finish_output2()
218 skb_set_owner_w(skb2, skb->sk); in ip_finish_output2()
223 if (lwtunnel_xmit_redirect(dst->lwtstate)) { in ip_finish_output2()
246 return -EINVAL; in ip_finish_output2()
261 /* Slowpath - GSO segment length exceeds the egress MTU. in ip_finish_output_gso()
264 * - Forwarding of a TCP GRO skb, when DF flag is not set. in ip_finish_output_gso()
265 * - Forwarding of an skb that arrived on a virtualization interface in ip_finish_output_gso()
266 * (virtio-net/vhost/tap) with TSO/GSO size set by other network in ip_finish_output_gso()
268 * - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an in ip_finish_output_gso()
270 * - Arriving GRO skb (or GSO skb in a virtualized environment) that is in ip_finish_output_gso()
279 return -ENOMEM; in ip_finish_output_gso()
303 if (skb_dst(skb)->xfrm) { in __ip_finish_output()
304 IPCB(skb)->flags |= IPSKB_REROUTED; in __ip_finish_output()
312 if (skb->len > mtu || IPCB(skb)->frag_max_size) in __ip_finish_output()
353 /* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting in ip_mc_finish_output()
354 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten, in ip_mc_finish_output()
357 new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb)); in ip_mc_finish_output()
359 new_rt->rt_iif = 0; in ip_mc_finish_output()
361 skb_dst_set(skb, &new_rt->dst); in ip_mc_finish_output()
371 struct net_device *dev = rt->dst.dev; in ip_mc_output()
376 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len); in ip_mc_output()
378 skb->dev = dev; in ip_mc_output()
379 skb->protocol = htons(ETH_P_IP); in ip_mc_output()
385 if (rt->rt_flags&RTCF_MULTICAST) { in ip_mc_output()
397 ((rt->rt_flags & RTCF_LOCAL) || in ip_mc_output()
398 !(IPCB(skb)->flags & IPSKB_FORWARDED)) in ip_mc_output()
404 net, sk, newskb, NULL, newskb->dev, in ip_mc_output()
410 if (ip_hdr(skb)->ttl == 0) { in ip_mc_output()
416 if (rt->rt_flags&RTCF_BROADCAST) { in ip_mc_output()
420 net, sk, newskb, NULL, newskb->dev, in ip_mc_output()
425 net, sk, skb, NULL, skb->dev, in ip_mc_output()
427 !(IPCB(skb)->flags & IPSKB_REROUTED)); in ip_mc_output()
432 struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev; in ip_output()
434 IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len); in ip_output()
436 skb->dev = dev; in ip_output()
437 skb->protocol = htons(ETH_P_IP); in ip_output()
442 !(IPCB(skb)->flags & IPSKB_REROUTED)); in ip_output()
448 * iph->saddr = fl4->saddr;
449 * iph->daddr = fl4->daddr;
454 offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr)); in ip_copy_addrs()
456 iph->saddr = fl4->saddr; in ip_copy_addrs()
457 iph->daddr = fl4->daddr; in ip_copy_addrs()
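The BUILD_BUG_ON excerpted above is what lets ip_copy_addrs() copy both addresses with a single 8-byte memcpy: in struct iphdr and struct flowi4 alike, saddr is immediately followed by daddr. The same trick with hypothetical stand-in structs:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    /* Stand-ins for struct iphdr / struct flowi4; only the layout matters. */
    struct hdr  { uint32_t saddr, daddr; };
    struct flow { uint32_t saddr, daddr; };

    static void copy_addrs(struct hdr *h, const struct flow *f)
    {
            /* compile-time layout guard, like the BUILD_BUG_ON above */
            _Static_assert(offsetof(struct flow, daddr) ==
                           offsetof(struct flow, saddr) + sizeof(uint32_t),
                           "saddr and daddr must be adjacent");
            memcpy(&h->saddr, &f->saddr, sizeof(f->saddr) + sizeof(f->daddr));
    }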
460 /* Note: skb->sk can be different from sk, in case of tunnels */
476 inet_opt = rcu_dereference(inet->inet_opt); in __ip_queue_xmit()
477 fl4 = &fl->u.ip4; in __ip_queue_xmit()
488 daddr = inet->inet_daddr; in __ip_queue_xmit()
489 if (inet_opt && inet_opt->opt.srr) in __ip_queue_xmit()
490 daddr = inet_opt->opt.faddr; in __ip_queue_xmit()
497 daddr, inet->inet_saddr, in __ip_queue_xmit()
498 inet->inet_dport, in __ip_queue_xmit()
499 inet->inet_sport, in __ip_queue_xmit()
500 sk->sk_protocol, in __ip_queue_xmit()
502 sk->sk_bound_dev_if); in __ip_queue_xmit()
505 sk_setup_caps(sk, &rt->dst); in __ip_queue_xmit()
507 skb_dst_set_noref(skb, &rt->dst); in __ip_queue_xmit()
510 if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway) in __ip_queue_xmit()
514 skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0)); in __ip_queue_xmit()
518 if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df) in __ip_queue_xmit()
519 iph->frag_off = htons(IP_DF); in __ip_queue_xmit()
521 iph->frag_off = 0; in __ip_queue_xmit()
522 iph->ttl = ip_select_ttl(inet, &rt->dst); in __ip_queue_xmit()
523 iph->protocol = sk->sk_protocol; in __ip_queue_xmit()
526 /* Transport layer set skb->h.foo itself. */ in __ip_queue_xmit()
528 if (inet_opt && inet_opt->opt.optlen) { in __ip_queue_xmit()
529 iph->ihl += inet_opt->opt.optlen >> 2; in __ip_queue_xmit()
530 ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0); in __ip_queue_xmit()
534 skb_shinfo(skb)->gso_segs ?: 1); in __ip_queue_xmit()
536 /* TODO : should we use skb->sk here instead of sk ? */ in __ip_queue_xmit()
537 skb->priority = sk->sk_priority; in __ip_queue_xmit()
538 skb->mark = sk->sk_mark; in __ip_queue_xmit()
548 return -EHOSTUNREACH; in __ip_queue_xmit()
554 return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos); in ip_queue_xmit()
560 to->pkt_type = from->pkt_type; in ip_copy_metadata()
561 to->priority = from->priority; in ip_copy_metadata()
562 to->protocol = from->protocol; in ip_copy_metadata()
563 to->skb_iif = from->skb_iif; in ip_copy_metadata()
566 to->dev = from->dev; in ip_copy_metadata()
567 to->mark = from->mark; in ip_copy_metadata()
572 to->tc_index = from->tc_index; in ip_copy_metadata()
577 to->ipvs_property = from->ipvs_property; in ip_copy_metadata()
588 if ((iph->frag_off & htons(IP_DF)) == 0) in ip_fragment()
591 if (unlikely(!skb->ignore_df || in ip_fragment()
592 (IPCB(skb)->frag_max_size && in ip_fragment()
593 IPCB(skb)->frag_max_size > mtu))) { in ip_fragment()
598 return -EMSGSIZE; in ip_fragment()
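ip_fragment() only proceeds when DF is clear or the skb is allowed to ignore it; otherwise the packet is dropped with -EMSGSIZE. On the userspace side, IP_PMTUDISC_DO is what pins DF onto a socket's packets; a hedged fragment:

    #include <netinet/in.h>     /* IP_MTU_DISCOVER, IP_PMTUDISC_DO */
    #include <sys/socket.h>

    /* With this set, oversized sends fail with EMSGSIZE instead of being
     * fragmented; the same -EMSGSIZE path is visible above. */
    static int force_df(int fd)
    {
            int val = IP_PMTUDISC_DO;

            return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
                              &val, sizeof(val));
    }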
609 iter->frag = skb_shinfo(skb)->frag_list; in ip_fraglist_init()
612 iter->offset = 0; in ip_fraglist_init()
613 iter->iph = iph; in ip_fraglist_init()
614 iter->hlen = hlen; in ip_fraglist_init()
616 skb->data_len = first_len - skb_headlen(skb); in ip_fraglist_init()
617 skb->len = first_len; in ip_fraglist_init()
618 iph->tot_len = htons(first_len); in ip_fraglist_init()
619 iph->frag_off = htons(IP_MF); in ip_fraglist_init()
626 unsigned int hlen = iter->hlen; in ip_fraglist_prepare()
627 struct iphdr *iph = iter->iph; in ip_fraglist_prepare()
630 frag = iter->frag; in ip_fraglist_prepare()
631 frag->ip_summed = CHECKSUM_NONE; in ip_fraglist_prepare()
636 iter->iph = ip_hdr(frag); in ip_fraglist_prepare()
637 iph = iter->iph; in ip_fraglist_prepare()
638 iph->tot_len = htons(frag->len); in ip_fraglist_prepare()
640 iter->offset += skb->len - hlen; in ip_fraglist_prepare()
641 iph->frag_off = htons(iter->offset >> 3); in ip_fraglist_prepare()
642 if (frag->next) in ip_fraglist_prepare()
643 iph->frag_off |= htons(IP_MF); in ip_fraglist_prepare()
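ip_fraglist_prepare() packs the on-wire frag_off as shown above: the offset in 8-byte units in the low 13 bits, flags in the top bits. A small runnable demonstration, with IP_MF/IP_OFFSET values as in net/ip.h:

    #include <arpa/inet.h>      /* htons, ntohs */
    #include <stdint.h>
    #include <stdio.h>

    #define IP_MF     0x2000    /* more-fragments flag */
    #define IP_OFFSET 0x1fff    /* mask for the 13 offset bits */

    int main(void)
    {
            unsigned int offset = 1480;     /* byte offset of this fragment */
            int more_fragments = 1;
            uint16_t frag_off = htons(offset >> 3);

            if (more_fragments)
                    frag_off |= htons(IP_MF);

            printf("on the wire: 0x%04x, offset back out: %u\n",
                   (unsigned)ntohs(frag_off),
                   (unsigned)((ntohs(frag_off) & IP_OFFSET) << 3));
            return 0;
    }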
655 state->DF = DF; in ip_frag_init()
656 state->hlen = hlen; in ip_frag_init()
657 state->ll_rs = ll_rs; in ip_frag_init()
658 state->mtu = mtu; in ip_frag_init()
660 state->left = skb->len - hlen; /* Space per frame */ in ip_frag_init()
661 state->ptr = hlen; /* Where to start from */ in ip_frag_init()
663 state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3; in ip_frag_init()
664 state->not_last_frag = iph->frag_off & htons(IP_MF); in ip_frag_init()
672 IPCB(to)->flags = IPCB(from)->flags; in ip_frag_ipcb()
686 unsigned int len = state->left; in ip_frag_next()
690 len = state->left; in ip_frag_next()
691 /* IF: it doesn't fit, use 'mtu' - the data space left */ in ip_frag_next()
692 if (len > state->mtu) in ip_frag_next()
693 len = state->mtu; in ip_frag_next()
696 if (len < state->left) { in ip_frag_next()
701 skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC); in ip_frag_next()
703 return ERR_PTR(-ENOMEM); in ip_frag_next()
710 skb_reserve(skb2, state->ll_rs); in ip_frag_next()
711 skb_put(skb2, len + state->hlen); in ip_frag_next()
713 skb2->transport_header = skb2->network_header + state->hlen; in ip_frag_next()
720 if (skb->sk) in ip_frag_next()
721 skb_set_owner_w(skb2, skb->sk); in ip_frag_next()
727 skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen); in ip_frag_next()
732 if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len)) in ip_frag_next()
734 state->left -= len; in ip_frag_next()
740 iph->frag_off = htons((state->offset >> 3)); in ip_frag_next()
741 if (state->DF) in ip_frag_next()
742 iph->frag_off |= htons(IP_DF); in ip_frag_next()
748 if (state->left > 0 || state->not_last_frag) in ip_frag_next()
749 iph->frag_off |= htons(IP_MF); in ip_frag_next()
750 state->ptr += len; in ip_frag_next()
751 state->offset += len; in ip_frag_next()
753 iph->tot_len = htons(len + state->hlen); in ip_frag_next()
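ip_frag_init() and ip_frag_next() implement the slow path: carve the payload into chunks of at most mtu bytes of data space, keeping every chunk but the last 8-byte aligned. A runnable userspace walk of the same sizing logic, assuming a 4000-byte packet and a 1500-byte device MTU:

    #include <stdio.h>

    int main(void)
    {
            unsigned int hlen = 20, dev_mtu = 1500;
            unsigned int mtu = dev_mtu - hlen;   /* data space per fragment */
            unsigned int left = 4000 - hlen;     /* payload left, as in ip_frag_init() */
            unsigned int offset = 0;

            while (left > 0) {
                    unsigned int len = left;

                    if (len > mtu)               /* doesn't fit: use the data space */
                            len = mtu;
                    if (len < left)              /* align all but the last fragment */
                            len &= ~7u;
                    printf("fragment: offset=%u len=%u MF=%d\n",
                           offset, len, left > len);
                    left -= len;
                    offset += len;
            }
            return 0;
    }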
765 * single device frame, and queue such a frame for sending.
776 ktime_t tstamp = skb->tstamp; in ip_do_fragment()
781 if (skb->ip_summed == CHECKSUM_PARTIAL && in ip_do_fragment()
792 if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu) in ip_do_fragment()
793 mtu = IPCB(skb)->frag_max_size; in ip_do_fragment()
799 hlen = iph->ihl * 4; in ip_do_fragment()
800 mtu = mtu - hlen; /* Size of data space */ in ip_do_fragment()
801 IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE; in ip_do_fragment()
802 ll_rs = LL_RESERVED_SPACE(rt->dst.dev); in ip_do_fragment()
815 if (first_len - hlen > mtu || in ip_do_fragment()
816 ((first_len - hlen) & 7) || in ip_do_fragment()
824 if (frag->len > mtu || in ip_do_fragment()
825 ((frag->len & 7) && frag->next) || in ip_do_fragment()
833 BUG_ON(frag->sk); in ip_do_fragment()
834 if (skb->sk) { in ip_do_fragment()
835 frag->sk = skb->sk; in ip_do_fragment()
836 frag->destructor = sock_wfree; in ip_do_fragment()
838 skb->truesize -= frag->truesize; in ip_do_fragment()
850 IPCB(iter.frag)->flags = IPCB(skb)->flags; in ip_do_fragment()
852 if (first_frag && IPCB(skb)->opt.optlen) { in ip_do_fragment()
853 /* ipcb->opt is not populated for frags in ip_do_fragment()
857 IPCB(iter.frag)->opt.optlen = in ip_do_fragment()
858 IPCB(skb)->opt.optlen; in ip_do_fragment()
864 skb->tstamp = tstamp; in ip_do_fragment()
889 frag2->sk = NULL; in ip_do_fragment()
890 frag2->destructor = NULL; in ip_do_fragment()
891 skb->truesize += frag2->truesize; in ip_do_fragment()
900 ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU, in ip_do_fragment()
918 * Put this fragment into the sending queue. in ip_do_fragment()
920 skb2->tstamp = tstamp; in ip_do_fragment()
943 if (skb->ip_summed == CHECKSUM_PARTIAL) { in ip_generic_getfrag()
944 if (!copy_from_iter_full(to, len, &msg->msg_iter)) in ip_generic_getfrag()
945 return -EFAULT; in ip_generic_getfrag()
948 if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter)) in ip_generic_getfrag()
949 return -EFAULT; in ip_generic_getfrag()
950 skb->csum = csum_block_add(skb->csum, csum, odd); in ip_generic_getfrag()
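ip_generic_getfrag() folds each copied chunk into skb->csum with csum_block_add(skb->csum, csum, odd). The 'odd' argument matters because a chunk appended at an odd byte offset contributes its bytes with flipped word parity, so its standalone sum must be byte-swapped before combining. A runnable check of that property:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint16_t fold(uint32_t sum)
    {
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)sum;
    }

    /* One's-complement sum, even-indexed bytes as the high byte of a word. */
    static uint32_t csum_bytes(const uint8_t *p, size_t n)
    {
            uint32_t sum = 0;
            size_t i;

            for (i = 0; i < n; i++)
                    sum += (i & 1) ? p[i] : (uint32_t)p[i] << 8;
            return sum;
    }

    int main(void)
    {
            uint8_t data[11] = "hello world";           /* arbitrary payload  */
            uint32_t whole = csum_bytes(data, 11);
            uint32_t a = csum_bytes(data, 3);           /* first chunk        */
            uint16_t b = fold(csum_bytes(data + 3, 8)); /* chunk at odd offset */
            uint16_t b_swapped = (uint16_t)((b << 8) | (b >> 8));

            assert(fold(whole) == fold(a + b_swapped)); /* csum_block_add()'s fix-up */
            printf("combined checksum: 0x%04x\n", (unsigned)fold(whole));
            return 0;
    }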
969 struct sk_buff_head *queue, in __ip_append_data() argument
981 struct ip_options *opt = cork->opt; in __ip_append_data()
990 struct rtable *rt = (struct rtable *)cork->dst; in __ip_append_data()
995 skb = skb_peek_tail(queue); in __ip_append_data()
997 exthdrlen = !skb ? rt->dst.header_len : 0; in __ip_append_data()
998 mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize; in __ip_append_data()
999 paged = !!cork->gso_size; in __ip_append_data()
1001 if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP && in __ip_append_data()
1002 sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) in __ip_append_data()
1003 tskey = sk->sk_tskey++; in __ip_append_data()
1005 hh_len = LL_RESERVED_SPACE(rt->dst.dev); in __ip_append_data()
1007 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0); in __ip_append_data()
1008 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; in __ip_append_data()
1011 if (cork->length + length > maxnonfragsize - fragheaderlen) { in __ip_append_data()
1012 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, in __ip_append_data()
1013 mtu - (opt ? opt->optlen : 0)); in __ip_append_data()
1014 return -EMSGSIZE; in __ip_append_data()
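The size arithmetic above: fragheaderlen is the IP header plus options, and maxfraglen is the largest packet length whose payload keeps the 8-byte alignment that later fragmentation would need. Worked numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned int mtu = 1500;
            unsigned int optlen = 0;                    /* no IP options */
            unsigned int fragheaderlen = 20 + optlen;   /* sizeof(struct iphdr) */
            unsigned int maxfraglen =
                    ((mtu - fragheaderlen) & ~7u) + fragheaderlen;

            /* 1500 - 20 = 1480 is already 8-byte aligned, so maxfraglen == mtu;
             * with mtu = 1006 it would be (986 & ~7) + 20 = 1004. */
            printf("fragheaderlen=%u maxfraglen=%u\n", fragheaderlen, maxfraglen);
            return 0;
    }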
1023 rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) && in __ip_append_data()
1024 (!(flags & MSG_MORE) || cork->gso_size) && in __ip_append_data()
1025 (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM))) in __ip_append_data()
1031 return -ENOBUFS; in __ip_append_data()
1033 if (rt->dst.dev->features & NETIF_F_SG && in __ip_append_data()
1037 uarg->zerocopy = 0; in __ip_append_data()
1042 cork->length += length; in __ip_append_data()
1056 copy = mtu - skb->len; in __ip_append_data()
1058 copy = maxfraglen - skb->len; in __ip_append_data()
1070 fraggap = skb_prev->len - maxfraglen; in __ip_append_data()
1079 if (datalen > mtu - fragheaderlen) in __ip_append_data()
1080 datalen = maxfraglen - fragheaderlen; in __ip_append_data()
1093 alloc_extra += rt->dst.trailer_len; in __ip_append_data()
1096 !(rt->dst.dev->features&NETIF_F_SG)) in __ip_append_data()
1100 !(rt->dst.dev->features & NETIF_F_SG))) in __ip_append_data()
1104 pagedlen = fraglen - alloclen; in __ip_append_data()
1114 if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <= in __ip_append_data()
1115 2 * sk->sk_sndbuf) in __ip_append_data()
1117 sk->sk_allocation); in __ip_append_data()
1119 err = -ENOBUFS; in __ip_append_data()
1127 skb->ip_summed = csummode; in __ip_append_data()
1128 skb->csum = 0; in __ip_append_data()
1134 data = skb_put(skb, fraglen + exthdrlen - pagedlen); in __ip_append_data()
1136 skb->transport_header = (skb->network_header + in __ip_append_data()
1141 skb->csum = skb_copy_and_csum_bits( in __ip_append_data()
1144 skb_prev->csum = csum_sub(skb_prev->csum, in __ip_append_data()
1145 skb->csum); in __ip_append_data()
1150 copy = datalen - transhdrlen - fraggap - pagedlen; in __ip_append_data()
1152 err = -EFAULT; in __ip_append_data()
1158 length -= copy + transhdrlen; in __ip_append_data()
1164 skb_shinfo(skb)->tx_flags = cork->tx_flags; in __ip_append_data()
1165 cork->tx_flags = 0; in __ip_append_data()
1166 skb_shinfo(skb)->tskey = tskey; in __ip_append_data()
1174 * Put the packet on the pending queue. in __ip_append_data()
1176 if (!skb->destructor) { in __ip_append_data()
1177 skb->destructor = sock_wfree; in __ip_append_data()
1178 skb->sk = sk; in __ip_append_data()
1179 wmem_alloc_delta += skb->truesize; in __ip_append_data()
1181 __skb_queue_tail(queue, skb); in __ip_append_data()
1188 if (!(rt->dst.dev->features&NETIF_F_SG) && in __ip_append_data()
1192 off = skb->len; in __ip_append_data()
1196 err = -EFAULT; in __ip_append_data()
1199 } else if (!uarg || !uarg->zerocopy) { in __ip_append_data()
1200 int i = skb_shinfo(skb)->nr_frags; in __ip_append_data()
1202 err = -ENOMEM; in __ip_append_data()
1206 if (!skb_can_coalesce(skb, i, pfrag->page, in __ip_append_data()
1207 pfrag->offset)) { in __ip_append_data()
1208 err = -EMSGSIZE; in __ip_append_data()
1212 __skb_fill_page_desc(skb, i, pfrag->page, in __ip_append_data()
1213 pfrag->offset, 0); in __ip_append_data()
1214 skb_shinfo(skb)->nr_frags = ++i; in __ip_append_data()
1215 get_page(pfrag->page); in __ip_append_data()
1217 copy = min_t(int, copy, pfrag->size - pfrag->offset); in __ip_append_data()
1219 page_address(pfrag->page) + pfrag->offset, in __ip_append_data()
1220 offset, copy, skb->len, skb) < 0) in __ip_append_data()
1223 pfrag->offset += copy; in __ip_append_data()
1224 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); in __ip_append_data()
1225 skb->len += copy; in __ip_append_data()
1226 skb->data_len += copy; in __ip_append_data()
1227 skb->truesize += copy; in __ip_append_data()
1235 length -= copy; in __ip_append_data()
1239 refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); in __ip_append_data()
1243 err = -EFAULT; in __ip_append_data()
1247 cork->length -= length; in __ip_append_data()
1249 refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc); in __ip_append_data()
1261 return -EFAULT; in ip_setup_cork()
1266 opt = ipc->opt; in ip_setup_cork()
1268 if (!cork->opt) { in ip_setup_cork()
1269 cork->opt = kmalloc(sizeof(struct ip_options) + 40, in ip_setup_cork()
1270 sk->sk_allocation); in ip_setup_cork()
1271 if (unlikely(!cork->opt)) in ip_setup_cork()
1272 return -ENOBUFS; in ip_setup_cork()
1274 memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen); in ip_setup_cork()
1275 cork->flags |= IPCORK_OPT; in ip_setup_cork()
1276 cork->addr = ipc->addr; in ip_setup_cork()
1279 cork->fragsize = ip_sk_use_pmtu(sk) ? in ip_setup_cork()
1280 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu); in ip_setup_cork()
1282 if (!inetdev_valid_mtu(cork->fragsize)) in ip_setup_cork()
1283 return -ENETUNREACH; in ip_setup_cork()
1285 cork->gso_size = ipc->gso_size; in ip_setup_cork()
1287 cork->dst = &rt->dst; in ip_setup_cork()
1291 cork->length = 0; in ip_setup_cork()
1292 cork->ttl = ipc->ttl; in ip_setup_cork()
1293 cork->tos = ipc->tos; in ip_setup_cork()
1294 cork->mark = ipc->sockc.mark; in ip_setup_cork()
1295 cork->priority = ipc->priority; in ip_setup_cork()
1296 cork->transmit_time = ipc->sockc.transmit_time; in ip_setup_cork()
1297 cork->tx_flags = 0; in ip_setup_cork()
1298 sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags); in ip_setup_cork()
1307 * or non-page data.
1309 * Not only UDP, other transport protocols - e.g. raw sockets - can use
1327 if (skb_queue_empty(&sk->sk_write_queue)) { in ip_append_data()
1328 err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp); in ip_append_data()
1335 return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, in ip_append_data()
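ip_append_data() sets up the cork only when the write queue is empty and afterwards keeps appending to the pending queue. The userspace-visible form of this batching on UDP is the UDP_CORK option; a hedged fragment with error handling trimmed:

    #include <netinet/in.h>
    #include <netinet/udp.h>    /* UDP_CORK */
    #include <sys/socket.h>

    static int send_corked(int fd, const struct sockaddr_in *dst)
    {
            int on = 1, off = 0;

            setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
            /* both chunks land on the same pending queue via ip_append_data() */
            sendto(fd, "part1,", 6, 0, (const struct sockaddr *)dst, sizeof(*dst));
            sendto(fd, "part2", 5, 0, (const struct sockaddr *)dst, sizeof(*dst));
            /* releasing the cork pushes one combined datagram */
            return setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
    }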
1354 if (inet->hdrincl) in ip_append_page()
1355 return -EPERM; in ip_append_page()
1360 if (skb_queue_empty(&sk->sk_write_queue)) in ip_append_page()
1361 return -EINVAL; in ip_append_page()
1363 cork = &inet->cork.base; in ip_append_page()
1364 rt = (struct rtable *)cork->dst; in ip_append_page()
1365 if (cork->flags & IPCORK_OPT) in ip_append_page()
1366 opt = cork->opt; in ip_append_page()
1368 if (!(rt->dst.dev->features & NETIF_F_SG)) in ip_append_page()
1369 return -EOPNOTSUPP; in ip_append_page()
1371 hh_len = LL_RESERVED_SPACE(rt->dst.dev); in ip_append_page()
1372 mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize; in ip_append_page()
1374 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0); in ip_append_page()
1375 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen; in ip_append_page()
1378 if (cork->length + size > maxnonfragsize - fragheaderlen) { in ip_append_page()
1379 ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, in ip_append_page()
1380 mtu - (opt ? opt->optlen : 0)); in ip_append_page()
1381 return -EMSGSIZE; in ip_append_page()
1384 skb = skb_peek_tail(&sk->sk_write_queue); in ip_append_page()
1386 return -EINVAL; in ip_append_page()
1388 cork->length += size; in ip_append_page()
1392 len = mtu - skb->len; in ip_append_page()
1394 len = maxfraglen - skb->len; in ip_append_page()
1401 fraggap = skb_prev->len - maxfraglen; in ip_append_page()
1404 skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation); in ip_append_page()
1406 err = -ENOBUFS; in ip_append_page()
1413 skb->ip_summed = CHECKSUM_NONE; in ip_append_page()
1414 skb->csum = 0; in ip_append_page()
1422 skb->transport_header = (skb->network_header + in ip_append_page()
1425 skb->csum = skb_copy_and_csum_bits(skb_prev, in ip_append_page()
1429 skb_prev->csum = csum_sub(skb_prev->csum, in ip_append_page()
1430 skb->csum); in ip_append_page()
1435 * Put the packet on the pending queue. in ip_append_page()
1437 __skb_queue_tail(&sk->sk_write_queue, skb); in ip_append_page()
1445 err = -EMSGSIZE; in ip_append_page()
1449 if (skb->ip_summed == CHECKSUM_NONE) { in ip_append_page()
1452 skb->csum = csum_block_add(skb->csum, csum, skb->len); in ip_append_page()
1455 skb->len += len; in ip_append_page()
1456 skb->data_len += len; in ip_append_page()
1457 skb->truesize += len; in ip_append_page()
1458 refcount_add(len, &sk->sk_wmem_alloc); in ip_append_page()
1460 size -= len; in ip_append_page()
1465 cork->length -= size; in ip_append_page()
1472 cork->flags &= ~IPCORK_OPT; in ip_cork_release()
1473 kfree(cork->opt); in ip_cork_release()
1474 cork->opt = NULL; in ip_cork_release()
1475 dst_release(cork->dst); in ip_cork_release()
1476 cork->dst = NULL; in ip_cork_release()
1485 struct sk_buff_head *queue, in __ip_make_skb() argument
1493 struct rtable *rt = (struct rtable *)cork->dst; in __ip_make_skb()
1498 skb = __skb_dequeue(queue); in __ip_make_skb()
1501 tail_skb = &(skb_shinfo(skb)->frag_list); in __ip_make_skb()
1503 /* move skb->data to ip header from ext header */ in __ip_make_skb()
1504 if (skb->data < skb_network_header(skb)) in __ip_make_skb()
1506 while ((tmp_skb = __skb_dequeue(queue)) != NULL) { in __ip_make_skb()
1509 tail_skb = &(tmp_skb->next); in __ip_make_skb()
1510 skb->len += tmp_skb->len; in __ip_make_skb()
1511 skb->data_len += tmp_skb->len; in __ip_make_skb()
1512 skb->truesize += tmp_skb->truesize; in __ip_make_skb()
1513 tmp_skb->destructor = NULL; in __ip_make_skb()
1514 tmp_skb->sk = NULL; in __ip_make_skb()
1521 skb->ignore_df = ip_sk_ignore_df(sk); in __ip_make_skb()
1526 if (inet->pmtudisc == IP_PMTUDISC_DO || in __ip_make_skb()
1527 inet->pmtudisc == IP_PMTUDISC_PROBE || in __ip_make_skb()
1528 (skb->len <= dst_mtu(&rt->dst) && in __ip_make_skb()
1529 ip_dont_fragment(sk, &rt->dst))) in __ip_make_skb()
1532 if (cork->flags & IPCORK_OPT) in __ip_make_skb()
1533 opt = cork->opt; in __ip_make_skb()
1535 if (cork->ttl != 0) in __ip_make_skb()
1536 ttl = cork->ttl; in __ip_make_skb()
1537 else if (rt->rt_type == RTN_MULTICAST) in __ip_make_skb()
1538 ttl = inet->mc_ttl; in __ip_make_skb()
1540 ttl = ip_select_ttl(inet, &rt->dst); in __ip_make_skb()
1543 iph->version = 4; in __ip_make_skb()
1544 iph->ihl = 5; in __ip_make_skb()
1545 iph->tos = (cork->tos != -1) ? cork->tos : inet->tos; in __ip_make_skb()
1546 iph->frag_off = df; in __ip_make_skb()
1547 iph->ttl = ttl; in __ip_make_skb()
1548 iph->protocol = sk->sk_protocol; in __ip_make_skb()
1553 iph->ihl += opt->optlen >> 2; in __ip_make_skb()
1554 ip_options_build(skb, opt, cork->addr, rt, 0); in __ip_make_skb()
1557 skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority; in __ip_make_skb()
1558 skb->mark = cork->mark; in __ip_make_skb()
1559 skb->tstamp = cork->transmit_time; in __ip_make_skb()
1564 cork->dst = NULL; in __ip_make_skb()
1565 skb_dst_set(skb, &rt->dst); in __ip_make_skb()
1567 if (iph->protocol == IPPROTO_ICMP) in __ip_make_skb()
1569 skb_transport_header(skb))->type); in __ip_make_skb()
1580 err = ip_local_out(net, skb->sk, skb); in ip_send_skb()
1607 struct sk_buff_head *queue, in __ip_flush_pending_frames() argument
1612 while ((skb = __skb_dequeue_tail(queue)) != NULL) in __ip_flush_pending_frames()
1620 __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base); in ip_flush_pending_frames()
1631 struct sk_buff_head queue; in ip_make_skb() local
1637 __skb_queue_head_init(&queue); in ip_make_skb()
1639 cork->flags = 0; in ip_make_skb()
1640 cork->addr = 0; in ip_make_skb()
1641 cork->opt = NULL; in ip_make_skb()
1646 err = __ip_append_data(sk, fl4, &queue, cork, in ip_make_skb()
1647 &current->task_frag, getfrag, in ip_make_skb()
1650 __ip_flush_pending_frames(sk, &queue, cork); in ip_make_skb()
1654 return __ip_make_skb(sk, fl4, &queue, cork); in ip_make_skb()
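The cork->gso_size threaded through ip_make_skb() and __ip_append_data() above comes from UDP GSO: userspace hands the stack one large datagram plus the size to cut it into. A hedged fragment using the UDP_SEGMENT option (Linux 4.18+; the macro is in <netinet/udp.h> on recent glibc, otherwise <linux/udp.h>):

    #include <netinet/in.h>
    #include <netinet/udp.h>    /* UDP_SEGMENT */
    #include <sys/socket.h>

    /* One large send is cut into segment_size-byte datagrams by the stack,
     * which is why the cork path above sizes against IP_MAX_MTU when
     * gso_size is set. */
    static int enable_udp_gso(int fd, int segment_size)
    {
            return setsockopt(fd, IPPROTO_UDP, UDP_SEGMENT,
                              &segment_size, sizeof(segment_size));
    }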
1666 skb->csum = csum_block_add(skb->csum, csum, odd); in ip_reply_glue_bits()
1703 oif = arg->bound_dev_if; in ip_send_unicast_reply()
1704 if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) in ip_send_unicast_reply()
1705 oif = skb->skb_iif; in ip_send_unicast_reply()
1708 IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark, in ip_send_unicast_reply()
1709 RT_TOS(arg->tos), in ip_send_unicast_reply()
1710 RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol, in ip_send_unicast_reply()
1713 tcp_hdr(skb)->source, tcp_hdr(skb)->dest, in ip_send_unicast_reply()
1714 arg->uid); in ip_send_unicast_reply()
1720 inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK; in ip_send_unicast_reply()
1722 sk->sk_protocol = ip_hdr(skb)->protocol; in ip_send_unicast_reply()
1723 sk->sk_bound_dev_if = arg->bound_dev_if; in ip_send_unicast_reply()
1724 sk->sk_sndbuf = sysctl_wmem_default; in ip_send_unicast_reply()
1726 err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base, in ip_send_unicast_reply()
1733 nskb = skb_peek(&sk->sk_write_queue); in ip_send_unicast_reply()
1735 if (arg->csumoffset >= 0) in ip_send_unicast_reply()
1737 arg->csumoffset) = csum_fold(csum_add(nskb->csum, in ip_send_unicast_reply()
1738 arg->csum)); in ip_send_unicast_reply()
1739 nskb->ip_summed = CHECKSUM_NONE; in ip_send_unicast_reply()