// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case a packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_ecn.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
	    unsigned int mtu,
	    int (*output)(struct net *, struct sock *, struct sk_buff *));

/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);
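
/*
 * Illustrative sketch (not part of the original file): any write to an
 * IP header field invalidates iph->check, so callers recompute it with
 * ip_send_check(). The helper below is hypothetical.
 */
#if 0
static void example_decrement_ttl(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->ttl--;		/* header changed ...		  */
	ip_send_check(iph);	/* ... so refresh the checksum	  */
}
#endif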

int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	skb->protocol = htons(ETH_P_IP);

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}

int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 */
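/* (Callers are expected to have routed the skb already; TCP, for
 * example, uses this to send SYN-ACKs, which is why the IPID logic
 * below singles out small packets and TCP.)
 */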
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt,
			  u8 tos)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = tos;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr = saddr;
	iph->protocol = sk->sk_protocol;
	/* Do not bother generating IPID for small packets (eg SYNACK) */
	if (skb->len <= IPV4_MIN_MTU || ip_dont_fragment(sk, &rt->dst)) {
		iph->frag_off = htons(IP_DF);
		iph->id = 0;
	} else {
		iph->frag_off = 0;
		/* TCP packets here are SYNACK with fat IPv4/TCP options.
		 * Avoid using the hashed IP ident generator.
		 */
		if (sk->sk_protocol == IPPROTO_TCP)
			iph->id = (__force __be16)prandom_u32();
		else
			__ip_select_ident(net, iph, 1);
	}

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen>>2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	if (!skb->mark)
		skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(net, skb->sk, skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		consume_skb(skb);
		skb = skb2;
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	rcu_read_lock_bh();
	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		int res;

		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, can not use the cached header */
		res = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock_bh();
		return res;
	}
	rcu_read_unlock_bh();

	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
			    __func__);
	kfree_skb(skb);
	return -EINVAL;
}

static int ip_finish_output_gso(struct net *net, struct sock *sk,
				struct sk_buff *skb, unsigned int mtu)
{
	struct sk_buff *segs, *nskb;
	netdev_features_t features;
	int ret = 0;

	/* common case: seglen is <= mtu
	 */
	if (skb_gso_validate_network_len(skb, mtu))
		return ip_finish_output2(net, sk, skb);

	/* Slowpath - GSO segment length exceeds the egress MTU.
	 *
	 * This can happen in several cases:
	 * - Forwarding of a TCP GRO skb, when DF flag is not set.
	 * - Forwarding of an skb that arrived on a virtualization interface
	 *   (virtio-net/vhost/tap) with TSO/GSO size set by another network
	 *   stack.
	 * - Local GSO skb transmitted on a NETIF_F_TSO tunnel stacked over an
	 *   interface with a smaller MTU.
	 * - Arriving GRO skb (or GSO skb in a virtualized environment) that is
	 *   bridged to a NETIF_F_TSO tunnel stacked over an interface with an
	 *   insufficient MTU.
	 */
	features = netif_skb_features(skb);
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

		if (err && ret == 0)
			ret = err;
	}

	return ret;
}

static int __ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
	mtu = ip_skb_dst_mtu(sk, skb);
	if (skb_is_gso(skb))
		return ip_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu || IPCB(skb)->frag_max_size)
		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

	return ip_finish_output2(net, sk, skb);
}

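/* Run the cgroup BPF egress program before handing the skb on. On
 * NET_XMIT_SUCCESS transmit normally; on NET_XMIT_CN transmit anyway
 * but report the congestion-notification verdict to the caller unless
 * transmission itself fails; any other verdict drops the skb.
 */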
static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_SUCCESS:
		return __ip_finish_output(net, sk, skb);
	case NET_XMIT_CN:
		return __ip_finish_output(net, sk, skb) ? : ret;
	default:
		kfree_skb(skb);
		return ret;
	}
}

static int ip_mc_finish_output(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	struct rtable *new_rt;
	bool do_cn = false;
	int ret, err;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	switch (ret) {
	case NET_XMIT_CN:
		do_cn = true;
		fallthrough;
	case NET_XMIT_SUCCESS:
		break;
	default:
		kfree_skb(skb);
		return ret;
	}

	/* Reset rt_iif so that inet_iif() will return skb->skb_iif. Setting
	 * this to non-zero causes ipi_ifindex in in_pktinfo to be overwritten,
	 * see ipv4_pktinfo_prepare().
	 */
	new_rt = rt_dst_clone(net->loopback_dev, skb_rtable(skb));
	if (new_rt) {
		new_rt->rt_iif = 0;
		skb_dst_drop(skb);
		skb_dst_set(skb, &new_rt->dst);
	}

	err = dev_loopback_xmit(net, sk, skb);
	return (do_cn && err) ? ret : err;
}

int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					ip_mc_finish_output);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
				net, sk, newskb, NULL, newskb->dev,
				ip_mc_finish_output);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, indev, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/*
 * copy saddr and daddr, possibly using 64bit load/stores
 * Equivalent to :
 *	iph->saddr = fl4->saddr;
 *	iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
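	/* The check above guarantees that daddr immediately follows
	 * saddr, so the compiler is free to merge the two 32-bit stores
	 * below into a single 64-bit store.
	 */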

	iph->saddr = fl4->saddr;
	iph->daddr = fl4->daddr;
}

/* Note: skb->sk can be different from sk, in case of tunnels */
int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
		    __u8 tos)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, the retransmit mechanism of the transport
		 * layer will keep trying until the route appears or the
		 * connection times out.
		 */
		rt = ip_route_output_ports(net, fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS_TOS(sk, tos),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* The transport layer has already set skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_segs(net, skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* TODO : should we use skb->sk here instead of sk ? */
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(net, sk, skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(__ip_queue_xmit);

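/* Thin wrapper used as the queue_xmit hook by connection-oriented
 * protocols such as TCP; it simply applies the socket's cached TOS.
 */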
int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
	return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
}
EXPORT_SYMBOL(ip_queue_xmit);

static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	to->skb_iif = from->skb_iif;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	skb_copy_hash(to, from);

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
	skb_ext_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

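/* Honour the DF bit: fragment the skb via ip_do_fragment() unless DF
 * is set and may not be ignored, in which case reply with
 * ICMP_FRAG_NEEDED and drop the packet.
 */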
static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       unsigned int mtu,
		       int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) == 0)
		return ip_do_fragment(net, sk, skb, output);

	if (unlikely(!skb->ignore_df ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}

void ip_fraglist_init(struct sk_buff *skb, struct iphdr *iph,
		      unsigned int hlen, struct ip_fraglist_iter *iter)
{
	unsigned int first_len = skb_pagelen(skb);

	iter->frag = skb_shinfo(skb)->frag_list;
	skb_frag_list_init(skb);

	iter->offset = 0;
	iter->iph = iph;
	iter->hlen = hlen;

	skb->data_len = first_len - skb_headlen(skb);
	skb->len = first_len;
	iph->tot_len = htons(first_len);
	iph->frag_off = htons(IP_MF);
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_init);

void ip_fraglist_prepare(struct sk_buff *skb, struct ip_fraglist_iter *iter)
{
	unsigned int hlen = iter->hlen;
	struct iphdr *iph = iter->iph;
	struct sk_buff *frag;

	frag = iter->frag;
	frag->ip_summed = CHECKSUM_NONE;
	skb_reset_transport_header(frag);
	__skb_push(frag, hlen);
	skb_reset_network_header(frag);
	memcpy(skb_network_header(frag), iph, hlen);
	iter->iph = ip_hdr(frag);
	iph = iter->iph;
	iph->tot_len = htons(frag->len);
	ip_copy_metadata(frag, skb);
	iter->offset += skb->len - hlen;
	iph->frag_off = htons(iter->offset >> 3);
	if (frag->next)
		iph->frag_off |= htons(IP_MF);
	/* Ready, complete checksum */
	ip_send_check(iph);
}
EXPORT_SYMBOL(ip_fraglist_prepare);

void ip_frag_init(struct sk_buff *skb, unsigned int hlen,
		  unsigned int ll_rs, unsigned int mtu, bool DF,
		  struct ip_frag_state *state)
{
	struct iphdr *iph = ip_hdr(skb);

	state->DF = DF;
	state->hlen = hlen;
	state->ll_rs = ll_rs;
	state->mtu = mtu;

	state->left = skb->len - hlen;	/* Space per frame */
	state->ptr = hlen;		/* Where to start from */

	state->offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	state->not_last_frag = iph->frag_off & htons(IP_MF);
}
EXPORT_SYMBOL(ip_frag_init);

static void ip_frag_ipcb(struct sk_buff *from, struct sk_buff *to,
			 bool first_frag)
{
	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

	/* ANK: dirty, but effective trick. Upgrade options only if
	 * the segment to be fragmented was THE FIRST (otherwise,
	 * options are already fixed) and make it ONCE
	 * on the initial skb, so that all the following fragments
	 * will inherit fixed options.
	 */
	if (first_frag)
		ip_options_fragment(from);
}

struct sk_buff *ip_frag_next(struct sk_buff *skb, struct ip_frag_state *state)
{
	unsigned int len = state->left;
	struct sk_buff *skb2;
	struct iphdr *iph;

	/* IF: it doesn't fit, use 'mtu' - the data space left */
	if (len > state->mtu)
		len = state->mtu;
	/* IF: we are not sending up to and including the packet end
	   then align the next start on an eight byte boundary */
	if (len < state->left)
		len &= ~7;

	/* Allocate buffer */
	skb2 = alloc_skb(len + state->hlen + state->ll_rs, GFP_ATOMIC);
	if (!skb2)
		return ERR_PTR(-ENOMEM);

	/*
	 *	Set up data on packet
	 */

	ip_copy_metadata(skb2, skb);
	skb_reserve(skb2, state->ll_rs);
	skb_put(skb2, len + state->hlen);
	skb_reset_network_header(skb2);
	skb2->transport_header = skb2->network_header + state->hlen;

	/*
	 *	Charge the memory for the fragment to any owner
	 *	it might possess
	 */

	if (skb->sk)
		skb_set_owner_w(skb2, skb->sk);

	/*
	 *	Copy the packet header into the new buffer.
	 */

	skb_copy_from_linear_data(skb, skb_network_header(skb2), state->hlen);

	/*
	 *	Copy a block of the IP datagram.
	 */
	if (skb_copy_bits(skb, state->ptr, skb_transport_header(skb2), len))
		BUG();
	state->left -= len;

	/*
	 *	Fill in the new header fields.
	 */
	iph = ip_hdr(skb2);
	iph->frag_off = htons((state->offset >> 3));
	if (state->DF)
		iph->frag_off |= htons(IP_DF);

	/*
	 *	Added AC: if we are fragmenting a fragment that's not the
	 *	last fragment then keep MF set on each fragment produced.
	 */
	if (state->left > 0 || state->not_last_frag)
		iph->frag_off |= htons(IP_MF);
	state->ptr += len;
	state->offset += len;

	iph->tot_len = htons(len + state->hlen);

	ip_send_check(iph);

	return skb2;
}
EXPORT_SYMBOL(ip_frag_next);
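
/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * file): ip_frag_init()/ip_frag_next() form the slow fragmentation
 * path; ip_do_fragment() below drives them in exactly this fashion.
 */
#if 0
static int example_slow_fragment(struct net *net, struct sock *sk,
				 struct sk_buff *skb, unsigned int hlen,
				 unsigned int ll_rs, unsigned int mtu,
				 int (*output)(struct net *, struct sock *,
					       struct sk_buff *))
{
	struct ip_frag_state state;
	struct sk_buff *skb2;
	int err;

	ip_frag_init(skb, hlen, ll_rs, mtu, false, &state);

	while (state.left > 0) {
		skb2 = ip_frag_next(skb, &state);	/* carve one fragment */
		if (IS_ERR(skb2)) {
			kfree_skb(skb);
			return PTR_ERR(skb2);
		}
		err = output(net, sk, skb2);		/* hand it down */
		if (err) {
			kfree_skb(skb);
			return err;
		}
	}
	consume_skb(skb);	/* original skb fully consumed */
	return 0;
}
#endif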

/*
 *	This IP datagram is too large to be sent in one piece. Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph;
	struct sk_buff *skb2;
	struct rtable *rt = skb_rtable(skb);
	unsigned int mtu, hlen, ll_rs;
	struct ip_fraglist_iter iter;
	ktime_t tstamp = skb->tstamp;
	struct ip_frag_state state;
	int err = 0;

	/* for offloaded checksums cleanup checksum before fragmentation */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(sk, skb);
	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
		mtu = IPCB(skb)->frag_max_size;

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/* When a frag_list is given, use it. First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited. In such a case, fall
	 * back to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first bad
	 * fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		unsigned int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < ll_rs)
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen + ll_rs)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */
		ip_fraglist_init(skb, iph, hlen, &iter);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (iter.frag) {
				bool first_frag = (iter.offset == 0);

				IPCB(iter.frag)->flags = IPCB(skb)->flags;
				ip_fraglist_prepare(skb, &iter);
				if (first_frag && IPCB(skb)->opt.optlen) {
					/* ipcb->opt is not populated for frags
					 * coming from __ip_make_skb(),
					 * ip_options_fragment() needs optlen
					 */
					IPCB(iter.frag)->opt.optlen =
						IPCB(skb)->opt.optlen;
					ip_options_fragment(iter.frag);
					ip_send_check(iter.iph);
				}
			}

			skb->tstamp = tstamp;
			err = output(net, sk, skb);

			if (!err)
				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
			if (err || !iter.frag)
				break;

			skb = ip_fraglist_next(&iter);
		}

		if (err == 0) {
			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		kfree_skb_list(iter.frag);

		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	/*
	 *	Fragment the datagram.
	 */

	ip_frag_init(skb, hlen, ll_rs, mtu, IPCB(skb)->flags & IPSKB_FRAG_PMTU,
		     &state);

	/*
	 *	Keep copying data until we run out.
	 */

	while (state.left > 0) {
		bool first_frag = (state.offset == 0);

		skb2 = ip_frag_next(skb, &state);
		if (IS_ERR(skb2)) {
			err = PTR_ERR(skb2);
			goto fail;
		}
		ip_frag_ipcb(skb, skb2, first_frag);

		/*
		 *	Put this fragment into the sending queue.
		 */
		skb2->tstamp = tstamp;
		err = output(net, sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_do_fragment);

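/* getfrag callback for ip_append_data()/ip_make_skb(): 'from' is the
 * struct msghdr supplied by the sender. When the device cannot
 * checksum (no CHECKSUM_PARTIAL), fold a software checksum into
 * skb->csum while copying.
 */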
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (!copy_from_iter_full(to, len, &msg->msg_iter))
			return -EFAULT;
	} else {
		__wsum csum = 0;

		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;

	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    struct page_frag *pfrag,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ubuf_info *uarg = NULL;
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;
	unsigned int wmem_alloc_delta = 0;
	bool paged, extra_uref = false;
	u32 tskey = 0;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;
	paged = !!cork->gso_size;

	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = sk->sk_tskey++;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

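	/* Every fragment except the last must carry a multiple of eight
	 * bytes of payload, hence the "& ~7" in maxfraglen below.
	 */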
	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? IP_MAX_MTU : mtu;

	if (cork->length + length > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
	    (!(flags & MSG_MORE) || cork->gso_size) &&
	    (!exthdrlen || (rt->dst.dev->features & NETIF_F_HW_ESP_TX_CSUM)))
		csummode = CHECKSUM_PARTIAL;

	if (flags & MSG_ZEROCOPY && length && sock_flag(sk, SOCK_ZEROCOPY)) {
		uarg = sock_zerocopy_realloc(sk, length, skb_zcopy(skb));
		if (!uarg)
			return -ENOBUFS;
		extra_uref = !skb_zcopy(skb);	/* only ref on new uarg */
		if (rt->dst.dev->features & NETIF_F_SG &&
		    csummode == CHECKSUM_PARTIAL) {
			paged = true;
		} else {
			uarg->zerocopy = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);
		}
	}

	cork->length += length;

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chained skb;
	 * each segment is an IP fragment ready to be sent to the network
	 * once the appropriate IP header has been added.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen, alloc_extra;
			unsigned int pagedlen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;
			pagedlen = 0;

			alloc_extra = hh_len + 15;
			alloc_extra += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloc_extra += rt->dst.trailer_len;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else if (!paged &&
				 (fraglen + alloc_extra < SKB_MAX_ALLOC ||
				  !(rt->dst.dev->features & NETIF_F_SG)))
				alloclen = fraglen;
			else {
				alloclen = min_t(int, fraglen, MAX_HEADER);
				pagedlen = fraglen - alloclen;
			}

			alloclen += alloc_extra;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk, alloclen,
							  (flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (refcount_read(&sk->sk_wmem_alloc) + wmem_alloc_delta <=
				    2 * sk->sk_sndbuf)
					skb = alloc_skb(alloclen,
							sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;

			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 * Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen - pagedlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap - pagedlen;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= copy + transhdrlen;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/* only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;
			skb_zcopy_set(skb, uarg, &extra_uref);

			if ((flags & MSG_CONFIRM) && !skb_prev)
				skb_set_dst_pending_confirm(skb, 1);

			/*
			 * Put the packet on the pending queue.
			 */
			if (!skb->destructor) {
				skb->destructor = sock_wfree;
				skb->sk = sk;
				wmem_alloc_delta += skb->truesize;
			}
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG) &&
		    skb_tailroom(skb) >= copy) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
				    offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else if (!uarg || !uarg->zerocopy) {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			wmem_alloc_delta += copy;
		} else {
			err = skb_zerocopy_iter_dgram(skb, from, copy);
			if (err < 0)
				goto error;
		}
		offset += copy;
		length -= copy;
	}

	if (wmem_alloc_delta)
		refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return 0;

error_efault:
	err = -EFAULT;
error:
	if (uarg)
		sock_zerocopy_put_abort(uarg, extra_uref);
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	refcount_add(wmem_alloc_delta, &sk->sk_wmem_alloc);
	return err;
}

static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct ip_options_rcu *opt;
	struct rtable *rt;

	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (!cork->opt) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(!cork->opt))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}

	cork->fragsize = ip_sk_use_pmtu(sk) ?
			 dst_mtu(&rt->dst) : READ_ONCE(rt->dst.dev->mtu);

	if (!inetdev_valid_mtu(cork->fragsize))
		return -ENETUNREACH;

	cork->gso_size = ipc->gso_size;

	cork->dst = &rt->dst;
	/* We stole this route, caller should not release it. */
	*rtp = NULL;

	cork->length = 0;
	cork->ttl = ipc->ttl;
	cork->tos = ipc->tos;
	cork->mark = ipc->sockc.mark;
	cork->priority = ipc->priority;
	cork->transmit_time = ipc->sockc.transmit_time;
	cork->tx_flags = 0;
	sock_tx_timestamp(sk, ipc->sockc.tsflags, &cork->tx_flags);

	return 0;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data. Each piece will be held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP: other transport protocols - e.g. raw sockets - can
 *	potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
				sk_page_frag(sk), getfrag,
				from, length, transhdrlen, flags);
}
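
/*
 * Illustrative sketch (hypothetical, not part of the original file):
 * the typical corking sequence a datagram protocol performs under
 * lock_sock() - append the payload, then push the queued fragments out
 * as a single datagram, flushing on error.
 */
#if 0
static int example_corked_send(struct sock *sk, struct flowi4 *fl4,
			       struct msghdr *msg, int len,
			       struct ipcm_cookie *ipc, struct rtable **rtp)
{
	int err;

	lock_sock(sk);
	err = ip_append_data(sk, fl4, ip_generic_getfrag, msg, len,
			     0, ipc, rtp, msg->msg_flags);
	if (err)
		ip_flush_pending_frames(sk);	/* drop the partial datagram */
	else
		err = ip_push_pending_frames(sk, fl4);
	release_sock(sk);
	return err;
}
#endif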

ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features & NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->gso_size ? IP_MAX_MTU : cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + size > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	if (!skb)
		return -EINVAL;

	cork->length += size;

	while (size > 0) {
		/* Check if the remaining data fits into current packet. */
		len = mtu - skb->len;
		if (len < size)
			len = maxfraglen - skb->len;

		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 * Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 * Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
								   skb_transport_header(skb),
								   fraggap);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (len > size)
			len = size;

		if (skb_append_pagefrags(skb, page, offset, len)) {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;

			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		refcount_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here. No matter how transforms
	 * change the size of the packet, it will come out.
	 */
	skb->ignore_df = ip_sk_ignore_df(sk);

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If ignore_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    inet->pmtudisc == IP_PMTUDISC_PROBE ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (cork->ttl != 0)
		ttl = cork->ttl;
	else if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
	iph->frag_off = df;
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);
	ip_select_ident(net, skb, sk);

	if (opt) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = (cork->tos != -1) ? cork->priority : sk->sk_priority;
	skb->mark = cork->mark;
	skb->tstamp = cork->transmit_time;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}

int ip_send_skb(struct net *net, struct sk_buff *skb)
{
	int err;

	err = ip_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets the whole, not yet fragmented skb. */
	return ip_send_skb(sock_net(sk), skb);
}

/*
 *	Throw away all pending data on the socket.
 */
static void __ip_flush_pending_frames(struct sock *sk,
				      struct sk_buff_head *queue,
				      struct inet_cork *cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(cork);
}

void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

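/* Like ip_append_data() followed by __ip_make_skb(), but operating on
 * a caller-provided queue and cork, so no corking state is left on the
 * socket; the whole datagram is built in one call.
 */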
struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    struct inet_cork *cork, unsigned int flags)
{
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork->flags = 0;
	cork->addr = 0;
	cork->opt = NULL;
	err = ip_setup_cork(sk, cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, cork,
			       &current->task_frag, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, cork);
}

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send some TCP resets/acks so far.
 */
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len, u64 transmit_time)
{
	struct ip_options_data replyopts;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct sk_buff *nskb;
	int err;
	int oif;

	if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
		return;

	ipcm_init(&ipc);
	ipc.addr = daddr;
	ipc.sockc.transmit_time = transmit_time;

	if (replyopts.opt.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (replyopts.opt.opt.srr)
			daddr = replyopts.opt.opt.faddr;
	}

	oif = arg->bound_dev_if;
	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
		oif = skb->skb_iif;

	flowi4_init_output(&fl4, oif,
			   IP4_REPLY_MARK(net, skb->mark) ?: sk->sk_mark,
			   RT_TOS(arg->tos),
			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
			   ip_reply_arg_flowi_flags(arg),
			   daddr, saddr,
			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
			   arg->uid);
	security_skb_classify_flow(skb, flowi4_to_flowi_common(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return;

	inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK;

	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	sk->sk_sndbuf = READ_ONCE(sysctl_wmem_default);
	ipc.sockc.mark = fl4.flowi4_mark;
	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
			     len, 0, &ipc, &rt, MSG_DONTWAIT);
	if (unlikely(err)) {
		ip_flush_pending_frames(sk);
		goto out;
	}

	nskb = skb_peek(&sk->sk_write_queue);
	if (nskb) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(nskb) +
			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
								arg->csum));
		nskb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, &fl4);
	}
out:
	ip_rt_put(rt);
}

void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST)
	igmp_mc_init();
#endif
}