/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case a packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
EXPORT_SYMBOL(sysctl_ip_default_ttl);

/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
        iph->check = 0;
        iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

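/*
 * Finalise the IP header (total length and checksum) and run the packet
 * through the LOCAL_OUT netfilter hook. A return value of 1 means the
 * hook accepted the packet and the caller should continue with
 * dst_output().
 */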
int __ip_local_out(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);

        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
        return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
                       skb_dst(skb)->dev, dst_output);
}

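/*
 * Transmit a locally generated packet: run LOCAL_OUT and, if the
 * netfilter hook accepts it, hand the skb to the dst output path.
 */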
int ip_local_out_sk(struct sock *sk, struct sk_buff *skb)
{
        int err;

        err = __ip_local_out(skb);
        if (likely(err == 1))
                err = dst_output_sk(sk, skb);

        return err;
}
EXPORT_SYMBOL_GPL(ip_local_out_sk);

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
        int ttl = inet->uc_ttl;

        if (ttl < 0)
                ttl = ip4_dst_hoplimit(dst);
        return ttl;
}

/*
 * Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
                          __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
        struct inet_sock *inet = inet_sk(sk);
        struct rtable *rt = skb_rtable(skb);
        struct iphdr *iph;

        /* Build the IP header. */
        skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        iph->version = 4;
        iph->ihl = 5;
        iph->tos = inet->tos;
        if (ip_dont_fragment(sk, &rt->dst))
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl = ip_select_ttl(inet, &rt->dst);
        iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
        iph->saddr = saddr;
        iph->protocol = sk->sk_protocol;
        ip_select_ident(skb, sk);

        if (opt && opt->opt.optlen) {
                iph->ihl += opt->opt.optlen>>2;
                ip_options_build(skb, &opt->opt, daddr, rt, 0);
        }

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        /* Send it out. */
        return ip_local_out(skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

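/*
 * Final transmission step: resolve (or create) the neighbour entry for
 * the next hop and queue the packet on the device, expanding the
 * headroom first if the link layer needs more than the skb has.
 */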
static inline int ip_finish_output2(struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct rtable *rt = (struct rtable *)dst;
        struct net_device *dev = dst->dev;
        unsigned int hh_len = LL_RESERVED_SPACE(dev);
        struct neighbour *neigh;
        u32 nexthop;

        if (rt->rt_type == RTN_MULTICAST) {
                IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
        } else if (rt->rt_type == RTN_BROADCAST)
                IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
                struct sk_buff *skb2;

                skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
                if (skb2 == NULL) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);
                consume_skb(skb);
                skb = skb2;
        }

        rcu_read_lock_bh();
        nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
        neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
        if (unlikely(!neigh))
                neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
        if (!IS_ERR(neigh)) {
                int res = dst_neigh_output(dst, neigh, skb);

                rcu_read_unlock_bh();
                return res;
        }
        rcu_read_unlock_bh();

        net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
                            __func__);
        kfree_skb(skb);
        return -EINVAL;
}

static int ip_finish_output_gso(struct sk_buff *skb)
{
        netdev_features_t features;
        struct sk_buff *segs;
        int ret = 0;

        /* common case: locally created skb or seglen is <= mtu */
        if (((IPCB(skb)->flags & IPSKB_FORWARDED) == 0) ||
            skb_gso_network_seglen(skb) <= ip_skb_dst_mtu(skb))
                return ip_finish_output2(skb);

        /* Slowpath - GSO segment length is exceeding the dst MTU.
         *
         * This can happen in two cases:
         * 1) TCP GRO packet, DF bit not set
         * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly
         *    from host network stack.
         */
        features = netif_skb_features(skb);
        segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
        if (IS_ERR_OR_NULL(segs)) {
                kfree_skb(skb);
                return -ENOMEM;
        }

        consume_skb(skb);

        do {
                struct sk_buff *nskb = segs->next;
                int err;

                segs->next = NULL;
                err = ip_fragment(segs, ip_finish_output2);

                if (err && ret == 0)
                        ret = err;
                segs = nskb;
        } while (segs);

        return ret;
}

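/*
 * Dispatch after POST_ROUTING: re-route if an xfrm policy was attached
 * by netfilter, segment GSO packets, fragment oversized ones, and send
 * everything else straight to ip_finish_output2().
 */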
static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
        if (skb_dst(skb)->xfrm != NULL) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(skb);
        }
#endif
        if (skb_is_gso(skb))
                return ip_finish_output_gso(skb);

        if (skb->len > ip_skb_dst_mtu(skb))
                return ip_fragment(skb, ip_finish_output2);

        return ip_finish_output2(skb);
}

int ip_mc_output(struct sock *sk, struct sk_buff *skb)
{
        struct rtable *rt = skb_rtable(skb);
        struct net_device *dev = rt->dst.dev;

        /*
         * If the indicated interface is up and running, send the packet.
         */
        IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        /*
         * Multicasts are looped back for other local users
         */

        if (rt->rt_flags&RTCF_MULTICAST) {
                if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
                /* Small optimization: do not loop back non-local frames
                   that returned after forwarding; they will be dropped
                   by ip_mr_input in any case.
                   Note that local frames are looped back to be delivered
                   to local recipients.

                   This check is duplicated in ip_mr_input at the moment.
                 */
                    &&
                    ((rt->rt_flags & RTCF_LOCAL) ||
                     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
                   ) {
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                        if (newskb)
                                NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
                                        newskb, NULL, newskb->dev,
                                        dev_loopback_xmit);
                }

                /* Multicasts with ttl 0 must not go beyond the host */

                if (ip_hdr(skb)->ttl == 0) {
                        kfree_skb(skb);
                        return 0;
                }
        }

        if (rt->rt_flags&RTCF_BROADCAST) {
                struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                if (newskb)
                        NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
                                NULL, newskb->dev, dev_loopback_xmit);
        }

        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
                            skb->dev, ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

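/*
 * Standard unicast output path: account the packet, stamp the outgoing
 * device and protocol, then run POST_ROUTING before ip_finish_output().
 * The hook is bypassed for packets flagged IPSKB_REROUTED, which have
 * already traversed it once before being re-routed.
 */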
int ip_output(struct sock *sk, struct sk_buff *skb)
{
        struct net_device *dev = skb_dst(skb)->dev;

        IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/*
 * copy saddr and daddr, possibly using 64bit load/stores
 * Equivalent to :
 *   iph->saddr = fl4->saddr;
 *   iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
        BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
                     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
        memcpy(&iph->saddr, &fl4->saddr,
               sizeof(fl4->saddr) + sizeof(fl4->daddr));
}

/* Note: skb->sk can be different from sk, in case of tunnels */
int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
        struct inet_sock *inet = inet_sk(sk);
        struct ip_options_rcu *inet_opt;
        struct flowi4 *fl4;
        struct rtable *rt;
        struct iphdr *iph;
        int res;

        /* Skip all of this if the packet is already routed,
         * f.e. by something like SCTP.
         */
        rcu_read_lock();
        inet_opt = rcu_dereference(inet->inet_opt);
        fl4 = &fl->u.ip4;
        rt = skb_rtable(skb);
        if (rt != NULL)
                goto packet_routed;

        /* Make sure we can route this packet. */
        rt = (struct rtable *)__sk_dst_check(sk, 0);
        if (rt == NULL) {
                __be32 daddr;

                /* Use correct destination address if we have options. */
                daddr = inet->inet_daddr;
                if (inet_opt && inet_opt->opt.srr)
                        daddr = inet_opt->opt.faddr;

                /* If this fails, the retransmit mechanism of the transport
                 * layer will keep trying until a route appears or the
                 * connection times itself out.
                 */
                rt = ip_route_output_ports(sock_net(sk), fl4, sk,
                                           daddr, inet->inet_saddr,
                                           inet->inet_dport,
                                           inet->inet_sport,
                                           sk->sk_protocol,
                                           RT_CONN_FLAGS(sk),
                                           sk->sk_bound_dev_if);
                if (IS_ERR(rt))
                        goto no_route;
                sk_setup_caps(sk, &rt->dst);
        }
        skb_dst_set_noref(skb, &rt->dst);

packet_routed:
        if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto no_route;

        /* OK, we know where to send it, allocate and build IP header. */
        skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
        if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl = ip_select_ttl(inet, &rt->dst);
        iph->protocol = sk->sk_protocol;
        ip_copy_addrs(iph, fl4);

        /* Transport layer set skb->h.foo itself. */

        if (inet_opt && inet_opt->opt.optlen) {
                iph->ihl += inet_opt->opt.optlen >> 2;
                ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
        }

        ip_select_ident_segs(skb, sk, skb_shinfo(skb)->gso_segs ?: 1);

        /* TODO : should we use skb->sk here instead of sk ? */
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        res = ip_local_out(skb);
        rcu_read_unlock();
        return res;

no_route:
        rcu_read_unlock();
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);

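/*
 * Propagate per-packet metadata (type, priority, dst, netfilter state,
 * etc.) from the original skb to a freshly allocated fragment.
 */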
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
        to->pkt_type = from->pkt_type;
        to->priority = from->priority;
        to->protocol = from->protocol;
        skb_dst_drop(to);
        skb_dst_copy(to, from);
        to->dev = from->dev;
        to->mark = from->mark;

        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
#endif
        nf_copy(to, from);
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
        to->ipvs_property = from->ipvs_property;
#endif
        skb_copy_secmark(to, from);
}

/*
 * This IP datagram is too large to be sent in one piece. Break it up into
 * smaller pieces (each of size equal to the IP header plus a block of the
 * data of the original IP data part) that will yet fit in a single device
 * frame, and queue such a frame for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
        struct iphdr *iph;
        int ptr;
        struct net_device *dev;
        struct sk_buff *skb2;
        unsigned int mtu, hlen, left, len, ll_rs;
        int offset;
        __be16 not_last_frag;
        struct rtable *rt = skb_rtable(skb);
        int err = 0;

        dev = rt->dst.dev;

        /*
         * Point into the IP datagram header.
         */

        iph = ip_hdr(skb);

        mtu = ip_skb_dst_mtu(skb);
        if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) ||
                     (IPCB(skb)->frag_max_size &&
                      IPCB(skb)->frag_max_size > mtu))) {
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                          htonl(mtu));
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        /*
         * Setup starting values.
         */

        hlen = iph->ihl * 4;
        mtu = mtu - hlen;       /* Size of data space */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (skb->nf_bridge)
                mtu -= nf_bridge_mtu_reduction(skb);
#endif
        IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

        /* When frag_list is given, use it. First, check its validity:
         * some transformers could create a wrong frag_list or break an
         * existing one; this is not prohibited. In that case fall back
         * to copying.
         *
         * LATER: this step can be merged to real generation of fragments,
         * we can switch to copy when we see the first bad fragment.
         */
        if (skb_has_frag_list(skb)) {
                struct sk_buff *frag, *frag2;
                int first_len = skb_pagelen(skb);

                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
                    ip_is_fragment(iph) ||
                    skb_cloned(skb))
                        goto slow_path;

                skb_walk_frags(skb, frag) {
                        /* Correct geometry. */
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
                                goto slow_path_clean;

                        /* Partially cloned skb? */
                        if (skb_shared(frag))
                                goto slow_path_clean;

                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                        }
                        skb->truesize -= frag->truesize;
                }

                /* Everything is OK. Generate! */

                err = 0;
                offset = 0;
                frag = skb_shinfo(skb)->frag_list;
                skb_frag_list_init(skb);
                skb->data_len = first_len - skb_headlen(skb);
                skb->len = first_len;
                iph->tot_len = htons(first_len);
                iph->frag_off = htons(IP_MF);
                ip_send_check(iph);

                for (;;) {
                        /* Prepare the header of the next frame
                         * before the previous one goes down. */
                        if (frag) {
                                frag->ip_summed = CHECKSUM_NONE;
                                skb_reset_transport_header(frag);
                                __skb_push(frag, hlen);
                                skb_reset_network_header(frag);
                                memcpy(skb_network_header(frag), iph, hlen);
                                iph = ip_hdr(frag);
                                iph->tot_len = htons(frag->len);
                                ip_copy_metadata(frag, skb);
                                if (offset == 0)
                                        ip_options_fragment(frag);
                                offset += skb->len - hlen;
                                iph->frag_off = htons(offset>>3);
                                if (frag->next != NULL)
                                        iph->frag_off |= htons(IP_MF);
                                /* Ready, complete checksum */
                                ip_send_check(iph);
                        }

                        err = output(skb);

                        if (!err)
                                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
                        if (err || !frag)
                                break;

                        skb = frag;
                        frag = skb->next;
                        skb->next = NULL;
                }

                if (err == 0) {
                        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
                        return 0;
                }

                while (frag) {
                        skb = frag->next;
                        kfree_skb(frag);
                        frag = skb;
                }
                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
                return err;

slow_path_clean:
                skb_walk_frags(skb, frag2) {
                        if (frag2 == frag)
                                break;
                        frag2->sk = NULL;
                        frag2->destructor = NULL;
                        skb->truesize += frag2->truesize;
                }
        }

slow_path:
        /* for offloaded checksums cleanup checksum before fragmentation */
        if ((skb->ip_summed == CHECKSUM_PARTIAL) && skb_checksum_help(skb))
                goto fail;
        iph = ip_hdr(skb);

        left = skb->len - hlen;         /* Space per frame */
        ptr = hlen;                     /* Where to start from */

        /* for bridged IP traffic encapsulated inside f.e. a vlan header,
         * we need to make room for the encapsulating header
         */
        ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));

        /*
         * Fragment the datagram.
         */

        offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
        not_last_frag = iph->frag_off & htons(IP_MF);

        /*
         * Keep copying data until we run out.
         */

        while (left > 0) {
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
                        len = mtu;
                /* IF: we are not sending up to and including the packet end
                   then align the next start on an eight byte boundary */
                if (len < left) {
                        len &= ~7;
                }

                /*
                 * Allocate buffer.
                 */

                if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
                        NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
                        err = -ENOMEM;
                        goto fail;
                }

                /*
                 * Set up data on packet
                 */

                ip_copy_metadata(skb2, skb);
                skb_reserve(skb2, ll_rs);
                skb_put(skb2, len + hlen);
                skb_reset_network_header(skb2);
                skb2->transport_header = skb2->network_header + hlen;

                /*
                 * Charge the memory for the fragment to any owner
                 * it might possess
                 */

                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);

                /*
                 * Copy the packet header into the new buffer.
                 */

                skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

                /*
                 * Copy a block of the IP datagram.
                 */
                if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
                        BUG();
                left -= len;

                /*
                 * Fill in the new header fields.
                 */
                iph = ip_hdr(skb2);
                iph->frag_off = htons((offset >> 3));

                /* ANK: dirty, but effective trick. Upgrade options only if
                 * the segment to be fragmented was THE FIRST (otherwise,
                 * options are already fixed) and make it ONCE
                 * on the initial skb, so that all the following fragments
                 * will inherit fixed options.
                 */
                if (offset == 0)
                        ip_options_fragment(skb);

                /*
                 * Added AC : If we are fragmenting a fragment that's not the
                 *            last fragment then keep the MF bit set on each
                 *            piece.
                 */
                if (left > 0 || not_last_frag)
                        iph->frag_off |= htons(IP_MF);
                ptr += len;
                offset += len;

                /*
                 * Put this fragment into the sending queue.
                 */
                iph->tot_len = htons(len + hlen);

                ip_send_check(iph);

                err = output(skb2);
                if (err)
                        goto fail;

                IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
        }
        consume_skb(skb);
        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
        return err;

fail:
        kfree_skb(skb);
        IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
        return err;
}
EXPORT_SYMBOL(ip_fragment);

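/*
 * Default getfrag() callback for ip_append_data(): copy user data from
 * an iovec into the skb, accumulating a checksum on the way unless the
 * hardware will checksum the packet (CHECKSUM_PARTIAL).
 */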
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
        struct iovec *iov = from;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (memcpy_fromiovecend(to, iov, offset, len) < 0)
                        return -EFAULT;
        } else {
                __wsum csum = 0;
                if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
                        return -EFAULT;
                skb->csum = csum_block_add(skb->csum, csum, odd);
        }
        return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
        char *kaddr;
        __wsum csum;
        kaddr = kmap(page);
        csum = csum_partial(kaddr + offset, copy, 0);
        kunmap(page);
        return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
                        struct sk_buff_head *queue,
                        int getfrag(void *from, char *to, int offset, int len,
                                    int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
                        int transhdrlen, int maxfraglen, unsigned int flags)
{
        struct sk_buff *skb;
        int err;

        /* The network device supports UDP fragmentation offload, so create
         * one single skb packet containing the complete udp datagram.
         */
        if ((skb = skb_peek_tail(queue)) == NULL) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);

                if (skb == NULL)
                        return err;

                /* reserve space for Hardware header */
                skb_reserve(skb, hh_len);

                /* create space for UDP/IP header */
                skb_put(skb, fragheaderlen + transhdrlen);

                /* initialize network header pointer */
                skb_reset_network_header(skb);

                /* initialize protocol header pointer */
                skb->transport_header = skb->network_header + fragheaderlen;

                skb->csum = 0;

                __skb_queue_tail(queue, skb);
        } else if (skb_is_gso(skb)) {
                goto append;
        }

        skb->ip_summed = CHECKSUM_PARTIAL;
        /* specify the length of each IP datagram fragment */
        skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;

append:
        return skb_append_datato_frags(sk, skb, getfrag, from,
                                       (length - transhdrlen));
}

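/*
 * Workhorse behind ip_append_data() and ip_make_skb(): grow the given
 * queue with the caller's data, cut into maxfraglen-sized skbs, so that
 * each queued skb can later become one IP fragment.
 */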
static int __ip_append_data(struct sock *sk,
                            struct flowi4 *fl4,
                            struct sk_buff_head *queue,
                            struct inet_cork *cork,
                            struct page_frag *pfrag,
                            int getfrag(void *from, char *to, int offset,
                                        int len, int odd, struct sk_buff *skb),
                            void *from, int length, int transhdrlen,
                            unsigned int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;

        struct ip_options *opt = cork->opt;
        int hh_len;
        int exthdrlen;
        int mtu;
        int copy;
        int err;
        int offset = 0;
        unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
        int csummode = CHECKSUM_NONE;
        struct rtable *rt = (struct rtable *)cork->dst;
        u32 tskey = 0;

        skb = skb_peek_tail(queue);

        exthdrlen = !skb ? rt->dst.header_len : 0;
        mtu = cork->fragsize;
        if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
            sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
                tskey = sk->sk_tskey++;

        hh_len = LL_RESERVED_SPACE(rt->dst.dev);

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
        maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

        if (cork->length + length > maxnonfragsize - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
                               mtu - (opt ? opt->optlen : 0));
                return -EMSGSIZE;
        }

        /*
         * transhdrlen > 0 means that this is the first fragment and we wish
         * it not to be fragmented later.
         */
        if (transhdrlen &&
            length + fragheaderlen <= mtu &&
            rt->dst.dev->features & NETIF_F_V4_CSUM &&
            !exthdrlen)
                csummode = CHECKSUM_PARTIAL;

        cork->length += length;
        if ((skb && skb_is_gso(skb)) ||
            (((length + fragheaderlen) > mtu) &&
             (skb_queue_len(queue) <= 1) &&
             (sk->sk_protocol == IPPROTO_UDP) &&
             (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
             (sk->sk_type == SOCK_DGRAM))) {
                err = ip_ufo_append_data(sk, queue, getfrag, from, length,
                                         hh_len, fragheaderlen, transhdrlen,
                                         maxfraglen, flags);
                if (err)
                        goto error;
                return 0;
        }

        /* So, what's going on in the loop below?
         *
         * We use the calculated fragment length to generate a chained skb;
         * each of its segments is an IP fragment ready for sending to the
         * network after adding an appropriate IP header.
         */

        if (!skb)
                goto alloc_new_skb;

        while (length > 0) {
                /* Check if the remaining data fits into current packet. */
                copy = mtu - skb->len;
                if (copy < length)
                        copy = maxfraglen - skb->len;
                if (copy <= 0) {
                        char *data;
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
                        struct sk_buff *skb_prev;
alloc_new_skb:
                        skb_prev = skb;
                        if (skb_prev)
                                fraggap = skb_prev->len - maxfraglen;
                        else
                                fraggap = 0;

                        /*
                         * If remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
                        if (datalen > mtu - fragheaderlen)
                                datalen = maxfraglen - fragheaderlen;
                        fraglen = datalen + fragheaderlen;

                        if ((flags & MSG_MORE) &&
                            !(rt->dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
                        else
                                alloclen = fraglen;

                        alloclen += exthdrlen;

                        /* The last fragment gets additional space at tail.
                         * Note, with MSG_MORE we overallocate on fragments,
                         * because we have no idea what fragment will be
                         * the last.
                         */
                        if (datalen == length + fraggap)
                                alloclen += rt->dst.trailer_len;

                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len + 15,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (atomic_read(&sk->sk_wmem_alloc) <=
                                    2 * sk->sk_sndbuf)
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len + 15, 1,
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
                        }
                        if (skb == NULL)
                                goto error;

                        /*
                         * Fill in the control structures
                         */
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /* only the initial fragment is time stamped */
                        skb_shinfo(skb)->tx_flags = cork->tx_flags;
                        cork->tx_flags = 0;
                        skb_shinfo(skb)->tskey = tskey;
                        tskey = 0;

                        /*
                         * Find where to start putting bytes.
                         */
                        data = skb_put(skb, fraglen + exthdrlen);
                        skb_set_network_header(skb, exthdrlen);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
                        data += fragheaderlen + exthdrlen;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data + transhdrlen, fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                data += fraggap;
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }

                        copy = datalen - transhdrlen - fraggap;
                        if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
                        }

                        offset += copy;
                        length -= datalen - fraggap;
                        transhdrlen = 0;
                        exthdrlen = 0;
                        csummode = CHECKSUM_NONE;

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(queue, skb);
                        continue;
                }

                if (copy > length)
                        copy = length;

                if (!(rt->dst.dev->features&NETIF_F_SG)) {
                        unsigned int off;

                        off = skb->len;
                        if (getfrag(from, skb_put(skb, copy),
                                    offset, copy, off, skb) < 0) {
                                __skb_trim(skb, off);
                                err = -EFAULT;
                                goto error;
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;

                        err = -ENOMEM;
                        if (!sk_page_frag_refill(sk, pfrag))
                                goto error;

                        if (!skb_can_coalesce(skb, i, pfrag->page,
                                              pfrag->offset)) {
                                err = -EMSGSIZE;
                                if (i == MAX_SKB_FRAGS)
                                        goto error;

                                __skb_fill_page_desc(skb, i, pfrag->page,
                                                     pfrag->offset, 0);
                                skb_shinfo(skb)->nr_frags = ++i;
                                get_page(pfrag->page);
                        }
                        copy = min_t(int, copy, pfrag->size - pfrag->offset);
                        if (getfrag(from,
                                    page_address(pfrag->page) + pfrag->offset,
                                    offset, copy, skb->len, skb) < 0)
                                goto error_efault;

                        pfrag->offset += copy;
                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                        skb->len += copy;
                        skb->data_len += copy;
                        skb->truesize += copy;
                        atomic_add(copy, &sk->sk_wmem_alloc);
                }
                offset += copy;
                length -= copy;
        }

        return 0;

error_efault:
        err = -EFAULT;
error:
        cork->length -= length;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        return err;
}

static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
                         struct ipcm_cookie *ipc, struct rtable **rtp)
{
        struct ip_options_rcu *opt;
        struct rtable *rt;

        /*
         * setup for corking.
         */
        opt = ipc->opt;
        if (opt) {
                if (cork->opt == NULL) {
                        cork->opt = kmalloc(sizeof(struct ip_options) + 40,
                                            sk->sk_allocation);
                        if (unlikely(cork->opt == NULL))
                                return -ENOBUFS;
                }
                memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
                cork->flags |= IPCORK_OPT;
                cork->addr = ipc->addr;
        }
        rt = *rtp;
        if (unlikely(!rt))
                return -EFAULT;
        /*
         * We steal a reference to this route; the caller should not release it.
         */
        *rtp = NULL;
        cork->fragsize = ip_sk_use_pmtu(sk) ?
                         dst_mtu(&rt->dst) : rt->dst.dev->mtu;
        cork->dst = &rt->dst;
        cork->length = 0;
        cork->ttl = ipc->ttl;
        cork->tos = ipc->tos;
        cork->priority = ipc->priority;
        cork->tx_flags = ipc->tx_flags;

        return 0;
}

/*
 * ip_append_data() and ip_append_page() can make one large IP datagram
 * from many pieces of data. Each piece will be held on the socket
 * until ip_push_pending_frames() is called. Each piece can be a page
 * or non-page data.
 *
 * Not only UDP; other transport protocols, e.g. raw sockets, can
 * potentially use this interface.
 *
 * LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
                   int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                   void *from, int length, int transhdrlen,
                   struct ipcm_cookie *ipc, struct rtable **rtp,
                   unsigned int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        int err;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue)) {
                err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
                if (err)
                        return err;
        } else {
                transhdrlen = 0;
        }

        return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
                                sk_page_frag(sk), getfrag,
                                from, length, transhdrlen, flags);
}

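/*
 * Zero-copy variant of ip_append_data(): attach (a reference to) the
 * given page to the pending queue instead of copying the data; this is
 * the path behind sendfile()/sendpage() on a corked UDP socket.
 */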
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
                       int offset, size_t size, int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
        struct rtable *rt;
        struct ip_options *opt = NULL;
        struct inet_cork *cork;
        int hh_len;
        int mtu;
        int len;
        int err;
        unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;

        if (inet->hdrincl)
                return -EPERM;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue))
                return -EINVAL;

        cork = &inet->cork.base;
        rt = (struct rtable *)cork->dst;
        if (cork->flags & IPCORK_OPT)
                opt = cork->opt;

        if (!(rt->dst.dev->features&NETIF_F_SG))
                return -EOPNOTSUPP;

        hh_len = LL_RESERVED_SPACE(rt->dst.dev);
        mtu = cork->fragsize;

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
        maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

        if (cork->length + size > maxnonfragsize - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
                               mtu - (opt ? opt->optlen : 0));
                return -EMSGSIZE;
        }

        if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
                return -EINVAL;

        cork->length += size;
        if ((size + skb->len > mtu) &&
            (skb_queue_len(&sk->sk_write_queue) == 1) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
        }

        while (size > 0) {
                int i;

                if (skb_is_gso(skb))
                        len = size;
                else {

                        /* Check if the remaining data fits into current packet. */
                        len = mtu - skb->len;
                        if (len < size)
                                len = maxfraglen - skb->len;
                }
                if (len <= 0) {
                        struct sk_buff *skb_prev;
                        int alloclen;

                        skb_prev = skb;
                        fraggap = skb_prev->len - maxfraglen;

                        alloclen = fragheaderlen + hh_len + fraggap + 15;
                        skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
                        if (unlikely(!skb)) {
                                err = -ENOBUFS;
                                goto error;
                        }

                        /*
                         * Fill in the control structures
                         */
                        skb->ip_summed = CHECKSUM_NONE;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /*
                         * Find where to start putting bytes.
                         */
                        skb_put(skb, fragheaderlen + fraggap);
                        skb_reset_network_header(skb);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(skb_prev,
                                                                   maxfraglen,
                                                    skb_transport_header(skb),
                                                                   fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                i = skb_shinfo(skb)->nr_frags;
                if (len > size)
                        len = size;
                if (skb_can_coalesce(skb, i, page, offset)) {
                        skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len);
                } else if (i < MAX_SKB_FRAGS) {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, len);
                } else {
                        err = -EMSGSIZE;
                        goto error;
                }

                if (skb->ip_summed == CHECKSUM_NONE) {
                        __wsum csum;
                        csum = csum_page(page, offset, len);
                        skb->csum = csum_block_add(skb->csum, csum, skb->len);
                }

                skb->len += len;
                skb->data_len += len;
                skb->truesize += len;
                atomic_add(len, &sk->sk_wmem_alloc);
                offset += len;
                size -= len;
        }
        return 0;

error:
        cork->length -= size;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        return err;
}

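/*
 * Release the resources held by the cork: the copied options and the
 * stolen route reference.
 */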
static void ip_cork_release(struct inet_cork *cork)
{
        cork->flags &= ~IPCORK_OPT;
        kfree(cork->opt);
        cork->opt = NULL;
        dst_release(cork->dst);
        cork->dst = NULL;
}

/*
 * Combine all pending IP fragments on the socket into one IP datagram
 * and push them out.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
                              struct flowi4 *fl4,
                              struct sk_buff_head *queue,
                              struct inet_cork *cork)
{
        struct sk_buff *skb, *tmp_skb;
        struct sk_buff **tail_skb;
        struct inet_sock *inet = inet_sk(sk);
        struct net *net = sock_net(sk);
        struct ip_options *opt = NULL;
        struct rtable *rt = (struct rtable *)cork->dst;
        struct iphdr *iph;
        __be16 df = 0;
        __u8 ttl;

        if ((skb = __skb_dequeue(queue)) == NULL)
                goto out;
        tail_skb = &(skb_shinfo(skb)->frag_list);

        /* move skb->data to ip header from ext header */
        if (skb->data < skb_network_header(skb))
                __skb_pull(skb, skb_network_offset(skb));
        while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
                __skb_pull(tmp_skb, skb_network_header_len(skb));
                *tail_skb = tmp_skb;
                tail_skb = &(tmp_skb->next);
                skb->len += tmp_skb->len;
                skb->data_len += tmp_skb->len;
                skb->truesize += tmp_skb->truesize;
                tmp_skb->destructor = NULL;
                tmp_skb->sk = NULL;
        }

        /* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
         * the stack to fragment the frame generated here. No matter how
         * transforms change the size of the packet, it will come out.
         */
        skb->ignore_df = ip_sk_ignore_df(sk);

        /* DF bit is set when we want to see DF on outgoing frames.
         * If ignore_df is set too, we still allow to fragment this frame
         * locally. */
        if (inet->pmtudisc == IP_PMTUDISC_DO ||
            inet->pmtudisc == IP_PMTUDISC_PROBE ||
            (skb->len <= dst_mtu(&rt->dst) &&
             ip_dont_fragment(sk, &rt->dst)))
                df = htons(IP_DF);

        if (cork->flags & IPCORK_OPT)
                opt = cork->opt;

        if (cork->ttl != 0)
                ttl = cork->ttl;
        else if (rt->rt_type == RTN_MULTICAST)
                ttl = inet->mc_ttl;
        else
                ttl = ip_select_ttl(inet, &rt->dst);

        iph = ip_hdr(skb);
        iph->version = 4;
        iph->ihl = 5;
        iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
        iph->frag_off = df;
        iph->ttl = ttl;
        iph->protocol = sk->sk_protocol;
        ip_copy_addrs(iph, fl4);
        ip_select_ident(skb, sk);

        if (opt) {
                iph->ihl += opt->optlen>>2;
                ip_options_build(skb, opt, cork->addr, rt, 0);
        }

        skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
        skb->mark = sk->sk_mark;
        /*
         * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
         * on dst refcount
         */
        cork->dst = NULL;
        skb_dst_set(skb, &rt->dst);

        if (iph->protocol == IPPROTO_ICMP)
                icmp_out_count(net, ((struct icmphdr *)
                        skb_transport_header(skb))->type);

        ip_cork_release(cork);
out:
        return skb;
}

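/*
 * Hand a finished datagram to the IP output path, mapping congestion
 * notifications from the queueing layer onto errno values and counting
 * failures as OUTDISCARDS.
 */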
int ip_send_skb(struct net *net, struct sk_buff *skb)
{
        int err;

        err = ip_local_out(skb);
        if (err) {
                if (err > 0)
                        err = net_xmit_errno(err);
                if (err)
                        IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
        }

        return err;
}

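/*
 * Collapse the socket's pending write queue into a single datagram and
 * send it.
 */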
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
        struct sk_buff *skb;

        skb = ip_finish_skb(sk, fl4);
        if (!skb)
                return 0;

        /* Netfilter gets the whole, not yet fragmented skb. */
        return ip_send_skb(sock_net(sk), skb);
}

/*
 * Throw away all pending data on the socket.
 */
static void __ip_flush_pending_frames(struct sock *sk,
                                      struct sk_buff_head *queue,
                                      struct inet_cork *cork)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue_tail(queue)) != NULL)
                kfree_skb(skb);

        ip_cork_release(cork);
}

void ip_flush_pending_frames(struct sock *sk)
{
        __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

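/*
 * Single-shot variant of the append/push machinery: build the whole
 * datagram on a private queue with its own cork, without touching the
 * socket's write queue (used e.g. by UDP's uncorked fast path).
 */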
struct sk_buff *ip_make_skb(struct sock *sk,
                            struct flowi4 *fl4,
                            int getfrag(void *from, char *to, int offset,
                                        int len, int odd, struct sk_buff *skb),
                            void *from, int length, int transhdrlen,
                            struct ipcm_cookie *ipc, struct rtable **rtp,
                            unsigned int flags)
{
        struct inet_cork cork;
        struct sk_buff_head queue;
        int err;

        if (flags & MSG_PROBE)
                return NULL;

        __skb_queue_head_init(&queue);

        cork.flags = 0;
        cork.addr = 0;
        cork.opt = NULL;
        err = ip_setup_cork(sk, &cork, ipc, rtp);
        if (err)
                return ERR_PTR(err);

        err = __ip_append_data(sk, fl4, &queue, &cork,
                               &current->task_frag, getfrag,
                               from, length, transhdrlen, flags);
        if (err) {
                __ip_flush_pending_frames(sk, &queue, &cork);
                return ERR_PTR(err);
        }

        return __ip_make_skb(sk, fl4, &queue, &cork);
}

/*
 * Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
                              int len, int odd, struct sk_buff *skb)
{
        __wsum csum;

        csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
        skb->csum = csum_block_add(skb->csum, csum, odd);
        return 0;
}

/*
 * Generic function to send a packet as reply to another packet.
 * Used to send some TCP resets/acks so far.
 */
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
                           const struct ip_options *sopt,
                           __be32 daddr, __be32 saddr,
                           const struct ip_reply_arg *arg,
                           unsigned int len)
{
        struct ip_options_data replyopts;
        struct ipcm_cookie ipc;
        struct flowi4 fl4;
        struct rtable *rt = skb_rtable(skb);
        struct net *net = sock_net(sk);
        struct sk_buff *nskb;
        int err;

        if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
                return;

        ipc.addr = daddr;
        ipc.opt = NULL;
        ipc.tx_flags = 0;
        ipc.ttl = 0;
        ipc.tos = -1;

        if (replyopts.opt.opt.optlen) {
                ipc.opt = &replyopts.opt;

                if (replyopts.opt.opt.srr)
                        daddr = replyopts.opt.opt.faddr;
        }

        flowi4_init_output(&fl4, arg->bound_dev_if,
                           IP4_REPLY_MARK(net, skb->mark),
                           RT_TOS(arg->tos),
                           RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
                           ip_reply_arg_flowi_flags(arg),
                           daddr, saddr,
                           tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
                           arg->uid);
        security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
        rt = ip_route_output_key(net, &fl4);
        if (IS_ERR(rt))
                return;

        inet_sk(sk)->tos = arg->tos;

        sk->sk_priority = skb->priority;
        sk->sk_protocol = ip_hdr(skb)->protocol;
        sk->sk_bound_dev_if = arg->bound_dev_if;
        sk->sk_sndbuf = sysctl_wmem_default;
        err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
                             len, 0, &ipc, &rt, MSG_DONTWAIT);
        if (unlikely(err)) {
                ip_flush_pending_frames(sk);
                goto out;
        }

        nskb = skb_peek(&sk->sk_write_queue);
        if (nskb) {
                if (arg->csumoffset >= 0)
                        *((__sum16 *)skb_transport_header(nskb) +
                          arg->csumoffset) = csum_fold(csum_add(nskb->csum,
                                                                arg->csum));
                nskb->ip_summed = CHECKSUM_NONE;
                skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb));
                ip_push_pending_frames(sk, &fl4);
        }
out:
        ip_rt_put(rt);
}

void __init ip_init(void)
{
        ip_rt_init();
        inet_initpeers();

#if defined(CONFIG_IP_MULTICAST)
        igmp_mc_init();
#endif
}