/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case if packet not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	: 	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */

#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
	    unsigned int mtu,
	    int (*output)(struct net *, struct sock *, struct sk_buff *));
/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);

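/*
 * Finalise the IPv4 header (total length and checksum) and run the
 * NF_INET_LOCAL_OUT hook.  Returns 1 when the caller should continue
 * transmission via dst_output(), 0 when the skb was consumed by an
 * L3 master device, or a negative errno.
 */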
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	skb->protocol = htons(ETH_P_IP);

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}

int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

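/*
 * Use the per-socket unicast TTL when one was set with IP_TTL;
 * otherwise fall back to the route's default hop limit.
 */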
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}

/*
 *		Add an ip header to a skbuff and send it out.
 *
 */
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr    = saddr;
	iph->protocol = sk->sk_protocol;
	if (ip_dont_fragment(sk, &rt->dst)) {
		iph->frag_off = htons(IP_DF);
		iph->id = 0;
	} else {
		iph->frag_off = 0;
		__ip_select_ident(net, iph, 1);
	}

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen>>2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(net, skb->sk, skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);

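/*
 * Final transmit step: resolve the L2 neighbour for the next hop and
 * queue the packet on it, expanding headroom first when the device
 * needs more than the skb has.
 */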
static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		consume_skb(skb);
		skb = skb2;
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	rcu_read_lock_bh();
	nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh)) {
		int res = dst_neigh_output(dst, neigh, skb);

		rcu_read_unlock_bh();
		return res;
	}
	rcu_read_unlock_bh();

	net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
			    __func__);
	kfree_skb(skb);
	return -EINVAL;
}

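/*
 * Software-segment a GSO packet whose segments would exceed the egress
 * MTU, then fragment any resulting packet that is still too large.
 */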
static int ip_finish_output_gso(struct net *net, struct sock *sk,
				struct sk_buff *skb, unsigned int mtu)
{
	netdev_features_t features;
	struct sk_buff *segs;
	int ret = 0;

	/* common case: seglen is <= mtu */
	if (skb_gso_validate_mtu(skb, mtu))
		return ip_finish_output2(net, sk, skb);

	/* Slowpath - GSO segment length exceeds the egress MTU.
	 *
	 * This can happen in several cases:
	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
	 *  - Forwarding of an skb that arrived on a virtualization interface
	 *    (virtio-net/vhost/tap) with TSO/GSO size set by another network
	 *    stack.
	 *  - Local GSO skb transmitted on a NETIF_F_TSO tunnel stacked over an
	 *    interface with a smaller MTU.
	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
	 *    insufficient MTU.
	 */
	features = netif_skb_features(skb);
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	do {
		struct sk_buff *nskb = segs->next;
		int err;

		segs->next = NULL;
		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

		if (err && ret == 0)
			ret = err;
		segs = nskb;
	} while (segs);

	return ret;
}

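/*
 * POST_ROUTING hook continuation: run the cgroup egress BPF program,
 * re-enter dst_output() when a policy lookup after SNAT attached an
 * xfrm state, and then GSO-segment or fragment as needed before
 * ip_finish_output2().
 */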
static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
	mtu = ip_skb_dst_mtu(sk, skb);
	if (skb_is_gso(skb))
		return ip_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

	return ip_finish_output2(net, sk, skb);
}

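/*
 * Deliver a multicast/broadcast copy back to the local stack via the
 * loopback device, after giving the cgroup egress BPF program a chance
 * to drop it.
 */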
static int ip_mc_finish_output(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

	return dev_loopback_xmit(net, sk, skb);
}

int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that came back after forwarding; ip_mr_input will drop
		   them in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					ip_mc_finish_output);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
				net, sk, newskb, NULL, newskb->dev,
				ip_mc_finish_output);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

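/*
 * Standard dst_output() target for unicast packets: account the
 * transmit and traverse NF_INET_POST_ROUTING on the way to
 * ip_finish_output(), unless netfilter already rerouted the skb.
 */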
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/*
 * copy saddr and daddr, possibly using 64bit load/stores
 * Equivalent to:
 *   iph->saddr = fl4->saddr;
 *   iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
	memcpy(&iph->saddr, &fl4->saddr,
	       sizeof(fl4->saddr) + sizeof(fl4->daddr));
}

/* Note: skb->sk can be different from sk, in case of tunnels */
int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * e.g. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, the retransmit mechanism of the transport
		 * layer will keep trying until the route appears or the
		 * connection times out.
		 */
		rt = ip_route_output_ports(net, fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS(sk),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* The transport layer has already set its own header. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_segs(net, skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* TODO : should we use skb->sk here instead of sk ? */
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(net, sk, skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);

static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

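/*
 * Honour the DF bit: packets that may not be fragmented (DF set with
 * ignore_df clear, or a recorded frag_max_size above the MTU) are
 * answered with ICMP_FRAG_NEEDED and dropped; everything else goes to
 * ip_do_fragment().
 */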
static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       unsigned int mtu,
		       int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) == 0)
		return ip_do_fragment(net, sk, skb, output);

	if (unlikely(!skb->ignore_df ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each consisting of an IP header plus a block of
 *	the original datagram's data) that will still fit in a single device
 *	frame, and queue such frames for sending.
 */

int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph;
	int ptr;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	/* for offloaded checksums cleanup checksum before fragmentation */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(sk, skb);
	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
		mtu = IPCB(skb)->frag_max_size;

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited. In that case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first bad
	 * fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare the header of the next frame
			 * before the previous one goes down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(net, sk, skb);

			if (!err)
				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	iph = ip_hdr(skb);

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;		/* Where to start from */

	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}

		/* Allocate buffer */
		skb2 = alloc_skb(len + hlen + ll_rs, GFP_ATOMIC);
		if (!skb2) {
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		if (IPCB(skb)->flags & IPSKB_FRAG_PMTU)
			iph->frag_off |= htons(IP_DF);

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep the MF bit set on each piece.
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(net, sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_do_fragment);

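/*
 * Generic getfrag callback: @from is a struct msghdr, and the copied
 * bytes are folded into skb->csum unless the hardware will checksum
 * the packet (CHECKSUM_PARTIAL).
 */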
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (copy_from_iter(to, len, &msg->msg_iter) != len)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_and_copy_from_iter(to, len, &csum, &msg->msg_iter) != len)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);

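/* Checksum @copy bytes of @page starting at @offset, kmapping the page. */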
static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
			struct sk_buff_head *queue,
			int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int maxfraglen, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* The network device supports UDP fragmentation offload, so
	 * build one single skb containing the complete UDP datagram.
	 */
	skb = skb_peek_tail(queue);
	if (!skb) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (!skb)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->csum = 0;

		__skb_queue_tail(queue, skb);
	} else if (skb_is_gso(skb)) {
		goto append;
	}

	skb->ip_summed = CHECKSUM_PARTIAL;
	/* specify the length of each IP datagram fragment */
	skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
	skb_shinfo(skb)->gso_type = SKB_GSO_UDP;

append:
	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}

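/*
 * Append up to @length bytes to the packets pending on @queue: grow the
 * tail skb as far as maxfraglen allows and open new MTU-sized skbs (or
 * page frags on NETIF_F_SG devices) as needed.  The caller is expected
 * to serialise access to @queue, typically via the socket lock.
 */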
static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    struct page_frag *pfrag,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;
	u32 tskey = 0;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->fragsize;
	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = sk->sk_tskey++;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + length > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it not to be fragmented later.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
	    !(flags & MSG_MORE) &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	cork->length += length;
	if ((skb && skb_is_gso(skb)) ||
	    ((length > mtu) &&
	    (skb_queue_len(queue) <= 1) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
	    (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
		err = ip_ufo_append_data(sk, queue, getfrag, from, length,
					 hh_len, fragheaderlen, transhdrlen,
					 maxfraglen, flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chain of skbs;
	 * each of these segments is an IP fragment ready for sending to the
	 * network once the appropriate IP header has been added.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = fraglen;

			alloclen += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/* only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct ip_options_rcu *opt;
	struct rtable *rt;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (!cork->opt) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(!cork->opt))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}
	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;
	/*
	 * We steal a reference to this route; the caller should not release it
	 */
	*rtp = NULL;
	cork->fragsize = ip_sk_use_pmtu(sk) ?
			 dst_mtu(&rt->dst) : rt->dst.dev->mtu;
	cork->dst = &rt->dst;
	cork->length = 0;
	cork->ttl = ipc->ttl;
	cork->tos = ipc->tos;
	cork->priority = ipc->priority;
	cork->tx_flags = ipc->tx_flags;

	return 0;
}

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data. Each piece will be held on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP but other transport protocols - e.g. raw sockets - can
 *	potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
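/*
 * A minimal sketch of the usual calling pattern (roughly what
 * udp_sendmsg() does; the local names here are illustrative, not part
 * of this API):
 *
 *	err = ip_append_data(sk, &fl4, ip_generic_getfrag, msg, ulen,
 *			     sizeof(struct udphdr), &ipc, &rt,
 *			     msg->msg_flags);
 *	if (!err)
 *		err = ip_push_pending_frames(sk, &fl4);
 *	else
 *		ip_flush_pending_frames(sk);
 */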
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
				sk_page_frag(sk), getfrag,
				from, length, transhdrlen, flags);
}

ssize_t	ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + size > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	if (!skb)
		return -EINVAL;

	if ((size + skb->len > mtu) &&
	    (skb_queue_len(&sk->sk_write_queue) == 1) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			return -EOPNOTSUPP;

		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}
	cork->length += size;

	while (size > 0) {
		if (skb_is_gso(skb)) {
			len = size;
		} else {

			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (len > size)
			len = size;

		if (skb_append_pagefrags(skb, page, offset, len)) {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		atomic_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * the frame generated here to be fragmented. No matter how transforms
	 * change the size of the packet, it will go out.
	 */
	skb->ignore_df = ip_sk_ignore_df(sk);

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If ignore_df is set too, we still allow this frame to be
	 * fragmented locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    inet->pmtudisc == IP_PMTUDISC_PROBE ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (cork->ttl != 0)
		ttl = cork->ttl;
	else if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
	iph->frag_off = df;
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);
	ip_select_ident(net, skb, sk);

	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
	skb->mark = sk->sk_mark;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}

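/*
 * Transmit a finished skb through ip_local_out(), mapping positive
 * NET_XMIT codes to errnos and accounting discards.
 */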
int ip_send_skb(struct net *net, struct sk_buff *skb)
{
	int err;

	err = ip_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets the whole, not yet fragmented skb. */
	return ip_send_skb(sock_net(sk), skb);
}

/*
 *	Throw away all pending data on the socket.
 */
static void __ip_flush_pending_frames(struct sock *sk,
				      struct sk_buff_head *queue,
				      struct inet_cork *cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(cork);
}

void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

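/*
 * One-shot variant of ip_append_data() + ip_push_pending_frames():
 * build the whole datagram on a private queue and hand the resulting
 * skb back to the caller instead of sending it.
 */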
struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    unsigned int flags)
{
	struct inet_cork cork;
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork.flags = 0;
	cork.addr = 0;
	cork.opt = NULL;
	err = ip_setup_cork(sk, &cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, &cork,
			       &current->task_frag, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, &cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, &cork);
}

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used so far to send some TCP resets and ACKs.
 */
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len)
{
	struct ip_options_data replyopts;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct sk_buff *nskb;
	int err;
	int oif;

	if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
		return;

	ipc.addr = daddr;
	ipc.opt = NULL;
	ipc.tx_flags = 0;
	ipc.ttl = 0;
	ipc.tos = -1;

	if (replyopts.opt.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (replyopts.opt.opt.srr)
			daddr = replyopts.opt.opt.faddr;
	}

	oif = arg->bound_dev_if;
	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
		oif = skb->skb_iif;

	flowi4_init_output(&fl4, oif,
			   IP4_REPLY_MARK(net, skb->mark),
			   RT_TOS(arg->tos),
			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
			   ip_reply_arg_flowi_flags(arg),
			   daddr, saddr,
			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
			   arg->uid);
	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return;

	inet_sk(sk)->tos = arg->tos;

	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_mark = fl4.flowi4_mark;
	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
			     len, 0, &ipc, &rt, MSG_DONTWAIT);
	if (unlikely(err)) {
		ip_flush_pending_frames(sk);
		goto out;
	}

	nskb = skb_peek(&sk->sk_write_queue);
	if (nskb) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(nskb) +
			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
								arg->csum));
		nskb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, &fl4);
	}
out:
	ip_rt_put(rt);
}

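/*
 * Boot-time initialisation of the IP layer: routing tables, the inet
 * peer cache, and (when configured) multicast.  Called from inet_init().
 */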
void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST)
	igmp_mc_init();
#endif
}