1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	IPv6 tunneling device
4  *	Linux INET6 implementation
5  *
6  *	Authors:
7  *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
8  *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
9  *
10  *      Based on:
11  *      linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
12  *
13  *      RFC 2473
14  */
15 
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17 
18 #include <linux/module.h>
19 #include <linux/capability.h>
20 #include <linux/errno.h>
21 #include <linux/types.h>
22 #include <linux/sockios.h>
23 #include <linux/icmp.h>
24 #include <linux/if.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/net.h>
28 #include <linux/in6.h>
29 #include <linux/netdevice.h>
30 #include <linux/if_arp.h>
31 #include <linux/icmpv6.h>
32 #include <linux/init.h>
33 #include <linux/route.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/netfilter_ipv6.h>
36 #include <linux/slab.h>
37 #include <linux/hash.h>
38 #include <linux/etherdevice.h>
39 
40 #include <linux/uaccess.h>
41 #include <linux/atomic.h>
42 
43 #include <net/icmp.h>
44 #include <net/ip.h>
45 #include <net/ip_tunnels.h>
46 #include <net/ipv6.h>
47 #include <net/ip6_route.h>
48 #include <net/addrconf.h>
49 #include <net/ip6_tunnel.h>
50 #include <net/xfrm.h>
51 #include <net/dsfield.h>
52 #include <net/inet_ecn.h>
53 #include <net/net_namespace.h>
54 #include <net/netns/generic.h>
55 #include <net/dst_metadata.h>
56 
57 MODULE_AUTHOR("Ville Nuorvala");
58 MODULE_DESCRIPTION("IPv6 tunneling device");
59 MODULE_LICENSE("GPL");
60 MODULE_ALIAS_RTNL_LINK("ip6tnl");
61 MODULE_ALIAS_NETDEV("ip6tnl0");
62 
63 #define IP6_TUNNEL_HASH_SIZE_SHIFT  5
64 #define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)
65 
66 static bool log_ecn_error = true;
67 module_param(log_ecn_error, bool, 0644);
68 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
69 
70 static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
71 {
72 	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
73 
74 	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
75 }
76 
77 static int ip6_tnl_dev_init(struct net_device *dev);
78 static void ip6_tnl_dev_setup(struct net_device *dev);
79 static struct rtnl_link_ops ip6_link_ops __read_mostly;
80 
81 static unsigned int ip6_tnl_net_id __read_mostly;
82 struct ip6_tnl_net {
83 	/* the IPv6 tunnel fallback device */
84 	struct net_device *fb_tnl_dev;
85 	/* lists for storing tunnels in use */
86 	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
87 	struct ip6_tnl __rcu *tnls_wc[1];
88 	struct ip6_tnl __rcu **tnls[2];
89 	struct ip6_tnl __rcu *collect_md_tun;
90 };
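/* The two entries of tnls[] above are the bucket tables used by
 * ip6_tnl_bucket() below: tnls[1] points at the hashed tnls_r_l[] chains
 * holding tunnels with at least one configured endpoint address, while
 * tnls[0] points at the single-entry tnls_wc[] wildcard chain used for
 * the fallback device.
 */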
91 
92 static struct net_device_stats *ip6_get_stats(struct net_device *dev)
93 {
94 	struct pcpu_sw_netstats tmp, sum = { 0 };
95 	int i;
96 
97 	for_each_possible_cpu(i) {
98 		unsigned int start;
99 		const struct pcpu_sw_netstats *tstats =
100 						   per_cpu_ptr(dev->tstats, i);
101 
102 		do {
103 			start = u64_stats_fetch_begin_irq(&tstats->syncp);
104 			tmp.rx_packets = tstats->rx_packets;
105 			tmp.rx_bytes = tstats->rx_bytes;
106 			tmp.tx_packets = tstats->tx_packets;
107 			tmp.tx_bytes =  tstats->tx_bytes;
108 		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));
109 
110 		sum.rx_packets += tmp.rx_packets;
111 		sum.rx_bytes   += tmp.rx_bytes;
112 		sum.tx_packets += tmp.tx_packets;
113 		sum.tx_bytes   += tmp.tx_bytes;
114 	}
115 	dev->stats.rx_packets = sum.rx_packets;
116 	dev->stats.rx_bytes   = sum.rx_bytes;
117 	dev->stats.tx_packets = sum.tx_packets;
118 	dev->stats.tx_bytes   = sum.tx_bytes;
119 	return &dev->stats;
120 }
121 
122 /**
123  * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
124  *   @remote: the address of the tunnel exit-point
125  *   @local: the address of the tunnel entry-point
126  *
127  * Return:
128  *   tunnel matching given end-points if found,
129  *   else fallback tunnel if its device is up,
130  *   else %NULL
131  **/
132 
133 #define for_each_ip6_tunnel_rcu(start) \
134 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
135 
136 static struct ip6_tnl *
137 ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
138 {
139 	unsigned int hash = HASH(remote, local);
140 	struct ip6_tnl *t;
141 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
142 	struct in6_addr any;
143 
144 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
145 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
146 		    ipv6_addr_equal(remote, &t->parms.raddr) &&
147 		    (t->dev->flags & IFF_UP))
148 			return t;
149 	}
150 
151 	memset(&any, 0, sizeof(any));
152 	hash = HASH(&any, local);
153 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
154 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
155 		    ipv6_addr_any(&t->parms.raddr) &&
156 		    (t->dev->flags & IFF_UP))
157 			return t;
158 	}
159 
160 	hash = HASH(remote, &any);
161 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
162 		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
163 		    ipv6_addr_any(&t->parms.laddr) &&
164 		    (t->dev->flags & IFF_UP))
165 			return t;
166 	}
167 
168 	t = rcu_dereference(ip6n->collect_md_tun);
169 	if (t && t->dev->flags & IFF_UP)
170 		return t;
171 
172 	t = rcu_dereference(ip6n->tnls_wc[0]);
173 	if (t && (t->dev->flags & IFF_UP))
174 		return t;
175 
176 	return NULL;
177 }
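/* Lookup order used above: an exact (remote, local) match is preferred,
 * then a tunnel bound only to the local address, then one bound only to
 * the remote address, then any collect_md (external mode) tunnel, and
 * finally the fallback device if it is up.
 */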
178 
179 /**
180  * ip6_tnl_bucket - get head of list matching given tunnel parameters
181  *   @p: parameters containing tunnel end-points
182  *
183  * Description:
184  *   ip6_tnl_bucket() returns the head of the list matching the
185  *   &struct in6_addr entries laddr and raddr in @p.
186  *
187  * Return: head of IPv6 tunnel list
188  **/
189 
190 static struct ip6_tnl __rcu **
191 ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
192 {
193 	const struct in6_addr *remote = &p->raddr;
194 	const struct in6_addr *local = &p->laddr;
195 	unsigned int h = 0;
196 	int prio = 0;
197 
198 	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
199 		prio = 1;
200 		h = HASH(remote, local);
201 	}
202 	return &ip6n->tnls[prio][h];
203 }
204 
205 /**
206  * ip6_tnl_link - add tunnel to hash table
207  *   @t: tunnel to be added
208  **/
209 
210 static void
211 ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
212 {
213 	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);
214 
215 	if (t->parms.collect_md)
216 		rcu_assign_pointer(ip6n->collect_md_tun, t);
217 	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
218 	rcu_assign_pointer(*tp, t);
219 }
220 
221 /**
222  * ip6_tnl_unlink - remove tunnel from hash table
223  *   @t: tunnel to be removed
224  **/
225 
226 static void
227 ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
228 {
229 	struct ip6_tnl __rcu **tp;
230 	struct ip6_tnl *iter;
231 
232 	if (t->parms.collect_md)
233 		rcu_assign_pointer(ip6n->collect_md_tun, NULL);
234 
235 	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
236 	     (iter = rtnl_dereference(*tp)) != NULL;
237 	     tp = &iter->next) {
238 		if (t == iter) {
239 			rcu_assign_pointer(*tp, t->next);
240 			break;
241 		}
242 	}
243 }
244 
245 static void ip6_dev_free(struct net_device *dev)
246 {
247 	struct ip6_tnl *t = netdev_priv(dev);
248 
249 	gro_cells_destroy(&t->gro_cells);
250 	dst_cache_destroy(&t->dst_cache);
251 	free_percpu(dev->tstats);
252 }
253 
254 static int ip6_tnl_create2(struct net_device *dev)
255 {
256 	struct ip6_tnl *t = netdev_priv(dev);
257 	struct net *net = dev_net(dev);
258 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
259 	int err;
260 
261 	t = netdev_priv(dev);
262 
263 	dev->rtnl_link_ops = &ip6_link_ops;
264 	err = register_netdevice(dev);
265 	if (err < 0)
266 		goto out;
267 
268 	strcpy(t->parms.name, dev->name);
269 
270 	dev_hold(dev);
271 	ip6_tnl_link(ip6n, t);
272 	return 0;
273 
274 out:
275 	return err;
276 }
277 
278 /**
279  * ip6_tnl_create - create a new tunnel
280  *   @net: network namespace
281  *   @p: tunnel parameters
282  *
283  * Description:
284  *   Create tunnel matching given parameters.
285  *
286  * Return:
287  *   created tunnel or error pointer
288  **/
289 
290 static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
291 {
292 	struct net_device *dev;
293 	struct ip6_tnl *t;
294 	char name[IFNAMSIZ];
295 	int err = -E2BIG;
296 
297 	if (p->name[0]) {
298 		if (!dev_valid_name(p->name))
299 			goto failed;
300 		strlcpy(name, p->name, IFNAMSIZ);
301 	} else {
302 		sprintf(name, "ip6tnl%%d");
303 	}
304 	err = -ENOMEM;
305 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
306 			   ip6_tnl_dev_setup);
307 	if (!dev)
308 		goto failed;
309 
310 	dev_net_set(dev, net);
311 
312 	t = netdev_priv(dev);
313 	t->parms = *p;
314 	t->net = dev_net(dev);
315 	err = ip6_tnl_create2(dev);
316 	if (err < 0)
317 		goto failed_free;
318 
319 	return t;
320 
321 failed_free:
322 	free_netdev(dev);
323 failed:
324 	return ERR_PTR(err);
325 }
326 
327 /**
328  * ip6_tnl_locate - find or create tunnel matching given parameters
329  *   @p: tunnel parameters
330  *   @create: != 0 if allowed to create new tunnel if no match found
331  *
332  * Description:
333  *   ip6_tnl_locate() first tries to locate an existing tunnel
334  *   based on @p. If this is unsuccessful, but @create is set, a new
335  *   tunnel device is created and registered for use.
336  *
337  * Return:
338  *   matching tunnel or error pointer
339  **/
340 
341 static struct ip6_tnl *ip6_tnl_locate(struct net *net,
342 		struct __ip6_tnl_parm *p, int create)
343 {
344 	const struct in6_addr *remote = &p->raddr;
345 	const struct in6_addr *local = &p->laddr;
346 	struct ip6_tnl __rcu **tp;
347 	struct ip6_tnl *t;
348 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
349 
350 	for (tp = ip6_tnl_bucket(ip6n, p);
351 	     (t = rtnl_dereference(*tp)) != NULL;
352 	     tp = &t->next) {
353 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
354 		    ipv6_addr_equal(remote, &t->parms.raddr)) {
355 			if (create)
356 				return ERR_PTR(-EEXIST);
357 
358 			return t;
359 		}
360 	}
361 	if (!create)
362 		return ERR_PTR(-ENODEV);
363 	return ip6_tnl_create(net, p);
364 }
365 
366 /**
367  * ip6_tnl_dev_uninit - tunnel device uninitializer
368  *   @dev: the device to be destroyed
369  *
370  * Description:
371  *   ip6_tnl_dev_uninit() removes tunnel from its list
372  **/
373 
374 static void
375 ip6_tnl_dev_uninit(struct net_device *dev)
376 {
377 	struct ip6_tnl *t = netdev_priv(dev);
378 	struct net *net = t->net;
379 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
380 
381 	if (dev == ip6n->fb_tnl_dev)
382 		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
383 	else
384 		ip6_tnl_unlink(ip6n, t);
385 	dst_cache_reset(&t->dst_cache);
386 	dev_put(dev);
387 }
388 
389 /**
390  * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
391  *   @skb: received socket buffer
392  *
393  * Return:
394  *   0 if none was found,
395  *   else index to encapsulation limit
396  **/
397 
398 __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
399 {
400 	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
401 	unsigned int nhoff = raw - skb->data;
402 	unsigned int off = nhoff + sizeof(*ipv6h);
403 	u8 next, nexthdr = ipv6h->nexthdr;
404 
405 	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
406 		struct ipv6_opt_hdr *hdr;
407 		u16 optlen;
408 
409 		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
410 			break;
411 
412 		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
413 		if (nexthdr == NEXTHDR_FRAGMENT) {
414 			struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
415 			if (frag_hdr->frag_off)
416 				break;
417 			optlen = 8;
418 		} else if (nexthdr == NEXTHDR_AUTH) {
419 			optlen = ipv6_authlen(hdr);
420 		} else {
421 			optlen = ipv6_optlen(hdr);
422 		}
423 		/* cache hdr->nexthdr, since pskb_may_pull() might
424 		 * invalidate hdr
425 		 */
426 		next = hdr->nexthdr;
427 		if (nexthdr == NEXTHDR_DEST) {
428 			u16 i = 2;
429 
430 			/* Remember: hdr is no longer valid at this point. */
431 			if (!pskb_may_pull(skb, off + optlen))
432 				break;
433 
434 			while (1) {
435 				struct ipv6_tlv_tnl_enc_lim *tel;
436 
437 				/* No more room for encapsulation limit */
438 				if (i + sizeof(*tel) > optlen)
439 					break;
440 
441 				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
442 				/* return index of option if found and valid */
443 				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
444 				    tel->length == 1)
445 					return i + off - nhoff;
446 				/* else jump to next option */
447 				if (tel->type)
448 					i += tel->length + 2;
449 				else
450 					i++;
451 			}
452 		}
453 		nexthdr = next;
454 		off += optlen;
455 	}
456 	return 0;
457 }
458 EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
459 
460 /**
461  * ip6_tnl_err - tunnel error handler
462  *
463  * Description:
464  *   ip6_tnl_err() should handle errors in the tunnel according
465  *   to the specifications in RFC 2473.
466  **/
467 
468 static int
469 ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
470 	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
471 {
472 	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
473 	struct net *net = dev_net(skb->dev);
474 	u8 rel_type = ICMPV6_DEST_UNREACH;
475 	u8 rel_code = ICMPV6_ADDR_UNREACH;
476 	__u32 rel_info = 0;
477 	struct ip6_tnl *t;
478 	int err = -ENOENT;
479 	int rel_msg = 0;
480 	u8 tproto;
481 	__u16 len;
482 
483 	/* If the packet doesn't contain the original IPv6 header we are
484 	   in trouble since we might need the source address for further
485 	   processing of the error. */
486 
487 	rcu_read_lock();
488 	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
489 	if (!t)
490 		goto out;
491 
492 	tproto = READ_ONCE(t->parms.proto);
493 	if (tproto != ipproto && tproto != 0)
494 		goto out;
495 
496 	err = 0;
497 
498 	switch (*type) {
499 		struct ipv6_tlv_tnl_enc_lim *tel;
500 		__u32 mtu, teli;
501 	case ICMPV6_DEST_UNREACH:
502 		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
503 				    t->parms.name);
504 		rel_msg = 1;
505 		break;
506 	case ICMPV6_TIME_EXCEED:
507 		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
508 			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
509 					    t->parms.name);
510 			rel_msg = 1;
511 		}
512 		break;
513 	case ICMPV6_PARAMPROB:
514 		teli = 0;
515 		if ((*code) == ICMPV6_HDR_FIELD)
516 			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
517 
518 		if (teli && teli == *info - 2) {
519 			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
520 			if (tel->encap_limit == 0) {
521 				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
522 						    t->parms.name);
523 				rel_msg = 1;
524 			}
525 		} else {
526 			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
527 					    t->parms.name);
528 		}
529 		break;
530 	case ICMPV6_PKT_TOOBIG:
531 		ip6_update_pmtu(skb, net, htonl(*info), 0, 0,
532 				sock_net_uid(net, NULL));
533 		mtu = *info - offset;
534 		if (mtu < IPV6_MIN_MTU)
535 			mtu = IPV6_MIN_MTU;
536 		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
537 		if (len > mtu) {
538 			rel_type = ICMPV6_PKT_TOOBIG;
539 			rel_code = 0;
540 			rel_info = mtu;
541 			rel_msg = 1;
542 		}
543 		break;
544 	case NDISC_REDIRECT:
545 		ip6_redirect(skb, net, skb->dev->ifindex, 0,
546 			     sock_net_uid(net, NULL));
547 		break;
548 	}
549 
550 	*type = rel_type;
551 	*code = rel_code;
552 	*info = rel_info;
553 	*msg = rel_msg;
554 
555 out:
556 	rcu_read_unlock();
557 	return err;
558 }
559 
560 static int
561 ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
562 	   u8 type, u8 code, int offset, __be32 info)
563 {
564 	__u32 rel_info = ntohl(info);
565 	const struct iphdr *eiph;
566 	struct sk_buff *skb2;
567 	int err, rel_msg = 0;
568 	u8 rel_type = type;
569 	u8 rel_code = code;
570 	struct rtable *rt;
571 	struct flowi4 fl4;
572 
573 	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
574 			  &rel_msg, &rel_info, offset);
575 	if (err < 0)
576 		return err;
577 
578 	if (rel_msg == 0)
579 		return 0;
580 
581 	switch (rel_type) {
582 	case ICMPV6_DEST_UNREACH:
583 		if (rel_code != ICMPV6_ADDR_UNREACH)
584 			return 0;
585 		rel_type = ICMP_DEST_UNREACH;
586 		rel_code = ICMP_HOST_UNREACH;
587 		break;
588 	case ICMPV6_PKT_TOOBIG:
589 		if (rel_code != 0)
590 			return 0;
591 		rel_type = ICMP_DEST_UNREACH;
592 		rel_code = ICMP_FRAG_NEEDED;
593 		break;
594 	default:
595 		return 0;
596 	}
597 
598 	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
599 		return 0;
600 
601 	skb2 = skb_clone(skb, GFP_ATOMIC);
602 	if (!skb2)
603 		return 0;
604 
605 	skb_dst_drop(skb2);
606 
607 	skb_pull(skb2, offset);
608 	skb_reset_network_header(skb2);
609 	eiph = ip_hdr(skb2);
610 
611 	/* Try to guess incoming interface */
612 	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
613 				   0, 0, 0, IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
614 	if (IS_ERR(rt))
615 		goto out;
616 
617 	skb2->dev = rt->dst.dev;
618 	ip_rt_put(rt);
619 
620 	/* route "incoming" packet */
621 	if (rt->rt_flags & RTCF_LOCAL) {
622 		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
623 					   eiph->daddr, eiph->saddr, 0, 0,
624 					   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
625 		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
626 			if (!IS_ERR(rt))
627 				ip_rt_put(rt);
628 			goto out;
629 		}
630 		skb_dst_set(skb2, &rt->dst);
631 	} else {
632 		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
633 				   skb2->dev) ||
634 		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
635 			goto out;
636 	}
637 
638 	/* change mtu on this route */
639 	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
640 		if (rel_info > dst_mtu(skb_dst(skb2)))
641 			goto out;
642 
643 		skb_dst_update_pmtu_no_confirm(skb2, rel_info);
644 	}
645 
646 	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));
647 
648 out:
649 	kfree_skb(skb2);
650 	return 0;
651 }
652 
653 static int
654 ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
655 	   u8 type, u8 code, int offset, __be32 info)
656 {
657 	__u32 rel_info = ntohl(info);
658 	int err, rel_msg = 0;
659 	u8 rel_type = type;
660 	u8 rel_code = code;
661 
662 	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
663 			  &rel_msg, &rel_info, offset);
664 	if (err < 0)
665 		return err;
666 
667 	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
668 		struct rt6_info *rt;
669 		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
670 
671 		if (!skb2)
672 			return 0;
673 
674 		skb_dst_drop(skb2);
675 		skb_pull(skb2, offset);
676 		skb_reset_network_header(skb2);
677 
678 		/* Try to guess incoming interface */
679 		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
680 				NULL, 0, skb2, 0);
681 
682 		if (rt && rt->dst.dev)
683 			skb2->dev = rt->dst.dev;
684 
685 		icmpv6_send(skb2, rel_type, rel_code, rel_info);
686 
687 		ip6_rt_put(rt);
688 
689 		kfree_skb(skb2);
690 	}
691 
692 	return 0;
693 }
694 
695 static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
696 				       const struct ipv6hdr *ipv6h,
697 				       struct sk_buff *skb)
698 {
699 	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;
700 
701 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
702 		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);
703 
704 	return IP6_ECN_decapsulate(ipv6h, skb);
705 }
706 
707 static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
708 				       const struct ipv6hdr *ipv6h,
709 				       struct sk_buff *skb)
710 {
711 	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
712 		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));
713 
714 	return IP6_ECN_decapsulate(ipv6h, skb);
715 }
716 
717 __u32 ip6_tnl_get_cap(struct ip6_tnl *t,
718 			     const struct in6_addr *laddr,
719 			     const struct in6_addr *raddr)
720 {
721 	struct __ip6_tnl_parm *p = &t->parms;
722 	int ltype = ipv6_addr_type(laddr);
723 	int rtype = ipv6_addr_type(raddr);
724 	__u32 flags = 0;
725 
726 	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
727 		flags = IP6_TNL_F_CAP_PER_PACKET;
728 	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
729 		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
730 		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
731 		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
732 		if (ltype&IPV6_ADDR_UNICAST)
733 			flags |= IP6_TNL_F_CAP_XMIT;
734 		if (rtype&IPV6_ADDR_UNICAST)
735 			flags |= IP6_TNL_F_CAP_RCV;
736 	}
737 	return flags;
738 }
739 EXPORT_SYMBOL(ip6_tnl_get_cap);
740 
741 /* called with rcu_read_lock() */
742 int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
743 				  const struct in6_addr *laddr,
744 				  const struct in6_addr *raddr)
745 {
746 	struct __ip6_tnl_parm *p = &t->parms;
747 	int ret = 0;
748 	struct net *net = t->net;
749 
750 	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
751 	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
752 	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
753 		struct net_device *ldev = NULL;
754 
755 		if (p->link)
756 			ldev = dev_get_by_index_rcu(net, p->link);
757 
758 		if ((ipv6_addr_is_multicast(laddr) ||
759 		     likely(ipv6_chk_addr_and_flags(net, laddr, ldev, false,
760 						    0, IFA_F_TENTATIVE))) &&
761 		    ((p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) ||
762 		     likely(!ipv6_chk_addr_and_flags(net, raddr, ldev, true,
763 						     0, IFA_F_TENTATIVE))))
764 			ret = 1;
765 	}
766 	return ret;
767 }
768 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
769 
770 static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
771 			 const struct tnl_ptk_info *tpi,
772 			 struct metadata_dst *tun_dst,
773 			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
774 						const struct ipv6hdr *ipv6h,
775 						struct sk_buff *skb),
776 			 bool log_ecn_err)
777 {
778 	struct pcpu_sw_netstats *tstats;
779 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
780 	int err;
781 
782 	if ((!(tpi->flags & TUNNEL_CSUM) &&
783 	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
784 	    ((tpi->flags & TUNNEL_CSUM) &&
785 	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
786 		tunnel->dev->stats.rx_crc_errors++;
787 		tunnel->dev->stats.rx_errors++;
788 		goto drop;
789 	}
790 
791 	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
792 		if (!(tpi->flags & TUNNEL_SEQ) ||
793 		    (tunnel->i_seqno &&
794 		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
795 			tunnel->dev->stats.rx_fifo_errors++;
796 			tunnel->dev->stats.rx_errors++;
797 			goto drop;
798 		}
799 		tunnel->i_seqno = ntohl(tpi->seq) + 1;
800 	}
801 
802 	skb->protocol = tpi->proto;
803 
804 	/* Warning: All skb pointers will be invalidated! */
805 	if (tunnel->dev->type == ARPHRD_ETHER) {
806 		if (!pskb_may_pull(skb, ETH_HLEN)) {
807 			tunnel->dev->stats.rx_length_errors++;
808 			tunnel->dev->stats.rx_errors++;
809 			goto drop;
810 		}
811 
812 		ipv6h = ipv6_hdr(skb);
813 		skb->protocol = eth_type_trans(skb, tunnel->dev);
814 		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
815 	} else {
816 		skb->dev = tunnel->dev;
817 	}
818 
819 	skb_reset_network_header(skb);
820 	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
821 
822 	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
823 
824 	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
825 	if (unlikely(err)) {
826 		if (log_ecn_err)
827 			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
828 					     &ipv6h->saddr,
829 					     ipv6_get_dsfield(ipv6h));
830 		if (err > 1) {
831 			++tunnel->dev->stats.rx_frame_errors;
832 			++tunnel->dev->stats.rx_errors;
833 			goto drop;
834 		}
835 	}
836 
837 	tstats = this_cpu_ptr(tunnel->dev->tstats);
838 	u64_stats_update_begin(&tstats->syncp);
839 	tstats->rx_packets++;
840 	tstats->rx_bytes += skb->len;
841 	u64_stats_update_end(&tstats->syncp);
842 
843 	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));
844 
845 	if (tun_dst)
846 		skb_dst_set(skb, (struct dst_entry *)tun_dst);
847 
848 	gro_cells_receive(&tunnel->gro_cells, skb);
849 	return 0;
850 
851 drop:
852 	if (tun_dst)
853 		dst_release((struct dst_entry *)tun_dst);
854 	kfree_skb(skb);
855 	return 0;
856 }
857 
858 int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
859 		const struct tnl_ptk_info *tpi,
860 		struct metadata_dst *tun_dst,
861 		bool log_ecn_err)
862 {
863 	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
864 			     log_ecn_err);
865 }
866 EXPORT_SYMBOL(ip6_tnl_rcv);
867 
868 static const struct tnl_ptk_info tpi_v6 = {
869 	/* no tunnel info required for ipxip6. */
870 	.proto = htons(ETH_P_IPV6),
871 };
872 
873 static const struct tnl_ptk_info tpi_v4 = {
874 	/* no tunnel info required for ipxip6. */
875 	.proto = htons(ETH_P_IP),
876 };
877 
878 static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
879 		      const struct tnl_ptk_info *tpi,
880 		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
881 						  const struct ipv6hdr *ipv6h,
882 						  struct sk_buff *skb))
883 {
884 	struct ip6_tnl *t;
885 	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
886 	struct metadata_dst *tun_dst = NULL;
887 	int ret = -1;
888 
889 	rcu_read_lock();
890 	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
891 
892 	if (t) {
893 		u8 tproto = READ_ONCE(t->parms.proto);
894 
895 		if (tproto != ipproto && tproto != 0)
896 			goto drop;
897 		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
898 			goto drop;
899 		ipv6h = ipv6_hdr(skb);
900 		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
901 			goto drop;
902 		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
903 			goto drop;
904 		if (t->parms.collect_md) {
905 			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
906 			if (!tun_dst)
907 				goto drop;
908 		}
909 		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
910 				    log_ecn_error);
911 	}
912 
913 	rcu_read_unlock();
914 
915 	return ret;
916 
917 drop:
918 	rcu_read_unlock();
919 	kfree_skb(skb);
920 	return 0;
921 }
922 
923 static int ip4ip6_rcv(struct sk_buff *skb)
924 {
925 	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
926 			  ip4ip6_dscp_ecn_decapsulate);
927 }
928 
929 static int ip6ip6_rcv(struct sk_buff *skb)
930 {
931 	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
932 			  ip6ip6_dscp_ecn_decapsulate);
933 }
934 
935 struct ipv6_tel_txoption {
936 	struct ipv6_txoptions ops;
937 	__u8 dst_opt[8];
938 };
939 
940 static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
941 {
942 	memset(opt, 0, sizeof(struct ipv6_tel_txoption));
943 
944 	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
945 	opt->dst_opt[3] = 1;
946 	opt->dst_opt[4] = encap_limit;
947 	opt->dst_opt[5] = IPV6_TLV_PADN;
948 	opt->dst_opt[6] = 1;
949 
950 	opt->ops.dst1opt = (struct ipv6_opt_hdr *) opt->dst_opt;
951 	opt->ops.opt_nflen = 8;
952 }
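/* The resulting 8-byte destination options header looks like this:
 * dst_opt[0..1] hold the option header itself (the next header field is
 * filled in when the option is pushed; header extension length 0 means
 * 8 octets), dst_opt[2..4] carry the Tunnel Encapsulation Limit TLV from
 * RFC 2473 (type, length 1, limit value), and dst_opt[5..7] are a PadN
 * TLV (type, length 1, one zero byte) padding the header to 8 octets.
 */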
953 
954 /**
955  * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
956  *   @t: the outgoing tunnel device
957  *   @hdr: IPv6 header from the incoming packet
958  *
959  * Description:
960  *   Avoid trivial tunneling loop by checking that tunnel exit-point
961  *   doesn't match source of incoming packet.
962  *
963  * Return:
964  *   1 if conflict,
965  *   0 else
966  **/
967 
968 static inline bool
969 ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
970 {
971 	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
972 }
973 
974 int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
975 		     const struct in6_addr *laddr,
976 		     const struct in6_addr *raddr)
977 {
978 	struct __ip6_tnl_parm *p = &t->parms;
979 	int ret = 0;
980 	struct net *net = t->net;
981 
982 	if (t->parms.collect_md)
983 		return 1;
984 
985 	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
986 	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
987 	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
988 		struct net_device *ldev = NULL;
989 
990 		rcu_read_lock();
991 		if (p->link)
992 			ldev = dev_get_by_index_rcu(net, p->link);
993 
994 		if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
995 						      0, IFA_F_TENTATIVE)))
996 			pr_warn("%s xmit: Local address not yet configured!\n",
997 				p->name);
998 		else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
999 			 !ipv6_addr_is_multicast(raddr) &&
1000 			 unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
1001 							  true, 0, IFA_F_TENTATIVE)))
1002 			pr_warn("%s xmit: Routing loop! Remote address found on this node!\n",
1003 				p->name);
1004 		else
1005 			ret = 1;
1006 		rcu_read_unlock();
1007 	}
1008 	return ret;
1009 }
1010 EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);
1011 
1012 /**
1013  * ip6_tnl_xmit - encapsulate packet and send
1014  *   @skb: the outgoing socket buffer
1015  *   @dev: the outgoing tunnel device
1016  *   @dsfield: dscp code for outer header
1017  *   @fl6: flow of tunneled packet
1018  *   @encap_limit: encapsulation limit
1019  *   @pmtu: Path MTU is stored if packet is too big
1020  *   @proto: next header value
1021  *
1022  * Description:
1023  *   Build new header and do some sanity checks on the packet before sending
1024  *   it.
1025  *
1026  * Return:
1027  *   0 on success,
1028  *   -1 on failure,
1029  *   %-EMSGSIZE if the message is too big; the MTU is returned via @pmtu.
1030  **/
1031 
1032 int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
1033 		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
1034 		 __u8 proto)
1035 {
1036 	struct ip6_tnl *t = netdev_priv(dev);
1037 	struct net *net = t->net;
1038 	struct net_device_stats *stats = &t->dev->stats;
1039 	struct ipv6hdr *ipv6h;
1040 	struct ipv6_tel_txoption opt;
1041 	struct dst_entry *dst = NULL, *ndst = NULL;
1042 	struct net_device *tdev;
1043 	int mtu;
1044 	unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
1045 	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
1046 	unsigned int max_headroom = psh_hlen;
1047 	bool use_cache = false;
1048 	u8 hop_limit;
1049 	int err = -1;
1050 
1051 	if (t->parms.collect_md) {
1052 		hop_limit = skb_tunnel_info(skb)->key.ttl;
1053 		goto route_lookup;
1054 	} else {
1055 		hop_limit = t->parms.hop_limit;
1056 	}
1057 
1058 	/* NBMA tunnel */
1059 	if (ipv6_addr_any(&t->parms.raddr)) {
1060 		if (skb->protocol == htons(ETH_P_IPV6)) {
1061 			struct in6_addr *addr6;
1062 			struct neighbour *neigh;
1063 			int addr_type;
1064 
1065 			if (!skb_dst(skb))
1066 				goto tx_err_link_failure;
1067 
1068 			neigh = dst_neigh_lookup(skb_dst(skb),
1069 						 &ipv6_hdr(skb)->daddr);
1070 			if (!neigh)
1071 				goto tx_err_link_failure;
1072 
1073 			addr6 = (struct in6_addr *)&neigh->primary_key;
1074 			addr_type = ipv6_addr_type(addr6);
1075 
1076 			if (addr_type == IPV6_ADDR_ANY)
1077 				addr6 = &ipv6_hdr(skb)->daddr;
1078 
1079 			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
1080 			neigh_release(neigh);
1081 		}
1082 	} else if (t->parms.proto != 0 && !(t->parms.flags &
1083 					    (IP6_TNL_F_USE_ORIG_TCLASS |
1084 					     IP6_TNL_F_USE_ORIG_FWMARK))) {
1085 		/* enable the cache only if neither the outer protocol nor the
1086 		 * routing decision depends on the current inner header value
1087 		 */
1088 		use_cache = true;
1089 	}
1090 
1091 	if (use_cache)
1092 		dst = dst_cache_get(&t->dst_cache);
1093 
1094 	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
1095 		goto tx_err_link_failure;
1096 
1097 	if (!dst) {
1098 route_lookup:
1099 		/* add dsfield to flowlabel for route lookup */
1100 		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);
1101 
1102 		dst = ip6_route_output(net, NULL, fl6);
1103 
1104 		if (dst->error)
1105 			goto tx_err_link_failure;
1106 		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
1107 		if (IS_ERR(dst)) {
1108 			err = PTR_ERR(dst);
1109 			dst = NULL;
1110 			goto tx_err_link_failure;
1111 		}
1112 		if (t->parms.collect_md && ipv6_addr_any(&fl6->saddr) &&
1113 		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
1114 				       &fl6->daddr, 0, &fl6->saddr))
1115 			goto tx_err_link_failure;
1116 		ndst = dst;
1117 	}
1118 
1119 	tdev = dst->dev;
1120 
1121 	if (tdev == dev) {
1122 		stats->collisions++;
1123 		net_warn_ratelimited("%s: Local routing loop detected!\n",
1124 				     t->parms.name);
1125 		goto tx_err_dst_release;
1126 	}
1127 	mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
1128 	if (encap_limit >= 0) {
1129 		max_headroom += 8;
1130 		mtu -= 8;
1131 	}
1132 	mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
1133 		       IPV6_MIN_MTU : IPV4_MIN_MTU);
1134 
1135 	skb_dst_update_pmtu_no_confirm(skb, mtu);
1136 	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
1137 		*pmtu = mtu;
1138 		err = -EMSGSIZE;
1139 		goto tx_err_dst_release;
1140 	}
1141 
1142 	if (t->err_count > 0) {
1143 		if (time_before(jiffies,
1144 				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
1145 			t->err_count--;
1146 
1147 			dst_link_failure(skb);
1148 		} else {
1149 			t->err_count = 0;
1150 		}
1151 	}
1152 
1153 	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));
1154 
1155 	/*
1156 	 * Okay, now see if we can stuff it in the buffer as-is.
1157 	 */
1158 	max_headroom += LL_RESERVED_SPACE(tdev);
1159 
1160 	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
1161 	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
1162 		struct sk_buff *new_skb;
1163 
1164 		new_skb = skb_realloc_headroom(skb, max_headroom);
1165 		if (!new_skb)
1166 			goto tx_err_dst_release;
1167 
1168 		if (skb->sk)
1169 			skb_set_owner_w(new_skb, skb->sk);
1170 		consume_skb(skb);
1171 		skb = new_skb;
1172 	}
1173 
1174 	if (t->parms.collect_md) {
1175 		if (t->encap.type != TUNNEL_ENCAP_NONE)
1176 			goto tx_err_dst_release;
1177 	} else {
1178 		if (use_cache && ndst)
1179 			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
1180 	}
1181 	skb_dst_set(skb, dst);
1182 
1183 	if (hop_limit == 0) {
1184 		if (skb->protocol == htons(ETH_P_IP))
1185 			hop_limit = ip_hdr(skb)->ttl;
1186 		else if (skb->protocol == htons(ETH_P_IPV6))
1187 			hop_limit = ipv6_hdr(skb)->hop_limit;
1188 		else
1189 			hop_limit = ip6_dst_hoplimit(dst);
1190 	}
1191 
1192 	/* Calculate max headroom for all the headers and adjust
1193 	 * needed_headroom if necessary.
1194 	 */
1195 	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
1196 			+ dst->header_len + t->hlen;
1197 	if (max_headroom > dev->needed_headroom)
1198 		dev->needed_headroom = max_headroom;
1199 
1200 	err = ip6_tnl_encap(skb, t, &proto, fl6);
1201 	if (err)
1202 		return err;
1203 
1204 	if (encap_limit >= 0) {
1205 		init_tel_txopt(&opt, encap_limit);
1206 		ipv6_push_frag_opts(skb, &opt.ops, &proto);
1207 	}
1208 
1209 	skb_push(skb, sizeof(struct ipv6hdr));
1210 	skb_reset_network_header(skb);
1211 	ipv6h = ipv6_hdr(skb);
1212 	ip6_flow_hdr(ipv6h, dsfield,
1213 		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
1214 	ipv6h->hop_limit = hop_limit;
1215 	ipv6h->nexthdr = proto;
1216 	ipv6h->saddr = fl6->saddr;
1217 	ipv6h->daddr = fl6->daddr;
1218 	ip6tunnel_xmit(NULL, skb, dev);
1219 	return 0;
1220 tx_err_link_failure:
1221 	stats->tx_carrier_errors++;
1222 	dst_link_failure(skb);
1223 tx_err_dst_release:
1224 	dst_release(dst);
1225 	return err;
1226 }
1227 EXPORT_SYMBOL(ip6_tnl_xmit);
1228 
1229 static inline int
1230 ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1231 {
1232 	struct ip6_tnl *t = netdev_priv(dev);
1233 	const struct iphdr  *iph;
1234 	int encap_limit = -1;
1235 	struct flowi6 fl6;
1236 	__u8 dsfield;
1237 	__u32 mtu;
1238 	u8 tproto;
1239 	int err;
1240 
1241 	iph = ip_hdr(skb);
1242 	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1243 
1244 	tproto = READ_ONCE(t->parms.proto);
1245 	if (tproto != IPPROTO_IPIP && tproto != 0)
1246 		return -1;
1247 
1248 	if (t->parms.collect_md) {
1249 		struct ip_tunnel_info *tun_info;
1250 		const struct ip_tunnel_key *key;
1251 
1252 		tun_info = skb_tunnel_info(skb);
1253 		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1254 			     ip_tunnel_info_af(tun_info) != AF_INET6))
1255 			return -1;
1256 		key = &tun_info->key;
1257 		memset(&fl6, 0, sizeof(fl6));
1258 		fl6.flowi6_proto = IPPROTO_IPIP;
1259 		fl6.saddr = key->u.ipv6.src;
1260 		fl6.daddr = key->u.ipv6.dst;
1261 		fl6.flowlabel = key->label;
1262 		dsfield =  key->tos;
1263 	} else {
1264 		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1265 			encap_limit = t->parms.encap_limit;
1266 
1267 		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1268 		fl6.flowi6_proto = IPPROTO_IPIP;
1269 
1270 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1271 			dsfield = ipv4_get_dsfield(iph);
1272 		else
1273 			dsfield = ip6_tclass(t->parms.flowinfo);
1274 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1275 			fl6.flowi6_mark = skb->mark;
1276 		else
1277 			fl6.flowi6_mark = t->parms.fwmark;
1278 	}
1279 
1280 	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1281 	dsfield = INET_ECN_encapsulate(dsfield, ipv4_get_dsfield(iph));
1282 
1283 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1284 		return -1;
1285 
1286 	skb_set_inner_ipproto(skb, IPPROTO_IPIP);
1287 
1288 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1289 			   IPPROTO_IPIP);
1290 	if (err != 0) {
1291 		/* XXX: send ICMP error even if DF is not set. */
1292 		if (err == -EMSGSIZE)
1293 			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
1294 				  htonl(mtu));
1295 		return -1;
1296 	}
1297 
1298 	return 0;
1299 }
1300 
1301 static inline int
1302 ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
1303 {
1304 	struct ip6_tnl *t = netdev_priv(dev);
1305 	struct ipv6hdr *ipv6h;
1306 	int encap_limit = -1;
1307 	__u16 offset;
1308 	struct flowi6 fl6;
1309 	__u8 dsfield;
1310 	__u32 mtu;
1311 	u8 tproto;
1312 	int err;
1313 
1314 	ipv6h = ipv6_hdr(skb);
1315 	tproto = READ_ONCE(t->parms.proto);
1316 	if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
1317 	    ip6_tnl_addr_conflict(t, ipv6h))
1318 		return -1;
1319 
1320 	if (t->parms.collect_md) {
1321 		struct ip_tunnel_info *tun_info;
1322 		const struct ip_tunnel_key *key;
1323 
1324 		tun_info = skb_tunnel_info(skb);
1325 		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
1326 			     ip_tunnel_info_af(tun_info) != AF_INET6))
1327 			return -1;
1328 		key = &tun_info->key;
1329 		memset(&fl6, 0, sizeof(fl6));
1330 		fl6.flowi6_proto = IPPROTO_IPV6;
1331 		fl6.saddr = key->u.ipv6.src;
1332 		fl6.daddr = key->u.ipv6.dst;
1333 		fl6.flowlabel = key->label;
1334 		dsfield = key->tos;
1335 	} else {
1336 		offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
1337 		/* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
1338 		ipv6h = ipv6_hdr(skb);
1339 		if (offset > 0) {
1340 			struct ipv6_tlv_tnl_enc_lim *tel;
1341 
1342 			tel = (void *)&skb_network_header(skb)[offset];
1343 			if (tel->encap_limit == 0) {
1344 				icmpv6_send(skb, ICMPV6_PARAMPROB,
1345 					    ICMPV6_HDR_FIELD, offset + 2);
1346 				return -1;
1347 			}
1348 			encap_limit = tel->encap_limit - 1;
1349 		} else if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) {
1350 			encap_limit = t->parms.encap_limit;
1351 		}
1352 
1353 		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
1354 		fl6.flowi6_proto = IPPROTO_IPV6;
1355 
1356 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
1357 			dsfield = ipv6_get_dsfield(ipv6h);
1358 		else
1359 			dsfield = ip6_tclass(t->parms.flowinfo);
1360 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
1361 			fl6.flowlabel |= ip6_flowlabel(ipv6h);
1362 		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
1363 			fl6.flowi6_mark = skb->mark;
1364 		else
1365 			fl6.flowi6_mark = t->parms.fwmark;
1366 	}
1367 
1368 	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
1369 	dsfield = INET_ECN_encapsulate(dsfield, ipv6_get_dsfield(ipv6h));
1370 
1371 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
1372 		return -1;
1373 
1374 	skb_set_inner_ipproto(skb, IPPROTO_IPV6);
1375 
1376 	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
1377 			   IPPROTO_IPV6);
1378 	if (err != 0) {
1379 		if (err == -EMSGSIZE)
1380 			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
1381 		return -1;
1382 	}
1383 
1384 	return 0;
1385 }
1386 
1387 static netdev_tx_t
1388 ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
1389 {
1390 	struct ip6_tnl *t = netdev_priv(dev);
1391 	struct net_device_stats *stats = &t->dev->stats;
1392 	int ret;
1393 
1394 	if (!pskb_inet_may_pull(skb))
1395 		goto tx_err;
1396 
1397 	switch (skb->protocol) {
1398 	case htons(ETH_P_IP):
1399 		ret = ip4ip6_tnl_xmit(skb, dev);
1400 		break;
1401 	case htons(ETH_P_IPV6):
1402 		ret = ip6ip6_tnl_xmit(skb, dev);
1403 		break;
1404 	default:
1405 		goto tx_err;
1406 	}
1407 
1408 	if (ret < 0)
1409 		goto tx_err;
1410 
1411 	return NETDEV_TX_OK;
1412 
1413 tx_err:
1414 	stats->tx_errors++;
1415 	stats->tx_dropped++;
1416 	kfree_skb(skb);
1417 	return NETDEV_TX_OK;
1418 }
1419 
1420 static void ip6_tnl_link_config(struct ip6_tnl *t)
1421 {
1422 	struct net_device *dev = t->dev;
1423 	struct __ip6_tnl_parm *p = &t->parms;
1424 	struct flowi6 *fl6 = &t->fl.u.ip6;
1425 	int t_hlen;
1426 
1427 	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
1428 	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));
1429 
1430 	/* Set up flowi template */
1431 	fl6->saddr = p->laddr;
1432 	fl6->daddr = p->raddr;
1433 	fl6->flowi6_oif = p->link;
1434 	fl6->flowlabel = 0;
1435 
1436 	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
1437 		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
1438 	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
1439 		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;
1440 
1441 	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
1442 	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);
1443 
1444 	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
1445 		dev->flags |= IFF_POINTOPOINT;
1446 	else
1447 		dev->flags &= ~IFF_POINTOPOINT;
1448 
1449 	t->tun_hlen = 0;
1450 	t->hlen = t->encap_hlen + t->tun_hlen;
1451 	t_hlen = t->hlen + sizeof(struct ipv6hdr);
1452 
1453 	if (p->flags & IP6_TNL_F_CAP_XMIT) {
1454 		int strict = (ipv6_addr_type(&p->raddr) &
1455 			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));
1456 
1457 		struct rt6_info *rt = rt6_lookup(t->net,
1458 						 &p->raddr, &p->laddr,
1459 						 p->link, NULL, strict);
1460 
1461 		if (!rt)
1462 			return;
1463 
1464 		if (rt->dst.dev) {
1465 			dev->hard_header_len = rt->dst.dev->hard_header_len +
1466 				t_hlen;
1467 
1468 			dev->mtu = rt->dst.dev->mtu - t_hlen;
1469 			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1470 				dev->mtu -= 8;
1471 
1472 			if (dev->mtu < IPV6_MIN_MTU)
1473 				dev->mtu = IPV6_MIN_MTU;
1474 		}
1475 		ip6_rt_put(rt);
1476 	}
1477 }
1478 
1479 /**
1480  * ip6_tnl_change - update the tunnel parameters
1481  *   @t: tunnel to be changed
1482  *   @p: tunnel configuration parameters
1483  *
1484  * Description:
1485  *   ip6_tnl_change() updates the tunnel parameters
1486  **/
1487 
1488 static int
1489 ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
1490 {
1491 	t->parms.laddr = p->laddr;
1492 	t->parms.raddr = p->raddr;
1493 	t->parms.flags = p->flags;
1494 	t->parms.hop_limit = p->hop_limit;
1495 	t->parms.encap_limit = p->encap_limit;
1496 	t->parms.flowinfo = p->flowinfo;
1497 	t->parms.link = p->link;
1498 	t->parms.proto = p->proto;
1499 	t->parms.fwmark = p->fwmark;
1500 	dst_cache_reset(&t->dst_cache);
1501 	ip6_tnl_link_config(t);
1502 	return 0;
1503 }
1504 
1505 static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1506 {
1507 	struct net *net = t->net;
1508 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1509 	int err;
1510 
1511 	ip6_tnl_unlink(ip6n, t);
1512 	synchronize_net();
1513 	err = ip6_tnl_change(t, p);
1514 	ip6_tnl_link(ip6n, t);
1515 	netdev_state_change(t->dev);
1516 	return err;
1517 }
1518 
1519 static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
1520 {
1521 	/* for default tnl0 device allow to change only the proto */
1522 	t->parms.proto = p->proto;
1523 	netdev_state_change(t->dev);
1524 	return 0;
1525 }
1526 
1527 static void
1528 ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
1529 {
1530 	p->laddr = u->laddr;
1531 	p->raddr = u->raddr;
1532 	p->flags = u->flags;
1533 	p->hop_limit = u->hop_limit;
1534 	p->encap_limit = u->encap_limit;
1535 	p->flowinfo = u->flowinfo;
1536 	p->link = u->link;
1537 	p->proto = u->proto;
1538 	memcpy(p->name, u->name, sizeof(u->name));
1539 }
1540 
1541 static void
1542 ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
1543 {
1544 	u->laddr = p->laddr;
1545 	u->raddr = p->raddr;
1546 	u->flags = p->flags;
1547 	u->hop_limit = p->hop_limit;
1548 	u->encap_limit = p->encap_limit;
1549 	u->flowinfo = p->flowinfo;
1550 	u->link = p->link;
1551 	u->proto = p->proto;
1552 	memcpy(u->name, p->name, sizeof(u->name));
1553 }
1554 
1555 /**
1556  * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
1557  *   @dev: virtual device associated with tunnel
1558  *   @ifr: parameters passed from userspace
1559  *   @cmd: command to be performed
1560  *
1561  * Description:
1562  *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
1563  *   from userspace.
1564  *
1565  *   The possible commands are the following:
1566  *     %SIOCGETTUNNEL: get tunnel parameters for device
1567  *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
1568  *     %SIOCCHGTUNNEL: change tunnel parameters to those given
1569  *     %SIOCDELTUNNEL: delete tunnel
1570  *
1571  *   The fallback device "ip6tnl0", created during module
1572  *   initialization, can be used for creating other tunnel devices.
1573  *
1574  * Return:
1575  *   0 on success,
1576  *   %-EFAULT if unable to copy data to or from userspace,
1577  *   %-EPERM if current process does not have %CAP_NET_ADMIN set,
1578  *   %-EINVAL if passed tunnel parameters are invalid,
1579  *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
1580  *   %-ENODEV if attempting to change or delete a nonexistent device
1581  **/
1582 
1583 static int
1584 ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1585 {
1586 	int err = 0;
1587 	struct ip6_tnl_parm p;
1588 	struct __ip6_tnl_parm p1;
1589 	struct ip6_tnl *t = netdev_priv(dev);
1590 	struct net *net = t->net;
1591 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1592 
1593 	memset(&p1, 0, sizeof(p1));
1594 
1595 	switch (cmd) {
1596 	case SIOCGETTUNNEL:
1597 		if (dev == ip6n->fb_tnl_dev) {
1598 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
1599 				err = -EFAULT;
1600 				break;
1601 			}
1602 			ip6_tnl_parm_from_user(&p1, &p);
1603 			t = ip6_tnl_locate(net, &p1, 0);
1604 			if (IS_ERR(t))
1605 				t = netdev_priv(dev);
1606 		} else {
1607 			memset(&p, 0, sizeof(p));
1608 		}
1609 		ip6_tnl_parm_to_user(&p, &t->parms);
1610 		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) {
1611 			err = -EFAULT;
1612 		}
1613 		break;
1614 	case SIOCADDTUNNEL:
1615 	case SIOCCHGTUNNEL:
1616 		err = -EPERM;
1617 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1618 			break;
1619 		err = -EFAULT;
1620 		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1621 			break;
1622 		err = -EINVAL;
1623 		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
1624 		    p.proto != 0)
1625 			break;
1626 		ip6_tnl_parm_from_user(&p1, &p);
1627 		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
1628 		if (cmd == SIOCCHGTUNNEL) {
1629 			if (!IS_ERR(t)) {
1630 				if (t->dev != dev) {
1631 					err = -EEXIST;
1632 					break;
1633 				}
1634 			} else
1635 				t = netdev_priv(dev);
1636 			if (dev == ip6n->fb_tnl_dev)
1637 				err = ip6_tnl0_update(t, &p1);
1638 			else
1639 				err = ip6_tnl_update(t, &p1);
1640 		}
1641 		if (!IS_ERR(t)) {
1642 			err = 0;
1643 			ip6_tnl_parm_to_user(&p, &t->parms);
1644 			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1645 				err = -EFAULT;
1646 
1647 		} else {
1648 			err = PTR_ERR(t);
1649 		}
1650 		break;
1651 	case SIOCDELTUNNEL:
1652 		err = -EPERM;
1653 		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1654 			break;
1655 
1656 		if (dev == ip6n->fb_tnl_dev) {
1657 			err = -EFAULT;
1658 			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
1659 				break;
1660 			err = -ENOENT;
1661 			ip6_tnl_parm_from_user(&p1, &p);
1662 			t = ip6_tnl_locate(net, &p1, 0);
1663 			if (IS_ERR(t))
1664 				break;
1665 			err = -EPERM;
1666 			if (t->dev == ip6n->fb_tnl_dev)
1667 				break;
1668 			dev = t->dev;
1669 		}
1670 		err = 0;
1671 		unregister_netdevice(dev);
1672 		break;
1673 	default:
1674 		err = -EINVAL;
1675 	}
1676 	return err;
1677 }
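/* A rough userspace sketch of the ioctl interface documented above, the
 * way a tool such as iproute2 drives it (illustrative only; the device
 * names, addresses and missing error handling are placeholders, not
 * taken from this file):
 *
 *	struct ip6_tnl_parm p = { .proto = IPPROTO_IPV6 };
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	strcpy(p.name, "mytun6");
 *	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *	strcpy(ifr.ifr_name, "ip6tnl0");	// fallback device
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);		// creates "mytun6"
 */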
1678 
1679 /**
1680  * ip6_tnl_change_mtu - change mtu manually for tunnel device
1681  *   @dev: virtual device associated with tunnel
1682  *   @new_mtu: the new mtu
1683  *
1684  * Return:
1685  *   0 on success,
1686  *   %-EINVAL if mtu too small
1687  **/
1688 
1689 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1690 {
1691 	struct ip6_tnl *tnl = netdev_priv(dev);
1692 
1693 	if (tnl->parms.proto == IPPROTO_IPV6) {
1694 		if (new_mtu < IPV6_MIN_MTU)
1695 			return -EINVAL;
1696 	} else {
1697 		if (new_mtu < ETH_MIN_MTU)
1698 			return -EINVAL;
1699 	}
1700 	if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
1701 		if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
1702 			return -EINVAL;
1703 	} else {
1704 		if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
1705 			return -EINVAL;
1706 	}
1707 	dev->mtu = new_mtu;
1708 	return 0;
1709 }
1710 EXPORT_SYMBOL(ip6_tnl_change_mtu);
1711 
1712 int ip6_tnl_get_iflink(const struct net_device *dev)
1713 {
1714 	struct ip6_tnl *t = netdev_priv(dev);
1715 
1716 	return t->parms.link;
1717 }
1718 EXPORT_SYMBOL(ip6_tnl_get_iflink);
1719 
1720 int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
1721 			  unsigned int num)
1722 {
1723 	if (num >= MAX_IPTUN_ENCAP_OPS)
1724 		return -ERANGE;
1725 
1726 	return !cmpxchg((const struct ip6_tnl_encap_ops **)
1727 			&ip6tun_encaps[num],
1728 			NULL, ops) ? 0 : -1;
1729 }
1730 EXPORT_SYMBOL(ip6_tnl_encap_add_ops);
1731 
1732 int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
1733 			  unsigned int num)
1734 {
1735 	int ret;
1736 
1737 	if (num >= MAX_IPTUN_ENCAP_OPS)
1738 		return -ERANGE;
1739 
1740 	ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
1741 		       &ip6tun_encaps[num],
1742 		       ops, NULL) == ops) ? 0 : -1;
1743 
1744 	synchronize_net();
1745 
1746 	return ret;
1747 }
1748 EXPORT_SYMBOL(ip6_tnl_encap_del_ops);
1749 
1750 int ip6_tnl_encap_setup(struct ip6_tnl *t,
1751 			struct ip_tunnel_encap *ipencap)
1752 {
1753 	int hlen;
1754 
1755 	memset(&t->encap, 0, sizeof(t->encap));
1756 
1757 	hlen = ip6_encap_hlen(ipencap);
1758 	if (hlen < 0)
1759 		return hlen;
1760 
1761 	t->encap.type = ipencap->type;
1762 	t->encap.sport = ipencap->sport;
1763 	t->encap.dport = ipencap->dport;
1764 	t->encap.flags = ipencap->flags;
1765 
1766 	t->encap_hlen = hlen;
1767 	t->hlen = t->encap_hlen + t->tun_hlen;
1768 
1769 	return 0;
1770 }
1771 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
1772 
1773 static const struct net_device_ops ip6_tnl_netdev_ops = {
1774 	.ndo_init	= ip6_tnl_dev_init,
1775 	.ndo_uninit	= ip6_tnl_dev_uninit,
1776 	.ndo_start_xmit = ip6_tnl_start_xmit,
1777 	.ndo_do_ioctl	= ip6_tnl_ioctl,
1778 	.ndo_change_mtu = ip6_tnl_change_mtu,
1779 	.ndo_get_stats	= ip6_get_stats,
1780 	.ndo_get_iflink = ip6_tnl_get_iflink,
1781 };
1782 
1783 #define IPXIPX_FEATURES (NETIF_F_SG |		\
1784 			 NETIF_F_FRAGLIST |	\
1785 			 NETIF_F_HIGHDMA |	\
1786 			 NETIF_F_GSO_SOFTWARE |	\
1787 			 NETIF_F_HW_CSUM)
1788 
1789 /**
1790  * ip6_tnl_dev_setup - setup virtual tunnel device
1791  *   @dev: virtual device associated with tunnel
1792  *
1793  * Description:
1794  *   Initialize function pointers and device parameters
1795  **/
1796 
1797 static void ip6_tnl_dev_setup(struct net_device *dev)
1798 {
1799 	dev->netdev_ops = &ip6_tnl_netdev_ops;
1800 	dev->needs_free_netdev = true;
1801 	dev->priv_destructor = ip6_dev_free;
1802 
1803 	dev->type = ARPHRD_TUNNEL6;
1804 	dev->flags |= IFF_NOARP;
1805 	dev->addr_len = sizeof(struct in6_addr);
1806 	dev->features |= NETIF_F_LLTX;
1807 	netif_keep_dst(dev);
1808 
1809 	dev->features		|= IPXIPX_FEATURES;
1810 	dev->hw_features	|= IPXIPX_FEATURES;
1811 
1812 	/* This perm addr will be used as interface identifier by IPv6 */
1813 	dev->addr_assign_type = NET_ADDR_RANDOM;
1814 	eth_random_addr(dev->perm_addr);
1815 }
1816 
1817 
1818 /**
1819  * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
1820  *   @dev: virtual device associated with tunnel
1821  **/
1822 
1823 static inline int
1824 ip6_tnl_dev_init_gen(struct net_device *dev)
1825 {
1826 	struct ip6_tnl *t = netdev_priv(dev);
1827 	int ret;
1828 	int t_hlen;
1829 
1830 	t->dev = dev;
1831 	t->net = dev_net(dev);
1832 	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1833 	if (!dev->tstats)
1834 		return -ENOMEM;
1835 
1836 	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
1837 	if (ret)
1838 		goto free_stats;
1839 
1840 	ret = gro_cells_init(&t->gro_cells, dev);
1841 	if (ret)
1842 		goto destroy_dst;
1843 
1844 	t->tun_hlen = 0;
1845 	t->hlen = t->encap_hlen + t->tun_hlen;
1846 	t_hlen = t->hlen + sizeof(struct ipv6hdr);
1847 
1848 	dev->type = ARPHRD_TUNNEL6;
1849 	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1850 	dev->mtu = ETH_DATA_LEN - t_hlen;
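	/* Unless the encap limit is ignored, reserve 8 more bytes for the
	 * destination options header carrying the tunnel encapsulation
	 * limit option (RFC 2473).
	 */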
1851 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1852 		dev->mtu -= 8;
1853 	dev->min_mtu = ETH_MIN_MTU;
1854 	dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
1855 
1856 	return 0;
1857 
1858 destroy_dst:
1859 	dst_cache_destroy(&t->dst_cache);
1860 free_stats:
1861 	free_percpu(dev->tstats);
1862 	dev->tstats = NULL;
1863 
1864 	return ret;
1865 }
1866 
1867 /**
1868  * ip6_tnl_dev_init - initializer for all non-fallback tunnel devices
1869  *   @dev: virtual device associated with tunnel
1870  **/
1871 
1872 static int ip6_tnl_dev_init(struct net_device *dev)
1873 {
1874 	struct ip6_tnl *t = netdev_priv(dev);
1875 	int err = ip6_tnl_dev_init_gen(dev);
1876 
1877 	if (err)
1878 		return err;
1879 	ip6_tnl_link_config(t);
1880 	if (t->parms.collect_md)
1881 		netif_keep_dst(dev);
1882 	return 0;
1883 }
1884 
1885 /**
1886  * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
1887  *   @dev: fallback device
1888  *
1889  * Return: 0
1890  **/
1891 
1892 static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1893 {
1894 	struct ip6_tnl *t = netdev_priv(dev);
1895 	struct net *net = dev_net(dev);
1896 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1897 
1898 	t->parms.proto = IPPROTO_IPV6;
1899 	dev_hold(dev);
1900 
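	/* Publish the fallback tunnel in the wildcard slot so lookups that
	 * match no configured local/remote pair land on ip6tnl0.
	 */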
1901 	rcu_assign_pointer(ip6n->tnls_wc[0], t);
1902 	return 0;
1903 }
1904 
1905 static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
1906 			    struct netlink_ext_ack *extack)
1907 {
1908 	u8 proto;
1909 
1910 	if (!data || !data[IFLA_IPTUN_PROTO])
1911 		return 0;
1912 
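	/* Only IPv6-in-IPv6, IPv4-in-IPv6 or 0 ("any") payloads are valid. */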
1913 	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1914 	if (proto != IPPROTO_IPV6 &&
1915 	    proto != IPPROTO_IPIP &&
1916 	    proto != 0)
1917 		return -EINVAL;
1918 
1919 	return 0;
1920 }
1921 
1922 static void ip6_tnl_netlink_parms(struct nlattr *data[],
1923 				  struct __ip6_tnl_parm *parms)
1924 {
1925 	memset(parms, 0, sizeof(*parms));
1926 
1927 	if (!data)
1928 		return;
1929 
1930 	if (data[IFLA_IPTUN_LINK])
1931 		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
1932 
1933 	if (data[IFLA_IPTUN_LOCAL])
1934 		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);
1935 
1936 	if (data[IFLA_IPTUN_REMOTE])
1937 		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);
1938 
1939 	if (data[IFLA_IPTUN_TTL])
1940 		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);
1941 
1942 	if (data[IFLA_IPTUN_ENCAP_LIMIT])
1943 		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);
1944 
1945 	if (data[IFLA_IPTUN_FLOWINFO])
1946 		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);
1947 
1948 	if (data[IFLA_IPTUN_FLAGS])
1949 		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);
1950 
1951 	if (data[IFLA_IPTUN_PROTO])
1952 		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
1953 
1954 	if (data[IFLA_IPTUN_COLLECT_METADATA])
1955 		parms->collect_md = true;
1956 
1957 	if (data[IFLA_IPTUN_FWMARK])
1958 		parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
1959 }
1960 
1961 static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
1962 					struct ip_tunnel_encap *ipencap)
1963 {
1964 	bool ret = false;
1965 
1966 	memset(ipencap, 0, sizeof(*ipencap));
1967 
1968 	if (!data)
1969 		return ret;
1970 
1971 	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
1972 		ret = true;
1973 		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
1974 	}
1975 
1976 	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
1977 		ret = true;
1978 		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
1979 	}
1980 
1981 	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
1982 		ret = true;
1983 		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
1984 	}
1985 
1986 	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
1987 		ret = true;
1988 		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
1989 	}
1990 
1991 	return ret;
1992 }
1993 
1994 static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
1995 			   struct nlattr *tb[], struct nlattr *data[],
1996 			   struct netlink_ext_ack *extack)
1997 {
1998 	struct net *net = dev_net(dev);
1999 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2000 	struct ip_tunnel_encap ipencap;
2001 	struct ip6_tnl *nt, *t;
2002 	int err;
2003 
2004 	nt = netdev_priv(dev);
2005 
2006 	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
2007 		err = ip6_tnl_encap_setup(nt, &ipencap);
2008 		if (err < 0)
2009 			return err;
2010 	}
2011 
2012 	ip6_tnl_netlink_parms(data, &nt->parms);
2013 
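	/* At most one collect_md (external/metadata mode) tunnel may exist
	 * per netns; regular tunnels must not duplicate an existing
	 * local/remote pair.
	 */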
2014 	if (nt->parms.collect_md) {
2015 		if (rtnl_dereference(ip6n->collect_md_tun))
2016 			return -EEXIST;
2017 	} else {
2018 		t = ip6_tnl_locate(net, &nt->parms, 0);
2019 		if (!IS_ERR(t))
2020 			return -EEXIST;
2021 	}
2022 
2023 	err = ip6_tnl_create2(dev);
2024 	if (!err && tb[IFLA_MTU])
2025 		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
2026 
2027 	return err;
2028 }
2029 
2030 static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
2031 			      struct nlattr *data[],
2032 			      struct netlink_ext_ack *extack)
2033 {
2034 	struct ip6_tnl *t = netdev_priv(dev);
2035 	struct __ip6_tnl_parm p;
2036 	struct net *net = t->net;
2037 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2038 	struct ip_tunnel_encap ipencap;
2039 
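	/* The per-netns fallback device cannot be reconfigured, and
	 * collect_md cannot be toggled after creation.
	 */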
2040 	if (dev == ip6n->fb_tnl_dev)
2041 		return -EINVAL;
2042 
2043 	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
2044 		int err = ip6_tnl_encap_setup(t, &ipencap);
2045 
2046 		if (err < 0)
2047 			return err;
2048 	}
2049 	ip6_tnl_netlink_parms(data, &p);
2050 	if (p.collect_md)
2051 		return -EINVAL;
2052 
2053 	t = ip6_tnl_locate(net, &p, 0);
2054 	if (!IS_ERR(t)) {
2055 		if (t->dev != dev)
2056 			return -EEXIST;
2057 	} else
2058 		t = netdev_priv(dev);
2059 
2060 	return ip6_tnl_update(t, &p);
2061 }
2062 
2063 static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
2064 {
2065 	struct net *net = dev_net(dev);
2066 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2067 
2068 	if (dev != ip6n->fb_tnl_dev)
2069 		unregister_netdevice_queue(dev, head);
2070 }
2071 
2072 static size_t ip6_tnl_get_size(const struct net_device *dev)
2073 {
2074 	return
2075 		/* IFLA_IPTUN_LINK */
2076 		nla_total_size(4) +
2077 		/* IFLA_IPTUN_LOCAL */
2078 		nla_total_size(sizeof(struct in6_addr)) +
2079 		/* IFLA_IPTUN_REMOTE */
2080 		nla_total_size(sizeof(struct in6_addr)) +
2081 		/* IFLA_IPTUN_TTL */
2082 		nla_total_size(1) +
2083 		/* IFLA_IPTUN_ENCAP_LIMIT */
2084 		nla_total_size(1) +
2085 		/* IFLA_IPTUN_FLOWINFO */
2086 		nla_total_size(4) +
2087 		/* IFLA_IPTUN_FLAGS */
2088 		nla_total_size(4) +
2089 		/* IFLA_IPTUN_PROTO */
2090 		nla_total_size(1) +
2091 		/* IFLA_IPTUN_ENCAP_TYPE */
2092 		nla_total_size(2) +
2093 		/* IFLA_IPTUN_ENCAP_FLAGS */
2094 		nla_total_size(2) +
2095 		/* IFLA_IPTUN_ENCAP_SPORT */
2096 		nla_total_size(2) +
2097 		/* IFLA_IPTUN_ENCAP_DPORT */
2098 		nla_total_size(2) +
2099 		/* IFLA_IPTUN_COLLECT_METADATA */
2100 		nla_total_size(0) +
2101 		/* IFLA_IPTUN_FWMARK */
2102 		nla_total_size(4) +
2103 		0;
2104 }
2105 
2106 static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
2107 {
2108 	struct ip6_tnl *tunnel = netdev_priv(dev);
2109 	struct __ip6_tnl_parm *parm = &tunnel->parms;
2110 
2111 	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
2112 	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
2113 	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
2114 	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
2115 	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
2116 	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
2117 	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
2118 	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
2119 	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
2120 		goto nla_put_failure;
2121 
2122 	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
2123 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
2124 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
2125 	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
2126 		goto nla_put_failure;
2127 
2128 	if (parm->collect_md)
2129 		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
2130 			goto nla_put_failure;
2131 
2132 	return 0;
2133 
2134 nla_put_failure:
2135 	return -EMSGSIZE;
2136 }
2137 
2138 struct net *ip6_tnl_get_link_net(const struct net_device *dev)
2139 {
2140 	struct ip6_tnl *tunnel = netdev_priv(dev);
2141 
2142 	return tunnel->net;
2143 }
2144 EXPORT_SYMBOL(ip6_tnl_get_link_net);
2145 
2146 static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
2147 	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
2148 	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
2149 	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
2150 	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
2151 	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
2152 	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
2153 	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
2154 	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
2155 	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
2156 	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
2157 	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
2158 	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
2159 	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
2160 	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
2161 };
2162 
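/* Glue for the "ip6tnl" rtnetlink link type; for example (illustrative
 * addresses), iproute2 creates such a device with:
 *   ip link add name ip6tnl1 type ip6tnl mode ip6ip6 \
 *      local 2001:db8::1 remote 2001:db8::2
 */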
2163 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
2164 	.kind		= "ip6tnl",
2165 	.maxtype	= IFLA_IPTUN_MAX,
2166 	.policy		= ip6_tnl_policy,
2167 	.priv_size	= sizeof(struct ip6_tnl),
2168 	.setup		= ip6_tnl_dev_setup,
2169 	.validate	= ip6_tnl_validate,
2170 	.newlink	= ip6_tnl_newlink,
2171 	.changelink	= ip6_tnl_changelink,
2172 	.dellink	= ip6_tnl_dellink,
2173 	.get_size	= ip6_tnl_get_size,
2174 	.fill_info	= ip6_tnl_fill_info,
2175 	.get_link_net	= ip6_tnl_get_link_net,
2176 };
2177 
2178 static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
2179 	.handler	= ip4ip6_rcv,
2180 	.err_handler	= ip4ip6_err,
2181 	.priority	=	1,
2182 };
2183 
2184 static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
2185 	.handler	= ip6ip6_rcv,
2186 	.err_handler	= ip6ip6_err,
2187 	.priority	=	1,
2188 };
2189 
2190 static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head *list)
2191 {
2192 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2193 	struct net_device *dev, *aux;
2194 	int h;
2195 	struct ip6_tnl *t;
2196 
2197 	for_each_netdev_safe(net, dev, aux)
2198 		if (dev->rtnl_link_ops == &ip6_link_ops)
2199 			unregister_netdevice_queue(dev, list);
2200 
2201 	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
2202 		t = rtnl_dereference(ip6n->tnls_r_l[h]);
2203 		while (t) {
2204 			/* If dev is in the same netns, it has already
2205 			 * been added to the list by the previous loop.
2206 			 */
2207 			if (!net_eq(dev_net(t->dev), net))
2208 				unregister_netdevice_queue(t->dev, list);
2209 			t = rtnl_dereference(t->next);
2210 		}
2211 	}
2212 }
2213 
2214 static int __net_init ip6_tnl_init_net(struct net *net)
2215 {
2216 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
2217 	struct ip6_tnl *t = NULL;
2218 	int err;
2219 
2220 	ip6n->tnls[0] = ip6n->tnls_wc;
2221 	ip6n->tnls[1] = ip6n->tnls_r_l;
2222 
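	/* When the fb_tunnels_only_for_init_net sysctl restricts fallback
	 * tunnels, the affected namespaces skip creating ip6tnl0 here.
	 */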
2223 	if (!net_has_fallback_tunnels(net))
2224 		return 0;
2225 	err = -ENOMEM;
2226 	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
2227 					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);
2228 
2229 	if (!ip6n->fb_tnl_dev)
2230 		goto err_alloc_dev;
2231 	dev_net_set(ip6n->fb_tnl_dev, net);
2232 	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
2233 	/* FB netdevice is special: we have one, and only one per netns.
2234 	 * Allowing it to be moved to another netns is clearly unsafe.
2235 	 */
2236 	ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;
2237 
2238 	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
2239 	if (err < 0)
2240 		goto err_register;
2241 
2242 	err = register_netdev(ip6n->fb_tnl_dev);
2243 	if (err < 0)
2244 		goto err_register;
2245 
2246 	t = netdev_priv(ip6n->fb_tnl_dev);
2247 
2248 	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
2249 	return 0;
2250 
2251 err_register:
2252 	free_netdev(ip6n->fb_tnl_dev);
2253 err_alloc_dev:
2254 	return err;
2255 }
2256 
2257 static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
2258 {
2259 	struct net *net;
2260 	LIST_HEAD(list);
2261 
2262 	rtnl_lock();
2263 	list_for_each_entry(net, net_list, exit_list)
2264 		ip6_tnl_destroy_tunnels(net, &list);
2265 	unregister_netdevice_many(&list);
2266 	rtnl_unlock();
2267 }
2268 
2269 static struct pernet_operations ip6_tnl_net_ops = {
2270 	.init = ip6_tnl_init_net,
2271 	.exit_batch = ip6_tnl_exit_batch_net,
2272 	.id   = &ip6_tnl_net_id,
2273 	.size = sizeof(struct ip6_tnl_net),
2274 };
2275 
2276 /**
2277  * ip6_tunnel_init - register protocol and reserve needed resources
2278  *
2279  * Return: 0 on success
2280  **/
2281 
2282 static int __init ip6_tunnel_init(void)
2283 {
2284 	int  err;
2285 
2286 	if (!ipv6_mod_enabled())
2287 		return -EOPNOTSUPP;
2288 
2289 	err = register_pernet_device(&ip6_tnl_net_ops);
2290 	if (err < 0)
2291 		goto out_pernet;
2292 
2293 	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
2294 	if (err < 0) {
2295 		pr_err("%s: can't register ip4ip6\n", __func__);
2296 		goto out_ip4ip6;
2297 	}
2298 
2299 	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
2300 	if (err < 0) {
2301 		pr_err("%s: can't register ip6ip6\n", __func__);
2302 		goto out_ip6ip6;
2303 	}
2304 	err = rtnl_link_register(&ip6_link_ops);
2305 	if (err < 0)
2306 		goto rtnl_link_failed;
2307 
2308 	return 0;
2309 
2310 rtnl_link_failed:
2311 	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
2312 out_ip6ip6:
2313 	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
2314 out_ip4ip6:
2315 	unregister_pernet_device(&ip6_tnl_net_ops);
2316 out_pernet:
2317 	return err;
2318 }
2319 
2320 /**
2321  * ip6_tunnel_cleanup - free resources and unregister protocol
2322  **/
2323 
2324 static void __exit ip6_tunnel_cleanup(void)
2325 {
2326 	rtnl_link_unregister(&ip6_link_ops);
2327 	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
2328 		pr_info("%s: can't deregister ip4ip6\n", __func__);
2329 
2330 	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
2331 		pr_info("%s: can't deregister ip6ip6\n", __func__);
2332 
2333 	unregister_pernet_device(&ip6_tnl_net_ops);
2334 }
2335 
2336 module_init(ip6_tunnel_init);
2337 module_exit(ip6_tunnel_cleanup);
2338