// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPv6 tunneling device
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Ville Nuorvala		<vnuorval@tcs.hut.fi>
 *	Yasuyuki Kozakai	<kozakai@linux-ipv6.org>
 *
 *	Based on:
 *	linux/net/ipv6/sit.c and linux/net/ipv4/ipip.c
 *
 *	RFC 2473
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sockios.h>
#include <linux/icmp.h>
#include <linux/if.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/init.h>
#include <linux/route.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv6.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/etherdevice.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/ip6_tunnel.h>
#include <net/xfrm.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/dst_metadata.h>

MODULE_AUTHOR("Ville Nuorvala");
MODULE_DESCRIPTION("IPv6 tunneling device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("ip6tnl");
MODULE_ALIAS_NETDEV("ip6tnl0");

#define IP6_TUNNEL_HASH_SIZE_SHIFT  5
#define IP6_TUNNEL_HASH_SIZE (1 << IP6_TUNNEL_HASH_SIZE_SHIFT)

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
{
	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);

	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
}
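
/*
 * Illustrative note (added commentary, not in the original file): HASH()
 * folds the XOR of the two per-address hashes down to
 * IP6_TUNNEL_HASH_SIZE_SHIFT = 5 bits, so bucket indices span 0..31 and
 * index tnls_r_l[] below.  Tunnels whose end-points collide on those five
 * bits simply share a bucket and are told apart by the full address
 * comparison in ip6_tnl_lookup().
 */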

static int ip6_tnl_dev_init(struct net_device *dev);
static void ip6_tnl_dev_setup(struct net_device *dev);
static struct rtnl_link_ops ip6_link_ops __read_mostly;

static unsigned int ip6_tnl_net_id __read_mostly;
struct ip6_tnl_net {
	/* the IPv6 tunnel fallback device */
	struct net_device *fb_tnl_dev;
	/* lists for storing tunnels in use */
	struct ip6_tnl __rcu *tnls_r_l[IP6_TUNNEL_HASH_SIZE];
	struct ip6_tnl __rcu *tnls_wc[1];
	struct ip6_tnl __rcu **tnls[2];
	struct ip6_tnl __rcu *collect_md_tun;
};

static inline int ip6_tnl_mpls_supported(void)
{
	return IS_ENABLED(CONFIG_MPLS);
}

static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
	struct pcpu_sw_netstats tmp, sum = { 0 };
	int i;

	for_each_possible_cpu(i) {
		unsigned int start;
		const struct pcpu_sw_netstats *tstats =
						   per_cpu_ptr(dev->tstats, i);

		do {
			start = u64_stats_fetch_begin_irq(&tstats->syncp);
			tmp.rx_packets = tstats->rx_packets;
			tmp.rx_bytes = tstats->rx_bytes;
			tmp.tx_packets = tstats->tx_packets;
			tmp.tx_bytes = tstats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&tstats->syncp, start));

		sum.rx_packets += tmp.rx_packets;
		sum.rx_bytes += tmp.rx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.tx_bytes += tmp.tx_bytes;
	}
	dev->stats.rx_packets = sum.rx_packets;
	dev->stats.rx_bytes = sum.rx_bytes;
	dev->stats.tx_packets = sum.tx_packets;
	dev->stats.tx_bytes = sum.tx_bytes;
	return &dev->stats;
}

#define for_each_ip6_tunnel_rcu(start) \
	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))

/**
 * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
 * @net: network namespace
 * @link: ifindex of underlying interface
 * @remote: the address of the tunnel exit-point
 * @local: the address of the tunnel entry-point
 *
 * Return:
 *   tunnel matching given end-points if found,
 *   else fallback tunnel if its device is up,
 *   else %NULL
 **/

static struct ip6_tnl *
ip6_tnl_lookup(struct net *net, int link,
	       const struct in6_addr *remote, const struct in6_addr *local)
{
	unsigned int hash = HASH(remote, local);
	struct ip6_tnl *t, *cand = NULL;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct in6_addr any;

	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (link == t->parms.link)
			return t;
		else
			cand = t;
	}

	memset(&any, 0, sizeof(any));
	hash = HASH(&any, local);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
		    !ipv6_addr_any(&t->parms.raddr) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (link == t->parms.link)
			return t;
		else if (!cand)
			cand = t;
	}

	hash = HASH(remote, &any);
	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
		    !ipv6_addr_any(&t->parms.laddr) ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (link == t->parms.link)
			return t;
		else if (!cand)
			cand = t;
	}

	if (cand)
		return cand;

	t = rcu_dereference(ip6n->collect_md_tun);
	if (t && t->dev->flags & IFF_UP)
		return t;

	t = rcu_dereference(ip6n->tnls_wc[0]);
	if (t && (t->dev->flags & IFF_UP))
		return t;

	return NULL;
}
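
/*
 * Illustrative note (added commentary, not in the original file): the lookup
 * above prefers, in order, (1) an exact (remote, local) match, (2) a
 * local-only match with a wildcard remote, (3) a remote-only match with a
 * wildcard local -- in each case favouring a tunnel bound to the same
 * underlying link -- then (4) a collect_md tunnel, and finally (5) the
 * fallback device ip6tnl0, if its device is up.
 */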

/**
 * ip6_tnl_bucket - get head of list matching given tunnel parameters
 * @ip6n: per-namespace ip6_tnl state holding the tunnel hash tables
 * @p: parameters containing tunnel end-points
 *
 * Description:
 *   ip6_tnl_bucket() returns the head of the list matching the
 *   &struct in6_addr entries laddr and raddr in @p.
 *
 * Return: head of IPv6 tunnel list
 **/

static struct ip6_tnl __rcu **
ip6_tnl_bucket(struct ip6_tnl_net *ip6n, const struct __ip6_tnl_parm *p)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	unsigned int h = 0;
	int prio = 0;

	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
		prio = 1;
		h = HASH(remote, local);
	}
	return &ip6n->tnls[prio][h];
}

/**
 * ip6_tnl_link - add tunnel to hash table
 * @ip6n: per-namespace ip6_tnl state
 * @t: tunnel to be added
 **/

static void
ip6_tnl_link(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp = ip6_tnl_bucket(ip6n, &t->parms);

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, t);
	rcu_assign_pointer(t->next, rtnl_dereference(*tp));
	rcu_assign_pointer(*tp, t);
}

/**
 * ip6_tnl_unlink - remove tunnel from hash table
 * @ip6n: per-namespace ip6_tnl state
 * @t: tunnel to be removed
 **/

static void
ip6_tnl_unlink(struct ip6_tnl_net *ip6n, struct ip6_tnl *t)
{
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *iter;

	if (t->parms.collect_md)
		rcu_assign_pointer(ip6n->collect_md_tun, NULL);

	for (tp = ip6_tnl_bucket(ip6n, &t->parms);
	     (iter = rtnl_dereference(*tp)) != NULL;
	     tp = &iter->next) {
		if (t == iter) {
			rcu_assign_pointer(*tp, t->next);
			break;
		}
	}
}

static void ip6_dev_free(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);

	gro_cells_destroy(&t->gro_cells);
	dst_cache_destroy(&t->dst_cache);
	free_percpu(dev->tstats);
}

static int ip6_tnl_create2(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	dev->rtnl_link_ops = &ip6_link_ops;
	err = register_netdevice(dev);
	if (err < 0)
		goto out;

	strcpy(t->parms.name, dev->name);

	ip6_tnl_link(ip6n, t);
	return 0;

out:
	return err;
}

/**
 * ip6_tnl_create - create a new tunnel
 * @net: network namespace
 * @p: tunnel parameters
 *
 * Description:
 *   Create tunnel matching given parameters.
 *
 * Return:
 *   created tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
{
	struct net_device *dev;
	struct ip6_tnl *t;
	char name[IFNAMSIZ];
	int err = -E2BIG;

	if (p->name[0]) {
		if (!dev_valid_name(p->name))
			goto failed;
		strlcpy(name, p->name, IFNAMSIZ);
	} else {
		sprintf(name, "ip6tnl%%d");
	}
	err = -ENOMEM;
	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
			   ip6_tnl_dev_setup);
	if (!dev)
		goto failed;

	dev_net_set(dev, net);

	t = netdev_priv(dev);
	t->parms = *p;
	t->net = dev_net(dev);
	err = ip6_tnl_create2(dev);
	if (err < 0)
		goto failed_free;

	return t;

failed_free:
	free_netdev(dev);
failed:
	return ERR_PTR(err);
}

/**
 * ip6_tnl_locate - find or create tunnel matching given parameters
 * @net: network namespace
 * @p: tunnel parameters
 * @create: != 0 if allowed to create new tunnel if no match found
 *
 * Description:
 *   ip6_tnl_locate() first tries to locate an existing tunnel
 *   based on @p. If this is unsuccessful, but @create is set, a new
 *   tunnel device is created and registered for use.
 *
 * Return:
 *   matching tunnel or error pointer
 **/

static struct ip6_tnl *ip6_tnl_locate(struct net *net,
		struct __ip6_tnl_parm *p, int create)
{
	const struct in6_addr *remote = &p->raddr;
	const struct in6_addr *local = &p->laddr;
	struct ip6_tnl __rcu **tp;
	struct ip6_tnl *t;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	for (tp = ip6_tnl_bucket(ip6n, p);
	     (t = rtnl_dereference(*tp)) != NULL;
	     tp = &t->next) {
		if (ipv6_addr_equal(local, &t->parms.laddr) &&
		    ipv6_addr_equal(remote, &t->parms.raddr) &&
		    p->link == t->parms.link) {
			if (create)
				return ERR_PTR(-EEXIST);

			return t;
		}
	}
	if (!create)
		return ERR_PTR(-ENODEV);
	return ip6_tnl_create(net, p);
}

/**
 * ip6_tnl_dev_uninit - tunnel device uninitializer
 * @dev: the device to be destroyed
 *
 * Description:
 *   ip6_tnl_dev_uninit() removes tunnel from its list
 **/

static void
ip6_tnl_dev_uninit(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev == ip6n->fb_tnl_dev)
		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
	else
		ip6_tnl_unlink(ip6n, t);
	dst_cache_reset(&t->dst_cache);
	dev_put(dev);
}

/**
 * ip6_tnl_parse_tlv_enc_lim - handle encapsulation limit option
 * @skb: received socket buffer
 * @raw: pointer to the outer IPv6 header inside @skb's data
 *
 * Return:
 *   0 if none was found,
 *   else index to encapsulation limit
 **/

__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
	unsigned int nhoff = raw - skb->data;
	unsigned int off = nhoff + sizeof(*ipv6h);
	u8 next, nexthdr = ipv6h->nexthdr;

	while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
		struct ipv6_opt_hdr *hdr;
		u16 optlen;

		if (!pskb_may_pull(skb, off + sizeof(*hdr)))
			break;

		hdr = (struct ipv6_opt_hdr *)(skb->data + off);
		if (nexthdr == NEXTHDR_FRAGMENT) {
			struct frag_hdr *frag_hdr = (struct frag_hdr *)hdr;

			if (frag_hdr->frag_off)
				break;
			optlen = 8;
		} else if (nexthdr == NEXTHDR_AUTH) {
			optlen = ipv6_authlen(hdr);
		} else {
			optlen = ipv6_optlen(hdr);
		}
		/* cache hdr->nexthdr, since pskb_may_pull() might
		 * invalidate hdr
		 */
		next = hdr->nexthdr;
		if (nexthdr == NEXTHDR_DEST) {
			u16 i = 2;

			/* Remember: hdr is no longer valid at this point. */
			if (!pskb_may_pull(skb, off + optlen))
				break;

			while (1) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				/* No more room for encapsulation limit */
				if (i + sizeof(*tel) > optlen)
					break;

				tel = (struct ipv6_tlv_tnl_enc_lim *)(skb->data + off + i);
				/* return index of option if found and valid */
				if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
				    tel->length == 1)
					return i + off - nhoff;
				/* else jump to next option */
				if (tel->type)
					i += tel->length + 2;
				else
					i++;
			}
		}
		nexthdr = next;
		off += optlen;
	}
	return 0;
}
EXPORT_SYMBOL(ip6_tnl_parse_tlv_enc_lim);
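
/*
 * Illustrative sketch (added commentary, not in the original file): a
 * destination options header carrying only the tunnel encapsulation limit
 * looks like this on the wire:
 *
 *	byte 0: next header
 *	byte 1: hdr ext len (0 => 8 bytes total)
 *	byte 2: option type IPV6_TLV_TNL_ENCAP_LIMIT
 *	byte 3: option length (1)
 *	byte 4: encapsulation limit value
 *	bytes 5-7: PadN option (type, length 1, one pad byte)
 *
 * With this header immediately following the outer IPv6 header,
 * ip6_tnl_parse_tlv_enc_lim() returns the offset of byte 2 relative to the
 * outer header, i.e. 40 + 2 = 42.
 */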

/**
 * ip6_tnl_err - tunnel error handler
 * @skb: received socket buffer containing the ICMPv6 error
 * @ipproto: protocol the erring tunnel is expected to carry (0 matches any)
 * @opt: IPv6 parameter block of the received packet
 * @type: ICMPv6 error type; updated in place with the type to relay
 * @code: ICMPv6 error code; updated in place with the code to relay
 * @msg: set to 1 if an error should be relayed to the inner packet's sender
 * @info: ICMPv6 info field (e.g. MTU); updated in place with the value to relay
 * @offset: offset of the inner packet inside @skb
 *
 * Description:
 *   ip6_tnl_err() should handle errors in the tunnel according
 *   to the specifications in RFC 2473.
 **/

static int
ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
	    u8 *type, u8 *code, int *msg, __u32 *info, int offset)
{
	const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)skb->data;
	struct net *net = dev_net(skb->dev);
	u8 rel_type = ICMPV6_DEST_UNREACH;
	u8 rel_code = ICMPV6_ADDR_UNREACH;
	__u32 rel_info = 0;
	struct ip6_tnl *t;
	int err = -ENOENT;
	int rel_msg = 0;
	u8 tproto;
	__u16 len;

	/* If the packet doesn't contain the original IPv6 header we are
	   in trouble since we might need the source address for further
	   processing of the error. */

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->daddr,
			   &ipv6h->saddr);
	if (!t)
		goto out;

	tproto = READ_ONCE(t->parms.proto);
	if (tproto != ipproto && tproto != 0)
		goto out;

	err = 0;

	switch (*type) {
	case ICMPV6_DEST_UNREACH:
		net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
				    t->parms.name);
		rel_msg = 1;
		break;
	case ICMPV6_TIME_EXCEED:
		if ((*code) == ICMPV6_EXC_HOPLIMIT) {
			net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n",
					    t->parms.name);
			rel_msg = 1;
		}
		break;
	case ICMPV6_PARAMPROB: {
		struct ipv6_tlv_tnl_enc_lim *tel;
		__u32 teli;

		teli = 0;
		if ((*code) == ICMPV6_HDR_FIELD)
			teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);

		if (teli && teli == *info - 2) {
			tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->data[teli];
			if (tel->encap_limit == 0) {
				net_dbg_ratelimited("%s: Too small encapsulation limit or routing loop in tunnel!\n",
						    t->parms.name);
				rel_msg = 1;
			}
		} else {
			net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n",
					    t->parms.name);
		}
		break;
	}
	case ICMPV6_PKT_TOOBIG: {
		__u32 mtu;

		ip6_update_pmtu(skb, net, htonl(*info), 0, 0,
				sock_net_uid(net, NULL));
		mtu = *info - offset;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
		len = sizeof(*ipv6h) + ntohs(ipv6h->payload_len);
		if (len > mtu) {
			rel_type = ICMPV6_PKT_TOOBIG;
			rel_code = 0;
			rel_info = mtu;
			rel_msg = 1;
		}
		break;
	}
	case NDISC_REDIRECT:
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
		break;
	}

	*type = rel_type;
	*code = rel_code;
	*info = rel_info;
	*msg = rel_msg;

out:
	rcu_read_unlock();
	return err;
}

static int
ip4ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	const struct iphdr *eiph;
	struct sk_buff *skb2;
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;
	struct rtable *rt;
	struct flowi4 fl4;

	err = ip6_tnl_err(skb, IPPROTO_IPIP, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg == 0)
		return 0;

	switch (rel_type) {
	case ICMPV6_DEST_UNREACH:
		if (rel_code != ICMPV6_ADDR_UNREACH)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_HOST_UNREACH;
		break;
	case ICMPV6_PKT_TOOBIG:
		if (rel_code != 0)
			return 0;
		rel_type = ICMP_DEST_UNREACH;
		rel_code = ICMP_FRAG_NEEDED;
		break;
	default:
		return 0;
	}

	if (!pskb_may_pull(skb, offset + sizeof(struct iphdr)))
		return 0;

	skb2 = skb_clone(skb, GFP_ATOMIC);
	if (!skb2)
		return 0;

	skb_dst_drop(skb2);

	skb_pull(skb2, offset);
	skb_reset_network_header(skb2);
	eiph = ip_hdr(skb2);

	/* Try to guess incoming interface */
	rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL, eiph->saddr,
				   0, 0, 0, IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
	if (IS_ERR(rt))
		goto out;

	skb2->dev = rt->dst.dev;
	ip_rt_put(rt);

	/* route "incoming" packet */
	if (rt->rt_flags & RTCF_LOCAL) {
		rt = ip_route_output_ports(dev_net(skb->dev), &fl4, NULL,
					   eiph->daddr, eiph->saddr, 0, 0,
					   IPPROTO_IPIP, RT_TOS(eiph->tos), 0);
		if (IS_ERR(rt) || rt->dst.dev->type != ARPHRD_TUNNEL6) {
			if (!IS_ERR(rt))
				ip_rt_put(rt);
			goto out;
		}
		skb_dst_set(skb2, &rt->dst);
	} else {
		if (ip_route_input(skb2, eiph->daddr, eiph->saddr, eiph->tos,
				   skb2->dev) ||
		    skb_dst(skb2)->dev->type != ARPHRD_TUNNEL6)
			goto out;
	}

	/* change mtu on this route */
	if (rel_type == ICMP_DEST_UNREACH && rel_code == ICMP_FRAG_NEEDED) {
		if (rel_info > dst_mtu(skb_dst(skb2)))
			goto out;

		skb_dst_update_pmtu_no_confirm(skb2, rel_info);
	}

	icmp_send(skb2, rel_type, rel_code, htonl(rel_info));

out:
	kfree_skb(skb2);
	return 0;
}

static int
ip6ip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	   u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;

	err = ip6_tnl_err(skb, IPPROTO_IPV6, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	if (err < 0)
		return err;

	if (rel_msg && pskb_may_pull(skb, offset + sizeof(struct ipv6hdr))) {
		struct rt6_info *rt;
		struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

		if (!skb2)
			return 0;

		skb_dst_drop(skb2);
		skb_pull(skb2, offset);
		skb_reset_network_header(skb2);

		/* Try to guess incoming interface */
		rt = rt6_lookup(dev_net(skb->dev), &ipv6_hdr(skb2)->saddr,
				NULL, 0, skb2, 0);

		if (rt && rt->dst.dev)
			skb2->dev = rt->dst.dev;

		icmpv6_send(skb2, rel_type, rel_code, rel_info);

		ip6_rt_put(rt);

		kfree_skb(skb2);
	}

	return 0;
}

static int
mplsip6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	    u8 type, u8 code, int offset, __be32 info)
{
	__u32 rel_info = ntohl(info);
	int err, rel_msg = 0;
	u8 rel_type = type;
	u8 rel_code = code;

	err = ip6_tnl_err(skb, IPPROTO_MPLS, opt, &rel_type, &rel_code,
			  &rel_msg, &rel_info, offset);
	return err;
}

static int ip4ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	__u8 dsfield = ipv6_get_dsfield(ipv6h) & ~INET_ECN_MASK;

	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv4_change_dsfield(ip_hdr(skb), INET_ECN_MASK, dsfield);

	return IP6_ECN_decapsulate(ipv6h, skb);
}

static int ip6ip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
				       const struct ipv6hdr *ipv6h,
				       struct sk_buff *skb)
{
	if (t->parms.flags & IP6_TNL_F_RCV_DSCP_COPY)
		ipv6_copy_dscp(ipv6_get_dsfield(ipv6h), ipv6_hdr(skb));

	return IP6_ECN_decapsulate(ipv6h, skb);
}

static inline int mplsip6_dscp_ecn_decapsulate(const struct ip6_tnl *t,
					       const struct ipv6hdr *ipv6h,
					       struct sk_buff *skb)
{
	/* ECN is not supported in AF_MPLS */
	return 0;
}

__u32 ip6_tnl_get_cap(struct ip6_tnl *t,
		      const struct in6_addr *laddr,
		      const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ltype = ipv6_addr_type(laddr);
	int rtype = ipv6_addr_type(raddr);
	__u32 flags = 0;

	if (ltype == IPV6_ADDR_ANY || rtype == IPV6_ADDR_ANY) {
		flags = IP6_TNL_F_CAP_PER_PACKET;
	} else if (ltype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   rtype & (IPV6_ADDR_UNICAST|IPV6_ADDR_MULTICAST) &&
		   !((ltype|rtype) & IPV6_ADDR_LOOPBACK) &&
		   (!((ltype|rtype) & IPV6_ADDR_LINKLOCAL) || p->link)) {
		if (ltype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_XMIT;
		if (rtype&IPV6_ADDR_UNICAST)
			flags |= IP6_TNL_F_CAP_RCV;
	}
	return flags;
}
EXPORT_SYMBOL(ip6_tnl_get_cap);
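
/*
 * Illustrative note (added commentary, not in the original file): with
 * laddr = 2001:db8::1 and raddr = 2001:db8::2 -- both unicast, neither
 * loopback nor link-local -- ip6_tnl_get_cap() returns
 * IP6_TNL_F_CAP_XMIT | IP6_TNL_F_CAP_RCV, i.e. a fully usable
 * point-to-point tunnel.  If either end is the unspecified address, the
 * decision is deferred to each packet via IP6_TNL_F_CAP_PER_PACKET.
 */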

/* called with rcu_read_lock() */
int ip6_tnl_rcv_ctl(struct ip6_tnl *t,
		    const struct in6_addr *laddr,
		    const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if ((p->flags & IP6_TNL_F_CAP_RCV) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_RCV))) {
		struct net_device *ldev = NULL;

		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if ((ipv6_addr_is_multicast(laddr) ||
		     likely(ipv6_chk_addr_and_flags(net, laddr, ldev, false,
						    0, IFA_F_TENTATIVE))) &&
		    ((p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) ||
		     likely(!ipv6_chk_addr_and_flags(net, raddr, ldev, true,
						     0, IFA_F_TENTATIVE))))
			ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);

static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
			 const struct tnl_ptk_info *tpi,
			 struct metadata_dst *tun_dst,
			 int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						     const struct ipv6hdr *ipv6h,
						     struct sk_buff *skb),
			 bool log_ecn_err)
{
	struct pcpu_sw_netstats *tstats;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	int err;

	if ((!(tpi->flags & TUNNEL_CSUM) &&
	     (tunnel->parms.i_flags & TUNNEL_CSUM)) ||
	    ((tpi->flags & TUNNEL_CSUM) &&
	     !(tunnel->parms.i_flags & TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags & TUNNEL_SEQ) {
		if (!(tpi->flags & TUNNEL_SEQ) ||
		    (tunnel->i_seqno &&
		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	skb->protocol = tpi->proto;

	/* Warning: All skb pointers will be invalidated! */
	if (tunnel->dev->type == ARPHRD_ETHER) {
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			tunnel->dev->stats.rx_length_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}

		ipv6h = ipv6_hdr(skb);
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	skb_reset_network_header(skb);
	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));

	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);

	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
	if (unlikely(err)) {
		if (log_ecn_err)
			net_info_ratelimited("non-ECT from %pI6 with DS=%#x\n",
					     &ipv6h->saddr,
					     ipv6_get_dsfield(ipv6h));
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(tunnel->dev)));

	if (tun_dst)
		skb_dst_set(skb, (struct dst_entry *)tun_dst);

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	if (tun_dst)
		dst_release((struct dst_entry *)tun_dst);
	kfree_skb(skb);
	return 0;
}

int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
		const struct tnl_ptk_info *tpi,
		struct metadata_dst *tun_dst,
		bool log_ecn_err)
{
	int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
				    const struct ipv6hdr *ipv6h,
				    struct sk_buff *skb);

	dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate;
	if (tpi->proto == htons(ETH_P_IP))
		dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate;

	return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
			     log_ecn_err);
}
EXPORT_SYMBOL(ip6_tnl_rcv);

static const struct tnl_ptk_info tpi_v6 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IPV6),
};

static const struct tnl_ptk_info tpi_v4 = {
	/* no tunnel info required for ipxip6. */
	.proto = htons(ETH_P_IP),
};

static const struct tnl_ptk_info tpi_mpls = {
	/* no tunnel info required for mplsip6. */
	.proto = htons(ETH_P_MPLS_UC),
};

static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
		      const struct tnl_ptk_info *tpi,
		      int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
						  const struct ipv6hdr *ipv6h,
						  struct sk_buff *skb))
{
	struct ip6_tnl *t;
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct metadata_dst *tun_dst = NULL;
	int ret = -1;

	rcu_read_lock();
	t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->saddr,
			   &ipv6h->daddr);

	if (t) {
		u8 tproto = READ_ONCE(t->parms.proto);

		if (tproto != ipproto && tproto != 0)
			goto drop;
		if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
			goto drop;
		ipv6h = ipv6_hdr(skb);
		if (!ip6_tnl_rcv_ctl(t, &ipv6h->daddr, &ipv6h->saddr))
			goto drop;
		if (iptunnel_pull_header(skb, 0, tpi->proto, false))
			goto drop;
		if (t->parms.collect_md) {
			tun_dst = ipv6_tun_rx_dst(skb, 0, 0, 0);
			if (!tun_dst)
				goto drop;
		}
		ret = __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
				    log_ecn_error);
	}

	rcu_read_unlock();

	return ret;

drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return 0;
}

static int ip4ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPIP, &tpi_v4,
			  ip4ip6_dscp_ecn_decapsulate);
}

static int ip6ip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_IPV6, &tpi_v6,
			  ip6ip6_dscp_ecn_decapsulate);
}

static int mplsip6_rcv(struct sk_buff *skb)
{
	return ipxip6_rcv(skb, IPPROTO_MPLS, &tpi_mpls,
			  mplsip6_dscp_ecn_decapsulate);
}

struct ipv6_tel_txoption {
	struct ipv6_txoptions ops;
	__u8 dst_opt[8];
};

static void init_tel_txopt(struct ipv6_tel_txoption *opt, __u8 encap_limit)
{
	memset(opt, 0, sizeof(struct ipv6_tel_txoption));

	opt->dst_opt[2] = IPV6_TLV_TNL_ENCAP_LIMIT;
	opt->dst_opt[3] = 1;
	opt->dst_opt[4] = encap_limit;
	opt->dst_opt[5] = IPV6_TLV_PADN;
	opt->dst_opt[6] = 1;

	opt->ops.dst1opt = (struct ipv6_opt_hdr *)opt->dst_opt;
	opt->ops.opt_nflen = 8;
}
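
/*
 * Illustrative note (added commentary, not in the original file): after
 * init_tel_txopt(), dst_opt[] holds the body of the 8-byte destination
 * options header described above ip6_tnl_parse_tlv_enc_lim(): the
 * encapsulation limit TLV at bytes 2-4 and a PadN option at bytes 5-7.
 * Bytes 0-1 stay zeroed; a hdr-ext-len of 0 already means "eight bytes
 * total", and the next-header byte is filled in later when
 * ipv6_push_frag_opts() emits the header from ip6_tnl_xmit().
 */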

/**
 * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
 * @t: the outgoing tunnel device
 * @hdr: IPv6 header from the incoming packet
 *
 * Description:
 *   Avoid trivial tunneling loop by checking that tunnel exit-point
 *   doesn't match source of incoming packet.
 *
 * Return:
 *   1 if conflict,
 *   0 else
 **/

static inline bool
ip6_tnl_addr_conflict(const struct ip6_tnl *t, const struct ipv6hdr *hdr)
{
	return ipv6_addr_equal(&t->parms.raddr, &hdr->saddr);
}

int ip6_tnl_xmit_ctl(struct ip6_tnl *t,
		     const struct in6_addr *laddr,
		     const struct in6_addr *raddr)
{
	struct __ip6_tnl_parm *p = &t->parms;
	int ret = 0;
	struct net *net = t->net;

	if (t->parms.collect_md)
		return 1;

	if ((p->flags & IP6_TNL_F_CAP_XMIT) ||
	    ((p->flags & IP6_TNL_F_CAP_PER_PACKET) &&
	     (ip6_tnl_get_cap(t, laddr, raddr) & IP6_TNL_F_CAP_XMIT))) {
		struct net_device *ldev = NULL;

		rcu_read_lock();
		if (p->link)
			ldev = dev_get_by_index_rcu(net, p->link);

		if (unlikely(!ipv6_chk_addr_and_flags(net, laddr, ldev, false,
						      0, IFA_F_TENTATIVE)))
			pr_warn_ratelimited("%s xmit: Local address not yet configured!\n",
					    p->name);
		else if (!(p->flags & IP6_TNL_F_ALLOW_LOCAL_REMOTE) &&
			 !ipv6_addr_is_multicast(raddr) &&
			 unlikely(ipv6_chk_addr_and_flags(net, raddr, ldev,
							  true, 0, IFA_F_TENTATIVE)))
			pr_warn_ratelimited("%s xmit: Routing loop! Remote address found on this node!\n",
					    p->name);
		else
			ret = 1;
		rcu_read_unlock();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(ip6_tnl_xmit_ctl);

/**
 * ip6_tnl_xmit - encapsulate packet and send
 * @skb: the outgoing socket buffer
 * @dev: the outgoing tunnel device
 * @dsfield: dscp code for outer header
 * @fl6: flow of tunneled packet
 * @encap_limit: encapsulation limit
 * @pmtu: Path MTU is stored if packet is too big
 * @proto: next header value
 *
 * Description:
 *   Build new header and do some sanity checks on the packet before sending
 *   it.
 *
 * Return:
 *   0 on success,
 *   -1 on failure,
 *   %-EMSGSIZE if the packet is too big (the MTU is stored in *@pmtu)
 **/

int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield,
		 struct flowi6 *fl6, int encap_limit, __u32 *pmtu,
		 __u8 proto)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h;
	struct ipv6_tel_txoption opt;
	struct dst_entry *dst = NULL, *ndst = NULL;
	struct net_device *tdev;
	int mtu;
	unsigned int eth_hlen = t->dev->type == ARPHRD_ETHER ? ETH_HLEN : 0;
	unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
	unsigned int max_headroom = psh_hlen;
	bool use_cache = false;
	u8 hop_limit;
	int err = -1;

	if (t->parms.collect_md) {
		hop_limit = skb_tunnel_info(skb)->key.ttl;
		goto route_lookup;
	} else {
		hop_limit = t->parms.hop_limit;
	}

	/* NBMA tunnel */
	if (ipv6_addr_any(&t->parms.raddr)) {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			struct in6_addr *addr6;
			struct neighbour *neigh;
			int addr_type;

			if (!skb_dst(skb))
				goto tx_err_link_failure;

			neigh = dst_neigh_lookup(skb_dst(skb),
						 &ipv6_hdr(skb)->daddr);
			if (!neigh)
				goto tx_err_link_failure;

			addr6 = (struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY)
				addr6 = &ipv6_hdr(skb)->daddr;

			memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
			neigh_release(neigh);
		}
	} else if (t->parms.proto != 0 && !(t->parms.flags &
					    (IP6_TNL_F_USE_ORIG_TCLASS |
					     IP6_TNL_F_USE_ORIG_FWMARK))) {
		/* enable the cache only if neither the outer protocol nor the
		 * routing decision depends on the current inner header value
		 */
		use_cache = true;
	}

	if (use_cache)
		dst = dst_cache_get(&t->dst_cache);

	if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
		goto tx_err_link_failure;

	if (!dst) {
route_lookup:
		/* add dsfield to flowlabel for route lookup */
		fl6->flowlabel = ip6_make_flowinfo(dsfield, fl6->flowlabel);

		dst = ip6_route_output(net, NULL, fl6);

		if (dst->error)
			goto tx_err_link_failure;
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(fl6), NULL, 0);
		if (IS_ERR(dst)) {
			err = PTR_ERR(dst);
			dst = NULL;
			goto tx_err_link_failure;
		}
		if (t->parms.collect_md && ipv6_addr_any(&fl6->saddr) &&
		    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
				       &fl6->daddr, 0, &fl6->saddr))
			goto tx_err_link_failure;
		ndst = dst;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		stats->collisions++;
		net_warn_ratelimited("%s: Local routing loop detected!\n",
				     t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - eth_hlen - psh_hlen - t->tun_hlen;
	if (encap_limit >= 0) {
		max_headroom += 8;
		mtu -= 8;
	}
	mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ?
		       IPV6_MIN_MTU : IPV4_MIN_MTU);

	skb_dst_update_pmtu_no_confirm(skb, mtu);
	if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}

	if (t->err_count > 0) {
		if (time_before(jiffies,
				t->err_time + IP6TUNNEL_ERR_TIMEO)) {
			t->err_count--;

			dst_link_failure(skb);
		} else {
			t->err_count = 0;
		}
	}

	skb_scrub_packet(skb, !net_eq(t->net, dev_net(dev)));

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb;

		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

	if (t->parms.collect_md) {
		if (t->encap.type != TUNNEL_ENCAP_NONE)
			goto tx_err_dst_release;
	} else {
		if (use_cache && ndst)
			dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
	}
	skb_dst_set(skb, dst);

	if (hop_limit == 0) {
		if (skb->protocol == htons(ETH_P_IP))
			hop_limit = ip_hdr(skb)->ttl;
		else if (skb->protocol == htons(ETH_P_IPV6))
			hop_limit = ipv6_hdr(skb)->hop_limit;
		else
			hop_limit = ip6_dst_hoplimit(dst);
	}

	/* Calculate max headroom for all the headers and adjust
	 * needed_headroom if necessary.
	 */
	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
			+ dst->header_len + t->hlen;
	if (max_headroom > dev->needed_headroom)
		dev->needed_headroom = max_headroom;

	err = ip6_tnl_encap(skb, t, &proto, fl6);
	if (err)
		return err;

	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_frag_opts(skb, &opt.ops, &proto);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	ipv6h = ipv6_hdr(skb);
	ip6_flow_hdr(ipv6h, dsfield,
		     ip6_make_flowlabel(net, skb, fl6->flowlabel, true, fl6));
	ipv6h->hop_limit = hop_limit;
	ipv6h->nexthdr = proto;
	ipv6h->saddr = fl6->saddr;
	ipv6h->daddr = fl6->daddr;
	ip6tunnel_xmit(NULL, skb, dev);
	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}
EXPORT_SYMBOL(ip6_tnl_xmit);

static inline int
ipxip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev,
		u8 protocol)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct ipv6hdr *ipv6h;
	const struct iphdr *iph;
	int encap_limit = -1;
	__u16 offset;
	struct flowi6 fl6;
	__u8 dsfield, orig_dsfield;
	__u32 mtu;
	u8 tproto;
	int err;

	tproto = READ_ONCE(t->parms.proto);
	if (tproto != protocol && tproto != 0)
		return -1;

	if (t->parms.collect_md) {
		struct ip_tunnel_info *tun_info;
		const struct ip_tunnel_key *key;

		tun_info = skb_tunnel_info(skb);
		if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
			     ip_tunnel_info_af(tun_info) != AF_INET6))
			return -1;
		key = &tun_info->key;
		memset(&fl6, 0, sizeof(fl6));
		fl6.flowi6_proto = protocol;
		fl6.saddr = key->u.ipv6.src;
		fl6.daddr = key->u.ipv6.dst;
		fl6.flowlabel = key->label;
		dsfield = key->tos;
		switch (protocol) {
		case IPPROTO_IPIP:
			iph = ip_hdr(skb);
			orig_dsfield = ipv4_get_dsfield(iph);
			break;
		case IPPROTO_IPV6:
			ipv6h = ipv6_hdr(skb);
			orig_dsfield = ipv6_get_dsfield(ipv6h);
			break;
		default:
			orig_dsfield = dsfield;
			break;
		}
	} else {
		if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
			encap_limit = t->parms.encap_limit;
		if (protocol == IPPROTO_IPV6) {
			offset = ip6_tnl_parse_tlv_enc_lim(skb,
						skb_network_header(skb));
			/* ip6_tnl_parse_tlv_enc_lim() might have
			 * reallocated skb->head
			 */
			if (offset > 0) {
				struct ipv6_tlv_tnl_enc_lim *tel;

				tel = (void *)&skb_network_header(skb)[offset];
				if (tel->encap_limit == 0) {
					icmpv6_ndo_send(skb, ICMPV6_PARAMPROB,
							ICMPV6_HDR_FIELD, offset + 2);
					return -1;
				}
				encap_limit = tel->encap_limit - 1;
			}
		}

		memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
		fl6.flowi6_proto = protocol;

		if (t->parms.flags & IP6_TNL_F_USE_ORIG_FWMARK)
			fl6.flowi6_mark = skb->mark;
		else
			fl6.flowi6_mark = t->parms.fwmark;
		switch (protocol) {
		case IPPROTO_IPIP:
			iph = ip_hdr(skb);
			orig_dsfield = ipv4_get_dsfield(iph);
			if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
				dsfield = orig_dsfield;
			else
				dsfield = ip6_tclass(t->parms.flowinfo);
			break;
		case IPPROTO_IPV6:
			ipv6h = ipv6_hdr(skb);
			orig_dsfield = ipv6_get_dsfield(ipv6h);
			if (t->parms.flags & IP6_TNL_F_USE_ORIG_TCLASS)
				dsfield = orig_dsfield;
			else
				dsfield = ip6_tclass(t->parms.flowinfo);
			if (t->parms.flags & IP6_TNL_F_USE_ORIG_FLOWLABEL)
				fl6.flowlabel |= ip6_flowlabel(ipv6h);
			break;
		default:
			orig_dsfield = dsfield = ip6_tclass(t->parms.flowinfo);
			break;
		}
	}

	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
	dsfield = INET_ECN_encapsulate(dsfield, orig_dsfield);

	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
		return -1;

	skb_set_inner_ipproto(skb, protocol);

	err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
			   protocol);
	if (err != 0) {
		/* XXX: send ICMP error even if DF is not set. */
		if (err == -EMSGSIZE)
			switch (protocol) {
			case IPPROTO_IPIP:
				icmp_ndo_send(skb, ICMP_DEST_UNREACH,
					      ICMP_FRAG_NEEDED, htonl(mtu));
				break;
			case IPPROTO_IPV6:
				icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
				break;
			default:
				break;
			}
		return -1;
	}

	return 0;
}

static netdev_tx_t
ip6_tnl_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	u8 ipproto;
	int ret;

	if (!pskb_inet_may_pull(skb))
		goto tx_err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipproto = IPPROTO_IPIP;
		break;
	case htons(ETH_P_IPV6):
		if (ip6_tnl_addr_conflict(t, ipv6_hdr(skb)))
			goto tx_err;
		ipproto = IPPROTO_IPV6;
		break;
	case htons(ETH_P_MPLS_UC):
		ipproto = IPPROTO_MPLS;
		break;
	default:
		goto tx_err;
	}

	ret = ipxip6_tnl_xmit(skb, dev, ipproto);
	if (ret < 0)
		goto tx_err;

	return NETDEV_TX_OK;

tx_err:
	stats->tx_errors++;
	stats->tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void ip6_tnl_link_config(struct ip6_tnl *t)
{
	struct net_device *dev = t->dev;
	struct net_device *tdev = NULL;
	struct __ip6_tnl_parm *p = &t->parms;
	struct flowi6 *fl6 = &t->fl.u.ip6;
	unsigned int mtu;
	int t_hlen;

	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
	memcpy(dev->broadcast, &p->raddr, sizeof(struct in6_addr));

	/* Set up flowi template */
	fl6->saddr = p->laddr;
	fl6->daddr = p->raddr;
	fl6->flowi6_oif = p->link;
	fl6->flowlabel = 0;

	if (!(p->flags&IP6_TNL_F_USE_ORIG_TCLASS))
		fl6->flowlabel |= IPV6_TCLASS_MASK & p->flowinfo;
	if (!(p->flags&IP6_TNL_F_USE_ORIG_FLOWLABEL))
		fl6->flowlabel |= IPV6_FLOWLABEL_MASK & p->flowinfo;

	p->flags &= ~(IP6_TNL_F_CAP_XMIT|IP6_TNL_F_CAP_RCV|IP6_TNL_F_CAP_PER_PACKET);
	p->flags |= ip6_tnl_get_cap(t, &p->laddr, &p->raddr);

	if (p->flags&IP6_TNL_F_CAP_XMIT && p->flags&IP6_TNL_F_CAP_RCV)
		dev->flags |= IFF_POINTOPOINT;
	else
		dev->flags &= ~IFF_POINTOPOINT;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	if (p->flags & IP6_TNL_F_CAP_XMIT) {
		int strict = (ipv6_addr_type(&p->raddr) &
			      (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL));

		struct rt6_info *rt = rt6_lookup(t->net,
						 &p->raddr, &p->laddr,
						 p->link, NULL, strict);
		if (rt) {
			tdev = rt->dst.dev;
			ip6_rt_put(rt);
		}

		if (!tdev && p->link)
			tdev = __dev_get_by_index(t->net, p->link);

		if (tdev) {
			dev->hard_header_len = tdev->hard_header_len + t_hlen;
			mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU);

			dev->mtu = mtu - t_hlen;
			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
				dev->mtu -= 8;

			if (dev->mtu < IPV6_MIN_MTU)
				dev->mtu = IPV6_MIN_MTU;
		}
	}
}
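
/*
 * Illustrative example (added commentary, not in the original file): with a
 * 1500-byte underlying device and no extra encapsulation, t_hlen is just the
 * 40-byte outer IPv6 header, so dev->mtu becomes 1460; if the tunnel also
 * sends an encapsulation limit option, the 8-byte destination options
 * header brings it down to 1452, clamped to IPV6_MIN_MTU (1280) at the low
 * end.
 */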

/**
 * ip6_tnl_change - update the tunnel parameters
 * @t: tunnel to be changed
 * @p: tunnel configuration parameters
 *
 * Description:
 *   ip6_tnl_change() updates the tunnel parameters
 **/

static int
ip6_tnl_change(struct ip6_tnl *t, const struct __ip6_tnl_parm *p)
{
	t->parms.laddr = p->laddr;
	t->parms.raddr = p->raddr;
	t->parms.flags = p->flags;
	t->parms.hop_limit = p->hop_limit;
	t->parms.encap_limit = p->encap_limit;
	t->parms.flowinfo = p->flowinfo;
	t->parms.link = p->link;
	t->parms.proto = p->proto;
	t->parms.fwmark = p->fwmark;
	dst_cache_reset(&t->dst_cache);
	ip6_tnl_link_config(t);
	return 0;
}

static int ip6_tnl_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	int err;

	ip6_tnl_unlink(ip6n, t);
	synchronize_net();
	err = ip6_tnl_change(t, p);
	ip6_tnl_link(ip6n, t);
	netdev_state_change(t->dev);
	return err;
}

static int ip6_tnl0_update(struct ip6_tnl *t, struct __ip6_tnl_parm *p)
{
	/* the default tnl0 device only allows changing the proto */
	t->parms.proto = p->proto;
	netdev_state_change(t->dev);
	return 0;
}

static void
ip6_tnl_parm_from_user(struct __ip6_tnl_parm *p, const struct ip6_tnl_parm *u)
{
	p->laddr = u->laddr;
	p->raddr = u->raddr;
	p->flags = u->flags;
	p->hop_limit = u->hop_limit;
	p->encap_limit = u->encap_limit;
	p->flowinfo = u->flowinfo;
	p->link = u->link;
	p->proto = u->proto;
	memcpy(p->name, u->name, sizeof(u->name));
}

static void
ip6_tnl_parm_to_user(struct ip6_tnl_parm *u, const struct __ip6_tnl_parm *p)
{
	u->laddr = p->laddr;
	u->raddr = p->raddr;
	u->flags = p->flags;
	u->hop_limit = p->hop_limit;
	u->encap_limit = p->encap_limit;
	u->flowinfo = p->flowinfo;
	u->link = p->link;
	u->proto = p->proto;
	memcpy(u->name, p->name, sizeof(u->name));
}

/**
 * ip6_tnl_ioctl - configure ipv6 tunnels from userspace
 * @dev: virtual device associated with tunnel
 * @ifr: parameters passed from userspace
 * @cmd: command to be performed
 *
 * Description:
 *   ip6_tnl_ioctl() is used for managing IPv6 tunnels
 *   from userspace.
 *
 *   The possible commands are the following:
 *     %SIOCGETTUNNEL: get tunnel parameters for device
 *     %SIOCADDTUNNEL: add tunnel matching given tunnel parameters
 *     %SIOCCHGTUNNEL: change tunnel parameters to those given
 *     %SIOCDELTUNNEL: delete tunnel
 *
 *   The fallback device "ip6tnl0", created during module
 *   initialization, can be used for creating other tunnel devices.
 *
 * Return:
 *   0 on success,
 *   %-EFAULT if unable to copy data to or from userspace,
 *   %-EPERM if current process lacks the %CAP_NET_ADMIN capability,
 *   %-EINVAL if passed tunnel parameters are invalid,
 *   %-EEXIST if changing a tunnel's parameters would cause a conflict,
 *   %-ENODEV if attempting to change or delete a nonexistent device
 **/

static int
ip6_tnl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err = 0;
	struct ip6_tnl_parm p;
	struct __ip6_tnl_parm p1;
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	memset(&p1, 0, sizeof(p1));

	switch (cmd) {
	case SIOCGETTUNNEL:
		if (dev == ip6n->fb_tnl_dev) {
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p))) {
				err = -EFAULT;
				break;
			}
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				t = netdev_priv(dev);
		} else {
			memset(&p, 0, sizeof(p));
		}
		ip6_tnl_parm_to_user(&p, &t->parms);
		if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
			err = -EFAULT;
		break;
	case SIOCADDTUNNEL:
	case SIOCCHGTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = -EFAULT;
		if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
			break;
		err = -EINVAL;
		if (p.proto != IPPROTO_IPV6 && p.proto != IPPROTO_IPIP &&
		    p.proto != 0)
			break;
		ip6_tnl_parm_from_user(&p1, &p);
		t = ip6_tnl_locate(net, &p1, cmd == SIOCADDTUNNEL);
		if (cmd == SIOCCHGTUNNEL) {
			if (!IS_ERR(t)) {
				if (t->dev != dev) {
					err = -EEXIST;
					break;
				}
			} else
				t = netdev_priv(dev);
			if (dev == ip6n->fb_tnl_dev)
				err = ip6_tnl0_update(t, &p1);
			else
				err = ip6_tnl_update(t, &p1);
		}
		if (!IS_ERR(t)) {
			err = 0;
			ip6_tnl_parm_to_user(&p, &t->parms);
			if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
				err = -EFAULT;

		} else {
			err = PTR_ERR(t);
		}
		break;
	case SIOCDELTUNNEL:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;

		if (dev == ip6n->fb_tnl_dev) {
			err = -EFAULT;
			if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
				break;
			err = -ENOENT;
			ip6_tnl_parm_from_user(&p1, &p);
			t = ip6_tnl_locate(net, &p1, 0);
			if (IS_ERR(t))
				break;
			err = -EPERM;
			if (t->dev == ip6n->fb_tnl_dev)
				break;
			dev = t->dev;
		}
		err = 0;
		unregister_netdevice(dev);
		break;
	default:
		err = -EINVAL;
	}
	return err;
}
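
/*
 * Illustrative userspace sketch (added commentary, not in the original
 * file): creating a tunnel through the fallback device could look roughly
 * like this; "mytun" and the addresses are examples only, and error
 * handling is omitted:
 *
 *	struct ip6_tnl_parm p = { .proto = IPPROTO_IPV6 };
 *	struct ifreq ifr;
 *	int fd = socket(AF_INET6, SOCK_DGRAM, 0);
 *
 *	strcpy(p.name, "mytun");
 *	inet_pton(AF_INET6, "2001:db8::1", &p.laddr);
 *	inet_pton(AF_INET6, "2001:db8::2", &p.raddr);
 *	strcpy(ifr.ifr_name, "ip6tnl0");
 *	ifr.ifr_ifru.ifru_data = (void *)&p;
 *	ioctl(fd, SIOCADDTUNNEL, &ifr);
 */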
1736
1737 /**
1738 * ip6_tnl_change_mtu - change mtu manually for tunnel device
1739 * @dev: virtual device associated with tunnel
1740 * @new_mtu: the new mtu
1741 *
1742 * Return:
1743 * 0 on success,
1744 * %-EINVAL if mtu too small
1745 **/
1746
ip6_tnl_change_mtu(struct net_device * dev,int new_mtu)1747 int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1748 {
1749 struct ip6_tnl *tnl = netdev_priv(dev);
1750
1751 if (tnl->parms.proto == IPPROTO_IPV6) {
1752 if (new_mtu < IPV6_MIN_MTU)
1753 return -EINVAL;
1754 } else {
1755 if (new_mtu < ETH_MIN_MTU)
1756 return -EINVAL;
1757 }
1758 if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
1759 if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
1760 return -EINVAL;
1761 } else {
1762 if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
1763 return -EINVAL;
1764 }
1765 dev->mtu = new_mtu;
1766 return 0;
1767 }
1768 EXPORT_SYMBOL(ip6_tnl_change_mtu);
1769
ip6_tnl_get_iflink(const struct net_device * dev)1770 int ip6_tnl_get_iflink(const struct net_device *dev)
1771 {
1772 struct ip6_tnl *t = netdev_priv(dev);
1773
1774 return t->parms.link;
1775 }
1776 EXPORT_SYMBOL(ip6_tnl_get_iflink);
1777
ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops * ops,unsigned int num)1778 int ip6_tnl_encap_add_ops(const struct ip6_tnl_encap_ops *ops,
1779 unsigned int num)
1780 {
1781 if (num >= MAX_IPTUN_ENCAP_OPS)
1782 return -ERANGE;
1783
1784 return !cmpxchg((const struct ip6_tnl_encap_ops **)
1785 &ip6tun_encaps[num],
1786 NULL, ops) ? 0 : -1;
1787 }
1788 EXPORT_SYMBOL(ip6_tnl_encap_add_ops);
1789
ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops * ops,unsigned int num)1790 int ip6_tnl_encap_del_ops(const struct ip6_tnl_encap_ops *ops,
1791 unsigned int num)
1792 {
1793 int ret;
1794
1795 if (num >= MAX_IPTUN_ENCAP_OPS)
1796 return -ERANGE;
1797
1798 ret = (cmpxchg((const struct ip6_tnl_encap_ops **)
1799 &ip6tun_encaps[num],
1800 ops, NULL) == ops) ? 0 : -1;
1801
1802 synchronize_net();
1803
1804 return ret;
1805 }
1806 EXPORT_SYMBOL(ip6_tnl_encap_del_ops);
1807
ip6_tnl_encap_setup(struct ip6_tnl * t,struct ip_tunnel_encap * ipencap)1808 int ip6_tnl_encap_setup(struct ip6_tnl *t,
1809 struct ip_tunnel_encap *ipencap)
1810 {
1811 int hlen;
1812
1813 memset(&t->encap, 0, sizeof(t->encap));
1814
1815 hlen = ip6_encap_hlen(ipencap);
1816 if (hlen < 0)
1817 return hlen;
1818
1819 t->encap.type = ipencap->type;
1820 t->encap.sport = ipencap->sport;
1821 t->encap.dport = ipencap->dport;
1822 t->encap.flags = ipencap->flags;
1823
1824 t->encap_hlen = hlen;
1825 t->hlen = t->encap_hlen + t->tun_hlen;
1826
1827 return 0;
1828 }
1829 EXPORT_SYMBOL_GPL(ip6_tnl_encap_setup);
1830
1831 static const struct net_device_ops ip6_tnl_netdev_ops = {
1832 .ndo_init = ip6_tnl_dev_init,
	.ndo_uninit	= ip6_tnl_dev_uninit,
	.ndo_start_xmit = ip6_tnl_start_xmit,
	.ndo_do_ioctl	= ip6_tnl_ioctl,
	.ndo_change_mtu = ip6_tnl_change_mtu,
	.ndo_get_stats	= ip6_get_stats,
	.ndo_get_iflink = ip6_tnl_get_iflink,
};

#define IPXIPX_FEATURES (NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_GSO_SOFTWARE |	\
			 NETIF_F_HW_CSUM)

/**
 * ip6_tnl_dev_setup - setup virtual tunnel device
 * @dev: virtual device associated with tunnel
 *
 * Description:
 *   Initialize function pointers and device parameters
 **/

static void ip6_tnl_dev_setup(struct net_device *dev)
{
	dev->netdev_ops = &ip6_tnl_netdev_ops;
	dev->header_ops = &ip_tunnel_header_ops;
	dev->needs_free_netdev = true;
	dev->priv_destructor = ip6_dev_free;

	dev->type = ARPHRD_TUNNEL6;
	dev->flags |= IFF_NOARP;
	dev->addr_len = sizeof(struct in6_addr);
	dev->features |= NETIF_F_LLTX;
	netif_keep_dst(dev);

	dev->features		|= IPXIPX_FEATURES;
	dev->hw_features	|= IPXIPX_FEATURES;

	/* This perm addr will be used as interface identifier by IPv6 */
	dev->addr_assign_type = NET_ADDR_RANDOM;
	eth_random_addr(dev->perm_addr);
}


/**
 * ip6_tnl_dev_init_gen - general initializer for all tunnel devices
 * @dev: virtual device associated with tunnel
 **/

static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int ret;
	int t_hlen;

	t->dev = dev;
	t->net = dev_net(dev);
	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	ret = dst_cache_init(&t->dst_cache, GFP_KERNEL);
	if (ret)
		goto free_stats;

	ret = gro_cells_init(&t->gro_cells, dev);
	if (ret)
		goto destroy_dst;

	t->tun_hlen = 0;
	t->hlen = t->encap_hlen + t->tun_hlen;
	t_hlen = t->hlen + sizeof(struct ipv6hdr);

	dev->type = ARPHRD_TUNNEL6;
	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
	dev->mtu = ETH_DATA_LEN - t_hlen;
	/* reserve room for the 8-byte destination options header that
	 * carries the tunnel encapsulation limit option (RFC 2473)
	 */
	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
		dev->mtu -= 8;
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;

	dev_hold(dev);
	return 0;

destroy_dst:
	dst_cache_destroy(&t->dst_cache);
free_stats:
	free_percpu(dev->tstats);
	dev->tstats = NULL;

	return ret;
}

/**
 * ip6_tnl_dev_init - initializer for all non fallback tunnel devices
 * @dev: virtual device associated with tunnel
 **/

static int ip6_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	int err = ip6_tnl_dev_init_gen(dev);

	if (err)
		return err;
	ip6_tnl_link_config(t);
	if (t->parms.collect_md)
		netif_keep_dst(dev);
	return 0;
}

/**
 * ip6_fb_tnl_dev_init - initializer for fallback tunnel device
 * @dev: fallback device
 *
 * Return: 0
 **/

static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	t->parms.proto = IPPROTO_IPV6;

	rcu_assign_pointer(ip6n->tnls_wc[0], t);
	return 0;
}

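/**
 * ip6_tnl_validate - validate netlink attributes for a tunnel link
 * @tb: generic link attributes
 * @data: tunnel specific attributes
 * @extack: extended ack for error reporting
 *
 * Description:
 *   Only IFLA_IPTUN_PROTO is checked here: the tunnel may carry IPv6
 *   (IPPROTO_IPV6), IPv4 (IPPROTO_IPIP), or any of them (0).
 **/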
static int ip6_tnl_validate(struct nlattr *tb[], struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	u8 proto;

	if (!data || !data[IFLA_IPTUN_PROTO])
		return 0;

	proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);
	if (proto != IPPROTO_IPV6 &&
	    proto != IPPROTO_IPIP &&
	    proto != 0)
		return -EINVAL;

	return 0;
}

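/**
 * ip6_tnl_netlink_parms - parse netlink attributes into tunnel parameters
 * @data: tunnel specific netlink attributes
 * @parms: tunnel parameters to fill in
 *
 * Description:
 *   @parms is zeroed first, so attributes that are absent simply keep
 *   their default (zero) value.
 **/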
static void ip6_tnl_netlink_parms(struct nlattr *data[],
				  struct __ip6_tnl_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	if (!data)
		return;

	if (data[IFLA_IPTUN_LINK])
		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);

	if (data[IFLA_IPTUN_LOCAL])
		parms->laddr = nla_get_in6_addr(data[IFLA_IPTUN_LOCAL]);

	if (data[IFLA_IPTUN_REMOTE])
		parms->raddr = nla_get_in6_addr(data[IFLA_IPTUN_REMOTE]);

	if (data[IFLA_IPTUN_TTL])
		parms->hop_limit = nla_get_u8(data[IFLA_IPTUN_TTL]);

	if (data[IFLA_IPTUN_ENCAP_LIMIT])
		parms->encap_limit = nla_get_u8(data[IFLA_IPTUN_ENCAP_LIMIT]);

	if (data[IFLA_IPTUN_FLOWINFO])
		parms->flowinfo = nla_get_be32(data[IFLA_IPTUN_FLOWINFO]);

	if (data[IFLA_IPTUN_FLAGS])
		parms->flags = nla_get_u32(data[IFLA_IPTUN_FLAGS]);

	if (data[IFLA_IPTUN_PROTO])
		parms->proto = nla_get_u8(data[IFLA_IPTUN_PROTO]);

	if (data[IFLA_IPTUN_COLLECT_METADATA])
		parms->collect_md = true;

	if (data[IFLA_IPTUN_FWMARK])
		parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
}

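/**
 * ip6_tnl_netlink_encap_parms - parse optional encapsulation attributes
 * @data: tunnel specific netlink attributes
 * @ipencap: encapsulation parameters to fill in
 *
 * Return: true if any IFLA_IPTUN_ENCAP_* attribute was present and
 * @ipencap should be applied, false otherwise
 **/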
static bool ip6_tnl_netlink_encap_parms(struct nlattr *data[],
					struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
	}

	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
	}

	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
	}

	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
	}

	return ret;
}

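/**
 * ip6_tnl_newlink - create a new tunnel device via rtnetlink
 * @src_net: netlink source namespace
 * @dev: new tunnel device, already allocated by ip6_tnl_dev_setup()
 * @tb: generic link attributes
 * @data: tunnel specific attributes
 * @extack: extended ack for error reporting
 *
 * Description:
 *   Fails with -EEXIST if an equivalent tunnel already exists; only
 *   one collect_md (external metadata) device is allowed per netns.
 **/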
static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;
	struct ip6_tnl *nt, *t;
	int err;

	nt = netdev_priv(dev);

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		err = ip6_tnl_encap_setup(nt, &ipencap);
		if (err < 0)
			return err;
	}

	ip6_tnl_netlink_parms(data, &nt->parms);

	if (nt->parms.collect_md) {
		if (rtnl_dereference(ip6n->collect_md_tun))
			return -EEXIST;
	} else {
		t = ip6_tnl_locate(net, &nt->parms, 0);
		if (!IS_ERR(t))
			return -EEXIST;
	}

	err = ip6_tnl_create2(dev);
	if (!err && tb[IFLA_MTU])
		ip6_tnl_change_mtu(dev, nla_get_u32(tb[IFLA_MTU]));

	return err;
}

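/**
 * ip6_tnl_changelink - change parameters of an existing tunnel
 * @dev: tunnel device to change
 * @tb: generic link attributes
 * @data: tunnel specific attributes
 * @extack: extended ack for error reporting
 *
 * Description:
 *   The fallback device cannot be reconfigured, collect_md cannot be
 *   toggled on an existing device, and changing the parameters so that
 *   they match another tunnel yields -EEXIST.
 **/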
static int ip6_tnl_changelink(struct net_device *dev, struct nlattr *tb[],
			      struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	struct ip6_tnl *t = netdev_priv(dev);
	struct __ip6_tnl_parm p;
	struct net *net = t->net;
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip_tunnel_encap ipencap;

	if (dev == ip6n->fb_tnl_dev)
		return -EINVAL;

	if (ip6_tnl_netlink_encap_parms(data, &ipencap)) {
		int err = ip6_tnl_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}
	ip6_tnl_netlink_parms(data, &p);
	if (p.collect_md)
		return -EINVAL;

	t = ip6_tnl_locate(net, &p, 0);
	if (!IS_ERR(t)) {
		if (t->dev != dev)
			return -EEXIST;
	} else
		t = netdev_priv(dev);

	return ip6_tnl_update(t, &p);
}

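/**
 * ip6_tnl_dellink - delete a tunnel device
 * @dev: tunnel device to delete
 * @head: list to queue the device on for unregistration
 *
 * Description:
 *   The per-netns fallback device is never unregistered here; it is
 *   torn down together with its namespace.
 **/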
static void ip6_tnl_dellink(struct net_device *dev, struct list_head *head)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);

	if (dev != ip6n->fb_tnl_dev)
		unregister_netdevice_queue(dev, head);
}

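/**
 * ip6_tnl_get_size - upper bound on the netlink message size
 * @dev: tunnel device being dumped
 *
 * Return: space needed for the attributes emitted by
 * ip6_tnl_fill_info()
 **/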
static size_t ip6_tnl_get_size(const struct net_device *dev)
{
	return
		/* IFLA_IPTUN_LINK */
		nla_total_size(4) +
		/* IFLA_IPTUN_LOCAL */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_REMOTE */
		nla_total_size(sizeof(struct in6_addr)) +
		/* IFLA_IPTUN_TTL */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_LIMIT */
		nla_total_size(1) +
		/* IFLA_IPTUN_FLOWINFO */
		nla_total_size(4) +
		/* IFLA_IPTUN_FLAGS */
		nla_total_size(4) +
		/* IFLA_IPTUN_PROTO */
		nla_total_size(1) +
		/* IFLA_IPTUN_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_IPTUN_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_IPTUN_FWMARK */
		nla_total_size(4) +
		0;
}

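/**
 * ip6_tnl_fill_info - dump tunnel configuration to a netlink message
 * @skb: message being built
 * @dev: tunnel device being dumped
 *
 * Return: 0 on success, -EMSGSIZE if @skb ran out of room
 **/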
static int ip6_tnl_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);
	struct __ip6_tnl_parm *parm = &tunnel->parms;

	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
	    nla_put_in6_addr(skb, IFLA_IPTUN_REMOTE, &parm->raddr) ||
	    nla_put_u8(skb, IFLA_IPTUN_TTL, parm->hop_limit) ||
	    nla_put_u8(skb, IFLA_IPTUN_ENCAP_LIMIT, parm->encap_limit) ||
	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
	    nla_put_u16(skb, IFLA_IPTUN_ENCAP_FLAGS, tunnel->encap.flags))
		goto nla_put_failure;

	if (parm->collect_md)
		if (nla_put_flag(skb, IFLA_IPTUN_COLLECT_METADATA))
			goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

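/**
 * ip6_tnl_get_link_net - get the netns the tunnel is bound to
 * @dev: tunnel device
 *
 * Return: the network namespace the tunnel transmits in, which may
 * differ from the namespace the device itself currently lives in
 **/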
struct net *ip6_tnl_get_link_net(const struct net_device *dev)
{
	struct ip6_tnl *tunnel = netdev_priv(dev);

	return tunnel->net;
}
EXPORT_SYMBOL(ip6_tnl_get_link_net);

static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
	[IFLA_IPTUN_LINK]		= { .type = NLA_U32 },
	[IFLA_IPTUN_LOCAL]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_REMOTE]		= { .len = sizeof(struct in6_addr) },
	[IFLA_IPTUN_TTL]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_LIMIT]	= { .type = NLA_U8 },
	[IFLA_IPTUN_FLOWINFO]		= { .type = NLA_U32 },
	[IFLA_IPTUN_FLAGS]		= { .type = NLA_U32 },
	[IFLA_IPTUN_PROTO]		= { .type = NLA_U8 },
	[IFLA_IPTUN_ENCAP_TYPE]		= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
};

static struct rtnl_link_ops ip6_link_ops __read_mostly = {
	.kind		= "ip6tnl",
	.maxtype	= IFLA_IPTUN_MAX,
	.policy		= ip6_tnl_policy,
	.priv_size	= sizeof(struct ip6_tnl),
	.setup		= ip6_tnl_dev_setup,
	.validate	= ip6_tnl_validate,
	.newlink	= ip6_tnl_newlink,
	.changelink	= ip6_tnl_changelink,
	.dellink	= ip6_tnl_dellink,
	.get_size	= ip6_tnl_get_size,
	.fill_info	= ip6_tnl_fill_info,
	.get_link_net	= ip6_tnl_get_link_net,
};
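
/* The "ip6tnl" link kind above is what iproute2 drives from user
 * space. A minimal sketch (interface name and addresses are examples):
 *
 *	ip link add name tun1 type ip6tnl mode ip6ip6 \
 *		local 2001:db8::1 remote 2001:db8::2
 *	ip link set tun1 up
 *
 * "mode" maps to IFLA_IPTUN_PROTO: ip6ip6, ipip6 or any, matching the
 * values accepted by ip6_tnl_validate() above.
 */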

static struct xfrm6_tunnel ip4ip6_handler __read_mostly = {
	.handler	= ip4ip6_rcv,
	.err_handler	= ip4ip6_err,
	.priority	= 1,
};

static struct xfrm6_tunnel ip6ip6_handler __read_mostly = {
	.handler	= ip6ip6_rcv,
	.err_handler	= ip6ip6_err,
	.priority	= 1,
};

static struct xfrm6_tunnel mplsip6_handler __read_mostly = {
	.handler	= mplsip6_rcv,
	.err_handler	= mplsip6_err,
	.priority	= 1,
};

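/**
 * ip6_tnl_destroy_tunnels - destroy all tunnels of a namespace
 * @net: network namespace being dismantled
 * @list: list to queue devices on for unregistration
 *
 * Description:
 *   Queues every ip6tnl device for unregistration: first all devices
 *   registered in @net, then hashed tunnels whose device has been
 *   moved to another netns.
 **/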
static void __net_exit ip6_tnl_destroy_tunnels(struct net *net, struct list_head *list)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct net_device *dev, *aux;
	int h;
	struct ip6_tnl *t;

	for_each_netdev_safe(net, dev, aux)
		if (dev->rtnl_link_ops == &ip6_link_ops)
			unregister_netdevice_queue(dev, list);

	for (h = 0; h < IP6_TUNNEL_HASH_SIZE; h++) {
		t = rtnl_dereference(ip6n->tnls_r_l[h]);
		while (t) {
			/* If dev is in the same netns, it has already
			 * been added to the list by the previous loop.
			 */
			if (!net_eq(dev_net(t->dev), net))
				unregister_netdevice_queue(t->dev, list);
			t = rtnl_dereference(t->next);
		}
	}

	t = rtnl_dereference(ip6n->tnls_wc[0]);
	while (t) {
		/* If dev is in the same netns, it has already
		 * been added to the list by the previous loop.
		 */
		if (!net_eq(dev_net(t->dev), net))
			unregister_netdevice_queue(t->dev, list);
		t = rtnl_dereference(t->next);
	}
}

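/**
 * ip6_tnl_init_net - set up per-netns tunnel state
 * @net: network namespace being initialized
 *
 * Description:
 *   Initializes the hash table pointers and, where fallback tunnels
 *   are enabled, registers the netns-local ip6tnl0 fallback device.
 **/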
static int __net_init ip6_tnl_init_net(struct net *net)
{
	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
	struct ip6_tnl *t = NULL;
	int err;

	ip6n->tnls[0] = ip6n->tnls_wc;
	ip6n->tnls[1] = ip6n->tnls_r_l;

	if (!net_has_fallback_tunnels(net))
		return 0;
	err = -ENOMEM;
	ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
					NET_NAME_UNKNOWN, ip6_tnl_dev_setup);

	if (!ip6n->fb_tnl_dev)
		goto err_alloc_dev;
	dev_net_set(ip6n->fb_tnl_dev, net);
	ip6n->fb_tnl_dev->rtnl_link_ops = &ip6_link_ops;
	/* FB netdevice is special: we have one, and only one per netns.
	 * Allowing it to be moved to another netns is clearly unsafe.
	 */
	ip6n->fb_tnl_dev->features |= NETIF_F_NETNS_LOCAL;

	err = ip6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	err = register_netdev(ip6n->fb_tnl_dev);
	if (err < 0)
		goto err_register;

	t = netdev_priv(ip6n->fb_tnl_dev);

	strcpy(t->parms.name, ip6n->fb_tnl_dev->name);
	return 0;

err_register:
	free_netdev(ip6n->fb_tnl_dev);
err_alloc_dev:
	return err;
}

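/**
 * ip6_tnl_exit_batch_net - tear down tunnel state for a batch of netns
 * @net_list: list of namespaces being destroyed
 *
 * Description:
 *   Gathers the devices of all exiting namespaces under a single rtnl
 *   lock so they can be unregistered in one batched call.
 **/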
static void __net_exit ip6_tnl_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		ip6_tnl_destroy_tunnels(net, &list);
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations ip6_tnl_net_ops = {
	.init = ip6_tnl_init_net,
	.exit_batch = ip6_tnl_exit_batch_net,
	.id   = &ip6_tnl_net_id,
	.size = sizeof(struct ip6_tnl_net),
};

/**
 * ip6_tunnel_init - register protocol and reserve needed resources
 *
 * Return: 0 on success
 **/

static int __init ip6_tunnel_init(void)
{
	int err;

	if (!ipv6_mod_enabled())
		return -EOPNOTSUPP;

	err = register_pernet_device(&ip6_tnl_net_ops);
	if (err < 0)
		goto out_pernet;

	err = xfrm6_tunnel_register(&ip4ip6_handler, AF_INET);
	if (err < 0) {
		pr_err("%s: can't register ip4ip6\n", __func__);
		goto out_ip4ip6;
	}

	err = xfrm6_tunnel_register(&ip6ip6_handler, AF_INET6);
	if (err < 0) {
		pr_err("%s: can't register ip6ip6\n", __func__);
		goto out_ip6ip6;
	}

	if (ip6_tnl_mpls_supported()) {
		err = xfrm6_tunnel_register(&mplsip6_handler, AF_MPLS);
		if (err < 0) {
			pr_err("%s: can't register mplsip6\n", __func__);
			goto out_mplsip6;
		}
	}

	err = rtnl_link_register(&ip6_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	return 0;

rtnl_link_failed:
	if (ip6_tnl_mpls_supported())
		xfrm6_tunnel_deregister(&mplsip6_handler, AF_MPLS);
out_mplsip6:
	xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6);
out_ip6ip6:
	xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET);
out_ip4ip6:
	unregister_pernet_device(&ip6_tnl_net_ops);
out_pernet:
	return err;
}

/**
 * ip6_tunnel_cleanup - free resources and unregister protocol
 **/

static void __exit ip6_tunnel_cleanup(void)
{
	rtnl_link_unregister(&ip6_link_ops);
	if (xfrm6_tunnel_deregister(&ip4ip6_handler, AF_INET))
		pr_info("%s: can't deregister ip4ip6\n", __func__);

	if (xfrm6_tunnel_deregister(&ip6ip6_handler, AF_INET6))
		pr_info("%s: can't deregister ip6ip6\n", __func__);

	if (ip6_tnl_mpls_supported() &&
	    xfrm6_tunnel_deregister(&mplsip6_handler, AF_MPLS))
		pr_info("%s: can't deregister mplsip6\n", __func__);
	unregister_pernet_device(&ip6_tnl_net_ops);
}

module_init(ip6_tunnel_init);
module_exit(ip6_tunnel_cleanup);