// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Linux NET3:	GRE over IP protocol decoder.
 *
 *	Authors: Alexey Kuznetsov (kuznet@ms2.inr.ac.ru)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/in6.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>

#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/ip_tunnels.h>
#include <net/arp.h>
#include <net/checksum.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/gre.h>
#include <net/dst_metadata.h>
#include <net/erspan.h>

/*
   Problems & solutions
   --------------------

   1. The most important issue is detecting local dead loops.
   They would cause complete host lockup in transmit, which
   would be "resolved" by stack overflow or, if queueing is enabled,
   with infinite looping in net_bh.

   We cannot track such dead loops during route installation;
   it is an infeasible task. The most general solution would be
   to keep a skb->encapsulation counter (a sort of local ttl)
   and silently drop the packet when it expires. It is a good
   solution, but it supposes maintaining a new variable in ALL
   skbs, even if no tunneling is used.

   Current solution: xmit_recursion breaks dead loops. This is a percpu
   counter; since cpu migration is forbidden once we enter the first
   ndo_xmit(), the counter is reliable. We force an exit if it reaches
   RECURSION_LIMIT.

   2. Networking dead loops would not kill routers, but would really
   kill the network. The IP hop limit plays the role of "t->recursion"
   in this case, if we copy it from the packet being encapsulated to
   the upper header. It is a very good solution, but it introduces
   two problems:

   - Routing protocols using packets with ttl=1 (OSPF, RIP2)
     do not work over tunnels.
   - traceroute does not work. I planned to relay ICMP from the tunnel,
     so that this problem would be solved and traceroute output
     would be even more informative. This idea appeared to be wrong:
     only Linux complies with rfc1812 now (yes, guys, Linux is the only
     true router now :-)); all routers (at least, in my neighbourhood)
     return only 8 bytes of payload. It is the end.

   Hence, if we want OSPF to work or traceroute to say something
   reasonable, we should search for another solution.

   One of them is to parse the packet, trying to detect inner
   encapsulation made by our node. It is difficult or even impossible,
   especially taking fragmentation into account. To be short, ttl is
   not a solution at all.

   Current solution: The solution was UNEXPECTEDLY SIMPLE.
   We force the DF flag on tunnels with a preconfigured hop limit,
   that is ALL. :-) Well, it does not remove the problem completely,
   but exponential growth of network traffic is changed to linear
   (branches that exceed pmtu are pruned) and the tunnel mtu
   rapidly degrades to a value <68, where looping stops.
   Yes, it is not good if there exists a router in the loop
   which does not force DF, even when encapsulated packets have DF set.
   But it is not our problem! Nobody could accuse us, we made
   all that we could make. Even if it is your gated who injected
   the fatal route to the network, even if it were you who configured
   the fatal static route: you are innocent. :-)

   Alexey Kuznetsov.
 */

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

static struct rtnl_link_ops ipgre_link_ops __read_mostly;
static int ipgre_tunnel_init(struct net_device *dev);
static void erspan_build_header(struct sk_buff *skb,
				u32 id, u32 index,
				bool truncate, bool is_ipv4);

static unsigned int ipgre_net_id __read_mostly;
static unsigned int gre_tap_net_id __read_mostly;
static unsigned int erspan_net_id __read_mostly;

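/* Handle an ICMP error that quotes one of our GRE packets: pick the
 * per-netns tunnel table from the inner protocol, look up the tunnel
 * that sent the offending packet, and record the error in err_count /
 * err_time so the transmit path can report a link failure shortly
 * afterwards.
 */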
static int ipgre_err(struct sk_buff *skb, u32 info,
		     const struct tnl_ptk_info *tpi)
{

	/* All the routers (except for Linux) return only
	   8 bytes of packet payload. It means, that precise relaying of
	   ICMP in the real Internet is absolutely infeasible.

	   Moreover, Cisco "wise men" put the GRE key to the third word
	   in the GRE header. It makes it impossible to maintain even soft
	   state for keyed GRE tunnels with enabled checksum. Tell
	   them "thank you".

	   Well, I wonder, rfc1812 was written by a Cisco employee;
	   why the hell do these idiots break standards established
	   by themselves???
	 */
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	const struct iphdr *iph;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	unsigned int data_len = 0;
	struct ip_tunnel *t;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else if (tpi->proto == htons(ETH_P_ERSPAN) ||
		 tpi->proto == htons(ETH_P_ERSPAN2))
		itn = net_generic(net, erspan_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	iph = (const struct iphdr *)(icmp_hdr(skb) + 1);
	t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
			     iph->daddr, iph->saddr, tpi->key);

	if (!t)
		return -ENOENT;

	switch (type) {
	default:
	case ICMP_PARAMETERPROB:
		return 0;

	case ICMP_DEST_UNREACH:
		switch (code) {
		case ICMP_SR_FAILED:
		case ICMP_PORT_UNREACH:
			/* Impossible event. */
			return 0;
		default:
			/* All others are translated to HOST_UNREACH.
			   rfc2003 contains "deep thoughts" about NET_UNREACH,
			   I believe they are just ether pollution. --ANK
			 */
			break;
		}
		break;

	case ICMP_TIME_EXCEEDED:
		if (code != ICMP_EXC_TTL)
			return 0;
		data_len = icmp_hdr(skb)->un.reserved[1] * 4; /* RFC 4884 4.1 */
		break;

	case ICMP_REDIRECT:
		break;
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (tpi->proto == htons(ETH_P_IPV6) &&
	    !ip6_err_gen_icmpv6_unreach(skb, iph->ihl * 4 + tpi->hdr_len,
					type, data_len))
		return 0;
#endif

	if (t->parms.iph.daddr == 0 ||
	    ipv4_is_multicast(t->parms.iph.daddr))
		return 0;

	if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED)
		return 0;

	if (time_before(jiffies, t->err_time + IPTUNNEL_ERR_TIMEO))
		t->err_count++;
	else
		t->err_count = 1;
	t->err_time = jiffies;

	return 0;
}

static void gre_err(struct sk_buff *skb, u32 info)
{
	/* All the routers (except for Linux) return only
	 * 8 bytes of packet payload. It means, that precise relaying of
	 * ICMP in the real Internet is absolutely infeasible.
	 *
	 * Moreover, Cisco "wise men" put the GRE key to the third word
	 * in the GRE header. It makes it impossible to maintain even soft
	 * state for keyed GRE tunnels with enabled checksum. Tell
	 * them "thank you".
	 *
	 * Well, I wonder, rfc1812 was written by a Cisco employee;
	 * why the hell do these idiots break standards established
	 * by themselves???
	 */

	const struct iphdr *iph = (struct iphdr *)skb->data;
	const int type = icmp_hdr(skb)->type;
	const int code = icmp_hdr(skb)->code;
	struct tnl_ptk_info tpi;

	if (gre_parse_header(skb, &tpi, NULL, htons(ETH_P_IP),
			     iph->ihl * 4) < 0)
		return;

	if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
		ipv4_update_pmtu(skb, dev_net(skb->dev), info,
				 skb->dev->ifindex, IPPROTO_GRE);
		return;
	}
	if (type == ICMP_REDIRECT) {
		ipv4_redirect(skb, dev_net(skb->dev), skb->dev->ifindex,
			      IPPROTO_GRE);
		return;
	}

	ipgre_err(skb, info, &tpi);
}

static bool is_erspan_type1(int gre_hdr_len)
{
	/* Both ERSPAN type I (version 0) and type II (version 1) use
	 * protocol 0x88BE, but type I has only a 4-byte GRE header,
	 * while type II has an 8-byte one.
	 */
	return gre_hdr_len == 4;
}

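/* Receive path for ERSPAN-in-GRE. Type I carries no ERSPAN header at all
 * (bare 4-byte GRE), while types II/III carry a base header whose version
 * field selects the metadata layout. In collect_md mode the ERSPAN
 * metadata is copied into the tunnel dst so it reaches the consumer
 * (e.g. openvswitch) as TUNNEL_ERSPAN_OPT options.
 */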
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;

	itn = net_generic(net, erspan_net_id);
	iph = ip_hdr(skb);
	if (is_erspan_type1(gre_hdr_len)) {
		ver = 0;
		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
					  tpi->flags | TUNNEL_NO_KEY,
					  iph->saddr, iph->daddr, 0);
	} else {
		ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
		ver = ershdr->ver;
		tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
					  tpi->flags | TUNNEL_KEY,
					  iph->saddr, iph->daddr, tpi->key);
	}

	if (tunnel) {
		if (is_erspan_type1(gre_hdr_len))
			len = gre_hdr_len;
		else
			len = gre_hdr_len + erspan_hdr_len(ver);

		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		if (__iptunnel_pull_header(skb,
					   len,
					   htons(ETH_P_TEB),
					   false, false) < 0)
			goto drop;

		if (tunnel->collect_md) {
			struct erspan_metadata *pkt_md, *md;
			struct ip_tunnel_info *info;
			unsigned char *gh;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = ip_tun_rx_dst(skb, flags,
						tun_id, sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			/* skb can be uncloned in __iptunnel_pull_header, so
			 * the old pkt_md is no longer valid and we need to
			 * reset it
			 */
			gh = skb_network_header(skb) +
			     skb_network_header_len(skb);
			pkt_md = (struct erspan_metadata *)(gh + gre_hdr_len +
							    sizeof(*ershdr));
			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);

			info = &tun_dst->u.tun_info;
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);
		}

		skb_reset_mac_header(skb);
		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_REJECT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

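/* Common receive helper: look up a matching tunnel and deliver the packet
 * to it. Returns PACKET_RCVD if the packet was consumed (including drops),
 * PACKET_REJECT on resource errors, and PACKET_NEXT when no tunnel matched
 * and the caller may try another tunnel table.
 */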
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		const struct iphdr *tnl_params;

		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		if (tunnel->dev->type != ARPHRD_NONE)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);

		tnl_params = &tunnel->parms.iph;
		if (tunnel->collect_md || tnl_params->daddr == 0) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}

static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect-metadata mode should also
		 * receive ETH_P_TEB traffic.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}

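/* Entry point for IPPROTO_GRE packets, registered via gre_add_protocol().
 * The GRE header is parsed here; ERSPAN protocols go to erspan_rcv() and
 * everything else to ipgre_rcv(). If nobody claims the packet we answer
 * with ICMP port unreachable, as for an unknown transport port.
 */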
static int gre_rcv(struct sk_buff *skb)
{
	struct tnl_ptk_info tpi;
	bool csum_err = false;
	int hdr_len;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
	}
#endif

	hdr_len = gre_parse_header(skb, &tpi, &csum_err, htons(ETH_P_IP), 0);
	if (hdr_len < 0)
		goto drop;

	if (unlikely(tpi.proto == htons(ETH_P_ERSPAN) ||
		     tpi.proto == htons(ETH_P_ERSPAN2))) {
		if (erspan_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
			return 0;
		goto out;
	}

	if (ipgre_rcv(skb, &tpi, hdr_len) == PACKET_RCVD)
		return 0;

out:
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
drop:
	kfree_skb(skb);
	return 0;
}

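/* Push the GRE header for a native (non-metadata) tunnel and hand the
 * result to ip_tunnel_xmit(). The sequence number is advanced here, on
 * the assumption that TUNNEL_SEQ transmits are serialized (NETIF_F_LLTX
 * is not set for such tunnels, see __gre_tunnel_init()).
 */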
static void __gre_xmit(struct sk_buff *skb, struct net_device *dev,
		       const struct iphdr *tnl_params,
		       __be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->parms.o_flags & TUNNEL_SEQ)
		tunnel->o_seqno++;

	/* Push GRE header. */
	gre_build_header(skb, tunnel->tun_hlen,
			 tunnel->parms.o_flags, proto, tunnel->parms.o_key,
			 htonl(tunnel->o_seqno));

	ip_tunnel_xmit(skb, dev, tnl_params, tnl_params->protocol);
}

static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
	return iptunnel_handle_offloads(skb, csum ? SKB_GSO_GRE_CSUM : SKB_GSO_GRE);
}

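/* Flow-based ("external"/collect_md) transmit: all addressing and GRE
 * options come from the per-skb tunnel metadata set by the caller
 * (e.g. openvswitch or an eBPF program) instead of the device parms.
 */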
static void gre_fb_xmit(struct sk_buff *skb, struct net_device *dev,
			__be16 proto)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	int tunnel_hlen;
	__be16 flags;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	tunnel_hlen = gre_calc_hlen(key->tun_flags);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	/* Push Tunnel header. */
	if (gre_handle_offloads(skb, !!(tun_info->key.tun_flags & TUNNEL_CSUM)))
		goto err_free_skb;

	flags = tun_info->key.tun_flags &
		(TUNNEL_CSUM | TUNNEL_KEY | TUNNEL_SEQ);
	gre_build_header(skb, tunnel_hlen, flags, proto,
			 tunnel_id_to_key32(tun_info->key.tun_id),
			 (flags & TUNNEL_SEQ) ? htonl(tunnel->o_seqno++) : 0);

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}

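/* Flow-based transmit for ERSPAN: like gre_fb_xmit(), but the ERSPAN
 * version and direction/hwid (or index) come from the TUNNEL_ERSPAN_OPT
 * metadata, and oversized frames are trimmed and marked as truncated in
 * the ERSPAN header rather than fragmented.
 */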
static void erspan_fb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_info *tun_info;
	const struct ip_tunnel_key *key;
	struct erspan_metadata *md;
	bool truncate = false;
	__be16 proto;
	int tunnel_hlen;
	int version;
	int nhoff;
	int thoff;

	tun_info = skb_tunnel_info(skb);
	if (unlikely(!tun_info || !(tun_info->mode & IP_TUNNEL_INFO_TX) ||
		     ip_tunnel_info_af(tun_info) != AF_INET))
		goto err_free_skb;

	key = &tun_info->key;
	if (!(tun_info->key.tun_flags & TUNNEL_ERSPAN_OPT))
		goto err_free_skb;
	if (tun_info->options_len < sizeof(*md))
		goto err_free_skb;
	md = ip_tunnel_info_opts(tun_info);

	/* ERSPAN has a fixed 8-byte GRE header */
	version = md->version;
	tunnel_hlen = 8 + erspan_hdr_len(version);

	if (skb_cow_head(skb, dev->needed_headroom))
		goto err_free_skb;

	if (gre_handle_offloads(skb, false))
		goto err_free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	nhoff = skb_network_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IP) &&
	    (ntohs(ip_hdr(skb)->tot_len) > skb->len - nhoff))
		truncate = true;

	thoff = skb_transport_header(skb) - skb_mac_header(skb);
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    (ntohs(ipv6_hdr(skb)->payload_len) > skb->len - thoff))
		truncate = true;

	if (version == 1) {
		erspan_build_header(skb, ntohl(tunnel_id_to_key32(key->tun_id)),
				    ntohl(md->u.index), truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (version == 2) {
		erspan_build_header_v2(skb,
				       ntohl(tunnel_id_to_key32(key->tun_id)),
				       md->u.md2.dir,
				       get_hwid(&md->u.md2),
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto err_free_skb;
	}

	gre_build_header(skb, 8, TUNNEL_SEQ,
			 proto, 0, htonl(tunnel->o_seqno++));

	ip_md_tunnel_xmit(skb, dev, IPPROTO_GRE, tunnel_hlen);

	return;

err_free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
}

static int gre_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct ip_tunnel_key *key;
	struct rtable *rt;
	struct flowi4 fl4;

	if (ip_tunnel_info_af(info) != AF_INET)
		return -EINVAL;

	key = &info->key;
	ip_tunnel_init_flow(&fl4, IPPROTO_GRE, key->u.ipv4.dst, key->u.ipv4.src,
			    tunnel_id_to_key32(key->tun_id),
			    key->tos & ~INET_ECN_MASK, 0, skb->mark,
			    skb_get_hash(skb));
	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	ip_rt_put(rt);
	info->key.u.ipv4.src = fl4.saddr;
	return 0;
}

static netdev_tx_t ipgre_xmit(struct sk_buff *skb,
			      struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	const struct iphdr *tnl_params;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, skb->protocol);
		return NETDEV_TX_OK;
	}

	if (dev->header_ops) {
		const int pull_len = tunnel->hlen + sizeof(struct iphdr);

		if (skb_cow_head(skb, 0))
			goto free_skb;

		tnl_params = (const struct iphdr *)skb->data;

		if (pull_len > skb_transport_offset(skb))
			goto free_skb;

		/* Pull skb since ip_tunnel_xmit() needs skb->data pointing
		 * to the GRE header.
		 */
		skb_pull(skb, pull_len);
		skb_reset_mac_header(skb);
	} else {
		if (skb_cow_head(skb, dev->needed_headroom))
			goto free_skb;

		tnl_params = &tunnel->parms.iph;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	__gre_xmit(skb, dev, tnl_params, skb->protocol);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t erspan_xmit(struct sk_buff *skb,
			       struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	bool truncate = false;
	__be16 proto;

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		erspan_fb_xmit(skb, dev);
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, false))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	if (skb->len > dev->mtu + dev->hard_header_len) {
		pskb_trim(skb, dev->mtu + dev->hard_header_len);
		truncate = true;
	}

	/* Push ERSPAN header */
	if (tunnel->erspan_ver == 0) {
		proto = htons(ETH_P_ERSPAN);
		tunnel->parms.o_flags &= ~TUNNEL_SEQ;
	} else if (tunnel->erspan_ver == 1) {
		erspan_build_header(skb, ntohl(tunnel->parms.o_key),
				    tunnel->index,
				    truncate, true);
		proto = htons(ETH_P_ERSPAN);
	} else if (tunnel->erspan_ver == 2) {
		erspan_build_header_v2(skb, ntohl(tunnel->parms.o_key),
				       tunnel->dir, tunnel->hwid,
				       truncate, true);
		proto = htons(ETH_P_ERSPAN2);
	} else {
		goto free_skb;
	}

	tunnel->parms.o_flags &= ~TUNNEL_KEY;
	__gre_xmit(skb, dev, &tunnel->parms.iph, proto);
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static netdev_tx_t gre_tap_xmit(struct sk_buff *skb,
				struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (!pskb_inet_may_pull(skb))
		goto free_skb;

	if (tunnel->collect_md) {
		gre_fb_xmit(skb, dev, htons(ETH_P_TEB));
		return NETDEV_TX_OK;
	}

	if (gre_handle_offloads(skb, !!(tunnel->parms.o_flags & TUNNEL_CSUM)))
		goto free_skb;

	if (skb_cow_head(skb, dev->needed_headroom))
		goto free_skb;

	__gre_xmit(skb, dev, &tunnel->parms.iph, htons(ETH_P_TEB));
	return NETDEV_TX_OK;

free_skb:
	kfree_skb(skb);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

static void ipgre_link_update(struct net_device *dev, bool set_mtu)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	int len;

	len = tunnel->tun_hlen;
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	len = tunnel->tun_hlen - len;
	tunnel->hlen = tunnel->hlen + len;

	if (dev->header_ops)
		dev->hard_header_len += len;
	else
		dev->needed_headroom += len;

	if (set_mtu)
		dev->mtu = max_t(int, dev->mtu - len, 68);

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    tunnel->encap.type == TUNNEL_ENCAP_NONE) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		} else {
			dev->features &= ~NETIF_F_GSO_SOFTWARE;
			dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		}
		dev->features |= NETIF_F_LLTX;
	} else {
		dev->hw_features &= ~NETIF_F_GSO_SOFTWARE;
		dev->features &= ~(NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE);
	}
}

static int ipgre_tunnel_ctl(struct net_device *dev, struct ip_tunnel_parm *p,
			    int cmd)
{
	int err;

	if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
		if (p->iph.version != 4 || p->iph.protocol != IPPROTO_GRE ||
		    p->iph.ihl != 5 || (p->iph.frag_off & htons(~IP_DF)) ||
		    ((p->i_flags | p->o_flags) & (GRE_VERSION | GRE_ROUTING)))
			return -EINVAL;
	}

	p->i_flags = gre_flags_to_tnl_flags(p->i_flags);
	p->o_flags = gre_flags_to_tnl_flags(p->o_flags);

	err = ip_tunnel_ctl(dev, p, cmd);
	if (err)
		return err;

	if (cmd == SIOCCHGTUNNEL) {
		struct ip_tunnel *t = netdev_priv(dev);

		t->parms.i_flags = p->i_flags;
		t->parms.o_flags = p->o_flags;

		if (strcmp(dev->rtnl_link_ops->kind, "erspan"))
			ipgre_link_update(dev, true);
	}

	p->i_flags = gre_tnl_flags_to_gre_flags(p->i_flags);
	p->o_flags = gre_tnl_flags_to_gre_flags(p->o_flags);
	return 0;
}

/* Nice toy. Unfortunately, useless in real life :-)
   It allows constructing a virtual multiprotocol broadcast "LAN"
   over the Internet, provided multicast routing is tuned.


   I have no idea whether this bicycle was invented before me,
   so I had to set ARPHRD_IPGRE to a random value.
   I have an impression that Cisco could make something similar,
   but this feature is apparently missing in IOS<=11.2(8).

   I set up 10.66.66/24 and fec0:6666:6666::0/96 as virtual networks
   with broadcast 224.66.66.66. If you have access to mbone, play with me :-)

   ping -t 255 224.66.66.66

   If nobody answers, mbone does not work.

   ip tunnel add Universe mode gre remote 224.66.66.66 local <Your_real_addr> ttl 255
   ip addr add 10.66.66.<somewhat>/24 dev Universe
   ifconfig Universe up
   ifconfig Universe add fe80::<Your_real_addr>/10
   ifconfig Universe add fec0:6666:6666::<Your_real_addr>/96
   ftp 10.66.66.66
   ...
   ftp fec0:6666:6666::193.233.7.65
   ...
 */
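/* header_ops->create for broadcast-capable (multicast) GRE devices.
 * Following the usual hard_header convention, this returns the full
 * header length when the header is complete, and minus that length
 * when the destination is still unknown and must be filled in later.
 */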
static int ipgre_header(struct sk_buff *skb, struct net_device *dev,
			unsigned short type,
			const void *daddr, const void *saddr, unsigned int len)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct iphdr *iph;
	struct gre_base_hdr *greh;

	iph = skb_push(skb, t->hlen + sizeof(*iph));
	greh = (struct gre_base_hdr *)(iph + 1);
	greh->flags = gre_tnl_flags_to_gre_flags(t->parms.o_flags);
	greh->protocol = htons(type);

	memcpy(iph, &t->parms.iph, sizeof(struct iphdr));

	/* Set the source hardware address. */
	if (saddr)
		memcpy(&iph->saddr, saddr, 4);
	if (daddr)
		memcpy(&iph->daddr, daddr, 4);
	if (iph->daddr)
		return t->hlen + sizeof(*iph);

	return -(t->hlen + sizeof(*iph));
}

static int ipgre_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct iphdr *iph = (const struct iphdr *)skb_mac_header(skb);

	memcpy(haddr, &iph->saddr, 4);
	return 4;
}

static const struct header_ops ipgre_header_ops = {
	.create	= ipgre_header,
	.parse	= ipgre_header_parse,
};

#ifdef CONFIG_NET_IPGRE_BROADCAST
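/* With a multicast destination the tunnel behaves like a broadcast LAN
 * (see the comment above): on open, join the multicast group on the
 * underlying device; on close, leave it again.
 */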
static int ipgre_open(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr)) {
		struct flowi4 fl4;
		struct rtable *rt;

		rt = ip_route_output_gre(t->net, &fl4,
					 t->parms.iph.daddr,
					 t->parms.iph.saddr,
					 t->parms.o_key,
					 RT_TOS(t->parms.iph.tos),
					 t->parms.link);
		if (IS_ERR(rt))
			return -EADDRNOTAVAIL;
		dev = rt->dst.dev;
		ip_rt_put(rt);
		if (!__in_dev_get_rtnl(dev))
			return -EADDRNOTAVAIL;
		t->mlink = dev->ifindex;
		ip_mc_inc_group(__in_dev_get_rtnl(dev), t->parms.iph.daddr);
	}
	return 0;
}

static int ipgre_close(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	if (ipv4_is_multicast(t->parms.iph.daddr) && t->mlink) {
		struct in_device *in_dev;

		in_dev = inetdev_by_index(t->net, t->mlink);
		if (in_dev)
			ip_mc_dec_group(in_dev, t->parms.iph.daddr);
	}
	return 0;
}
#endif

static const struct net_device_ops ipgre_netdev_ops = {
	.ndo_init		= ipgre_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
#ifdef CONFIG_NET_IPGRE_BROADCAST
	.ndo_open		= ipgre_open,
	.ndo_stop		= ipgre_close,
#endif
	.ndo_start_xmit		= ipgre_xmit,
	.ndo_do_ioctl		= ip_tunnel_ioctl,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_tunnel_ctl		= ipgre_tunnel_ctl,
};

#define GRE_FEATURES	(NETIF_F_SG |		\
			 NETIF_F_FRAGLIST |	\
			 NETIF_F_HIGHDMA |	\
			 NETIF_F_HW_CSUM)

static void ipgre_tunnel_setup(struct net_device *dev)
{
	dev->netdev_ops = &ipgre_netdev_ops;
	dev->type = ARPHRD_IPGRE;
	ip_tunnel_setup(dev, ipgre_net_id);
}

static void __gre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel;

	tunnel = netdev_priv(dev);
	tunnel->tun_hlen = gre_calc_hlen(tunnel->parms.o_flags);
	tunnel->parms.iph.protocol = IPPROTO_GRE;

	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen;
	dev->needed_headroom = tunnel->hlen + sizeof(tunnel->parms.iph);

	dev->features |= GRE_FEATURES;
	dev->hw_features |= GRE_FEATURES;

	if (!(tunnel->parms.o_flags & TUNNEL_SEQ)) {
		/* TCP offload with GRE SEQ is not supported, nor
		 * can we support 2 levels of outer headers requiring
		 * an update.
		 */
		if (!(tunnel->parms.o_flags & TUNNEL_CSUM) ||
		    (tunnel->encap.type == TUNNEL_ENCAP_NONE)) {
			dev->features |= NETIF_F_GSO_SOFTWARE;
			dev->hw_features |= NETIF_F_GSO_SOFTWARE;
		}

		/* Can use a lockless transmit, unless we generate
		 * output sequences
		 */
		dev->features |= NETIF_F_LLTX;
	}
}

static int ipgre_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct iphdr *iph = &tunnel->parms.iph;

	__gre_tunnel_init(dev);

	memcpy(dev->dev_addr, &iph->saddr, 4);
	memcpy(dev->broadcast, &iph->daddr, 4);

	dev->flags = IFF_NOARP;
	netif_keep_dst(dev);
	dev->addr_len = 4;

	if (iph->daddr && !tunnel->collect_md) {
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			if (!iph->saddr)
				return -EINVAL;
			dev->flags = IFF_BROADCAST;
			dev->header_ops = &ipgre_header_ops;
			dev->hard_header_len = tunnel->hlen + sizeof(*iph);
			dev->needed_headroom = 0;
		}
#endif
	} else if (!tunnel->collect_md) {
		dev->header_ops = &ipgre_header_ops;
		dev->hard_header_len = tunnel->hlen + sizeof(*iph);
		dev->needed_headroom = 0;
	}

	return ip_tunnel_init(dev);
}

static const struct gre_protocol ipgre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
};

static int __net_init ipgre_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, ipgre_net_id, &ipgre_link_ops, NULL);
}

static void __net_exit ipgre_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, ipgre_net_id, &ipgre_link_ops);
}

static struct pernet_operations ipgre_net_ops = {
	.init = ipgre_init_net,
	.exit_batch = ipgre_exit_batch_net,
	.id   = &ipgre_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

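/* Netlink validation: GRE_VERSION and GRE_ROUTING bits are rejected since
 * we implement neither version 1 (PPTP) nor the deprecated routing
 * extension, and an extra UDP encapsulation (FOU/GUE) cannot be combined
 * with collect_md.
 */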
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[],
				 struct netlink_ext_ack *extack)
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION | GRE_ROUTING))
		return -EINVAL;

	if (data[IFLA_GRE_COLLECT_METADATA] &&
	    data[IFLA_GRE_ENCAP_TYPE] &&
	    nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]) != TUNNEL_ENCAP_NONE)
		return -EINVAL;

	return 0;
}

static int ipgre_tap_validate(struct nlattr *tb[], struct nlattr *data[],
			      struct netlink_ext_ack *extack)
{
	__be32 daddr;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		goto out;

	if (data[IFLA_GRE_REMOTE]) {
		memcpy(&daddr, nla_data(data[IFLA_GRE_REMOTE]), 4);
		if (!daddr)
			return -EINVAL;
	}

out:
	return ipgre_tunnel_validate(tb, data, extack);
}

static int erspan_validate(struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	__be16 flags = 0;
	int ret;

	if (!data)
		return 0;

	ret = ipgre_tap_validate(tb, data, extack);
	if (ret)
		return ret;

	if (data[IFLA_GRE_ERSPAN_VER] &&
	    nla_get_u8(data[IFLA_GRE_ERSPAN_VER]) == 0)
		return 0;

	/* ERSPAN type II/III should only have the GRE sequence and key flags */
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (!data[IFLA_GRE_COLLECT_METADATA] &&
	    flags != (GRE_SEQ | GRE_KEY))
		return -EINVAL;

	/* The ERSPAN session ID is only 10 bits wide. Since we reuse the
	 * 32-bit key field as the ID, check its range.
	 */
	if (data[IFLA_GRE_IKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_IKEY])) & ~ID_MASK))
		return -EINVAL;

	if (data[IFLA_GRE_OKEY] &&
	    (ntohl(nla_get_be32(data[IFLA_GRE_OKEY])) & ~ID_MASK))
		return -EINVAL;

	return 0;
}

static int ipgre_netlink_parms(struct net_device *dev,
			       struct nlattr *data[],
			       struct nlattr *tb[],
			       struct ip_tunnel_parm *parms,
			       __u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);

	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return 0;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_in_addr(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_in_addr(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC])) {
		if (t->ignore_df)
			return -EINVAL;
		parms->iph.frag_off = htons(IP_DF);
	}

	if (data[IFLA_GRE_COLLECT_METADATA]) {
		t->collect_md = true;
		if (dev->type == ARPHRD_IPGRE)
			dev->type = ARPHRD_NONE;
	}

	if (data[IFLA_GRE_IGNORE_DF]) {
		if (nla_get_u8(data[IFLA_GRE_IGNORE_DF])
		    && (parms->iph.frag_off & htons(IP_DF)))
			return -EINVAL;
		t->ignore_df = !!nla_get_u8(data[IFLA_GRE_IGNORE_DF]);
	}

	if (data[IFLA_GRE_FWMARK])
		*fwmark = nla_get_u32(data[IFLA_GRE_FWMARK]);

	return 0;
}

static int erspan_netlink_parms(struct net_device *dev,
				struct nlattr *data[],
				struct nlattr *tb[],
				struct ip_tunnel_parm *parms,
				__u32 *fwmark)
{
	struct ip_tunnel *t = netdev_priv(dev);
	int err;

	err = ipgre_netlink_parms(dev, data, tb, parms, fwmark);
	if (err)
		return err;
	if (!data)
		return 0;

	if (data[IFLA_GRE_ERSPAN_VER]) {
		t->erspan_ver = nla_get_u8(data[IFLA_GRE_ERSPAN_VER]);

		if (t->erspan_ver > 2)
			return -EINVAL;
	}

	if (t->erspan_ver == 1) {
		if (data[IFLA_GRE_ERSPAN_INDEX]) {
			t->index = nla_get_u32(data[IFLA_GRE_ERSPAN_INDEX]);
			if (t->index & ~INDEX_MASK)
				return -EINVAL;
		}
	} else if (t->erspan_ver == 2) {
		if (data[IFLA_GRE_ERSPAN_DIR]) {
			t->dir = nla_get_u8(data[IFLA_GRE_ERSPAN_DIR]);
			if (t->dir & ~(DIR_MASK >> DIR_OFFSET))
				return -EINVAL;
		}
		if (data[IFLA_GRE_ERSPAN_HWID]) {
			t->hwid = nla_get_u16(data[IFLA_GRE_ERSPAN_HWID]);
			if (t->hwid & ~(HWID_MASK >> HWID_OFFSET))
				return -EINVAL;
		}
	}

	return 0;
}

/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipgre_netlink_encap_parms(struct nlattr *data[],
				      struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_GRE_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_GRE_ENCAP_TYPE]);
	}

	if (data[IFLA_GRE_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_GRE_ENCAP_FLAGS]);
	}

	if (data[IFLA_GRE_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_GRE_ENCAP_SPORT]);
	}

	if (data[IFLA_GRE_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_GRE_ENCAP_DPORT]);
	}

	return ret;
}

static int gre_tap_init(struct net_device *dev)
{
	__gre_tunnel_init(dev);
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops gre_tap_netdev_ops = {
	.ndo_init		= gre_tap_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= gre_tap_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static int erspan_tunnel_init(struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);

	if (tunnel->erspan_ver == 0)
		tunnel->tun_hlen = 4; /* 4-byte GRE hdr. */
	else
		tunnel->tun_hlen = 8; /* 8-byte GRE hdr. */

	tunnel->parms.iph.protocol = IPPROTO_GRE;
	tunnel->hlen = tunnel->tun_hlen + tunnel->encap_hlen +
		       erspan_hdr_len(tunnel->erspan_ver);

	dev->features |= GRE_FEATURES;
	dev->hw_features |= GRE_FEATURES;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	netif_keep_dst(dev);

	return ip_tunnel_init(dev);
}

static const struct net_device_ops erspan_netdev_ops = {
	.ndo_init		= erspan_tunnel_init,
	.ndo_uninit		= ip_tunnel_uninit,
	.ndo_start_xmit		= erspan_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= ip_tunnel_change_mtu,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_get_iflink		= ip_tunnel_get_iflink,
	.ndo_fill_metadata_dst	= gre_fill_metadata_dst,
};

static void ipgre_tap_setup(struct net_device *dev)
{
	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops = &gre_tap_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, gre_tap_net_id);
}

static int
ipgre_newlink_encap_setup(struct net_device *dev, struct nlattr *data[])
{
	struct ip_tunnel_encap ipencap;

	if (ipgre_netlink_encap_parms(data, &ipencap)) {
		struct ip_tunnel *t = netdev_priv(dev);
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}

	return 0;
}

static int ipgre_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	__u32 fwmark = 0;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int erspan_newlink(struct net *src_net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct ip_tunnel_parm p;
	__u32 fwmark = 0;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err)
		return err;
	return ip_tunnel_newlink(dev, tb, &p, fwmark);
}

static int ipgre_changelink(struct net_device *dev, struct nlattr *tb[],
			    struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = ipgre_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	ipgre_link_update(dev, !tb[IFLA_MTU]);

	return 0;
}

static int erspan_changelink(struct net_device *dev, struct nlattr *tb[],
			     struct nlattr *data[],
			     struct netlink_ext_ack *extack)
{
	struct ip_tunnel *t = netdev_priv(dev);
	__u32 fwmark = t->fwmark;
	struct ip_tunnel_parm p;
	int err;

	err = ipgre_newlink_encap_setup(dev, data);
	if (err)
		return err;

	err = erspan_netlink_parms(dev, data, tb, &p, &fwmark);
	if (err < 0)
		return err;

	err = ip_tunnel_changelink(dev, tb, &p, fwmark);
	if (err < 0)
		return err;

	t->parms.i_flags = p.i_flags;
	t->parms.o_flags = p.o_flags;

	return 0;
}

static size_t ipgre_get_size(const struct net_device *dev)
{
	return
		/* IFLA_GRE_LINK */
		nla_total_size(4) +
		/* IFLA_GRE_IFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_OFLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_IKEY */
		nla_total_size(4) +
		/* IFLA_GRE_OKEY */
		nla_total_size(4) +
		/* IFLA_GRE_LOCAL */
		nla_total_size(4) +
		/* IFLA_GRE_REMOTE */
		nla_total_size(4) +
		/* IFLA_GRE_TTL */
		nla_total_size(1) +
		/* IFLA_GRE_TOS */
		nla_total_size(1) +
		/* IFLA_GRE_PMTUDISC */
		nla_total_size(1) +
		/* IFLA_GRE_ENCAP_TYPE */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_FLAGS */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_SPORT */
		nla_total_size(2) +
		/* IFLA_GRE_ENCAP_DPORT */
		nla_total_size(2) +
		/* IFLA_GRE_COLLECT_METADATA */
		nla_total_size(0) +
		/* IFLA_GRE_IGNORE_DF */
		nla_total_size(1) +
		/* IFLA_GRE_FWMARK */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_INDEX */
		nla_total_size(4) +
		/* IFLA_GRE_ERSPAN_VER */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_DIR */
		nla_total_size(1) +
		/* IFLA_GRE_ERSPAN_HWID */
		nla_total_size(2) +
		0;
}

static int ipgre_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_parm *p = &t->parms;
	__be16 o_flags = p->o_flags;

	if (t->erspan_ver <= 2) {
		if (t->erspan_ver != 0 && !t->collect_md)
			o_flags |= TUNNEL_KEY;

		if (nla_put_u8(skb, IFLA_GRE_ERSPAN_VER, t->erspan_ver))
			goto nla_put_failure;

		if (t->erspan_ver == 1) {
			if (nla_put_u32(skb, IFLA_GRE_ERSPAN_INDEX, t->index))
				goto nla_put_failure;
		} else if (t->erspan_ver == 2) {
			if (nla_put_u8(skb, IFLA_GRE_ERSPAN_DIR, t->dir))
				goto nla_put_failure;
			if (nla_put_u16(skb, IFLA_GRE_ERSPAN_HWID, t->hwid))
				goto nla_put_failure;
		}
	}

	if (nla_put_u32(skb, IFLA_GRE_LINK, p->link) ||
	    nla_put_be16(skb, IFLA_GRE_IFLAGS,
			 gre_tnl_flags_to_gre_flags(p->i_flags)) ||
	    nla_put_be16(skb, IFLA_GRE_OFLAGS,
			 gre_tnl_flags_to_gre_flags(o_flags)) ||
	    nla_put_be32(skb, IFLA_GRE_IKEY, p->i_key) ||
	    nla_put_be32(skb, IFLA_GRE_OKEY, p->o_key) ||
	    nla_put_in_addr(skb, IFLA_GRE_LOCAL, p->iph.saddr) ||
	    nla_put_in_addr(skb, IFLA_GRE_REMOTE, p->iph.daddr) ||
	    nla_put_u8(skb, IFLA_GRE_TTL, p->iph.ttl) ||
	    nla_put_u8(skb, IFLA_GRE_TOS, p->iph.tos) ||
	    nla_put_u8(skb, IFLA_GRE_PMTUDISC,
		       !!(p->iph.frag_off & htons(IP_DF))) ||
	    nla_put_u32(skb, IFLA_GRE_FWMARK, t->fwmark))
		goto nla_put_failure;

	if (nla_put_u16(skb, IFLA_GRE_ENCAP_TYPE,
			t->encap.type) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_SPORT,
			 t->encap.sport) ||
	    nla_put_be16(skb, IFLA_GRE_ENCAP_DPORT,
			 t->encap.dport) ||
	    nla_put_u16(skb, IFLA_GRE_ENCAP_FLAGS,
			t->encap.flags))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_GRE_IGNORE_DF, t->ignore_df))
		goto nla_put_failure;

	if (t->collect_md) {
		if (nla_put_flag(skb, IFLA_GRE_COLLECT_METADATA))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static void erspan_setup(struct net_device *dev)
{
	struct ip_tunnel *t = netdev_priv(dev);

	ether_setup(dev);
	dev->max_mtu = 0;
	dev->netdev_ops = &erspan_netdev_ops;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
	ip_tunnel_setup(dev, erspan_net_id);
	t->erspan_ver = 1;
}

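/* Example configuration via the netlink interface below, using iproute2
 * (option spellings as of a reasonably recent iproute2; consult
 * ip-link(8) for your version):
 *
 *   ip link add dev gre1 type gre local 10.0.0.1 remote 10.0.0.2 ttl 64
 *   ip link add dev ers1 type erspan seq key 100 local 10.0.0.1 \
 *           remote 10.0.0.2 erspan_ver 1 erspan 123
 *   ip link add dev gtap0 type gretap external   # collect_md mode
 */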
static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
	[IFLA_GRE_LINK]		= { .type = NLA_U32 },
	[IFLA_GRE_IFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_OFLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_IKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_OKEY]		= { .type = NLA_U32 },
	[IFLA_GRE_LOCAL]	= { .len = sizeof_field(struct iphdr, saddr) },
	[IFLA_GRE_REMOTE]	= { .len = sizeof_field(struct iphdr, daddr) },
	[IFLA_GRE_TTL]		= { .type = NLA_U8 },
	[IFLA_GRE_TOS]		= { .type = NLA_U8 },
	[IFLA_GRE_PMTUDISC]	= { .type = NLA_U8 },
	[IFLA_GRE_ENCAP_TYPE]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_FLAGS]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_SPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_ENCAP_DPORT]	= { .type = NLA_U16 },
	[IFLA_GRE_COLLECT_METADATA]	= { .type = NLA_FLAG },
	[IFLA_GRE_IGNORE_DF]	= { .type = NLA_U8 },
	[IFLA_GRE_FWMARK]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_INDEX]	= { .type = NLA_U32 },
	[IFLA_GRE_ERSPAN_VER]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_DIR]	= { .type = NLA_U8 },
	[IFLA_GRE_ERSPAN_HWID]	= { .type = NLA_U16 },
};

static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
	.kind		= "gre",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tunnel_setup,
	.validate	= ipgre_tunnel_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
	.kind		= "gretap",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= ipgre_tap_setup,
	.validate	= ipgre_tap_validate,
	.newlink	= ipgre_newlink,
	.changelink	= ipgre_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

static struct rtnl_link_ops erspan_link_ops __read_mostly = {
	.kind		= "erspan",
	.maxtype	= IFLA_GRE_MAX,
	.policy		= ipgre_policy,
	.priv_size	= sizeof(struct ip_tunnel),
	.setup		= erspan_setup,
	.validate	= erspan_validate,
	.newlink	= erspan_newlink,
	.changelink	= erspan_changelink,
	.dellink	= ip_tunnel_dellink,
	.get_size	= ipgre_get_size,
	.fill_info	= ipgre_fill_info,
	.get_link_net	= ip_tunnel_get_link_net,
};

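/* Create a flow-based gretap device on behalf of another subsystem
 * (openvswitch uses this). The device is set up through the normal
 * rtnl_link path but with collect_md forced on, and its MTU is lifted
 * to the maximum since the caller handles packet sizing itself.
 */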
struct net_device *gretap_fb_dev_create(struct net *net, const char *name,
					u8 name_assign_type)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	struct ip_tunnel *t;
	int err;

	memset(&tb, 0, sizeof(tb));

	dev = rtnl_create_link(net, name, name_assign_type,
			       &ipgre_tap_ops, tb, NULL);
	if (IS_ERR(dev))
		return dev;

	/* Configure flow based GRE device. */
	t = netdev_priv(dev);
	t->collect_md = true;

	err = ipgre_newlink(net, dev, tb, NULL, NULL);
	if (err < 0) {
		free_netdev(dev);
		return ERR_PTR(err);
	}

	/* openvswitch users expect packet sizes to be unrestricted,
	 * so set the largest MTU we can.
	 */
	err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
	if (err)
		goto out;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto out;

	return dev;
out:
	ip_tunnel_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(gretap_fb_dev_create);

static int __net_init ipgre_tap_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, gre_tap_net_id, &ipgre_tap_ops, "gretap0");
}

static void __net_exit ipgre_tap_exit_batch_net(struct list_head *list_net)
{
	ip_tunnel_delete_nets(list_net, gre_tap_net_id, &ipgre_tap_ops);
}

static struct pernet_operations ipgre_tap_net_ops = {
	.init = ipgre_tap_init_net,
	.exit_batch = ipgre_tap_exit_batch_net,
	.id   = &gre_tap_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

static int __net_init erspan_init_net(struct net *net)
{
	return ip_tunnel_init_net(net, erspan_net_id,
				  &erspan_link_ops, "erspan0");
}

static void __net_exit erspan_exit_batch_net(struct list_head *net_list)
{
	ip_tunnel_delete_nets(net_list, erspan_net_id, &erspan_link_ops);
}

static struct pernet_operations erspan_net_ops = {
	.init = erspan_init_net,
	.exit_batch = erspan_exit_batch_net,
	.id   = &erspan_net_id,
	.size = sizeof(struct ip_tunnel_net),
};

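/* Module init: register the three per-netns tunnel tables first, then the
 * GRE protocol handler, then the rtnl link ops; unwind in the opposite
 * order on failure.
 */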
static int __init ipgre_init(void)
{
	int err;

	pr_info("GRE over IPv4 tunneling driver\n");

	err = register_pernet_device(&ipgre_net_ops);
	if (err < 0)
		return err;

	err = register_pernet_device(&ipgre_tap_net_ops);
	if (err < 0)
		goto pnet_tap_failed;

	err = register_pernet_device(&erspan_net_ops);
	if (err < 0)
		goto pnet_erspan_failed;

	err = gre_add_protocol(&ipgre_protocol, GREPROTO_CISCO);
	if (err < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		goto add_proto_failed;
	}

	err = rtnl_link_register(&ipgre_link_ops);
	if (err < 0)
		goto rtnl_link_failed;

	err = rtnl_link_register(&ipgre_tap_ops);
	if (err < 0)
		goto tap_ops_failed;

	err = rtnl_link_register(&erspan_link_ops);
	if (err < 0)
		goto erspan_link_failed;

	return 0;

erspan_link_failed:
	rtnl_link_unregister(&ipgre_tap_ops);
tap_ops_failed:
	rtnl_link_unregister(&ipgre_link_ops);
rtnl_link_failed:
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
add_proto_failed:
	unregister_pernet_device(&erspan_net_ops);
pnet_erspan_failed:
	unregister_pernet_device(&ipgre_tap_net_ops);
pnet_tap_failed:
	unregister_pernet_device(&ipgre_net_ops);
	return err;
}

static void __exit ipgre_fini(void)
{
	rtnl_link_unregister(&ipgre_tap_ops);
	rtnl_link_unregister(&ipgre_link_ops);
	rtnl_link_unregister(&erspan_link_ops);
	gre_del_protocol(&ipgre_protocol, GREPROTO_CISCO);
	unregister_pernet_device(&ipgre_tap_net_ops);
	unregister_pernet_device(&ipgre_net_ops);
	unregister_pernet_device(&erspan_net_ops);
}

module_init(ipgre_init);
module_exit(ipgre_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK("gre");
MODULE_ALIAS_RTNL_LINK("gretap");
MODULE_ALIAS_RTNL_LINK("erspan");
MODULE_ALIAS_NETDEV("gre0");
MODULE_ALIAS_NETDEV("gretap0");
MODULE_ALIAS_NETDEV("erspan0");