// SPDX-License-Identifier: GPL-2.0
/* Bareudp: UDP tunnel encapsulation for different Payload types like
 * MPLS, NSH, IP, etc.
 * Copyright (c) 2019 Nokia, Inc.
 * Authors: Martin Varghese, <martin.varghese@nokia.com>
 */
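
/*
 * Devices are normally created over netlink.  For illustration (the exact
 * keyword spelling depends on the iproute2 version and is not verified
 * here), an MPLS-over-UDP tunnel could be set up with something like:
 *
 *	ip link add dev bareudp0 type bareudp dstport 6635 ethertype mpls_uc
 *	ip link set dev bareudp0 up
 *
 * Traffic is then steered through the device via tunnel metadata, e.g.
 * lightweight-tunnel routes or tc/OVS actions that attach a tunnel key.
 */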

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/hash.h>
#include <net/dst_metadata.h>
#include <net/gro_cells.h>
#include <net/rtnetlink.h>
#include <net/protocol.h>
#include <net/ip6_tunnel.h>
#include <net/ip_tunnels.h>
#include <net/udp_tunnel.h>
#include <net/bareudp.h>

#define BAREUDP_BASE_HLEN sizeof(struct udphdr)
#define BAREUDP_IPV4_HLEN (sizeof(struct iphdr) + \
			   sizeof(struct udphdr))
#define BAREUDP_IPV6_HLEN (sizeof(struct ipv6hdr) + \
			   sizeof(struct udphdr))

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

/* per-network namespace private data for this module */

static unsigned int bareudp_net_id;

struct bareudp_net {
	struct list_head bareudp_list;
};

/* Pseudo network device */
struct bareudp_dev {
	struct net		*net;	/* netns for packet i/o */
	struct net_device	*dev;	/* netdev for bareudp tunnel */
	__be16			ethertype;
	__be16			port;
	u16			sport_min;
	bool			multi_proto_mode;
	struct socket __rcu	*sock;
	struct list_head	next;	/* bareudp node on namespace list */
	struct gro_cells	gro_cells;
};

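/*
 * UDP encap_rcv() handler: called for every datagram arriving on the tunnel
 * socket.  It infers the inner protocol from the configured ethertype (with
 * IPv6 and MPLS-multicast variants when multi_proto_mode is set), strips the
 * UDP header, attaches tunnel metadata for the collect_md path, checks ECN on
 * decapsulation and hands the packet to GRO.
 */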
static int bareudp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct metadata_dst *tun_dst = NULL;
	struct bareudp_dev *bareudp;
	unsigned short family;
	unsigned int len;
	__be16 proto;
	void *oiph;
	int err;

	bareudp = rcu_dereference_sk_user_data(sk);
	if (!bareudp)
		goto drop;

	if (skb->protocol == htons(ETH_P_IP))
		family = AF_INET;
	else
		family = AF_INET6;

	if (bareudp->ethertype == htons(ETH_P_IP)) {
		__u8 ipversion;

		if (skb_copy_bits(skb, BAREUDP_BASE_HLEN, &ipversion,
				  sizeof(ipversion))) {
			bareudp->dev->stats.rx_dropped++;
			goto drop;
		}
		ipversion >>= 4;

		if (ipversion == 4) {
			proto = htons(ETH_P_IP);
		} else if (ipversion == 6 && bareudp->multi_proto_mode) {
			proto = htons(ETH_P_IPV6);
		} else {
			bareudp->dev->stats.rx_dropped++;
			goto drop;
		}
	} else if (bareudp->ethertype == htons(ETH_P_MPLS_UC)) {
		struct iphdr *tunnel_hdr;

		tunnel_hdr = (struct iphdr *)skb_network_header(skb);
		if (tunnel_hdr->version == 4) {
			if (!ipv4_is_multicast(tunnel_hdr->daddr)) {
				proto = bareudp->ethertype;
			} else if (bareudp->multi_proto_mode &&
				   ipv4_is_multicast(tunnel_hdr->daddr)) {
				proto = htons(ETH_P_MPLS_MC);
			} else {
				bareudp->dev->stats.rx_dropped++;
				goto drop;
			}
		} else {
			int addr_type;
			struct ipv6hdr *tunnel_hdr_v6;

			tunnel_hdr_v6 = (struct ipv6hdr *)skb_network_header(skb);
			addr_type =
			   ipv6_addr_type((struct in6_addr *)&tunnel_hdr_v6->daddr);
			if (!(addr_type & IPV6_ADDR_MULTICAST)) {
				proto = bareudp->ethertype;
			} else if (bareudp->multi_proto_mode &&
				   (addr_type & IPV6_ADDR_MULTICAST)) {
				proto = htons(ETH_P_MPLS_MC);
			} else {
				bareudp->dev->stats.rx_dropped++;
				goto drop;
			}
		}
	} else {
		proto = bareudp->ethertype;
	}

	if (iptunnel_pull_header(skb, BAREUDP_BASE_HLEN,
				 proto,
				 !net_eq(bareudp->net,
					 dev_net(bareudp->dev)))) {
		bareudp->dev->stats.rx_dropped++;
		goto drop;
	}
	tun_dst = udp_tun_rx_dst(skb, family, TUNNEL_KEY, 0, 0);
	if (!tun_dst) {
		bareudp->dev->stats.rx_dropped++;
		goto drop;
	}
	skb_dst_set(skb, &tun_dst->dst);
	skb->dev = bareudp->dev;
	oiph = skb_network_header(skb);
	skb_reset_network_header(skb);

	if (!ipv6_mod_enabled() || family == AF_INET)
		err = IP_ECN_decapsulate(oiph, skb);
	else
		err = IP6_ECN_decapsulate(oiph, skb);

	if (unlikely(err)) {
		if (log_ecn_error) {
			if (!ipv6_mod_enabled() || family == AF_INET)
				net_info_ratelimited("non-ECT from %pI4 "
						     "with TOS=%#x\n",
						     &((struct iphdr *)oiph)->saddr,
						     ((struct iphdr *)oiph)->tos);
			else
				net_info_ratelimited("non-ECT from %pI6\n",
						     &((struct ipv6hdr *)oiph)->saddr);
		}
		if (err > 1) {
			++bareudp->dev->stats.rx_frame_errors;
			++bareudp->dev->stats.rx_errors;
			goto drop;
		}
	}

	len = skb->len;
	err = gro_cells_receive(&bareudp->gro_cells, skb);
	if (likely(err == NET_RX_SUCCESS))
		dev_sw_netstats_rx_add(bareudp->dev, len);

	return 0;
drop:
	/* Consume bad packet */
	kfree_skb(skb);

	return 0;
}

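/* ICMP error lookup hook for the UDP tunnel socket; bareudp needs no special
 * handling here.
 */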
static int bareudp_err_lookup(struct sock *sk, struct sk_buff *skb)
{
	return 0;
}

static int bareudp_init(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	err = gro_cells_init(&bareudp->gro_cells, dev);
	if (err) {
		free_percpu(dev->tstats);
		return err;
	}
	return 0;
}

static void bareudp_uninit(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	gro_cells_destroy(&bareudp->gro_cells);
	free_percpu(dev->tstats);
}

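/*
 * Open the UDP listening socket for the tunnel.  When IPv6 is available the
 * socket is created as AF_INET6 with IPV6_V6ONLY left clear, so one socket
 * serves both address families.
 */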
static struct socket *bareudp_create_sock(struct net *net, __be16 port)
{
	struct udp_port_cfg udp_conf;
	struct socket *sock;
	int err;

	memset(&udp_conf, 0, sizeof(udp_conf));

	if (ipv6_mod_enabled())
		udp_conf.family = AF_INET6;
	else
		udp_conf.family = AF_INET;

	udp_conf.local_udp_port = port;
	/* Open UDP socket */
	err = udp_sock_create(net, &udp_conf, &sock);
	if (err < 0)
		return ERR_PTR(err);

	return sock;
}

/* Create new listen socket if needed */
static int bareudp_socket_create(struct bareudp_dev *bareudp, __be16 port)
{
	struct udp_tunnel_sock_cfg tunnel_cfg;
	struct socket *sock;

	sock = bareudp_create_sock(bareudp->net, port);
	if (IS_ERR(sock))
		return PTR_ERR(sock);

	/* Mark socket as an encapsulation socket */
	memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
	tunnel_cfg.sk_user_data = bareudp;
	tunnel_cfg.encap_type = 1;
	tunnel_cfg.encap_rcv = bareudp_udp_encap_recv;
	tunnel_cfg.encap_err_lookup = bareudp_err_lookup;
	tunnel_cfg.encap_destroy = NULL;
	setup_udp_tunnel_sock(bareudp->net, sock, &tunnel_cfg);

	rcu_assign_pointer(bareudp->sock, sock);
	return 0;
}

static int bareudp_open(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	int ret = 0;

	ret = bareudp_socket_create(bareudp, bareudp->port);
	return ret;
}

static void bareudp_sock_release(struct bareudp_dev *bareudp)
{
	struct socket *sock;

	sock = bareudp->sock;
	rcu_assign_pointer(bareudp->sock, NULL);
	synchronize_net();
	udp_tunnel_sock_release(sock);
}

static int bareudp_stop(struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	bareudp_sock_release(bareudp);
	return 0;
}

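/*
 * IPv4 transmit path: route the outer packet from the tunnel metadata key,
 * check the path MTU, pick a flow-hashed UDP source port, apply ECN/TTL/DF
 * from the key, make headroom for the encapsulation headers and emit the
 * packet through udp_tunnel_xmit_skb().
 */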
static int bareudp_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			    struct bareudp_dev *bareudp,
			    const struct ip_tunnel_info *info)
{
	bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct socket *sock = rcu_dereference(bareudp->sock);
	bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	const struct ip_tunnel_key *key = &info->key;
	struct rtable *rt;
	__be16 sport, df;
	int min_headroom;
	__u8 tos, ttl;
	__be32 saddr;
	int err;

	if (!sock)
		return -ESHUTDOWN;

	rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr, info,
				    IPPROTO_UDP, use_cache);

	if (IS_ERR(rt))
		return PTR_ERR(rt);

	skb_tunnel_check_pmtu(skb, &rt->dst,
			      BAREUDP_IPV4_HLEN + info->options_len, false);

	sport = udp_flow_src_port(bareudp->net, skb,
				  bareudp->sport_min, USHRT_MAX,
				  true);
	tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
	ttl = key->ttl;
	df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0;
	skb_scrub_packet(skb, xnet);

	err = -ENOSPC;
	if (!skb_pull(skb, skb_network_offset(skb)))
		goto free_dst;

	min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len +
		BAREUDP_BASE_HLEN + info->options_len + sizeof(struct iphdr);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	skb_set_inner_protocol(skb, bareudp->ethertype);
	udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, info->key.u.ipv4.dst,
			    tos, ttl, df, sport, bareudp->port,
			    !net_eq(bareudp->net, dev_net(bareudp->dev)),
			    !(info->key.tun_flags & TUNNEL_CSUM));
	return 0;

free_dst:
	dst_release(&rt->dst);
	return err;
}

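/* IPv6 counterpart of bareudp_xmit_skb(), emitting via udp_tunnel6_xmit_skb(). */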
static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
			     struct bareudp_dev *bareudp,
			     const struct ip_tunnel_info *info)
{
	bool xnet = !net_eq(bareudp->net, dev_net(bareudp->dev));
	bool use_cache = ip_tunnel_dst_cache_usable(skb, info);
	struct socket *sock = rcu_dereference(bareudp->sock);
	bool udp_sum = !!(info->key.tun_flags & TUNNEL_CSUM);
	const struct ip_tunnel_key *key = &info->key;
	struct dst_entry *dst = NULL;
	struct in6_addr saddr, daddr;
	int min_headroom;
	__u8 prio, ttl;
	__be16 sport;
	int err;

	if (!sock)
		return -ESHUTDOWN;

	dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock, &saddr, info,
				    IPPROTO_UDP, use_cache);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	skb_tunnel_check_pmtu(skb, dst, BAREUDP_IPV6_HLEN + info->options_len,
			      false);

	sport = udp_flow_src_port(bareudp->net, skb,
				  bareudp->sport_min, USHRT_MAX,
				  true);
	prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb);
	ttl = key->ttl;

	skb_scrub_packet(skb, xnet);

	err = -ENOSPC;
	if (!skb_pull(skb, skb_network_offset(skb)))
		goto free_dst;

	min_headroom = LL_RESERVED_SPACE(dst->dev) + dst->header_len +
		BAREUDP_BASE_HLEN + info->options_len + sizeof(struct ipv6hdr);

	err = skb_cow_head(skb, min_headroom);
	if (unlikely(err))
		goto free_dst;

	err = udp_tunnel_handle_offloads(skb, udp_sum);
	if (err)
		goto free_dst;

	daddr = info->key.u.ipv6.dst;
	udp_tunnel6_xmit_skb(dst, sock->sk, skb, dev,
			     &saddr, &daddr, prio, ttl,
			     info->key.label, sport, bareudp->port,
			     !(info->key.tun_flags & TUNNEL_CSUM));
	return 0;

free_dst:
	dst_release(dst);
	return err;
}

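/*
 * A packet may be sent only if its protocol matches the configured ethertype.
 * In multi-proto mode two extra pairings are allowed: MPLS multicast on an
 * MPLS unicast device, and IPv6 on an IPv4 device.  For illustration (keyword
 * spelling depends on the iproute2 version), such a device could be created
 * with:
 *
 *	ip link add dev bareudp0 type bareudp dstport 6635 ethertype ipv4 multiproto
 */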
static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
{
	if (bareudp->ethertype == proto)
		return true;

	if (!bareudp->multi_proto_mode)
		return false;

	if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
	    proto == htons(ETH_P_MPLS_MC))
		return true;

	if (bareudp->ethertype == htons(ETH_P_IP) &&
	    proto == htons(ETH_P_IPV6))
		return true;

	return false;
}

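/*
 * ndo_start_xmit(): bareudp is an external-mode (collect_md style) device, so
 * every skb must carry transmit tunnel metadata (for example from a
 * lightweight-tunnel route or a tc/OVS tunnel_key action); packets without it
 * are dropped with -EINVAL.
 */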
static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);
	struct ip_tunnel_info *info = NULL;
	int err;

	if (!bareudp_proto_valid(bareudp, skb->protocol)) {
		err = -EINVAL;
		goto tx_error;
	}

	info = skb_tunnel_info(skb);
	if (unlikely(!info || !(info->mode & IP_TUNNEL_INFO_TX))) {
		err = -EINVAL;
		goto tx_error;
	}

	rcu_read_lock();
	if (ipv6_mod_enabled() && info->mode & IP_TUNNEL_INFO_IPV6)
		err = bareudp6_xmit_skb(skb, dev, bareudp, info);
	else
		err = bareudp_xmit_skb(skb, dev, bareudp, info);

	rcu_read_unlock();

	if (likely(!err))
		return NETDEV_TX_OK;
tx_error:
	dev_kfree_skb(skb);

	if (err == -ELOOP)
		dev->stats.collisions++;
	else if (err == -ENETUNREACH)
		dev->stats.tx_carrier_errors++;

	dev->stats.tx_errors++;
	return NETDEV_TX_OK;
}

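/*
 * ndo_fill_metadata_dst(): pre-populate the tunnel key with the outer source
 * address and UDP ports so users of the metadata dst (e.g. Open vSwitch) can
 * see the complete tunnel header that a transmit would use.
 */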
static int bareudp_fill_metadata_dst(struct net_device *dev,
				     struct sk_buff *skb)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	struct bareudp_dev *bareudp = netdev_priv(dev);
	bool use_cache;

	use_cache = ip_tunnel_dst_cache_usable(skb, info);

	if (!ipv6_mod_enabled() || ip_tunnel_info_af(info) == AF_INET) {
		struct rtable *rt;
		__be32 saddr;

		rt = ip_route_output_tunnel(skb, dev, bareudp->net, &saddr,
					    info, IPPROTO_UDP, use_cache);
		if (IS_ERR(rt))
			return PTR_ERR(rt);

		ip_rt_put(rt);
		info->key.u.ipv4.src = saddr;
	} else if (ip_tunnel_info_af(info) == AF_INET6) {
		struct dst_entry *dst;
		struct in6_addr saddr;
		struct socket *sock = rcu_dereference(bareudp->sock);

		dst = ip6_dst_lookup_tunnel(skb, dev, bareudp->net, sock,
					    &saddr, info, IPPROTO_UDP,
					    use_cache);
		if (IS_ERR(dst))
			return PTR_ERR(dst);

		dst_release(dst);
		info->key.u.ipv6.src = saddr;
	} else {
		return -EINVAL;
	}

	info->key.tp_src = udp_flow_src_port(bareudp->net, skb,
					     bareudp->sport_min,
					     USHRT_MAX, true);
	info->key.tp_dst = bareudp->port;
	return 0;
}

static const struct net_device_ops bareudp_netdev_ops = {
	.ndo_init		= bareudp_init,
	.ndo_uninit		= bareudp_uninit,
	.ndo_open		= bareudp_open,
	.ndo_stop		= bareudp_stop,
	.ndo_start_xmit		= bareudp_xmit,
	.ndo_get_stats64	= ip_tunnel_get_stats64,
	.ndo_fill_metadata_dst	= bareudp_fill_metadata_dst,
};

static const struct nla_policy bareudp_policy[IFLA_BAREUDP_MAX + 1] = {
	[IFLA_BAREUDP_PORT]		= { .type = NLA_U16 },
	[IFLA_BAREUDP_ETHERTYPE]	= { .type = NLA_U16 },
	[IFLA_BAREUDP_SRCPORT_MIN]	= { .type = NLA_U16 },
	[IFLA_BAREUDP_MULTIPROTO_MODE]	= { .type = NLA_FLAG },
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type bareudp_type = {
	.name = "bareudp",
};

/* Initialize the device structure. */
static void bareudp_setup(struct net_device *dev)
{
	dev->netdev_ops = &bareudp_netdev_ops;
	dev->needs_free_netdev = true;
	SET_NETDEV_DEVTYPE(dev, &bareudp_type);
	dev->features    |= NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features    |= NETIF_F_RXCSUM;
	dev->features    |= NETIF_F_LLTX;
	dev->features    |= NETIF_F_GSO_SOFTWARE;
	dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
	dev->hw_features |= NETIF_F_GSO_SOFTWARE;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->mtu = ETH_DATA_LEN;
	dev->min_mtu = IPV4_MIN_MTU;
	dev->max_mtu = IP_MAX_MTU - BAREUDP_BASE_HLEN;
	dev->type = ARPHRD_NONE;
	netif_keep_dst(dev);
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
}

static int bareudp_validate(struct nlattr *tb[], struct nlattr *data[],
			    struct netlink_ext_ack *extack)
{
	if (!data) {
		NL_SET_ERR_MSG(extack,
			       "Not enough attributes provided to perform the operation");
		return -EINVAL;
	}
	return 0;
}

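/*
 * Parse the netlink attributes into a struct bareudp_conf.  IFLA_BAREUDP_PORT
 * and IFLA_BAREUDP_ETHERTYPE are mandatory; the source-port minimum and
 * multi-proto flag are optional.
 */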
static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
			struct netlink_ext_ack *extack)
{
	memset(conf, 0, sizeof(*conf));

	if (!data[IFLA_BAREUDP_PORT]) {
		NL_SET_ERR_MSG(extack, "port not specified");
		return -EINVAL;
	}
	if (!data[IFLA_BAREUDP_ETHERTYPE]) {
		NL_SET_ERR_MSG(extack, "ethertype not specified");
		return -EINVAL;
	}

	if (data[IFLA_BAREUDP_PORT])
		conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);

	if (data[IFLA_BAREUDP_ETHERTYPE])
		conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);

	if (data[IFLA_BAREUDP_SRCPORT_MIN])
		conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);

	if (data[IFLA_BAREUDP_MULTIPROTO_MODE])
		conf->multi_proto_mode = true;

	return 0;
}

static struct bareudp_dev *bareudp_find_dev(struct bareudp_net *bn,
					    const struct bareudp_conf *conf)
{
	struct bareudp_dev *bareudp, *t = NULL;

	list_for_each_entry(bareudp, &bn->bareudp_list, next) {
		if (conf->port == bareudp->port)
			t = bareudp;
	}
	return t;
}

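/*
 * Apply a parsed configuration to a freshly allocated device: reject a second
 * device on the same UDP port in this namespace, restrict multi-proto mode to
 * the MPLS unicast and IPv4 ethertypes, register the netdev and link it into
 * the per-namespace list.
 */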
static int bareudp_configure(struct net *net, struct net_device *dev,
			     struct bareudp_conf *conf)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
	struct bareudp_dev *t, *bareudp = netdev_priv(dev);
	int err;

	bareudp->net = net;
	bareudp->dev = dev;
	t = bareudp_find_dev(bn, conf);
	if (t)
		return -EBUSY;

	if (conf->multi_proto_mode &&
	    (conf->ethertype != htons(ETH_P_MPLS_UC) &&
	     conf->ethertype != htons(ETH_P_IP)))
		return -EINVAL;

	bareudp->port = conf->port;
	bareudp->ethertype = conf->ethertype;
	bareudp->sport_min = conf->sport_min;
	bareudp->multi_proto_mode = conf->multi_proto_mode;

	err = register_netdevice(dev);
	if (err)
		return err;

	list_add(&bareudp->next, &bn->bareudp_list);
	return 0;
}

static int bareudp_link_config(struct net_device *dev,
			       struct nlattr *tb[])
{
	int err;

	if (tb[IFLA_MTU]) {
		err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
		if (err)
			return err;
	}
	return 0;
}

static void bareudp_dellink(struct net_device *dev, struct list_head *head)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	list_del(&bareudp->next);
	unregister_netdevice_queue(dev, head);
}

static int bareudp_newlink(struct net *net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[],
			   struct netlink_ext_ack *extack)
{
	struct bareudp_conf conf;
	LIST_HEAD(list_kill);
	int err;

	err = bareudp2info(data, &conf, extack);
	if (err)
		return err;

	err = bareudp_configure(net, dev, &conf);
	if (err)
		return err;

	err = bareudp_link_config(dev, tb);
	if (err)
		goto err_unconfig;

	return 0;

err_unconfig:
	bareudp_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return err;
}

static size_t bareudp_get_size(const struct net_device *dev)
{
	return  nla_total_size(sizeof(__be16)) +  /* IFLA_BAREUDP_PORT */
		nla_total_size(sizeof(__be16)) +  /* IFLA_BAREUDP_ETHERTYPE */
		nla_total_size(sizeof(__u16))  +  /* IFLA_BAREUDP_SRCPORT_MIN */
		nla_total_size(0)              +  /* IFLA_BAREUDP_MULTIPROTO_MODE */
		0;
}

static int bareudp_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct bareudp_dev *bareudp = netdev_priv(dev);

	if (nla_put_be16(skb, IFLA_BAREUDP_PORT, bareudp->port))
		goto nla_put_failure;
	if (nla_put_be16(skb, IFLA_BAREUDP_ETHERTYPE, bareudp->ethertype))
		goto nla_put_failure;
	if (nla_put_u16(skb, IFLA_BAREUDP_SRCPORT_MIN, bareudp->sport_min))
		goto nla_put_failure;
	if (bareudp->multi_proto_mode &&
	    nla_put_flag(skb, IFLA_BAREUDP_MULTIPROTO_MODE))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops bareudp_link_ops __read_mostly = {
	.kind		= "bareudp",
	.maxtype	= IFLA_BAREUDP_MAX,
	.policy		= bareudp_policy,
	.priv_size	= sizeof(struct bareudp_dev),
	.setup		= bareudp_setup,
	.validate	= bareudp_validate,
	.newlink	= bareudp_newlink,
	.dellink	= bareudp_dellink,
	.get_size	= bareudp_get_size,
	.fill_info	= bareudp_fill_info,
};

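/*
 * bareudp_dev_create() - create a bareudp device from inside the kernel,
 * without going through rtnetlink.  A minimal sketch of a hypothetical
 * in-kernel caller (names are illustrative; the caller must hold the RTNL
 * lock, since register_netdevice() is used underneath):
 *
 *	struct bareudp_conf conf = {
 *		.ethertype = htons(ETH_P_MPLS_UC),
 *		.port      = htons(6635),
 *	};
 *	struct net_device *bdev;
 *
 *	bdev = bareudp_dev_create(net, "mplsudp0", NET_NAME_USER, &conf);
 *	if (IS_ERR(bdev))
 *		return PTR_ERR(bdev);
 */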
struct net_device *bareudp_dev_create(struct net *net, const char *name,
				      u8 name_assign_type,
				      struct bareudp_conf *conf)
{
	struct nlattr *tb[IFLA_MAX + 1];
	struct net_device *dev;
	LIST_HEAD(list_kill);
	int err;

	memset(tb, 0, sizeof(tb));
	dev = rtnl_create_link(net, name, name_assign_type,
			       &bareudp_link_ops, tb, NULL);
	if (IS_ERR(dev))
		return dev;

	err = bareudp_configure(net, dev, conf);
	if (err) {
		free_netdev(dev);
		return ERR_PTR(err);
	}
	err = dev_set_mtu(dev, IP_MAX_MTU - BAREUDP_BASE_HLEN);
	if (err)
		goto err;

	err = rtnl_configure_link(dev, NULL);
	if (err < 0)
		goto err;

	return dev;
err:
	bareudp_dellink(dev, &list_kill);
	unregister_netdevice_many(&list_kill);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bareudp_dev_create);

static __net_init int bareudp_init_net(struct net *net)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);

	INIT_LIST_HEAD(&bn->bareudp_list);
	return 0;
}

static void bareudp_destroy_tunnels(struct net *net, struct list_head *head)
{
	struct bareudp_net *bn = net_generic(net, bareudp_net_id);
	struct bareudp_dev *bareudp, *next;

	list_for_each_entry_safe(bareudp, next, &bn->bareudp_list, next)
		unregister_netdevice_queue(bareudp->dev, head);
}

static void __net_exit bareudp_exit_batch_net(struct list_head *net_list)
{
	struct net *net;
	LIST_HEAD(list);

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list)
		bareudp_destroy_tunnels(net, &list);

	/* unregister the devices gathered above */
	unregister_netdevice_many(&list);
	rtnl_unlock();
}

static struct pernet_operations bareudp_net_ops = {
	.init = bareudp_init_net,
	.exit_batch = bareudp_exit_batch_net,
	.id   = &bareudp_net_id,
	.size = sizeof(struct bareudp_net),
};

static int __init bareudp_init_module(void)
{
	int rc;

	rc = register_pernet_subsys(&bareudp_net_ops);
	if (rc)
		goto out1;

	rc = rtnl_link_register(&bareudp_link_ops);
	if (rc)
		goto out2;

	return 0;
out2:
	unregister_pernet_subsys(&bareudp_net_ops);
out1:
	return rc;
}
late_initcall(bareudp_init_module);

static void __exit bareudp_cleanup_module(void)
{
	rtnl_link_unregister(&bareudp_link_ops);
	unregister_pernet_subsys(&bareudp_net_ops);
}
module_exit(bareudp_cleanup_module);

MODULE_ALIAS_RTNL_LINK("bareudp");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Varghese <martin.varghese@nokia.com>");
MODULE_DESCRIPTION("Interface driver for UDP encapsulated traffic");