1 /* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
2 *
3 * This program is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU General Public License as
5 * published by the Free Software Foundation; either version 2 of
6 * the License, or (at your option) any later version.
7 *
8 */
9
10 #include "ipvlan.h"
11
/* Per-netns id, used with net_generic() to find our ipvlan_netns. */
static unsigned int ipvlan_netid __read_mostly;

struct ipvlan_netns {
	/* Count of L3S-mode users in this netns; the netfilter hooks
	 * below stay registered while this is non-zero.
	 */
	unsigned int ipvl_nf_hook_refcnt;
};

/* LOCAL_IN hooks used only in L3S mode (presumably to hand inbound
 * packets to the owning slave — see ipvlan_nf_input).  INT_MAX
 * priority runs them after every other LOCAL_IN hook.
 */
static const struct nf_hook_ops ipvl_nfops[] = {
	{
		.hook = ipvlan_nf_input,
		.pf = NFPROTO_IPV4,
		.hooknum = NF_INET_LOCAL_IN,
		.priority = INT_MAX,
	},
	{
		.hook = ipvlan_nf_input,
		.pf = NFPROTO_IPV6,
		.hooknum = NF_INET_LOCAL_IN,
		.priority = INT_MAX,
	},
};

/* l3mdev hooks installed on the master while the port is in L3S mode. */
static const struct l3mdev_ops ipvl_l3mdev_ops = {
	.l3mdev_l3_rcv = ipvlan_l3_rcv,
};
36
/* Mirror the lower (physical) device's MTU onto the ipvlan slave. */
static void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
{
	ipvlan->dev->mtu = dev->mtu;
}
41
ipvlan_register_nf_hook(struct net * net)42 static int ipvlan_register_nf_hook(struct net *net)
43 {
44 struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);
45 int err = 0;
46
47 if (!vnet->ipvl_nf_hook_refcnt) {
48 err = nf_register_net_hooks(net, ipvl_nfops,
49 ARRAY_SIZE(ipvl_nfops));
50 if (!err)
51 vnet->ipvl_nf_hook_refcnt = 1;
52 } else {
53 vnet->ipvl_nf_hook_refcnt++;
54 }
55
56 return err;
57 }
58
ipvlan_unregister_nf_hook(struct net * net)59 static void ipvlan_unregister_nf_hook(struct net *net)
60 {
61 struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);
62
63 if (WARN_ON(!vnet->ipvl_nf_hook_refcnt))
64 return;
65
66 vnet->ipvl_nf_hook_refcnt--;
67 if (!vnet->ipvl_nf_hook_refcnt)
68 nf_unregister_net_hooks(net, ipvl_nfops,
69 ARRAY_SIZE(ipvl_nfops));
70 }
71
/* Switch every slave on @port to mode @nval.
 *
 * L3/L3S slaves run with IFF_NOARP set, L2 slaves without it, so each
 * slave's flags are updated first; any dev_change_flags() failure rolls
 * back the flag changes already made and leaves the mode untouched.
 * Entering L3S installs the per-netns netfilter hooks and the master's
 * l3mdev hooks; leaving L3S removes them again.
 */
static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
{
	struct ipvl_dev *ipvlan;
	struct net_device *mdev = port->dev;
	unsigned int flags;
	int err;

	ASSERT_RTNL();
	if (port->mode != nval) {
		list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
			flags = ipvlan->dev->flags;
			if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
				err = dev_change_flags(ipvlan->dev,
						       flags | IFF_NOARP);
			} else {
				err = dev_change_flags(ipvlan->dev,
						       flags & ~IFF_NOARP);
			}
			if (unlikely(err))
				goto fail;
		}
		if (nval == IPVLAN_MODE_L3S) {
			/* New mode is L3S */
			err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
			if (!err) {
				mdev->l3mdev_ops = &ipvl_l3mdev_ops;
				mdev->priv_flags |= IFF_L3MDEV_RX_HANDLER;
			} else
				goto fail;
		} else if (port->mode == IPVLAN_MODE_L3S) {
			/* Old mode was L3S */
			mdev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
			ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
			mdev->l3mdev_ops = NULL;
		}
		port->mode = nval;
	}
	return 0;

fail:
	/* Undo the flags changes that have been done so far. */
	list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) {
		flags = ipvlan->dev->flags;
		if (port->mode == IPVLAN_MODE_L3 ||
		    port->mode == IPVLAN_MODE_L3S)
			dev_change_flags(ipvlan->dev, flags | IFF_NOARP);
		else
			dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP);
	}

	return err;
}
124
/* Turn @dev into an ipvlan master: allocate the ipvl_port, claim the
 * device's rx_handler and mark the device IFF_IPVLAN_MASTER.
 *
 * Fails with -EINVAL for loopback/non-ethernet devices and -EBUSY if
 * another rx_handler already owns the device.
 */
static int ipvlan_port_create(struct net_device *dev)
{
	struct ipvl_port *port;
	int err, idx;

	if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) {
		netdev_err(dev, "Master is either lo or non-ether device\n");
		return -EINVAL;
	}

	if (netdev_is_rx_handler_busy(dev)) {
		netdev_err(dev, "Device is already in use.\n");
		return -EBUSY;
	}

	port = kzalloc(sizeof(struct ipvl_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	write_pnet(&port->pnet, dev_net(dev));
	port->dev = dev;
	port->mode = IPVLAN_MODE_L3;	/* default port mode */
	INIT_LIST_HEAD(&port->ipvlans);
	for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
		INIT_HLIST_HEAD(&port->hlhead[idx]);

	/* Backlog queue + work item for deferred multicast delivery. */
	skb_queue_head_init(&port->backlog);
	INIT_WORK(&port->wq, ipvlan_process_multicast);
	/* ida hands out unique per-slave dev_ids, starting at 1. */
	ida_init(&port->ida);
	port->dev_id_start = 1;

	err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port);
	if (err)
		goto err;

	dev->priv_flags |= IFF_IPVLAN_MASTER;
	return 0;

err:
	kfree(port);
	return err;
}
167
/* Tear down the master state created by ipvlan_port_create(): undo L3S
 * hooks if that mode was active, release the rx_handler, drain the
 * multicast backlog (dropping queued skbs and the device references
 * they hold) and free the port.
 */
static void ipvlan_port_destroy(struct net_device *dev)
{
	struct ipvl_port *port = ipvlan_port_get_rtnl(dev);
	struct sk_buff *skb;

	dev->priv_flags &= ~IFF_IPVLAN_MASTER;
	if (port->mode == IPVLAN_MODE_L3S) {
		dev->priv_flags &= ~IFF_L3MDEV_RX_HANDLER;
		ipvlan_unregister_nf_hook(dev_net(dev));
		dev->l3mdev_ops = NULL;
	}
	netdev_rx_handler_unregister(dev);
	/* Flush the multicast work before freeing what it operates on. */
	cancel_work_sync(&port->wq);
	while ((skb = __skb_dequeue(&port->backlog)) != NULL) {
		if (skb->dev)
			dev_put(skb->dev);
		kfree_skb(skb);
	}
	ida_destroy(&port->ida);
	kfree(port);
}
189
/* Feature bits an ipvlan slave may share with its lower device. */
#define IPVLAN_FEATURES \
	(NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
	 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \
	 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
	 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)

/* Link-state bits mirrored from the lower device (see ipvlan_init()). */
#define IPVLAN_STATE_MASK \
	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
198
/* ndo_init: inherit link state, features and GSO limits from the lower
 * device, allocate per-cpu stats and account this slave on the port.
 */
static int ipvlan_init(struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	const struct net_device *phy_dev = ipvlan->phy_dev;
	struct ipvl_port *port = ipvlan->port;

	/* Mirror the carrier/dormant bits from the lower device. */
	dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
		     (phy_dev->state & IPVLAN_STATE_MASK);
	dev->features = phy_dev->features & IPVLAN_FEATURES;
	dev->features |= NETIF_F_LLTX;	/* lockless transmit */
	dev->gso_max_size = phy_dev->gso_max_size;
	dev->gso_max_segs = phy_dev->gso_max_segs;
	dev->hard_header_len = phy_dev->hard_header_len;

	netdev_lockdep_set_classes(dev);

	ipvlan->pcpu_stats = netdev_alloc_pcpu_stats(struct ipvl_pcpu_stats);
	if (!ipvlan->pcpu_stats)
		return -ENOMEM;

	port->count += 1;

	return 0;
}
223
/* ndo_uninit: free the per-cpu stats; the last slave going away also
 * destroys the port on the lower device.
 */
static void ipvlan_uninit(struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan->port;

	free_percpu(ipvlan->pcpu_stats);

	port->count -= 1;
	if (!port->count)
		ipvlan_port_destroy(port->dev);
}
235
ipvlan_open(struct net_device * dev)236 static int ipvlan_open(struct net_device *dev)
237 {
238 struct ipvl_dev *ipvlan = netdev_priv(dev);
239 struct ipvl_addr *addr;
240
241 if (ipvlan->port->mode == IPVLAN_MODE_L3 ||
242 ipvlan->port->mode == IPVLAN_MODE_L3S)
243 dev->flags |= IFF_NOARP;
244 else
245 dev->flags &= ~IFF_NOARP;
246
247 list_for_each_entry(addr, &ipvlan->addrs, anode)
248 ipvlan_ht_addr_add(ipvlan, addr);
249
250 return 0;
251 }
252
ipvlan_stop(struct net_device * dev)253 static int ipvlan_stop(struct net_device *dev)
254 {
255 struct ipvl_dev *ipvlan = netdev_priv(dev);
256 struct net_device *phy_dev = ipvlan->phy_dev;
257 struct ipvl_addr *addr;
258
259 dev_uc_unsync(phy_dev, dev);
260 dev_mc_unsync(phy_dev, dev);
261
262 list_for_each_entry(addr, &ipvlan->addrs, anode)
263 ipvlan_ht_addr_del(addr);
264
265 return 0;
266 }
267
ipvlan_start_xmit(struct sk_buff * skb,struct net_device * dev)268 static netdev_tx_t ipvlan_start_xmit(struct sk_buff *skb,
269 struct net_device *dev)
270 {
271 const struct ipvl_dev *ipvlan = netdev_priv(dev);
272 int skblen = skb->len;
273 int ret;
274
275 ret = ipvlan_queue_xmit(skb, dev);
276 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
277 struct ipvl_pcpu_stats *pcptr;
278
279 pcptr = this_cpu_ptr(ipvlan->pcpu_stats);
280
281 u64_stats_update_begin(&pcptr->syncp);
282 pcptr->tx_pkts++;
283 pcptr->tx_bytes += skblen;
284 u64_stats_update_end(&pcptr->syncp);
285 } else {
286 this_cpu_inc(ipvlan->pcpu_stats->tx_drps);
287 }
288 return ret;
289 }
290
/* ndo_fix_features: constrain IPVLAN_FEATURES bits to what the user
 * enabled on this slave (sfeatures); bits outside the mask pass
 * through unchanged.
 */
static netdev_features_t ipvlan_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);

	return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES);
}
298
ipvlan_change_rx_flags(struct net_device * dev,int change)299 static void ipvlan_change_rx_flags(struct net_device *dev, int change)
300 {
301 struct ipvl_dev *ipvlan = netdev_priv(dev);
302 struct net_device *phy_dev = ipvlan->phy_dev;
303
304 if (change & IFF_ALLMULTI)
305 dev_set_allmulti(phy_dev, dev->flags & IFF_ALLMULTI? 1 : -1);
306 }
307
/* ndo_set_rx_mode: rebuild the slave's L2 multicast filter bitmap.
 *
 * Promisc/allmulti passes everything; otherwise each subscribed
 * multicast address is hashed into the filter.  The bitmap is built in
 * a local copy and then copied over so the fast path never observes a
 * half-cleared filter.
 */
static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		bitmap_fill(ipvlan->mac_filters, IPVLAN_MAC_FILTER_SIZE);
	} else {
		struct netdev_hw_addr *ha;
		DECLARE_BITMAP(mc_filters, IPVLAN_MAC_FILTER_SIZE);

		bitmap_zero(mc_filters, IPVLAN_MAC_FILTER_SIZE);
		netdev_for_each_mc_addr(ha, dev)
			__set_bit(ipvlan_mac_hash(ha->addr), mc_filters);

		/* Turn-on broadcast bit irrespective of address family,
		 * since broadcast is deferred to a work-queue, hence no
		 * impact on fast-path processing.
		 */
		__set_bit(ipvlan_mac_hash(dev->broadcast), mc_filters);

		bitmap_copy(ipvlan->mac_filters, mc_filters,
			    IPVLAN_MAC_FILTER_SIZE);
	}
	/* Mirror the updated unicast/multicast lists onto the lower dev. */
	dev_uc_sync(ipvlan->phy_dev, dev);
	dev_mc_sync(ipvlan->phy_dev, dev);
}
334
/* ndo_get_stats64: aggregate the per-cpu counters into @s.
 *
 * The 64-bit counters are read under the u64_stats seqcount retry loop
 * so 32-bit hosts never see torn values; the u32 error/drop counters
 * are read without it, as noted below.
 */
static void ipvlan_get_stats64(struct net_device *dev,
			       struct rtnl_link_stats64 *s)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);

	if (ipvlan->pcpu_stats) {
		struct ipvl_pcpu_stats *pcptr;
		u64 rx_pkts, rx_bytes, rx_mcast, tx_pkts, tx_bytes;
		u32 rx_errs = 0, tx_drps = 0;
		u32 strt;
		int idx;

		for_each_possible_cpu(idx) {
			pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx);
			do {
				strt= u64_stats_fetch_begin_irq(&pcptr->syncp);
				rx_pkts = pcptr->rx_pkts;
				rx_bytes = pcptr->rx_bytes;
				rx_mcast = pcptr->rx_mcast;
				tx_pkts = pcptr->tx_pkts;
				tx_bytes = pcptr->tx_bytes;
			} while (u64_stats_fetch_retry_irq(&pcptr->syncp,
							   strt));

			s->rx_packets += rx_pkts;
			s->rx_bytes += rx_bytes;
			s->multicast += rx_mcast;
			s->tx_packets += tx_pkts;
			s->tx_bytes += tx_bytes;

			/* u32 values are updated without syncp protection. */
			rx_errs += pcptr->rx_errs;
			tx_drps += pcptr->tx_drps;
		}
		/* rx errors are reported as both errors and drops. */
		s->rx_errors = rx_errs;
		s->rx_dropped = rx_errs;
		s->tx_dropped = tx_drps;
	}
}
374
/* ndo_vlan_rx_add_vid: VLAN filtering happens on the lower device. */
static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);

	return vlan_vid_add(ipvlan->phy_dev, proto, vid);
}
382
/* ndo_vlan_rx_kill_vid: drop the VLAN filter on the lower device. */
static int ipvlan_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				   u16 vid)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);

	vlan_vid_del(ipvlan->phy_dev, proto, vid);
	return 0;
}
392
ipvlan_get_iflink(const struct net_device * dev)393 static int ipvlan_get_iflink(const struct net_device *dev)
394 {
395 struct ipvl_dev *ipvlan = netdev_priv(dev);
396
397 return ipvlan->phy_dev->ifindex;
398 }
399
/* netdev callbacks for ipvlan slave devices. */
static const struct net_device_ops ipvlan_netdev_ops = {
	.ndo_init		= ipvlan_init,
	.ndo_uninit		= ipvlan_uninit,
	.ndo_open		= ipvlan_open,
	.ndo_stop		= ipvlan_stop,
	.ndo_start_xmit		= ipvlan_start_xmit,
	.ndo_fix_features	= ipvlan_fix_features,
	.ndo_change_rx_flags	= ipvlan_change_rx_flags,
	.ndo_set_rx_mode	= ipvlan_set_multicast_mac_filter,
	.ndo_get_stats64	= ipvlan_get_stats64,
	.ndo_vlan_rx_add_vid	= ipvlan_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ipvlan_vlan_rx_kill_vid,
	.ndo_get_iflink		= ipvlan_get_iflink,
};
414
/* header_ops->create: build the L2 header through the lower device.
 * All slaves share the master's MAC, so dev->dev_addr is the default
 * source address when none is supplied.
 */
static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
			      unsigned short type, const void *daddr,
			      const void *saddr, unsigned len)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct net_device *phy_dev = ipvlan->phy_dev;

	/* TODO Probably use a different field than dev_addr so that the
	 * mac-address on the virtual device is portable and can be carried
	 * while the packets use the mac-addr on the physical device.
	 */
	return dev_hard_header(skb, phy_dev, type, daddr,
			       saddr ? : dev->dev_addr, len);
}
429
/* L2 header ops: create via the lower device; parse/cache as plain
 * ethernet.
 */
static const struct header_ops ipvlan_header_ops = {
	.create  	= ipvlan_hard_header,
	.parse		= eth_header_parse,
	.cache		= eth_header_cache,
	.cache_update	= eth_header_cache_update,
};
436
/* ethtool: report the lower device's link settings for this slave. */
static int ipvlan_ethtool_get_link_ksettings(struct net_device *dev,
					     struct ethtool_link_ksettings *cmd)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);

	return __ethtool_get_link_ksettings(ipvlan->phy_dev, cmd);
}
444
/* ethtool: report driver name and version. */
static void ipvlan_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, IPVLAN_DRV, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, IPV_DRV_VER, sizeof(drvinfo->version));
}
451
/* ethtool: read the per-slave message-level mask (see netif_msg_*). */
static u32 ipvlan_ethtool_get_msglevel(struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);

	return ipvlan->msg_enable;
}
458
/* ethtool: set the per-slave message-level mask. */
static void ipvlan_ethtool_set_msglevel(struct net_device *dev, u32 value)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);

	ipvlan->msg_enable = value;
}
465
/* ethtool callbacks; link state and settings come from the lower dev. */
static const struct ethtool_ops ipvlan_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	.get_link_ksettings	= ipvlan_ethtool_get_link_ksettings,
	.get_drvinfo	= ipvlan_ethtool_get_drvinfo,
	.get_msglevel	= ipvlan_ethtool_get_msglevel,
	.set_msglevel	= ipvlan_ethtool_set_msglevel,
};
473
ipvlan_nl_changelink(struct net_device * dev,struct nlattr * tb[],struct nlattr * data[],struct netlink_ext_ack * extack)474 static int ipvlan_nl_changelink(struct net_device *dev,
475 struct nlattr *tb[], struct nlattr *data[],
476 struct netlink_ext_ack *extack)
477 {
478 struct ipvl_dev *ipvlan = netdev_priv(dev);
479 struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
480 int err = 0;
481
482 if (!data)
483 return 0;
484 if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
485 return -EPERM;
486
487 if (data[IFLA_IPVLAN_MODE]) {
488 u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
489
490 err = ipvlan_set_port_mode(port, nmode);
491 }
492 return err;
493 }
494
ipvlan_nl_getsize(const struct net_device * dev)495 static size_t ipvlan_nl_getsize(const struct net_device *dev)
496 {
497 return (0
498 + nla_total_size(2) /* IFLA_IPVLAN_MODE */
499 );
500 }
501
ipvlan_nl_validate(struct nlattr * tb[],struct nlattr * data[],struct netlink_ext_ack * extack)502 static int ipvlan_nl_validate(struct nlattr *tb[], struct nlattr *data[],
503 struct netlink_ext_ack *extack)
504 {
505 if (data && data[IFLA_IPVLAN_MODE]) {
506 u16 mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
507
508 if (mode < IPVLAN_MODE_L2 || mode >= IPVLAN_MODE_MAX)
509 return -EINVAL;
510 }
511 return 0;
512 }
513
ipvlan_nl_fillinfo(struct sk_buff * skb,const struct net_device * dev)514 static int ipvlan_nl_fillinfo(struct sk_buff *skb,
515 const struct net_device *dev)
516 {
517 struct ipvl_dev *ipvlan = netdev_priv(dev);
518 struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
519 int ret = -EINVAL;
520
521 if (!port)
522 goto err;
523
524 ret = -EMSGSIZE;
525 if (nla_put_u16(skb, IFLA_IPVLAN_MODE, port->mode))
526 goto err;
527
528 return 0;
529
530 err:
531 return ret;
532 }
533
/* rtnl newlink handler: create an ipvlan slave on top of tb[IFLA_LINK].
 *
 * Stacking on another ipvlan slave is flattened onto that slave's
 * physical device; the first slave on a lower device also creates the
 * port.  On failure everything done so far is unwound in reverse.
 */
int ipvlan_link_new(struct net *src_net, struct net_device *dev,
		    struct nlattr *tb[], struct nlattr *data[],
		    struct netlink_ext_ack *extack)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port;
	struct net_device *phy_dev;
	int err;
	u16 mode = IPVLAN_MODE_L3;	/* default when no mode attr given */
	bool create = false;

	if (!tb[IFLA_LINK])
		return -EINVAL;

	phy_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (!phy_dev)
		return -ENODEV;

	if (netif_is_ipvlan(phy_dev)) {
		/* Nested ipvlan: stack on the real physical device. */
		struct ipvl_dev *tmp = netdev_priv(phy_dev);

		phy_dev = tmp->phy_dev;
		if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
			return -EPERM;
	} else if (!netif_is_ipvlan_port(phy_dev)) {
		/* First slave on this device: create the port. */
		err = ipvlan_port_create(phy_dev);
		if (err < 0)
			return err;
		create = true;
	}

	if (data && data[IFLA_IPVLAN_MODE])
		mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);

	port = ipvlan_port_get_rtnl(phy_dev);
	ipvlan->phy_dev = phy_dev;
	ipvlan->dev = dev;
	ipvlan->port = port;
	ipvlan->sfeatures = IPVLAN_FEATURES;
	if (!tb[IFLA_MTU])
		ipvlan_adjust_mtu(ipvlan, phy_dev);
	INIT_LIST_HEAD(&ipvlan->addrs);

	/* If the port-id base is at the MAX value, then wrap it around and
	 * begin from 0x1 again. This may be due to a busy system where lots
	 * of slaves are getting created and deleted.
	 */
	if (port->dev_id_start == 0xFFFE)
		port->dev_id_start = 0x1;

	/* Since L2 address is shared among all IPvlan slaves including
	 * master, use unique 16 bit dev-ids to differentiate among them.
	 * Assign IDs between 0x1 and 0xFFFE (used by the master) to each
	 * slave link [see addrconf_ifid_eui48()].
	 */
	err = ida_simple_get(&port->ida, port->dev_id_start, 0xFFFE,
			     GFP_KERNEL);
	/* Range above dev_id_start exhausted: retry from the bottom. */
	if (err < 0)
		err = ida_simple_get(&port->ida, 0x1, port->dev_id_start,
				     GFP_KERNEL);
	if (err < 0)
		goto destroy_ipvlan_port;
	dev->dev_id = err;
	/* Increment id-base to the next slot for the future assignment */
	port->dev_id_start = err + 1;

	/* TODO Probably put random address here to be presented to the
	 * world but keep using the physical-dev address for the outgoing
	 * packets.
	 */
	memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);

	dev->priv_flags |= IFF_IPVLAN_SLAVE;

	err = register_netdevice(dev);
	if (err < 0)
		goto remove_ida;

	err = netdev_upper_dev_link(phy_dev, dev);
	if (err) {
		goto unregister_netdev;
	}
	err = ipvlan_set_port_mode(port, mode);
	if (err) {
		goto unlink_netdev;
	}

	list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
	netif_stacked_transfer_operstate(phy_dev, dev);
	return 0;

unlink_netdev:
	netdev_upper_dev_unlink(phy_dev, dev);
unregister_netdev:
	unregister_netdevice(dev);
remove_ida:
	ida_simple_remove(&port->ida, dev->dev_id);
destroy_ipvlan_port:
	if (create)
		ipvlan_port_destroy(phy_dev);
	return err;
}
EXPORT_SYMBOL_GPL(ipvlan_link_new);
637
/* rtnl dellink handler: drop all of the slave's addresses, return its
 * dev_id to the port's ida and queue the netdev for unregistration.
 */
void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_addr *addr, *next;

	list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
		ipvlan_ht_addr_del(addr);
		list_del(&addr->anode);
		/* RCU-deferred free: hash readers may still hold it. */
		kfree_rcu(addr, rcu);
	}

	ida_simple_remove(&ipvlan->port->ida, dev->dev_id);
	list_del_rcu(&ipvlan->pnode);
	unregister_netdevice_queue(dev, head);
	netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
}
EXPORT_SYMBOL_GPL(ipvlan_link_delete);
655
/* rtnl setup handler: initialise an ipvlan slave netdev before
 * registration (ethernet defaults plus ipvlan-specific ops/flags).
 */
void ipvlan_link_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
	dev->netdev_ops = &ipvlan_netdev_ops;
	dev->needs_free_netdev = true;
	dev->header_ops = &ipvlan_header_ops;
	dev->ethtool_ops = &ipvlan_ethtool_ops;
}
EXPORT_SYMBOL_GPL(ipvlan_link_setup);
668
/* Netlink attribute policy: only IFLA_IPVLAN_MODE (u16) is accepted. */
static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
{
	[IFLA_IPVLAN_MODE] = { .type = NLA_U16 },
};

/* rtnl link ops; the remaining callbacks are filled in at runtime by
 * ipvlan_link_register().
 */
static struct rtnl_link_ops ipvlan_link_ops = {
	.kind		= "ipvlan",
	.priv_size	= sizeof(struct ipvl_dev),

	.setup		= ipvlan_link_setup,
	.newlink	= ipvlan_link_new,
	.dellink	= ipvlan_link_delete,
};
682
/* Populate the generic ipvlan rtnl_link_ops callbacks on @ops and
 * register it.  Exported so other modules layered on ipvlan can reuse
 * these handlers with their own .kind/.setup.
 */
int ipvlan_link_register(struct rtnl_link_ops *ops)
{
	ops->get_size	= ipvlan_nl_getsize;
	ops->policy	= ipvlan_nl_policy;
	ops->validate	= ipvlan_nl_validate;
	ops->fill_info	= ipvlan_nl_fillinfo;
	ops->changelink = ipvlan_nl_changelink;
	ops->maxtype	= IFLA_IPVLAN_MAX;
	return rtnl_link_register(ops);
}
EXPORT_SYMBOL_GPL(ipvlan_link_register);
694
/* Lower-device netdev notifier: propagate state changes on an ipvlan
 * master to all of its slaves.
 */
static int ipvlan_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct ipvl_dev *ipvlan, *next;
	struct ipvl_port *port;
	LIST_HEAD(lst_kill);

	if (!netif_is_ipvlan_port(dev))
		return NOTIFY_DONE;

	port = ipvlan_port_get_rtnl(dev);

	switch (event) {
	case NETDEV_CHANGE:
		/* Mirror the master's operstate onto every slave. */
		list_for_each_entry(ipvlan, &port->ipvlans, pnode)
			netif_stacked_transfer_operstate(ipvlan->phy_dev,
							 ipvlan->dev);
		break;

	case NETDEV_REGISTER: {
		/* Master re-registered in a different netns: migrate the
		 * L3S netfilter hooks (if any) to the new netns.
		 */
		struct net *oldnet, *newnet = dev_net(dev);
		struct ipvlan_netns *old_vnet;

		oldnet = read_pnet(&port->pnet);
		if (net_eq(newnet, oldnet))
			break;

		write_pnet(&port->pnet, newnet);

		old_vnet = net_generic(oldnet, ipvlan_netid);
		if (!old_vnet->ipvl_nf_hook_refcnt)
			break;

		ipvlan_register_nf_hook(newnet);
		ipvlan_unregister_nf_hook(oldnet);
		break;
	}
	case NETDEV_UNREGISTER:
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;

		/* Master is going away: delete all slaves in one batch. */
		list_for_each_entry_safe(ipvlan, next, &port->ipvlans,
					 pnode)
			ipvlan->dev->rtnl_link_ops->dellink(ipvlan->dev,
							    &lst_kill);
		unregister_netdevice_many(&lst_kill);
		break;

	case NETDEV_FEAT_CHANGE:
		/* Re-derive each slave's features/GSO limits. */
		list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
			ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
			ipvlan->dev->gso_max_size = dev->gso_max_size;
			ipvlan->dev->gso_max_segs = dev->gso_max_segs;
			netdev_features_change(ipvlan->dev);
		}
		break;

	case NETDEV_CHANGEMTU:
		list_for_each_entry(ipvlan, &port->ipvlans, pnode)
			ipvlan_adjust_mtu(ipvlan, dev);
		break;

	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid underlying device to change its type. */
		return NOTIFY_BAD;
	}
	return NOTIFY_DONE;
}
764
/* Record @iaddr (IPv4 or IPv6 per @is_v6) on @ipvlan and, if the
 * interface is running, publish it in the port's lookup hash.
 * Returns 0 or -ENOMEM.
 */
static int ipvlan_add_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr = kzalloc(sizeof(*addr), GFP_ATOMIC);

	if (!addr)
		return -ENOMEM;

	addr->master = ipvlan;
	if (!is_v6) {
		memcpy(&addr->ip4addr, iaddr, sizeof(struct in_addr));
		addr->atype = IPVL_IPV4;
	} else {
		memcpy(&addr->ip6addr, iaddr, sizeof(struct in6_addr));
		addr->atype = IPVL_IPV6;
	}
	list_add_tail(&addr->anode, &ipvlan->addrs);

	/* If the interface is not up, the address will be added to the hash
	 * list by ipvlan_open.
	 */
	if (netif_running(ipvlan->dev))
		ipvlan_ht_addr_add(ipvlan, addr);

	return 0;
}
791
/* Remove @iaddr (IPv4 or IPv6 per @is_v6) from @ipvlan: take it out of
 * the port hash and the slave's address list, then free it via RCU so
 * concurrent hash readers can finish.  No-op if the address isn't
 * present.
 */
static void ipvlan_del_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
	struct ipvl_addr *addr;

	addr = ipvlan_find_addr(ipvlan, iaddr, is_v6);
	if (!addr)
		return;

	ipvlan_ht_addr_del(addr);
	list_del(&addr->anode);
	kfree_rcu(addr, rcu);
}
806
ipvlan_add_addr6(struct ipvl_dev * ipvlan,struct in6_addr * ip6_addr)807 static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
808 {
809 if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) {
810 netif_err(ipvlan, ifup, ipvlan->dev,
811 "Failed to add IPv6=%pI6c addr for %s intf\n",
812 ip6_addr, ipvlan->dev->name);
813 return -EINVAL;
814 }
815
816 return ipvlan_add_addr(ipvlan, ip6_addr, true);
817 }
818
ipvlan_del_addr6(struct ipvl_dev * ipvlan,struct in6_addr * ip6_addr)819 static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
820 {
821 return ipvlan_del_addr(ipvlan, ip6_addr, true);
822 }
823
/* inet6addr notifier: keep a slave's IPv6 addresses in sync with the
 * port's address hash.  Returns NOTIFY_BAD to veto a failed add.
 */
static int ipvlan_addr6_event(struct notifier_block *unused,
			      unsigned long event, void *ptr)
{
	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *)ptr;
	struct net_device *dev = (struct net_device *)if6->idev->dev;
	struct ipvl_dev *ipvlan = netdev_priv(dev);

	/* FIXME IPv6 autoconf calls us from bh without RTNL */
	if (in_softirq())
		return NOTIFY_DONE;

	if (!netif_is_ipvlan(dev))
		return NOTIFY_DONE;

	if (!ipvlan || !ipvlan->port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (ipvlan_add_addr6(ipvlan, &if6->addr))
			return NOTIFY_BAD;
		break;

	case NETDEV_DOWN:
		ipvlan_del_addr6(ipvlan, &if6->addr);
		break;
	}

	return NOTIFY_OK;
}
854
/* inet6addr validator: veto an IPv6 address about to be added to a
 * slave if it is already in use on the same port (-EADDRINUSE).
 */
static int ipvlan_addr6_validator_event(struct notifier_block *unused,
					unsigned long event, void *ptr)
{
	struct in6_validator_info *i6vi = (struct in6_validator_info *)ptr;
	struct net_device *dev = (struct net_device *)i6vi->i6vi_dev->dev;
	struct ipvl_dev *ipvlan = netdev_priv(dev);

	/* FIXME IPv6 autoconf calls us from bh without RTNL */
	if (in_softirq())
		return NOTIFY_DONE;

	if (!netif_is_ipvlan(dev))
		return NOTIFY_DONE;

	if (!ipvlan || !ipvlan->port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (ipvlan_addr_busy(ipvlan->port, &i6vi->i6vi_addr, true))
			return notifier_from_errno(-EADDRINUSE);
		break;
	}

	return NOTIFY_OK;
}
881
ipvlan_add_addr4(struct ipvl_dev * ipvlan,struct in_addr * ip4_addr)882 static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
883 {
884 if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) {
885 netif_err(ipvlan, ifup, ipvlan->dev,
886 "Failed to add IPv4=%pI4 on %s intf.\n",
887 ip4_addr, ipvlan->dev->name);
888 return -EINVAL;
889 }
890
891 return ipvlan_add_addr(ipvlan, ip4_addr, false);
892 }
893
ipvlan_del_addr4(struct ipvl_dev * ipvlan,struct in_addr * ip4_addr)894 static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
895 {
896 return ipvlan_del_addr(ipvlan, ip4_addr, false);
897 }
898
/* inetaddr notifier: mirror IPv4 address add/remove on a slave into
 * the port's address hash.  Returns NOTIFY_BAD to veto a failed add.
 */
static int ipvlan_addr4_event(struct notifier_block *unused,
			      unsigned long event, void *ptr)
{
	struct in_ifaddr *if4 = (struct in_ifaddr *)ptr;
	struct net_device *dev = (struct net_device *)if4->ifa_dev->dev;
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct in_addr ip4_addr;

	if (!netif_is_ipvlan(dev))
		return NOTIFY_DONE;

	if (!ipvlan || !ipvlan->port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		ip4_addr.s_addr = if4->ifa_address;
		if (ipvlan_add_addr4(ipvlan, &ip4_addr))
			return NOTIFY_BAD;
		break;

	case NETDEV_DOWN:
		ip4_addr.s_addr = if4->ifa_address;
		ipvlan_del_addr4(ipvlan, &ip4_addr);
		break;
	}

	return NOTIFY_OK;
}
928
/* inetaddr validator: veto an IPv4 address about to be added to a
 * slave if it is already in use on the same port (-EADDRINUSE).
 */
static int ipvlan_addr4_validator_event(struct notifier_block *unused,
					unsigned long event, void *ptr)
{
	struct in_validator_info *ivi = (struct in_validator_info *)ptr;
	struct net_device *dev = (struct net_device *)ivi->ivi_dev->dev;
	struct ipvl_dev *ipvlan = netdev_priv(dev);

	if (!netif_is_ipvlan(dev))
		return NOTIFY_DONE;

	if (!ipvlan || !ipvlan->port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (ipvlan_addr_busy(ipvlan->port, &ivi->ivi_addr, false))
			return notifier_from_errno(-EADDRINUSE);
		break;
	}

	return NOTIFY_OK;
}
951
/* Notifier registrations: lower-device events plus IPv4/IPv6 address
 * add/remove and pre-add validation.
 */
static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
	.notifier_call = ipvlan_addr4_event,
};

static struct notifier_block ipvlan_addr4_vtor_notifier_block __read_mostly = {
	.notifier_call = ipvlan_addr4_validator_event,
};

static struct notifier_block ipvlan_notifier_block __read_mostly = {
	.notifier_call = ipvlan_device_event,
};

static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = {
	.notifier_call = ipvlan_addr6_event,
};

static struct notifier_block ipvlan_addr6_vtor_notifier_block __read_mostly = {
	.notifier_call = ipvlan_addr6_validator_event,
};
971
/* pernet exit: every port in the netns should already be gone; if a
 * hook refcount leaked, warn once and force-unregister the hooks so
 * the netns can be freed.
 */
static void ipvlan_ns_exit(struct net *net)
{
	struct ipvlan_netns *vnet = net_generic(net, ipvlan_netid);

	if (WARN_ON_ONCE(vnet->ipvl_nf_hook_refcnt)) {
		vnet->ipvl_nf_hook_refcnt = 0;
		nf_unregister_net_hooks(net, ipvl_nfops,
					ARRAY_SIZE(ipvl_nfops));
	}
}
982
/* Per-netns state: id/size for net_generic() plus exit-time cleanup. */
static struct pernet_operations ipvlan_net_ops = {
	.id = &ipvlan_netid,
	.size = sizeof(struct ipvlan_netns),
	.exit = ipvlan_ns_exit,
};
988
/* Module init: register all notifiers first, then the pernet subsys
 * and the rtnl link ops; unwind the notifiers if either registration
 * fails.
 */
static int __init ipvlan_init_module(void)
{
	int err;

	ipvlan_init_secret();
	register_netdevice_notifier(&ipvlan_notifier_block);
	register_inet6addr_notifier(&ipvlan_addr6_notifier_block);
	register_inet6addr_validator_notifier(
	    &ipvlan_addr6_vtor_notifier_block);
	register_inetaddr_notifier(&ipvlan_addr4_notifier_block);
	register_inetaddr_validator_notifier(&ipvlan_addr4_vtor_notifier_block);

	err = register_pernet_subsys(&ipvlan_net_ops);
	if (err < 0)
		goto error;

	err = ipvlan_link_register(&ipvlan_link_ops);
	if (err < 0) {
		unregister_pernet_subsys(&ipvlan_net_ops);
		goto error;
	}

	return 0;
error:
	unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
	unregister_inetaddr_validator_notifier(
	    &ipvlan_addr4_vtor_notifier_block);
	unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
	unregister_inet6addr_validator_notifier(
	    &ipvlan_addr6_vtor_notifier_block);
	unregister_netdevice_notifier(&ipvlan_notifier_block);
	return err;
}
1022
/* Module exit: unregister everything in reverse order of init. */
static void __exit ipvlan_cleanup_module(void)
{
	rtnl_link_unregister(&ipvlan_link_ops);
	unregister_pernet_subsys(&ipvlan_net_ops);
	unregister_netdevice_notifier(&ipvlan_notifier_block);
	unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
	unregister_inetaddr_validator_notifier(
	    &ipvlan_addr4_vtor_notifier_block);
	unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
	unregister_inet6addr_validator_notifier(
	    &ipvlan_addr6_vtor_notifier_block);
}
1035
/* Module entry points and metadata. */
module_init(ipvlan_init_module);
module_exit(ipvlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>");
MODULE_DESCRIPTION("Driver for L3 (IPv6/IPv4) based VLANs");
MODULE_ALIAS_RTNL_LINK("ipvlan");
1043