/*
 *	Userspace interface
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netpoll.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/if_vlan.h>
#include <net/switchdev.h>

#include "br_private.h"

/*
 * Determine the initial path cost based on speed,
 * using the recommendations from the 802.1D standard.
 *
 * Since the driver might sleep, we must not be holding any locks.
 */
static int port_cost(struct net_device *dev)
{
	struct ethtool_link_ksettings ecmd;

	if (!__ethtool_get_link_ksettings(dev, &ecmd)) {
		switch (ecmd.base.speed) {
		case SPEED_10000:
			return 2;
		case SPEED_1000:
			return 4;
		case SPEED_100:
			return 19;
		case SPEED_10:
			return 100;
		}
	}

	/* Old silly heuristics based on name */
	if (!strncmp(dev->name, "lec", 3))
		return 7;

	if (!strncmp(dev->name, "plip", 4))
		return 2500;

	return 100;	/* assume old 10Mbps */
}


/* Check for port carrier transitions. */
void br_port_carrier_check(struct net_bridge_port *p)
{
	struct net_device *dev = p->dev;
	struct net_bridge *br = p->br;

	if (!(p->flags & BR_ADMIN_COST) &&
	    netif_running(dev) && netif_oper_up(dev))
		p->path_cost = port_cost(dev);

	if (!netif_running(br->dev))
		return;

	spin_lock_bh(&br->lock);
	if (netif_running(dev) && netif_oper_up(dev)) {
		if (p->state == BR_STATE_DISABLED)
			br_stp_enable_port(p);
	} else {
		if (p->state != BR_STATE_DISABLED)
			br_stp_disable_port(p);
	}
	spin_unlock_bh(&br->lock);
}

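/* Put the port into promiscuous mode and drop the statically synced
 * fdb addresses again; they are not needed once the port sees all
 * traffic anyway.
 */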
static void br_port_set_promisc(struct net_bridge_port *p)
{
	int err = 0;

	if (br_promisc_port(p))
		return;

	err = dev_set_promiscuity(p->dev, 1);
	if (err)
		return;

	br_fdb_unsync_static(p->br, p);
	p->flags |= BR_PROMISC;
}

static void br_port_clear_promisc(struct net_bridge_port *p)
{
	int err;

	/* Check if the port is already non-promisc or if it doesn't
	 * support UNICAST filtering.  Without unicast filtering support
	 * we'll end up re-enabling promisc mode anyway, so just check for
	 * it here.
	 */
	if (!br_promisc_port(p) || !(p->dev->priv_flags & IFF_UNICAST_FLT))
		return;

	/* Since we'll be clearing the promisc mode, program the port
	 * first so that we don't have interruption in traffic.
	 */
	err = br_fdb_sync_static(p->br, p);
	if (err)
		return;

	dev_set_promiscuity(p->dev, -1);
	p->flags &= ~BR_PROMISC;
}

/* When a port is added or removed or when certain port flags
 * change, this function is called to automatically manage
 * promiscuity setting of all the bridge ports.  We are always called
 * under RTNL so can skip using rcu primitives.
 */
void br_manage_promisc(struct net_bridge *br)
{
	struct net_bridge_port *p;
	bool set_all = false;

	/* If vlan filtering is disabled or bridge interface is placed
	 * into promiscuous mode, place all ports in promiscuous mode.
	 */
	if ((br->dev->flags & IFF_PROMISC) || !br_vlan_enabled(br))
		set_all = true;

	list_for_each_entry(p, &br->port_list, list) {
		if (set_all) {
			br_port_set_promisc(p);
		} else {
			/* If the number of auto-ports is <= 1, then all other
			 * ports will have their output configuration
			 * statically specified through fdbs.  Since ingress
			 * on the auto-port becomes forwarding/egress to other
			 * ports and egress configuration is statically known,
			 * we can say that ingress configuration of the
			 * auto-port is also statically known.
			 * This lets us disable promiscuous mode and write
			 * this config to hw.
			 */
			if (br->auto_cnt == 0 ||
			    (br->auto_cnt == 1 && br_auto_port(p)))
				br_port_clear_promisc(p);
			else
				br_port_set_promisc(p);
		}
	}
}

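/* Recount the ports that br_auto_port() considers automatic and, if the
 * count changed, re-run promiscuity management for the whole bridge.
 */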
static void nbp_update_port_count(struct net_bridge *br)
{
	struct net_bridge_port *p;
	u32 cnt = 0;

	list_for_each_entry(p, &br->port_list, list) {
		if (br_auto_port(p))
			cnt++;
	}
	if (br->auto_cnt != cnt) {
		br->auto_cnt = cnt;
		br_manage_promisc(br);
	}
}

static void nbp_delete_promisc(struct net_bridge_port *p)
{
	/* If port is currently promiscuous, unset promiscuity.
	 * Otherwise, it is a static port so remove all addresses
	 * from it.
	 */
	dev_set_allmulti(p->dev, -1);
	if (br_promisc_port(p))
		dev_set_promiscuity(p->dev, -1);
	else
		br_fdb_unsync_static(p->br, p);
}

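/* kobject release callback: the port structure is freed only once the
 * last sysfs reference to it has been dropped.
 */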
static void release_nbp(struct kobject *kobj)
{
	struct net_bridge_port *p
		= container_of(kobj, struct net_bridge_port, kobj);
	kfree(p);
}

static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
	.sysfs_ops = &brport_sysfs_ops,
#endif
	.release = release_nbp,
};

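/* Final teardown of a port: drop the device reference and release the
 * port's kobject; release_nbp() then frees the memory.
 */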
static void destroy_nbp(struct net_bridge_port *p)
{
	struct net_device *dev = p->dev;

	p->br = NULL;
	p->dev = NULL;
	dev_put(dev);

	kobject_put(&p->kobj);
}

static void destroy_nbp_rcu(struct rcu_head *head)
{
	struct net_bridge_port *p =
			container_of(head, struct net_bridge_port, rcu);
	destroy_nbp(p);
}

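/* Return the largest forwarding headroom required by any port device. */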
static unsigned get_max_headroom(struct net_bridge *br)
{
	unsigned max_headroom = 0;
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		unsigned dev_headroom = netdev_get_fwd_headroom(p->dev);

		if (dev_headroom > max_headroom)
			max_headroom = dev_headroom;
	}

	return max_headroom;
}

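/* Propagate a new RX headroom to every port and record it as the
 * bridge device's own needed_headroom.
 */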
static void update_headroom(struct net_bridge *br, int new_hr)
{
	struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list)
		netdev_set_rx_headroom(p->dev, new_hr);

	br->dev->needed_headroom = new_hr;
}

/* Deleting a port (interface) from a bridge is done in two steps via RCU.
 * The first step marks the device as down; that deletes
 * all the timers and stops new packets from flowing through.
 *
 * Final cleanup doesn't occur until after all CPUs have finished
 * processing packets.
 *
 * Protected from multiple admin operations by the RTNL mutex.
 */
static void del_nbp(struct net_bridge_port *p)
{
	struct net_bridge *br = p->br;
	struct net_device *dev = p->dev;

	sysfs_remove_link(br->ifobj, p->dev->name);

	nbp_delete_promisc(p);

	spin_lock_bh(&br->lock);
	br_stp_disable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_DELLINK, p);

	list_del_rcu(&p->list);
	if (netdev_get_fwd_headroom(dev) == br->dev->needed_headroom)
		update_headroom(br, get_max_headroom(br));
	netdev_reset_rx_headroom(dev);

	nbp_vlan_flush(p);
	br_fdb_delete_by_port(br, p, 0, 1);
	switchdev_deferred_process();

	nbp_update_port_count(br);

	netdev_upper_dev_unlink(dev, br->dev);

	dev->priv_flags &= ~IFF_BRIDGE_PORT;

	netdev_rx_handler_unregister(dev);

	br_multicast_del_port(p);

	kobject_uevent(&p->kobj, KOBJ_REMOVE);
	kobject_del(&p->kobj);

	br_netpoll_disable(p);

	call_rcu(&p->rcu, destroy_nbp_rcu);
}

/* Delete bridge device */
void br_dev_delete(struct net_device *dev, struct list_head *head)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p, *n;

	list_for_each_entry_safe(p, n, &br->port_list, list) {
		del_nbp(p);
	}

	br_fdb_delete_by_port(br, NULL, 0, 1);

	br_vlan_flush(br);
	br_multicast_dev_del(br);
	del_timer_sync(&br->gc_timer);

	br_sysfs_delbr(br->dev);
	unregister_netdevice_queue(br->dev, head);
}

/* find an available port number */
static int find_portno(struct net_bridge *br)
{
	int index;
	struct net_bridge_port *p;
	unsigned long *inuse;

	inuse = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
			GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	set_bit(0, inuse);	/* zero is reserved */
	list_for_each_entry(p, &br->port_list, list) {
		set_bit(p->port_no, inuse);
	}
	index = find_first_zero_bit(inuse, BR_MAX_PORTS);
	kfree(inuse);

	return (index >= BR_MAX_PORTS) ? -EXFULL : index;
}

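/* Allocate and initialise a new bridge port for @dev.  On success the
 * port is returned in BR_STATE_DISABLED with a reference held on @dev;
 * on failure an ERR_PTR() is returned instead.
 */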
/* called with RTNL but without bridge lock */
static struct net_bridge_port *new_nbp(struct net_bridge *br,
				       struct net_device *dev)
{
	struct net_bridge_port *p;
	int index, err;

	index = find_portno(br);
	if (index < 0)
		return ERR_PTR(index);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL)
		return ERR_PTR(-ENOMEM);

	p->br = br;
	dev_hold(dev);
	p->dev = dev;
	p->path_cost = port_cost(dev);
	p->priority = 0x8000 >> BR_PORT_BITS;
	p->port_no = index;
	p->flags = BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD;
	br_init_port(p);
	br_set_state(p, BR_STATE_DISABLED);
	br_stp_port_timer_init(p);
	err = br_multicast_add_port(p);
	if (err) {
		dev_put(dev);
		kfree(p);
		p = ERR_PTR(err);
	}

	return p;
}

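/* Create and register a new bridge device named @name in namespace
 * @net.  The netdev is freed again if registration fails.
 */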
int br_add_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int res;

	dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
			   br_dev_setup);

	if (!dev)
		return -ENOMEM;

	dev_net_set(dev, net);
	dev->rtnl_link_ops = &br_link_ops;

	res = register_netdev(dev);
	if (res)
		free_netdev(dev);
	return res;
}

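/* Remove the bridge device named @name.  Fails if the device does not
 * exist (-ENXIO), is not a bridge (-EPERM) or is still up (-EBUSY).
 */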
int br_del_bridge(struct net *net, const char *name)
{
	struct net_device *dev;
	int ret = 0;

	rtnl_lock();
	dev = __dev_get_by_name(net, name);
	if (dev == NULL)
		ret = -ENXIO;	/* Could not find device */

	else if (!(dev->priv_flags & IFF_EBRIDGE)) {
		/* Attempt to delete non-bridge device! */
		ret = -EPERM;
	}

	else if (dev->flags & IFF_UP) {
		/* Not shut down yet. */
		ret = -EBUSY;
	}

	else
		br_dev_delete(dev, NULL);

	rtnl_unlock();
	return ret;
}

/* MTU of the bridge pseudo-device: ETH_DATA_LEN or the minimum of the ports */
int br_min_mtu(const struct net_bridge *br)
{
	const struct net_bridge_port *p;
	int mtu = 0;

	ASSERT_RTNL();

	if (list_empty(&br->port_list))
		mtu = ETH_DATA_LEN;
	else {
		list_for_each_entry(p, &br->port_list, list) {
			if (!mtu || p->dev->mtu < mtu)
				mtu = p->dev->mtu;
		}
	}
	return mtu;
}

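/* Clamp the bridge device's GSO limits to the most restrictive values
 * advertised by any of its ports.
 */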
static void br_set_gso_limits(struct net_bridge *br)
{
	unsigned int gso_max_size = GSO_MAX_SIZE;
	u16 gso_max_segs = GSO_MAX_SEGS;
	const struct net_bridge_port *p;

	list_for_each_entry(p, &br->port_list, list) {
		gso_max_size = min(gso_max_size, p->dev->gso_max_size);
		gso_max_segs = min(gso_max_segs, p->dev->gso_max_segs);
	}
	br->dev->gso_max_size = gso_max_size;
	br->dev->gso_max_segs = gso_max_segs;
}

/*
 * Recompute bridge features from the slave devices' features.
 */
netdev_features_t br_features_recompute(struct net_bridge *br,
					netdev_features_t features)
{
	struct net_bridge_port *p;
	netdev_features_t mask;

	if (list_empty(&br->port_list))
		return features;

	mask = features;
	features &= ~NETIF_F_ONE_FOR_ALL;

	list_for_each_entry(p, &br->port_list, list) {
		features = netdev_increment_features(features,
						     p->dev->features, mask);
	}
	features = netdev_add_tso_features(features, mask);

	return features;
}

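/* Add @dev as a port of bridge @br: allocate the port, wire up sysfs,
 * netpoll, the rx_handler and the upper/lower device link, then
 * initialise fdb and VLAN state.  Every step is unwound on error.
 */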
/* called with RTNL */
int br_add_if(struct net_bridge *br, struct net_device *dev)
{
	struct net_bridge_port *p;
	int err = 0;
	unsigned br_hr, dev_hr;
	bool changed_addr;

	/* Don't allow bridging non-ethernet-like devices, or DSA-enabled
	 * master network devices, since the bridge layer rx_handler prevents
	 * the DSA fake ethertype handler from being invoked, so we would not
	 * strip off the DSA switch tag protocol header and the bridge layer
	 * would just return RX_HANDLER_CONSUMED, stopping RX processing for
	 * these frames.
	 */
	if ((dev->flags & IFF_LOOPBACK) ||
	    dev->type != ARPHRD_ETHER || dev->addr_len != ETH_ALEN ||
	    !is_valid_ether_addr(dev->dev_addr) ||
	    netdev_uses_dsa(dev))
		return -EINVAL;

	/* No bridging of bridges */
	if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
		return -ELOOP;

	/* Device is already being bridged */
	if (br_port_exists(dev))
		return -EBUSY;

	/* No bridging devices that dislike that (e.g. wireless) */
	if (dev->priv_flags & IFF_DONT_BRIDGE)
		return -EOPNOTSUPP;

	p = new_nbp(br, dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	call_netdevice_notifiers(NETDEV_JOIN, dev);

	err = dev_set_allmulti(dev, 1);
	if (err)
		goto put_back;

	err = kobject_init_and_add(&p->kobj, &brport_ktype, &(dev->dev.kobj),
				   SYSFS_BRIDGE_PORT_ATTR);
	if (err)
		goto err1;

	err = br_sysfs_addif(p);
	if (err)
		goto err2;

	err = br_netpoll_enable(p);
	if (err)
		goto err3;

	err = netdev_rx_handler_register(dev, br_handle_frame, p);
	if (err)
		goto err4;

	dev->priv_flags |= IFF_BRIDGE_PORT;

	err = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL);
	if (err)
		goto err5;

	err = nbp_switchdev_mark_set(p);
	if (err)
		goto err6;

	dev_disable_lro(dev);

	list_add_rcu(&p->list, &br->port_list);

	nbp_update_port_count(br);

	netdev_update_features(br->dev);

	br_hr = br->dev->needed_headroom;
	dev_hr = netdev_get_fwd_headroom(dev);
	if (br_hr < dev_hr)
		update_headroom(br, dev_hr);
	else
		netdev_set_rx_headroom(dev, br_hr);

	if (br_fdb_insert(br, p, dev->dev_addr, 0))
		netdev_err(dev, "failed insert local address bridge forwarding table\n");

	err = nbp_vlan_init(p);
	if (err) {
		netdev_err(dev, "failed to initialize vlan filtering on this port\n");
		goto err7;
	}

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);

	if (netif_running(dev) && netif_oper_up(dev) &&
	    (br->dev->flags & IFF_UP))
		br_stp_enable_port(p);
	spin_unlock_bh(&br->lock);

	br_ifinfo_notify(RTM_NEWLINK, p);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	dev_set_mtu(br->dev, br_min_mtu(br));
	br_set_gso_limits(br);

	kobject_uevent(&p->kobj, KOBJ_ADD);

	return 0;

err7:
	list_del_rcu(&p->list);
	br_fdb_delete_by_port(br, p, 0, 1);
	nbp_update_port_count(br);
err6:
	netdev_upper_dev_unlink(dev, br->dev);
err5:
	dev->priv_flags &= ~IFF_BRIDGE_PORT;
	netdev_rx_handler_unregister(dev);
err4:
	br_netpoll_disable(p);
err3:
	sysfs_remove_link(br->ifobj, p->dev->name);
err2:
	kobject_put(&p->kobj);
	p = NULL;	/* kobject_put frees */
err1:
	dev_set_allmulti(dev, -1);
put_back:
	dev_put(dev);
	kfree(p);
	return err;
}

/* called with RTNL */
int br_del_if(struct net_bridge *br, struct net_device *dev)
{
	struct net_bridge_port *p;
	bool changed_addr;

	p = br_port_get_rtnl(dev);
	if (!p || p->br != br)
		return -EINVAL;

	/* Since more than one interface can be attached to a bridge,
	 * there may still be an alternate path for netconsole to use;
	 * therefore there is no reason for a NETDEV_RELEASE event.
	 */
	del_nbp(p);

	dev_set_mtu(br->dev, br_min_mtu(br));
	br_set_gso_limits(br);

	spin_lock_bh(&br->lock);
	changed_addr = br_stp_recalculate_bridge_id(br);
	spin_unlock_bh(&br->lock);

	if (changed_addr)
		call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

	netdev_update_features(br->dev);

	return 0;
}

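/* Called after a port's flags have changed: if any flag covered by
 * BR_AUTO_MASK is in the changed mask, the automatic port count (and
 * therefore port promiscuity) must be re-evaluated.
 */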
void br_port_flags_change(struct net_bridge_port *p, unsigned long mask)
{
	struct net_bridge *br = p->br;

	if (mask & BR_AUTO_MASK)
		nbp_update_port_count(br);
}