// SPDX-License-Identifier: GPL-2.0
#include <linux/err.h>
#include <linux/igmp.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/if_ether.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/switchdev.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/addrconf.h>
#endif

#include "br_private.h"

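/* Dump the bridge's multicast router ports as an MDBA_ROUTER nested
 * attribute. Walks br->router_list under RCU and emits one
 * MDBA_ROUTER_PORT nest per router port, carrying the port ifindex,
 * the remaining router timer value and the configured router type.
 */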
static int br_rports_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			       struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *p;
	struct nlattr *nest, *port_nest;

	if (!br->multicast_router || hlist_empty(&br->router_list))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(p, &br->router_list, rlist) {
		if (!p)
			continue;
		port_nest = nla_nest_start_noflag(skb, MDBA_ROUTER_PORT);
		if (!port_nest)
			goto fail;
		if (nla_put_nohdr(skb, sizeof(u32), &p->dev->ifindex) ||
		    nla_put_u32(skb, MDBA_ROUTER_PATTR_TIMER,
				br_timer_value(&p->multicast_router_timer)) ||
		    nla_put_u8(skb, MDBA_ROUTER_PATTR_TYPE,
			       p->multicast_router)) {
			nla_nest_cancel(skb, port_nest);
			goto fail;
		}
		nla_nest_end(skb, port_nest);
	}

	nla_nest_end(skb, nest);
	return 0;
fail:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
{
	e->state = flags & MDB_PG_FLAGS_PERMANENT;
	e->flags = 0;
	if (flags & MDB_PG_FLAGS_OFFLOAD)
		e->flags |= MDB_FLAGS_OFFLOAD;
	if (flags & MDB_PG_FLAGS_FAST_LEAVE)
		e->flags |= MDB_FLAGS_FAST_LEAVE;
	if (flags & MDB_PG_FLAGS_STAR_EXCL)
		e->flags |= MDB_FLAGS_STAR_EXCL;
	if (flags & MDB_PG_FLAGS_BLOCKED)
		e->flags |= MDB_FLAGS_BLOCKED;
}

static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip,
				 struct nlattr **mdb_attrs)
{
	memset(ip, 0, sizeof(struct br_ip));
	ip->vid = entry->vid;
	ip->proto = entry->addr.proto;
	switch (ip->proto) {
	case htons(ETH_P_IP):
		ip->dst.ip4 = entry->addr.u.ip4;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip4 = nla_get_in_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip->dst.ip6 = entry->addr.u.ip6;
		if (mdb_attrs && mdb_attrs[MDBE_ATTR_SOURCE])
			ip->src.ip6 = nla_get_in6_addr(mdb_attrs[MDBE_ATTR_SOURCE]);
		break;
#endif
	}
}

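/* Serialize a port group's source list as an MDBA_MDB_EATTR_SRC_LIST
 * nested attribute: one MDBA_MDB_SRCLIST_ENTRY per source, carrying the
 * source address and its remaining timer. Callers hold either the RCU
 * read lock or br->multicast_lock, which the hlist walk asserts via
 * lockdep_is_held().
 */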
static int __mdb_fill_srcs(struct sk_buff *skb,
			   struct net_bridge_port_group *p)
{
	struct net_bridge_group_src *ent;
	struct nlattr *nest, *nest_ent;

	if (hlist_empty(&p->src_list))
		return 0;

	nest = nla_nest_start(skb, MDBA_MDB_EATTR_SRC_LIST);
	if (!nest)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(ent, &p->src_list, node,
				 lockdep_is_held(&p->key.port->br->multicast_lock)) {
		nest_ent = nla_nest_start(skb, MDBA_MDB_SRCLIST_ENTRY);
		if (!nest_ent)
			goto out_cancel_err;
		switch (ent->addr.proto) {
		case htons(ETH_P_IP):
			if (nla_put_in_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					    ent->addr.src.ip4)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case htons(ETH_P_IPV6):
			if (nla_put_in6_addr(skb, MDBA_MDB_SRCATTR_ADDRESS,
					     &ent->addr.src.ip6)) {
				nla_nest_cancel(skb, nest_ent);
				goto out_cancel_err;
			}
			break;
#endif
		default:
			nla_nest_cancel(skb, nest_ent);
			continue;
		}
		if (nla_put_u32(skb, MDBA_MDB_SRCATTR_TIMER,
				br_timer_value(&ent->timer))) {
			nla_nest_cancel(skb, nest_ent);
			goto out_cancel_err;
		}
		nla_nest_end(skb, nest_ent);
	}

	nla_nest_end(skb, nest);

	return 0;

out_cancel_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

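/* Fill a single MDBA_MDB_ENTRY_INFO nest for either a port group (p set)
 * or the host-joined entry (p == NULL, reported with the bridge's own
 * ifindex). The source list and filter mode are only dumped when the
 * group's protocol version supports them (IGMPv3 or MLDv2).
 */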
static int __mdb_fill_info(struct sk_buff *skb,
			   struct net_bridge_mdb_entry *mp,
			   struct net_bridge_port_group *p)
{
	bool dump_srcs_mode = false;
	struct timer_list *mtimer;
	struct nlattr *nest_ent;
	struct br_mdb_entry e;
	u8 flags = 0;
	int ifindex;

	memset(&e, 0, sizeof(e));
	if (p) {
		ifindex = p->key.port->dev->ifindex;
		mtimer = &p->timer;
		flags = p->flags;
	} else {
		ifindex = mp->br->dev->ifindex;
		mtimer = &mp->timer;
	}

	__mdb_entry_fill_flags(&e, flags);
	e.ifindex = ifindex;
	e.vid = mp->addr.vid;
	if (mp->addr.proto == htons(ETH_P_IP))
		e.addr.u.ip4 = mp->addr.dst.ip4;
#if IS_ENABLED(CONFIG_IPV6)
	if (mp->addr.proto == htons(ETH_P_IPV6))
		e.addr.u.ip6 = mp->addr.dst.ip6;
#endif
	e.addr.proto = mp->addr.proto;
	nest_ent = nla_nest_start_noflag(skb,
					 MDBA_MDB_ENTRY_INFO);
	if (!nest_ent)
		return -EMSGSIZE;

	if (nla_put_nohdr(skb, sizeof(e), &e) ||
	    nla_put_u32(skb,
			MDBA_MDB_EATTR_TIMER,
			br_timer_value(mtimer)))
		goto nest_err;

	switch (mp->addr.proto) {
	case htons(ETH_P_IP):
		dump_srcs_mode = !!(mp->br->multicast_igmp_version == 3);
		if (mp->addr.src.ip4) {
			if (nla_put_in_addr(skb, MDBA_MDB_EATTR_SOURCE,
					    mp->addr.src.ip4))
				goto nest_err;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		dump_srcs_mode = !!(mp->br->multicast_mld_version == 2);
		if (!ipv6_addr_any(&mp->addr.src.ip6)) {
			if (nla_put_in6_addr(skb, MDBA_MDB_EATTR_SOURCE,
					     &mp->addr.src.ip6))
				goto nest_err;
			break;
		}
		break;
#endif
	}
	if (p) {
		if (nla_put_u8(skb, MDBA_MDB_EATTR_RTPROT, p->rt_protocol))
			goto nest_err;
		if (dump_srcs_mode &&
		    (__mdb_fill_srcs(skb, p) ||
		     nla_put_u8(skb, MDBA_MDB_EATTR_GROUP_MODE,
				p->filter_mode)))
			goto nest_err;
	}
	nla_nest_end(skb, nest_ent);

	return 0;

nest_err:
	nla_nest_cancel(skb, nest_ent);
	return -EMSGSIZE;
}

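/* Dump all MDB entries of one bridge into an MDBA_MDB nest. cb->args[1]
 * and cb->args[2] record the entry and port-group indexes where a
 * previous partial dump stopped, so a multi-message dump resumes
 * mid-list instead of restarting from the beginning.
 */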
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
			    struct net_device *dev)
{
	int idx = 0, s_idx = cb->args[1], err = 0, pidx = 0, s_pidx = cb->args[2];
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_mdb_entry *mp;
	struct nlattr *nest, *nest2;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return 0;

	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		return -EMSGSIZE;

	hlist_for_each_entry_rcu(mp, &br->mdb_list, mdb_node) {
		struct net_bridge_port_group *p;
		struct net_bridge_port_group __rcu **pp;

		if (idx < s_idx)
			goto skip;

		nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
		if (!nest2) {
			err = -EMSGSIZE;
			break;
		}

		if (!s_pidx && mp->host_joined) {
			err = __mdb_fill_info(skb, mp, NULL);
			if (err) {
				nla_nest_cancel(skb, nest2);
				break;
			}
		}

		for (pp = &mp->ports; (p = rcu_dereference(*pp)) != NULL;
		     pp = &p->next) {
			if (!p->key.port)
				continue;
			if (pidx < s_pidx)
				goto skip_pg;

			err = __mdb_fill_info(skb, mp, p);
			if (err) {
				nla_nest_end(skb, nest2);
				goto out;
			}
skip_pg:
			pidx++;
		}
		pidx = 0;
		s_pidx = 0;
		nla_nest_end(skb, nest2);
skip:
		idx++;
	}

out:
	cb->args[1] = idx;
	cb->args[2] = pidx;
	nla_nest_end(skb, nest);
	return err;
}

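/* Strict validation of an RTM_GETMDB dump request: the header must be a
 * bare br_port_msg with no device filter and no trailing attributes.
 */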
static int br_mdb_valid_dump_req(const struct nlmsghdr *nlh,
				 struct netlink_ext_ack *extack)
{
	struct br_port_msg *bpm;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*bpm))) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid header for mdb dump request");
		return -EINVAL;
	}

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "Filtering by device index is not supported for mdb dump request");
		return -EINVAL;
	}
	if (nlmsg_attrlen(nlh, sizeof(*bpm))) {
		NL_SET_ERR_MSG(extack, "Invalid data after header in mdb dump request");
		return -EINVAL;
	}

	return 0;
}

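/* RTM_GETMDB dump handler: walk all netdevs in the namespace and, for
 * each bridge, emit one RTM_GETMDB message carrying its MDB entries and
 * router port list. cb->args[0] tracks the device index for resume.
 */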
static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net_device *dev;
	struct net *net = sock_net(skb->sk);
	struct nlmsghdr *nlh = NULL;
	int idx = 0, s_idx;

	if (cb->strict_check) {
		int err = br_mdb_valid_dump_req(cb->nlh, cb->extack);

		if (err < 0)
			return err;
	}

	s_idx = cb->args[0];

	rcu_read_lock();

	cb->seq = net->dev_base_seq;

	for_each_netdev_rcu(net, dev) {
		if (dev->priv_flags & IFF_EBRIDGE) {
			struct br_port_msg *bpm;

			if (idx < s_idx)
				goto skip;

			nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid,
					cb->nlh->nlmsg_seq, RTM_GETMDB,
					sizeof(*bpm), NLM_F_MULTI);
			if (nlh == NULL)
				break;

			bpm = nlmsg_data(nlh);
			memset(bpm, 0, sizeof(*bpm));
			bpm->ifindex = dev->ifindex;
			if (br_mdb_fill_info(skb, cb, dev) < 0)
				goto out;
			if (br_rports_fill_info(skb, cb, dev) < 0)
				goto out;

			cb->args[1] = 0;
			nlmsg_end(skb, nlh);
skip:
			idx++;
		}
	}

out:
	if (nlh)
		nlmsg_end(skb, nlh);
	rcu_read_unlock();
	cb->args[0] = idx;
	return skb->len;
}

static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   struct net_bridge_mdb_entry *mp,
				   struct net_bridge_port_group *pg,
				   int type)
{
	struct nlmsghdr *nlh;
	struct br_port_msg *bpm;
	struct nlattr *nest, *nest2;

	nlh = nlmsg_put(skb, 0, 0, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_MDB);
	if (nest == NULL)
		goto cancel;
	nest2 = nla_nest_start_noflag(skb, MDBA_MDB_ENTRY);
	if (nest2 == NULL)
		goto end;

	if (__mdb_fill_info(skb, mp, pg))
		goto end;

	nla_nest_end(skb, nest2);
	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

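/* Size estimate for a notification about one MDB entry; must account
 * for every attribute nlmsg_populate_mdb_fill() may emit, including the
 * per-source nests when the group's source list is dumped.
 */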
static size_t rtnl_mdb_nlmsg_size(struct net_bridge_port_group *pg)
{
	size_t nlmsg_size = NLMSG_ALIGN(sizeof(struct br_port_msg)) +
			    nla_total_size(sizeof(struct br_mdb_entry)) +
			    nla_total_size(sizeof(u32));
	struct net_bridge_group_src *ent;
	size_t addr_size = 0;

	if (!pg)
		goto out;

	/* MDBA_MDB_EATTR_RTPROT */
	nlmsg_size += nla_total_size(sizeof(u8));

	switch (pg->key.addr.proto) {
	case htons(ETH_P_IP):
		/* MDBA_MDB_EATTR_SOURCE */
		if (pg->key.addr.src.ip4)
			nlmsg_size += nla_total_size(sizeof(__be32));
		if (pg->key.port->br->multicast_igmp_version == 2)
			goto out;
		addr_size = sizeof(__be32);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* MDBA_MDB_EATTR_SOURCE */
		if (!ipv6_addr_any(&pg->key.addr.src.ip6))
			nlmsg_size += nla_total_size(sizeof(struct in6_addr));
		if (pg->key.port->br->multicast_mld_version == 1)
			goto out;
		addr_size = sizeof(struct in6_addr);
		break;
#endif
	}

	/* MDBA_MDB_EATTR_GROUP_MODE */
	nlmsg_size += nla_total_size(sizeof(u8));

	/* MDBA_MDB_EATTR_SRC_LIST nested attr */
	if (!hlist_empty(&pg->src_list))
		nlmsg_size += nla_total_size(0);

	hlist_for_each_entry(ent, &pg->src_list, node) {
		/* MDBA_MDB_SRCLIST_ENTRY nested attr +
		 * MDBA_MDB_SRCATTR_ADDRESS + MDBA_MDB_SRCATTR_TIMER
		 */
		nlmsg_size += nla_total_size(0) +
			      nla_total_size(addr_size) +
			      nla_total_size(sizeof(u32));
	}
out:
	return nlmsg_size;
}

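/* Completion context for deferred switchdev offload of a port group.
 * br_mdb_complete() runs once the driver has processed the object and,
 * on success, marks the matching port group with MDB_PG_FLAGS_OFFLOAD.
 */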
struct br_mdb_complete_info {
	struct net_bridge_port *port;
	struct br_ip ip;
};

static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
	struct br_mdb_complete_info *data = priv;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port *port = data->port;
	struct net_bridge *br = port->br;

	if (err)
		goto err;

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &data->ip);
	if (!mp)
		goto out;
	for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port != port)
			continue;
		p->flags |= MDB_PG_FLAGS_OFFLOAD;
	}
out:
	spin_unlock_bh(&br->multicast_lock);
err:
	kfree(priv);
}

static void br_mdb_switchdev_host_port(struct net_device *dev,
				       struct net_device *lower_dev,
				       struct net_bridge_mdb_entry *mp,
				       int type)
{
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_HOST_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = mp->addr.vid,
	};

	if (mp->addr.proto == htons(ETH_P_IP))
		ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
	else
		ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);
#endif

	mdb.obj.orig_dev = dev;
	switch (type) {
	case RTM_NEWMDB:
		switchdev_port_obj_add(lower_dev, &mdb.obj, NULL);
		break;
	case RTM_DELMDB:
		switchdev_port_obj_del(lower_dev, &mdb.obj);
		break;
	}
}

static void br_mdb_switchdev_host(struct net_device *dev,
				  struct net_bridge_mdb_entry *mp, int type)
{
	struct net_device *lower_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, lower_dev, iter)
		br_mdb_switchdev_host_port(dev, lower_dev, mp, type);
}

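/* Notify userspace (RTNLGRP_MDB) and switchdev drivers about an MDB
 * entry change. Port groups are offloaded via SWITCHDEV_OBJ_ID_PORT_MDB
 * with a deferred completion; host-joined entries are propagated to all
 * lower devices as SWITCHDEV_OBJ_ID_HOST_MDB objects.
 */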
void br_mdb_notify(struct net_device *dev,
		   struct net_bridge_mdb_entry *mp,
		   struct net_bridge_port_group *pg,
		   int type)
{
	struct br_mdb_complete_info *complete_info;
	struct switchdev_obj_port_mdb mdb = {
		.obj = {
			.id = SWITCHDEV_OBJ_ID_PORT_MDB,
			.flags = SWITCHDEV_F_DEFER,
		},
		.vid = mp->addr.vid,
	};
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (pg) {
		if (mp->addr.proto == htons(ETH_P_IP))
			ip_eth_mc_map(mp->addr.dst.ip4, mdb.addr);
#if IS_ENABLED(CONFIG_IPV6)
		else
			ipv6_eth_mc_map(&mp->addr.dst.ip6, mdb.addr);
#endif
		mdb.obj.orig_dev = pg->key.port->dev;
		switch (type) {
		case RTM_NEWMDB:
			complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
			if (!complete_info)
				break;
			complete_info->port = pg->key.port;
			complete_info->ip = mp->addr;
			mdb.obj.complete_priv = complete_info;
			mdb.obj.complete = br_mdb_complete;
			if (switchdev_port_obj_add(pg->key.port->dev, &mdb.obj, NULL))
				kfree(complete_info);
			break;
		case RTM_DELMDB:
			switchdev_port_obj_del(pg->key.port->dev, &mdb.obj);
			break;
		}
	} else {
		br_mdb_switchdev_host(dev, mp, type);
	}

	skb = nlmsg_new(rtnl_mdb_nlmsg_size(pg), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_mdb_fill(skb, dev, mp, pg, type);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
				   struct net_device *dev,
				   int ifindex, u32 pid,
				   u32 seq, int type, unsigned int flags)
{
	struct br_port_msg *bpm;
	struct nlmsghdr *nlh;
	struct nlattr *nest;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
	if (!nlh)
		return -EMSGSIZE;

	bpm = nlmsg_data(nlh);
	memset(bpm, 0, sizeof(*bpm));
	bpm->family = AF_BRIDGE;
	bpm->ifindex = dev->ifindex;
	nest = nla_nest_start_noflag(skb, MDBA_ROUTER);
	if (!nest)
		goto cancel;

	if (nla_put_u32(skb, MDBA_ROUTER_PORT, ifindex))
		goto end;

	nla_nest_end(skb, nest);
	nlmsg_end(skb, nlh);
	return 0;

end:
	nla_nest_end(skb, nest);
cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t rtnl_rtr_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct br_port_msg))
		+ nla_total_size(sizeof(__u32));
}

void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
		   int type)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;
	int ifindex;

	ifindex = port ? port->dev->ifindex : 0;
	skb = nlmsg_new(rtnl_rtr_nlmsg_size(), GFP_ATOMIC);
	if (!skb)
		goto errout;

	err = nlmsg_populate_rtr_fill(skb, dev, ifindex, 0, 0, type, NTF_SELF);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_MDB, NULL, GFP_ATOMIC);
	return;

errout:
	rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}

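/* Validate the fixed-size br_mdb_entry from userspace: a non-zero port
 * ifindex, a usable multicast group address for the given protocol, a
 * known entry state and a valid VLAN id.
 */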
static bool is_valid_mdb_entry(struct br_mdb_entry *entry,
			       struct netlink_ext_ack *extack)
{
	if (entry->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Zero entry ifindex is not allowed");
		return false;
	}

	if (entry->addr.proto == htons(ETH_P_IP)) {
		if (!ipv4_is_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is not multicast");
			return false;
		}
		if (ipv4_is_local_multicast(entry->addr.u.ip4)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 entry group address is local multicast");
			return false;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (entry->addr.proto == htons(ETH_P_IPV6)) {
		if (ipv6_addr_is_ll_all_nodes(&entry->addr.u.ip6)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 entry group address is link-local all nodes");
			return false;
		}
#endif
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry protocol");
		return false;
	}

	if (entry->state != MDB_PERMANENT && entry->state != MDB_TEMPORARY) {
		NL_SET_ERR_MSG_MOD(extack, "Unknown entry state");
		return false;
	}
	if (entry->vid >= VLAN_VID_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid entry VLAN id");
		return false;
	}

	return true;
}

static bool is_valid_mdb_source(struct nlattr *attr, __be16 proto,
				struct netlink_ext_ack *extack)
{
	switch (proto) {
	case htons(ETH_P_IP):
		if (nla_len(attr) != sizeof(struct in_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 invalid source address length");
			return false;
		}
		if (ipv4_is_multicast(nla_get_in_addr(attr))) {
			NL_SET_ERR_MSG_MOD(extack, "IPv4 multicast source address is not allowed");
			return false;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6): {
		struct in6_addr src;

		if (nla_len(attr) != sizeof(struct in6_addr)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 invalid source address length");
			return false;
		}
		src = nla_get_in6_addr(attr);
		if (ipv6_addr_is_multicast(&src)) {
			NL_SET_ERR_MSG_MOD(extack, "IPv6 multicast source address is not allowed");
			return false;
		}
		break;
	}
#endif
	default:
		NL_SET_ERR_MSG_MOD(extack, "Invalid protocol used with source address");
		return false;
	}

	return true;
}

static const struct nla_policy br_mdbe_attrs_pol[MDBE_ATTR_MAX + 1] = {
	[MDBE_ATTR_SOURCE] = NLA_POLICY_RANGE(NLA_BINARY,
					      sizeof(struct in_addr),
					      sizeof(struct in6_addr)),
};

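/* Parse and validate an RTM_NEWMDB/RTM_DELMDB request: resolve the
 * bridge device from the ancillary header, check the MDBA_SET_ENTRY
 * payload and, if present, the nested MDBA_SET_ENTRY_ATTRS (currently
 * only a unicast source address).
 */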
static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct net_device **pdev, struct br_mdb_entry **pentry,
			struct nlattr **mdb_attrs, struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct br_mdb_entry *entry;
	struct br_port_msg *bpm;
	struct nlattr *tb[MDBA_SET_ENTRY_MAX+1];
	struct net_device *dev;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*bpm), tb,
				     MDBA_SET_ENTRY_MAX, NULL, NULL);
	if (err < 0)
		return err;

	bpm = nlmsg_data(nlh);
	if (bpm->ifindex == 0) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid bridge ifindex");
		return -EINVAL;
	}

	dev = __dev_get_by_index(net, bpm->ifindex);
	if (dev == NULL) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device doesn't exist");
		return -ENODEV;
	}

	if (!(dev->priv_flags & IFF_EBRIDGE)) {
		NL_SET_ERR_MSG_MOD(extack, "Device is not a bridge");
		return -EOPNOTSUPP;
	}

	*pdev = dev;

	if (!tb[MDBA_SET_ENTRY]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing MDBA_SET_ENTRY attribute");
		return -EINVAL;
	}
	if (nla_len(tb[MDBA_SET_ENTRY]) != sizeof(struct br_mdb_entry)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid MDBA_SET_ENTRY attribute length");
		return -EINVAL;
	}

	entry = nla_data(tb[MDBA_SET_ENTRY]);
	if (!is_valid_mdb_entry(entry, extack))
		return -EINVAL;
	*pentry = entry;

	if (tb[MDBA_SET_ENTRY_ATTRS]) {
		err = nla_parse_nested(mdb_attrs, MDBE_ATTR_MAX,
				       tb[MDBA_SET_ENTRY_ATTRS],
				       br_mdbe_attrs_pol, extack);
		if (err)
			return err;
		if (mdb_attrs[MDBE_ATTR_SOURCE] &&
		    !is_valid_mdb_source(mdb_attrs[MDBE_ATTR_SOURCE],
					 entry->addr.proto, extack))
			return -EINVAL;
	} else {
		memset(mdb_attrs, 0,
		       sizeof(struct nlattr *) * (MDBE_ATTR_MAX + 1));
	}

	return 0;
}

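/* Add a static MDB entry; runs under br->multicast_lock. Without a port
 * this is a host join. Otherwise a new port group is linked into the
 * entry's port list and, for IGMPv3/MLDv2, synced with the matching
 * (*,G) or (S,G) entries so traffic is replicated correctly.
 */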
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
			    struct br_mdb_entry *entry,
			    struct nlattr **mdb_attrs,
			    struct netlink_ext_ack *extack)
{
	struct net_bridge_mdb_entry *mp, *star_mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip group, star_group;
	unsigned long now = jiffies;
	u8 filter_mode;
	int err;

	__mdb_entry_to_br_ip(entry, &group, mdb_attrs);

	/* host join errors which can happen before creating the group */
	if (!port) {
		/* don't allow any flags for host-joined groups */
		if (entry->state) {
			NL_SET_ERR_MSG_MOD(extack, "Flags are not allowed for host groups");
			return -EINVAL;
		}
		if (!br_multicast_is_star_g(&group)) {
			NL_SET_ERR_MSG_MOD(extack, "Groups with sources cannot be manually host joined");
			return -EINVAL;
		}
	}

	mp = br_mdb_ip_get(br, &group);
	if (!mp) {
		mp = br_multicast_new_group(br, &group);
		err = PTR_ERR_OR_ZERO(mp);
		if (err)
			return err;
	}

	/* host join */
	if (!port) {
		if (mp->host_joined) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by host");
			return -EEXIST;
		}

		br_multicast_host_join(mp, false);
		br_mdb_notify(br->dev, mp, NULL, RTM_NEWMDB);

		return 0;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p->key.port == port) {
			NL_SET_ERR_MSG_MOD(extack, "Group is already joined by port");
			return -EEXIST;
		}
		if ((unsigned long)p->key.port < (unsigned long)port)
			break;
	}

	filter_mode = br_multicast_is_star_g(&group) ? MCAST_EXCLUDE :
						       MCAST_INCLUDE;

	p = br_multicast_new_port_group(port, &group, *pp, entry->state, NULL,
					filter_mode, RTPROT_STATIC);
	if (unlikely(!p)) {
		NL_SET_ERR_MSG_MOD(extack, "Couldn't allocate new port group");
		return -ENOMEM;
	}
	rcu_assign_pointer(*pp, p);
	if (entry->state == MDB_TEMPORARY)
		mod_timer(&p->timer, now + br->multicast_membership_interval);
	br_mdb_notify(br->dev, mp, p, RTM_NEWMDB);
	/* If we are adding a new EXCLUDE (*,G) port group, it also needs to
	 * be added to all S,G entries for proper replication; if we are
	 * adding a new INCLUDE (S,G) port, then all of the *,G EXCLUDE
	 * ports need to be added to it for proper replication.
	 */
	if (br_multicast_should_handle_mode(br, group.proto)) {
		switch (filter_mode) {
		case MCAST_EXCLUDE:
			br_multicast_star_g_handle_mode(p, MCAST_EXCLUDE);
			break;
		case MCAST_INCLUDE:
			star_group = p->key.addr;
			memset(&star_group.src, 0, sizeof(star_group.src));
			star_mp = br_mdb_ip_get(br, &star_group);
			if (star_mp)
				br_multicast_sg_add_exclude_ports(star_mp, p);
			break;
		}
	}

	return 0;
}

static int __br_mdb_add(struct net *net, struct net_bridge *br,
			struct net_bridge_port *p,
			struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs,
			struct netlink_ext_ack *extack)
{
	int ret;

	spin_lock_bh(&br->multicast_lock);
	ret = br_mdb_add_group(br, p, entry, mdb_attrs, extack);
	spin_unlock_bh(&br->multicast_lock);

	return ret;
}

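/* RTM_NEWMDB handler. Resolves the target bridge and (optionally) port,
 * then installs the entry; with VLAN filtering enabled and no VID given,
 * the entry is installed on every VLAN configured on the port. This is
 * the path exercised by, for example, iproute2's
 * "bridge mdb add dev br0 port eth0 grp 239.1.1.1 permanent".
 */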
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (!netif_running(br->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge device is not running");
		return -EINVAL;
	}

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Bridge's multicast processing is disabled");
		return -EINVAL;
	}

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev) {
			NL_SET_ERR_MSG_MOD(extack, "Port net device doesn't exist");
			return -ENODEV;
		}

		p = br_port_get_rtnl(pdev);
		if (!p) {
			NL_SET_ERR_MSG_MOD(extack, "Net device is not a bridge port");
			return -EINVAL;
		}

		if (p->br != br) {
			NL_SET_ERR_MSG_MOD(extack, "Port belongs to a different bridge device");
			return -EINVAL;
		}
		if (p->state == BR_STATE_DISABLED) {
			NL_SET_ERR_MSG_MOD(extack, "Port is in disabled state");
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified,
	 * install the mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
			if (err)
				break;
		}
	} else {
		err = __br_mdb_add(net, br, p, entry, mdb_attrs, extack);
	}

	return err;
}

static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry,
			struct nlattr **mdb_attrs)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip ip;
	int err = -EINVAL;

	if (!netif_running(br->dev) || !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		return -EINVAL;

	__mdb_entry_to_br_ip(entry, &ip, mdb_attrs);

	spin_lock_bh(&br->multicast_lock);
	mp = br_mdb_ip_get(br, &ip);
	if (!mp)
		goto unlock;

	/* host leave */
	if (entry->ifindex == mp->br->dev->ifindex && mp->host_joined) {
		br_multicast_host_leave(mp, false);
		err = 0;
		br_mdb_notify(br->dev, mp, NULL, RTM_DELMDB);
		if (!mp->ports && netif_running(br->dev))
			mod_timer(&mp->timer, jiffies);
		goto unlock;
	}

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (!p->key.port || p->key.port->dev->ifindex != entry->ifindex)
			continue;

		if (p->key.port->state == BR_STATE_DISABLED)
			goto unlock;

		br_multicast_del_pg(mp, p, pp);
		err = 0;
		break;
	}

unlock:
	spin_unlock_bh(&br->multicast_lock);
	return err;
}

static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh,
		      struct netlink_ext_ack *extack)
{
	struct nlattr *mdb_attrs[MDBE_ATTR_MAX + 1];
	struct net *net = sock_net(skb->sk);
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_device *dev, *pdev;
	struct br_mdb_entry *entry;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	err = br_mdb_parse(skb, nlh, &dev, &entry, mdb_attrs, extack);
	if (err < 0)
		return err;

	br = netdev_priv(dev);

	if (entry->ifindex != br->dev->ifindex) {
		pdev = __dev_get_by_index(net, entry->ifindex);
		if (!pdev)
			return -ENODEV;

		p = br_port_get_rtnl(pdev);
		if (!p || p->br != br || p->state == BR_STATE_DISABLED)
			return -EINVAL;
		vg = nbp_vlan_group(p);
	} else {
		vg = br_vlan_group(br);
	}

	/* If vlan filtering is enabled and VLAN is not specified,
	 * delete the mdb entry on all vlans configured on the port.
	 */
	if (br_vlan_enabled(br->dev) && vg && entry->vid == 0) {
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			entry->vid = v->vid;
			err = __br_mdb_del(br, entry, mdb_attrs);
		}
	} else {
		err = __br_mdb_del(br, entry, mdb_attrs);
	}

	return err;
}

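/* Register the PF_BRIDGE rtnetlink handlers for MDB dump, add and del. */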
void br_mdb_init(void)
{
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_GETMDB, NULL, br_mdb_dump, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_NEWMDB, br_mdb_add, NULL, 0);
	rtnl_register_module(THIS_MODULE, PF_BRIDGE, RTM_DELMDB, br_mdb_del, NULL, 0);
}

void br_mdb_uninit(void)
{
	rtnl_unregister(PF_BRIDGE, RTM_GETMDB);
	rtnl_unregister(PF_BRIDGE, RTM_NEWMDB);
	rtnl_unregister(PF_BRIDGE, RTM_DELMDB);
}