1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Bridge multicast support.
4 *
5 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
6 */
7
8 #include <linux/err.h>
9 #include <linux/export.h>
10 #include <linux/if_ether.h>
11 #include <linux/igmp.h>
12 #include <linux/in.h>
13 #include <linux/jhash.h>
14 #include <linux/kernel.h>
15 #include <linux/log2.h>
16 #include <linux/netdevice.h>
17 #include <linux/netfilter_bridge.h>
18 #include <linux/random.h>
19 #include <linux/rculist.h>
20 #include <linux/skbuff.h>
21 #include <linux/slab.h>
22 #include <linux/timer.h>
23 #include <linux/inetdevice.h>
24 #include <linux/mroute.h>
25 #include <net/ip.h>
26 #include <net/switchdev.h>
27 #if IS_ENABLED(CONFIG_IPV6)
28 #include <linux/icmpv6.h>
29 #include <net/ipv6.h>
30 #include <net/mld.h>
31 #include <net/ip6_checksum.h>
32 #include <net/addrconf.h>
33 #endif
34
35 #include "br_private.h"
36 #include "br_private_mcast_eht.h"
37
/* MDB hashtable: entries keyed on the full multicast address (struct br_ip),
 * i.e. protocol + vid + group (+ source for S,G entries).
 */
static const struct rhashtable_params br_mdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_mdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_mdb_entry, addr),
	.key_len = sizeof(struct br_ip),
	.automatic_shrinking = true,
};
44
/* S,G per-port hashtable: entries keyed on (port, address) so a given
 * source/group pair can be found per bridge port.
 */
static const struct rhashtable_params br_sg_port_rht_params = {
	.head_offset = offsetof(struct net_bridge_port_group, rhnode),
	.key_offset = offsetof(struct net_bridge_port_group, key),
	.key_len = sizeof(struct net_bridge_port_group_sg_key),
	.automatic_shrinking = true,
};
51
52 static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
53 struct bridge_mcast_own_query *query);
54 static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
55 struct net_bridge_mcast_port *pmctx);
56 static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
57 struct net_bridge_mcast_port *pmctx,
58 __be32 group,
59 __u16 vid,
60 const unsigned char *src);
61 static void br_multicast_port_group_rexmit(struct timer_list *t);
62
63 static void
64 br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted);
65 static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
66 struct net_bridge_mcast_port *pmctx);
67 #if IS_ENABLED(CONFIG_IPV6)
68 static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
69 struct net_bridge_mcast_port *pmctx,
70 const struct in6_addr *group,
71 __u16 vid, const unsigned char *src);
72 #endif
73 static struct net_bridge_port_group *
74 __br_multicast_add_group(struct net_bridge_mcast *brmctx,
75 struct net_bridge_mcast_port *pmctx,
76 struct br_ip *group,
77 const unsigned char *src,
78 u8 filter_mode,
79 bool igmpv2_mldv1,
80 bool blocked);
81 static void br_multicast_find_del_pg(struct net_bridge *br,
82 struct net_bridge_port_group *pg);
83 static void __br_multicast_stop(struct net_bridge_mcast *brmctx);
84
85 static int br_mc_disabled_update(struct net_device *dev, bool value,
86 struct netlink_ext_ack *extack);
87
/* Look up an S,G port group by its (port, address) key.
 * Caller must hold br->multicast_lock, which protects the table.
 */
static struct net_bridge_port_group *
br_sg_port_find(struct net_bridge *br,
		struct net_bridge_port_group_sg_key *sg_p)
{
	lockdep_assert_held_once(&br->multicast_lock);

	return rhashtable_lookup_fast(&br->sg_port_tbl, sg_p,
				      br_sg_port_rht_params);
}
97
/* MDB lookup for the fast path; caller must be in an RCU read-side
 * critical section (rhashtable_lookup requires it).
 */
static struct net_bridge_mdb_entry *br_mdb_ip_get_rcu(struct net_bridge *br,
						      struct br_ip *dst)
{
	return rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
}
103
/* MDB lookup for the control path. The RCU read lock is taken only for
 * the hashtable walk; the returned entry stays valid afterwards because
 * the caller holds br->multicast_lock, which serializes entry removal.
 */
struct net_bridge_mdb_entry *br_mdb_ip_get(struct net_bridge *br,
					   struct br_ip *dst)
{
	struct net_bridge_mdb_entry *ent;

	lockdep_assert_held_once(&br->multicast_lock);

	rcu_read_lock();
	ent = rhashtable_lookup(&br->mdb_hash_tbl, dst, br_mdb_rht_params);
	rcu_read_unlock();

	return ent;
}
117
/* Convenience wrapper: build an IPv4 *,G key and look it up in the MDB. */
static struct net_bridge_mdb_entry *br_mdb_ip4_get(struct net_bridge *br,
						   __be32 dst, __u16 vid)
{
	struct br_ip key;

	/* zero the whole key, padding included, as it is hashed verbatim */
	memset(&key, 0, sizeof(key));
	key.proto = htons(ETH_P_IP);
	key.vid = vid;
	key.dst.ip4 = dst;

	return br_mdb_ip_get(br, &key);
}
130
131 #if IS_ENABLED(CONFIG_IPV6)
/* Convenience wrapper: build an IPv6 *,G key and look it up in the MDB. */
static struct net_bridge_mdb_entry *br_mdb_ip6_get(struct net_bridge *br,
						   const struct in6_addr *dst,
						   __u16 vid)
{
	struct br_ip key;

	/* zero the whole key, padding included, as it is hashed verbatim */
	memset(&key, 0, sizeof(key));
	key.proto = htons(ETH_P_IPV6);
	key.vid = vid;
	key.dst.ip6 = *dst;

	return br_mdb_ip_get(br, &key);
}
145 #endif
146
/* Fast-path MDB lookup for a forwarded packet.
 * Returns NULL when snooping is disabled (globally or for this vlan
 * context) or when the skb is itself an IGMP/MLD packet.
 * For IGMPv3/MLDv2 an S,G lookup is tried first and falls back to *,G;
 * non-IP protocols match on the destination MAC address.
 * NOTE(review): uses br_mdb_ip_get_rcu(), so this presumably runs under
 * the RCU read lock of the bridge input path — confirm against callers.
 */
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge_mcast *brmctx,
					struct sk_buff *skb, u16 vid)
{
	struct net_bridge *br = brmctx->br;
	struct br_ip ip;

	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return NULL;

	if (BR_INPUT_SKB_CB(skb)->igmp)
		return NULL;

	memset(&ip, 0, sizeof(ip));
	ip.proto = skb->protocol;
	ip.vid = vid;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ip.dst.ip4 = ip_hdr(skb)->daddr;
		if (brmctx->multicast_igmp_version == 3) {
			struct net_bridge_mdb_entry *mdb;

			/* try the source-specific S,G entry first */
			ip.src.ip4 = ip_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			/* no S,G match - fall back to the *,G lookup */
			ip.src.ip4 = 0;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ip.dst.ip6 = ipv6_hdr(skb)->daddr;
		if (brmctx->multicast_mld_version == 2) {
			struct net_bridge_mdb_entry *mdb;

			/* try the source-specific S,G entry first */
			ip.src.ip6 = ipv6_hdr(skb)->saddr;
			mdb = br_mdb_ip_get_rcu(br, &ip);
			if (mdb)
				return mdb;
			/* no S,G match - fall back to the *,G lookup */
			memset(&ip.src.ip6, 0, sizeof(ip.src.ip6));
		}
		break;
#endif
	default:
		/* unknown protocol: match on L2 destination address only */
		ip.proto = 0;
		ether_addr_copy(ip.dst.mac_addr, eth_hdr(skb)->h_dest);
	}

	return br_mdb_ip_get_rcu(br, &ip);
}
198
/* IMPORTANT: this function must be used only when the contexts cannot be
 * passed down (e.g. timer) and must be used for read-only purposes because
 * the vlan snooping option can change, so it can return any context
 * (non-vlan or vlan). Its initial intended purpose is to read timer values
 * from the *current* context based on the option. At worst that could lead
 * to inconsistent timers when the contexts are changed, i.e. src timer
 * which needs to re-arm with a specific delay taken from the old context
 */
static struct net_bridge_mcast_port *
br_multicast_pg_to_port_ctx(const struct net_bridge_port_group *pg)
{
	struct net_bridge_mcast_port *pmctx = &pg->key.port->multicast_ctx;
	struct net_bridge_vlan *vlan;

	lockdep_assert_held_once(&pg->key.port->br->multicast_lock);

	/* if vlan snooping is disabled use the port's multicast context */
	if (!pg->key.addr.vid ||
	    !br_opt_get(pg->key.port->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED))
		goto out;

	/* locking is tricky here, due to different rules for multicast and
	 * vlans we need to take rcu to find the vlan and make sure it has
	 * the BR_VLFLAG_MCAST_ENABLED flag set, it can only change under
	 * multicast_lock which must be already held here, so the vlan's pmctx
	 * can safely be used on return
	 */
	rcu_read_lock();
	vlan = br_vlan_find(nbp_vlan_group_rcu(pg->key.port), pg->key.addr.vid);
	if (vlan && !br_multicast_port_ctx_vlan_disabled(&vlan->port_mcast_ctx))
		pmctx = &vlan->port_mcast_ctx;
	else
		/* vlan snooping enabled but this vlan has it disabled */
		pmctx = NULL;
	rcu_read_unlock();
out:
	return pmctx;
}
236
237 /* when snooping we need to check if the contexts should be used
238 * in the following order:
239 * - if pmctx is non-NULL (port), check if it should be used
240 * - if pmctx is NULL (bridge), check if brmctx should be used
241 */
242 static bool
br_multicast_ctx_should_use(const struct net_bridge_mcast * brmctx,const struct net_bridge_mcast_port * pmctx)243 br_multicast_ctx_should_use(const struct net_bridge_mcast *brmctx,
244 const struct net_bridge_mcast_port *pmctx)
245 {
246 if (!netif_running(brmctx->br->dev))
247 return false;
248
249 if (pmctx)
250 return !br_multicast_port_ctx_state_disabled(pmctx);
251 else
252 return !br_multicast_ctx_vlan_disabled(brmctx);
253 }
254
br_port_group_equal(struct net_bridge_port_group * p,struct net_bridge_port * port,const unsigned char * src)255 static bool br_port_group_equal(struct net_bridge_port_group *p,
256 struct net_bridge_port *port,
257 const unsigned char *src)
258 {
259 if (p->key.port != port)
260 return false;
261
262 if (!(port->flags & BR_MULTICAST_TO_UNICAST))
263 return true;
264
265 return ether_addr_equal(src, p->eth_addr);
266 }
267
/* Install a kernel-managed S,G entry (flagged STAR_EXCL) for sg_ip on
 * pg's port, used when replicating a *,G EXCLUDE group's sources.
 * Does nothing if an S,G entry for that port already exists.
 */
static void __fwd_add_star_excl(struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *src_pg;
	struct net_bridge_mcast *brmctx;

	memset(&sg_key, 0, sizeof(sg_key));
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	if (br_sg_port_find(brmctx->br, &sg_key))
		return;

	src_pg = __br_multicast_add_group(brmctx, pmctx,
					  sg_ip, pg->eth_addr,
					  MCAST_INCLUDE, false, false);
	/* only mark kernel-created entries; user-space (non-RTPROT_KERNEL)
	 * entries are left untouched
	 */
	if (IS_ERR_OR_NULL(src_pg) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
}
292
/* Remove an automatically installed (STAR_EXCL, kernel-managed) S,G entry
 * for sg_ip on pg's port. Entries not created by the kernel are kept.
 */
static void __fwd_del_star_excl(struct net_bridge_port_group *pg,
				struct br_ip *sg_ip)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *src_pg;

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.port = pg->key.port;
	sg_key.addr = *sg_ip;
	src_pg = br_sg_port_find(br, &sg_key);
	if (!src_pg || !(src_pg->flags & MDB_PG_FLAGS_STAR_EXCL) ||
	    src_pg->rt_protocol != RTPROT_KERNEL)
		return;

	br_multicast_find_del_pg(br, src_pg);
}
310
/* When a port group transitions to (or is added as) EXCLUDE we need to add it
 * to all other ports' S,G entries which are not blocked by the current group
 * for proper replication, the assumption is that any S,G blocked entries
 * are already added so the S,G,port lookup should skip them.
 * When a port group transitions from EXCLUDE -> INCLUDE mode or is being
 * deleted we need to remove it from all ports' S,G entries where it was
 * automatically installed before (i.e. where it's MDB_PG_FLAGS_STAR_EXCL).
 */
void br_multicast_star_g_handle_mode(struct net_bridge_port_group *pg,
				     u8 filter_mode)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_port_group *pg_lst;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	/* pg must be a *,G entry - S,G entries have no derived sources */
	if (WARN_ON(!br_multicast_is_star_g(&pg->key.addr)))
		return;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (!mp)
		return;
	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		return;

	/* sg_ip starts as the *,G address; only .src varies per source below */
	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;

	/* walk all other port groups of the same *,G entry */
	for (pg_lst = mlock_dereference(mp->ports, br);
	     pg_lst;
	     pg_lst = mlock_dereference(pg_lst->next, br)) {
		struct net_bridge_group_src *src_ent;

		if (pg_lst == pg)
			continue;
		hlist_for_each_entry(src_ent, &pg_lst->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			switch (filter_mode) {
			case MCAST_INCLUDE:
				__fwd_del_star_excl(pg, &sg_ip);
				break;
			case MCAST_EXCLUDE:
				__fwd_add_star_excl(pmctx, pg, &sg_ip);
				break;
			}
		}
	}
}
363
/* called when adding a new S,G with host_joined == false by default
 * Propagates the *,G entry's host_joined state to the corresponding
 * S,G MDB entry (the host joining *,G implicitly covers all its S,Gs).
 */
static void br_multicast_sg_host_state(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_mdb_entry *sg_mp;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;
	if (!star_mp->host_joined)
		return;

	sg_mp = br_mdb_ip_get(star_mp->br, &sg->key.addr);
	if (!sg_mp)
		return;
	sg_mp->host_joined = true;
}
380
/* set the host_joined state of all of *,G's S,G entries */
static void br_multicast_star_g_host_state(struct net_bridge_mdb_entry *star_mp)
{
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mdb_entry *sg_mp;
	struct net_bridge_port_group *pg;
	struct br_ip sg_ip;

	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	/* sg_ip is the *,G address with .src replaced per installed source */
	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = star_mp->addr;
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_group_src *src_ent;

		hlist_for_each_entry(src_ent, &pg->src_list, node) {
			if (!(src_ent->flags & BR_SGRP_F_INSTALLED))
				continue;
			sg_ip.src = src_ent->addr.src;
			sg_mp = br_mdb_ip_get(br, &sg_ip);
			if (!sg_mp)
				continue;
			/* mirror the *,G host join state onto the S,G entry */
			sg_mp->host_joined = star_mp->host_joined;
		}
	}
}
410
/* Remove kernel-managed exclude ports from an S,G entry once only
 * STAR_EXCL/permanent port groups remain (i.e. no real joins left).
 */
static void br_multicast_sg_del_exclude_ports(struct net_bridge_mdb_entry *sgmp)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p;

	/* *,G exclude ports are only added to S,G entries */
	if (WARN_ON(br_multicast_is_star_g(&sgmp->addr)))
		return;

	/* we need the STAR_EXCLUDE ports if there are non-STAR_EXCLUDE ports
	 * we should ignore perm entries since they're managed by user-space
	 */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;
	     pp = &p->next)
		if (!(p->flags & (MDB_PG_FLAGS_STAR_EXCL |
				  MDB_PG_FLAGS_PERMANENT)))
			return;

	/* currently the host can only have joined the *,G which means
	 * we treat it as EXCLUDE {}, so for an S,G it's considered a
	 * STAR_EXCLUDE entry and we can safely leave it
	 */
	sgmp->host_joined = false;

	/* delete everything except user-managed permanent entries */
	for (pp = &sgmp->ports;
	     (p = mlock_dereference(*pp, sgmp->br)) != NULL;) {
		if (!(p->flags & MDB_PG_FLAGS_PERMANENT))
			br_multicast_del_pg(sgmp, p, pp);
		else
			pp = &p->next;
	}
}
444
/* For a new S,G port group, add matching kernel-managed (STAR_EXCL)
 * entries on every port of the corresponding *,G entry that is in
 * EXCLUDE mode, so traffic is properly replicated to those ports.
 * Also syncs the host_joined state from the *,G entry.
 */
void br_multicast_sg_add_exclude_ports(struct net_bridge_mdb_entry *star_mp,
				       struct net_bridge_port_group *sg)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge *br = star_mp->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *pg;
	struct net_bridge_mcast *brmctx;

	/* sg must be an S,G port group, star_mp a *,G MDB entry */
	if (WARN_ON(br_multicast_is_star_g(&sg->key.addr)))
		return;
	if (WARN_ON(!br_multicast_is_star_g(&star_mp->addr)))
		return;

	br_multicast_sg_host_state(star_mp, sg);
	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = sg->key.addr;
	/* we need to add all exclude ports to the S,G */
	for (pg = mlock_dereference(star_mp->ports, br);
	     pg;
	     pg = mlock_dereference(pg->next, br)) {
		struct net_bridge_port_group *src_pg;

		if (pg == sg || pg->filter_mode == MCAST_INCLUDE)
			continue;

		/* skip ports which already have an S,G entry */
		sg_key.port = pg->key.port;
		if (br_sg_port_find(br, &sg_key))
			continue;

		pmctx = br_multicast_pg_to_port_ctx(pg);
		if (!pmctx)
			continue;
		brmctx = br_multicast_port_ctx_get_global(pmctx);

		src_pg = __br_multicast_add_group(brmctx, pmctx,
						  &sg->key.addr,
						  sg->eth_addr,
						  MCAST_INCLUDE, false, false);
		/* only flag entries the kernel created itself */
		if (IS_ERR_OR_NULL(src_pg) ||
		    src_pg->rt_protocol != RTPROT_KERNEL)
			continue;
		src_pg->flags |= MDB_PG_FLAGS_STAR_EXCL;
	}
}
490
/* Install the forwarding S,G port group entry for a group source, if not
 * already installed, and hand its lifetime over to the kernel (unless it
 * was added as a permanent entry by user-space).
 */
static void br_multicast_fwd_src_add(struct net_bridge_group_src *src)
{
	struct net_bridge_mdb_entry *star_mp;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_port_group *sg;
	struct net_bridge_mcast *brmctx;
	struct br_ip sg_ip;

	if (src->flags & BR_SGRP_F_INSTALLED)
		return;

	memset(&sg_ip, 0, sizeof(sg_ip));
	pmctx = br_multicast_pg_to_port_ctx(src->pg);
	if (!pmctx)
		return;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	sg_ip = src->pg->key.addr;
	sg_ip.src = src->addr.src;

	/* blocked (last arg) when the source timer isn't running */
	sg = __br_multicast_add_group(brmctx, pmctx, &sg_ip,
				      src->pg->eth_addr, MCAST_INCLUDE, false,
				      !timer_pending(&src->timer));
	if (IS_ERR_OR_NULL(sg))
		return;
	src->flags |= BR_SGRP_F_INSTALLED;
	sg->flags &= ~MDB_PG_FLAGS_STAR_EXCL;

	/* if it was added by user-space as perm we can skip next steps */
	if (sg->rt_protocol != RTPROT_KERNEL &&
	    (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	/* the kernel is now responsible for removing this S,G */
	del_timer(&sg->timer);
	star_mp = br_mdb_ip_get(src->br, &src->pg->key.addr);
	if (!star_mp)
		return;

	br_multicast_sg_add_exclude_ports(star_mp, sg);
}
531
/* Remove the forwarding S,G port group entry for a group source and clear
 * its installed flag. User-space permanent entries are left in place.
 * @fastleave: mark the entry with MDB_PG_FLAGS_FAST_LEAVE before deletion
 */
static void br_multicast_fwd_src_remove(struct net_bridge_group_src *src,
					bool fastleave)
{
	struct net_bridge_port_group *p, *pg = src->pg;
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct br_ip sg_ip;

	memset(&sg_ip, 0, sizeof(sg_ip));
	sg_ip = pg->key.addr;
	sg_ip.src = src->addr.src;

	mp = br_mdb_ip_get(src->br, &sg_ip);
	if (!mp)
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, src->br)) != NULL;
	     pp = &p->next) {
		if (!br_port_group_equal(p, pg->key.port, pg->eth_addr))
			continue;

		/* keep user-managed permanent entries */
		if (p->rt_protocol != RTPROT_KERNEL &&
		    (p->flags & MDB_PG_FLAGS_PERMANENT))
			break;

		if (fastleave)
			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_del_pg(mp, p, pp);
		break;
	}
	src->flags &= ~BR_SGRP_F_INSTALLED;
}
565
/* install S,G and based on src's timer enable or disable forwarding
 * (a stopped source timer means the S,G entry is marked BLOCKED);
 * notifies user-space via rtnetlink if the blocked state changed
 */
static void br_multicast_fwd_src_handle(struct net_bridge_group_src *src)
{
	struct net_bridge_port_group_sg_key sg_key;
	struct net_bridge_port_group *sg;
	u8 old_flags;

	br_multicast_fwd_src_add(src);

	memset(&sg_key, 0, sizeof(sg_key));
	sg_key.addr = src->pg->key.addr;
	sg_key.addr.src = src->addr.src;
	sg_key.port = src->pg->key.port;

	/* permanent (user-space) entries are never toggled by the kernel */
	sg = br_sg_port_find(src->br, &sg_key);
	if (!sg || (sg->flags & MDB_PG_FLAGS_PERMANENT))
		return;

	old_flags = sg->flags;
	if (timer_pending(&src->timer))
		sg->flags &= ~MDB_PG_FLAGS_BLOCKED;
	else
		sg->flags |= MDB_PG_FLAGS_BLOCKED;

	if (old_flags != sg->flags) {
		struct net_bridge_mdb_entry *sg_mp;

		sg_mp = br_mdb_ip_get(src->br, &sg_key.addr);
		if (!sg_mp)
			return;
		br_mdb_notify(src->br->dev, sg_mp, sg, RTM_NEWMDB);
	}
}
599
/* GC destructor for an MDB entry: runs from the gc workqueue after the
 * entry has been unlinked, stops its timer and frees it after a grace
 * period.
 */
static void br_multicast_destroy_mdb_entry(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_mdb_entry *mp;

	mp = container_of(gc, struct net_bridge_mdb_entry, mcast_gc);
	/* must already be unlinked and have no remaining port groups */
	WARN_ON(!hlist_unhashed(&mp->mdb_node));
	WARN_ON(mp->ports);

	del_timer_sync(&mp->timer);
	kfree_rcu(mp, rcu);
}
611
/* Unlink an MDB entry from the hashtable and lists, then defer its
 * destruction to the gc workqueue (see br_multicast_destroy_mdb_entry).
 */
static void br_multicast_del_mdb_entry(struct net_bridge_mdb_entry *mp)
{
	struct net_bridge *br = mp->br;

	rhashtable_remove_fast(&br->mdb_hash_tbl, &mp->rhnode,
			       br_mdb_rht_params);
	hlist_del_init_rcu(&mp->mdb_node);
	hlist_add_head(&mp->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}
622
/* MDB entry timer expiry: drop the host join and, if no port groups
 * remain, delete the entry. Bails out if the entry was already removed,
 * the bridge is down, or the timer has been re-armed meanwhile.
 */
static void br_multicast_group_expired(struct timer_list *t)
{
	struct net_bridge_mdb_entry *mp = from_timer(mp, t, timer);
	struct net_bridge *br = mp->br;

	spin_lock(&br->multicast_lock);
	if (hlist_unhashed(&mp->mdb_node) || !netif_running(br->dev) ||
	    timer_pending(&mp->timer))
		goto out;

	br_multicast_host_leave(mp, true);

	if (mp->ports)
		goto out;
	br_multicast_del_mdb_entry(mp);
out:
	spin_unlock(&br->multicast_lock);
}
641
/* GC destructor for a group source entry: stops its timer and frees it
 * after an RCU grace period. Runs from the gc workqueue.
 */
static void br_multicast_destroy_group_src(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_group_src *src;

	src = container_of(gc, struct net_bridge_group_src, mcast_gc);
	/* must already be unlinked from its port group's source list */
	WARN_ON(!hlist_unhashed(&src->node));

	del_timer_sync(&src->timer);
	kfree_rcu(src, rcu);
}
652
/* Remove a group source: delete its forwarding S,G entry, unlink it from
 * the port group and defer freeing to the gc workqueue.
 * @fastleave: propagated to the S,G removal (fast-leave notification)
 */
void br_multicast_del_group_src(struct net_bridge_group_src *src,
				bool fastleave)
{
	struct net_bridge *br = src->pg->key.port->br;

	br_multicast_fwd_src_remove(src, fastleave);
	hlist_del_init_rcu(&src->node);
	src->pg->src_ents--;
	hlist_add_head(&src->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);
}
664
/* GC destructor for a port group: stops both of its timers and frees it
 * after an RCU grace period. Runs from the gc workqueue.
 */
static void br_multicast_destroy_port_group(struct net_bridge_mcast_gc *gc)
{
	struct net_bridge_port_group *pg;

	pg = container_of(gc, struct net_bridge_port_group, mcast_gc);
	/* must be fully unlinked with all sources already removed */
	WARN_ON(!hlist_unhashed(&pg->mglist));
	WARN_ON(!hlist_empty(&pg->src_list));

	del_timer_sync(&pg->rexmit_timer);
	del_timer_sync(&pg->timer);
	kfree_rcu(pg, rcu);
}
677
/* Delete a port group from its MDB entry: unlink it (via the @pp link
 * pointer), drop all its sources, notify user-space, fix up related S,G
 * state and defer freeing to the gc workqueue. If this was the last port
 * and the host is not joined, the MDB entry timer is fired immediately.
 */
void br_multicast_del_pg(struct net_bridge_mdb_entry *mp,
			 struct net_bridge_port_group *pg,
			 struct net_bridge_port_group __rcu **pp)
{
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;

	rcu_assign_pointer(*pp, pg->next);
	hlist_del_init(&pg->mglist);
	br_multicast_eht_clean_sets(pg);
	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		br_multicast_del_group_src(ent, false);
	br_mdb_notify(br->dev, mp, pg, RTM_DELMDB);
	if (!br_multicast_is_star_g(&mp->addr)) {
		/* S,G entry: remove from the per-port S,G table and drop
		 * kernel-managed exclude ports if nothing real remains
		 */
		rhashtable_remove_fast(&br->sg_port_tbl, &pg->rhnode,
				       br_sg_port_rht_params);
		br_multicast_sg_del_exclude_ports(mp);
	} else {
		/* *,G entry: remove its auto-installed S,G port entries */
		br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
	}
	hlist_add_head(&pg->mcast_gc.gc_node, &br->mcast_gc_list);
	queue_work(system_long_wq, &br->mcast_gc_work);

	if (!mp->ports && !mp->host_joined && netif_running(br->dev))
		mod_timer(&mp->timer, jiffies);
}
705
/* Find the link pointer for @pg in its MDB entry's port list and delete
 * it via br_multicast_del_pg(). WARNs if the entry or the group cannot
 * be found - both indicate inconsistent MDB state.
 */
static void br_multicast_find_del_pg(struct net_bridge *br,
				     struct net_bridge_port_group *pg)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;

	mp = br_mdb_ip_get(br, &pg->key.addr);
	if (WARN_ON(!mp))
		return;

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, br)) != NULL;
	     pp = &p->next) {
		if (p != pg)
			continue;

		br_multicast_del_pg(mp, pg, pp);
		return;
	}

	/* pg was not on its own MDB entry's port list */
	WARN_ON(1);
}
729
br_multicast_port_group_expired(struct timer_list * t)730 static void br_multicast_port_group_expired(struct timer_list *t)
731 {
732 struct net_bridge_port_group *pg = from_timer(pg, t, timer);
733 struct net_bridge_group_src *src_ent;
734 struct net_bridge *br = pg->key.port->br;
735 struct hlist_node *tmp;
736 bool changed;
737
738 spin_lock(&br->multicast_lock);
739 if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
740 hlist_unhashed(&pg->mglist) || pg->flags & MDB_PG_FLAGS_PERMANENT)
741 goto out;
742
743 changed = !!(pg->filter_mode == MCAST_EXCLUDE);
744 pg->filter_mode = MCAST_INCLUDE;
745 hlist_for_each_entry_safe(src_ent, tmp, &pg->src_list, node) {
746 if (!timer_pending(&src_ent->timer)) {
747 br_multicast_del_group_src(src_ent, false);
748 changed = true;
749 }
750 }
751
752 if (hlist_empty(&pg->src_list)) {
753 br_multicast_find_del_pg(br, pg);
754 } else if (changed) {
755 struct net_bridge_mdb_entry *mp = br_mdb_ip_get(br, &pg->key.addr);
756
757 if (changed && br_multicast_is_star_g(&pg->key.addr))
758 br_multicast_star_g_handle_mode(pg, MCAST_INCLUDE);
759
760 if (WARN_ON(!mp))
761 goto out;
762 br_mdb_notify(br->dev, mp, pg, RTM_NEWMDB);
763 }
764 out:
765 spin_unlock(&br->multicast_lock);
766 }
767
/* Run the deferred destructors for every entry queued on @head; each
 * mcast_gc node carries its own destroy callback (MDB entry, port group
 * or group source).
 */
static void br_multicast_gc(struct hlist_head *head)
{
	struct net_bridge_mcast_gc *gcent;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(gcent, tmp, head, gc_node) {
		hlist_del_init(&gcent->gc_node);
		gcent->destroy(gcent);
	}
}
778
/* Tag an outgoing query skb with the context's vlan, unless the vlan
 * egresses untagged. Works for both port-vlan and bridge-vlan contexts.
 */
static void __br_multicast_query_handle_vlan(struct net_bridge_mcast *brmctx,
					     struct net_bridge_mcast_port *pmctx,
					     struct sk_buff *skb)
{
	struct net_bridge_vlan *vlan = NULL;

	/* prefer the port vlan context when available */
	if (pmctx && br_multicast_port_ctx_is_vlan(pmctx))
		vlan = pmctx->vlan;
	else if (br_multicast_ctx_is_vlan(brmctx))
		vlan = brmctx->vlan;

	if (vlan && !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
		u16 vlan_proto;

		if (br_vlan_get_proto(brmctx->br->dev, &vlan_proto) != 0)
			return;
		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vlan->vid);
	}
}
798
/* Allocate and build an IGMP query packet (Ethernet + IP + IGMPv2/v3).
 * @ip_dst/@group: IP destination and queried group (0 for general query)
 * @with_srcs: build a group-and-source specific query from pg's sources
 * @over_lmqt: select sources whose timers expire after (true) or before
 *	(false) the last member query time
 * @sflag: IGMPv3 suppress-router-side-processing flag
 * @igmp_type: out - the IGMP message type that was built
 * @need_rexmit: out - set when some source still has retransmits pending
 * Returns the skb, or NULL on allocation failure, oversized packet, or
 * when no sources match the lmqt selection.
 */
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    __be32 ip_dst, __be32 group,
						    bool with_srcs, bool over_lmqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, igmp_hdr_size;
	unsigned long now = jiffies;
	struct igmpv3_query *ihv3;
	void *csum_start = NULL;
	__sum16 *csum = NULL;
	struct sk_buff *skb;
	struct igmphdr *ih;
	struct ethhdr *eth;
	unsigned long lmqt;
	struct iphdr *iph;
	u16 lmqt_srcs = 0;

	igmp_hdr_size = sizeof(*ih);
	if (brmctx->multicast_igmp_version == 3) {
		igmp_hdr_size = sizeof(*ihv3);
		if (pg && with_srcs) {
			/* count the sources on the requested side of lmqt
			 * which still have pending query retransmits
			 */
			lmqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_lmqt == time_after(ent->timer.expires,
							    lmqt) &&
				    ent->src_query_rexmit_cnt > 0)
					lmqt_srcs++;
			}

			if (!lmqt_srcs)
				return NULL;
			igmp_hdr_size += lmqt_srcs * sizeof(__be32);
		}
	}

	/* + 4 bytes for the Router Alert IP option */
	pkt_size = sizeof(*eth) + sizeof(*iph) + 4 + igmp_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IP);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	ip_eth_mc_map(ip_dst, eth->h_dest);
	eth->h_proto = htons(ETH_P_IP);
	skb_put(skb, sizeof(*eth));

	/* IPv4 header + Router Alert option */
	skb_set_network_header(skb, skb->len);
	iph = ip_hdr(skb);
	iph->tot_len = htons(pkt_size - sizeof(*eth));

	iph->version = 4;
	iph->ihl = 6;		/* 20-byte header + 4-byte RA option */
	iph->tos = 0xc0;
	iph->id = 0;
	iph->frag_off = htons(IP_DF);
	iph->ttl = 1;
	iph->protocol = IPPROTO_IGMP;
	iph->saddr = br_opt_get(brmctx->br, BROPT_MULTICAST_QUERY_USE_IFADDR) ?
		     inet_select_addr(brmctx->br->dev, 0, RT_SCOPE_LINK) : 0;
	iph->daddr = ip_dst;
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;
	ip_send_check(iph);
	skb_put(skb, 24);	/* iph->ihl * 4: header plus RA option */

	skb_set_transport_header(skb, skb->len);
	*igmp_type = IGMP_HOST_MEMBERSHIP_QUERY;

	/* IGMP header, version dependent */
	switch (brmctx->multicast_igmp_version) {
	case 2:
		ih = igmp_hdr(skb);
		ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ih->code = (group ? brmctx->multicast_last_member_interval :
				    brmctx->multicast_query_response_interval) /
			   (HZ / IGMP_TIMER_SCALE);
		ih->group = group;
		ih->csum = 0;
		csum = &ih->csum;
		csum_start = (void *)ih;
		break;
	case 3:
		ihv3 = igmpv3_query_hdr(skb);
		ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
		ihv3->code = (group ? brmctx->multicast_last_member_interval :
				      brmctx->multicast_query_response_interval) /
			     (HZ / IGMP_TIMER_SCALE);
		ihv3->group = group;
		ihv3->qqic = brmctx->multicast_query_interval / HZ;
		ihv3->nsrcs = htons(lmqt_srcs);
		ihv3->resv = 0;
		ihv3->suppress = sflag;
		ihv3->qrv = 2;
		ihv3->csum = 0;
		csum = &ihv3->csum;
		csum_start = (void *)ihv3;
		if (!pg || !with_srcs)
			break;

		/* fill the source list using the same selection as above,
		 * consuming one retransmit credit per included source
		 */
		lmqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_lmqt == time_after(ent->timer.expires,
						    lmqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				ihv3->srcs[lmqt_srcs++] = ent->addr.src.ip4;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* source count must match what was sized for above */
		if (WARN_ON(lmqt_srcs != ntohs(ihv3->nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = ip_compute_csum(csum_start, igmp_hdr_size);
	skb_put(skb, igmp_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
944
#if IS_ENABLED(CONFIG_IPV6)
/* Build an MLDv1/MLDv2 query skb (general or group/group-and-source
 * specific, depending on @group/@with_srcs).
 *
 * @pg:          port group when sending a group-specific query, else NULL
 * @ip6_dst:     IPv6 destination for the query
 * @group:       queried group (:: for a general query)
 * @with_srcs:   include a source list (MLDv2 group-and-source query)
 * @over_llqt:   select sources whose timers expire after (true) or
 *               before (false) the last-listener query time
 * @sflag:       MLDv2 suppress router-side processing flag
 * @igmp_type:   out: ICMPv6 type used, for stats accounting
 * @need_rexmit: out: set when a selected source still has retransmits left
 *
 * Returns the skb, or NULL on allocation/size/source-selection failure.
 */
static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge_mcast *brmctx,
						    struct net_bridge_mcast_port *pmctx,
						    struct net_bridge_port_group *pg,
						    const struct in6_addr *ip6_dst,
						    const struct in6_addr *group,
						    bool with_srcs, bool over_llqt,
						    u8 sflag, u8 *igmp_type,
						    bool *need_rexmit)
{
	struct net_bridge_port *p = pg ? pg->key.port : NULL;
	struct net_bridge_group_src *ent;
	size_t pkt_size, mld_hdr_size;
	unsigned long now = jiffies;
	struct mld2_query *mld2q;
	void *csum_start = NULL;
	unsigned long interval;
	__sum16 *csum = NULL;
	struct ipv6hdr *ip6h;
	struct mld_msg *mldq;
	struct sk_buff *skb;
	unsigned long llqt;
	struct ethhdr *eth;
	u16 llqt_srcs = 0;
	u8 *hopopt;

	mld_hdr_size = sizeof(*mldq);
	if (brmctx->multicast_mld_version == 2) {
		mld_hdr_size = sizeof(*mld2q);
		if (pg && with_srcs) {
			/* First pass: count sources matching the llqt
			 * selection so the header size can be computed.
			 */
			llqt = now + (brmctx->multicast_last_member_interval *
				      brmctx->multicast_last_member_count);
			hlist_for_each_entry(ent, &pg->src_list, node) {
				if (over_llqt == time_after(ent->timer.expires,
							    llqt) &&
				    ent->src_query_rexmit_cnt > 0)
					llqt_srcs++;
			}

			if (!llqt_srcs)
				return NULL;
			mld_hdr_size += llqt_srcs * sizeof(struct in6_addr);
		}
	}

	/* 8 bytes = hop-by-hop extension header w/ router alert option */
	pkt_size = sizeof(*eth) + sizeof(*ip6h) + 8 + mld_hdr_size;
	if ((p && pkt_size > p->dev->mtu) ||
	    pkt_size > brmctx->br->dev->mtu)
		return NULL;

	skb = netdev_alloc_skb_ip_align(brmctx->br->dev, pkt_size);
	if (!skb)
		goto out;

	__br_multicast_query_handle_vlan(brmctx, pmctx, skb);
	skb->protocol = htons(ETH_P_IPV6);

	/* Ethernet header */
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	ether_addr_copy(eth->h_source, brmctx->br->dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);
	skb_put(skb, sizeof(*eth));

	/* IPv6 header + HbH option */
	skb_set_network_header(skb, skb->len);
	ip6h = ipv6_hdr(skb);

	*(__force __be32 *)ip6h = htonl(0x60000000);
	ip6h->payload_len = htons(8 + mld_hdr_size);
	ip6h->nexthdr = IPPROTO_HOPOPTS;
	ip6h->hop_limit = 1;
	ip6h->daddr = *ip6_dst;
	if (ipv6_dev_get_saddr(dev_net(brmctx->br->dev), brmctx->br->dev,
			       &ip6h->daddr, 0, &ip6h->saddr)) {
		/* No usable IPv6 source address on the bridge: remember
		 * that so querier election logic can account for it.
		 */
		kfree_skb(skb);
		br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, false);
		return NULL;
	}

	br_opt_toggle(brmctx->br, BROPT_HAS_IPV6_ADDR, true);
	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);

	hopopt = (u8 *)(ip6h + 1);
	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
	hopopt[1] = 0;				/* length of HbH */
	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
	hopopt[3] = 2;				/* Length of RA Option */
	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
	hopopt[5] = 0;
	hopopt[6] = IPV6_TLV_PAD1;		/* Pad1 */
	hopopt[7] = IPV6_TLV_PAD1;		/* Pad1 */

	skb_put(skb, sizeof(*ip6h) + 8);

	/* ICMPv6 */
	skb_set_transport_header(skb, skb->len);
	interval = ipv6_addr_any(group) ?
			brmctx->multicast_query_response_interval :
			brmctx->multicast_last_member_interval;
	*igmp_type = ICMPV6_MGM_QUERY;
	switch (brmctx->multicast_mld_version) {
	case 1:
		mldq = (struct mld_msg *)icmp6_hdr(skb);
		mldq->mld_type = ICMPV6_MGM_QUERY;
		mldq->mld_code = 0;
		mldq->mld_cksum = 0;
		mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
		mldq->mld_reserved = 0;
		mldq->mld_mca = *group;
		csum = &mldq->mld_cksum;
		csum_start = (void *)mldq;
		break;
	case 2:
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
		mld2q->mld2q_type = ICMPV6_MGM_QUERY;
		mld2q->mld2q_code = 0;
		mld2q->mld2q_cksum = 0;
		mld2q->mld2q_resv1 = 0;
		mld2q->mld2q_resv2 = 0;
		mld2q->mld2q_suppress = sflag;
		mld2q->mld2q_qrv = 2;
		mld2q->mld2q_nsrcs = htons(llqt_srcs);
		mld2q->mld2q_qqic = brmctx->multicast_query_interval / HZ;
		mld2q->mld2q_mca = *group;
		csum = &mld2q->mld2q_cksum;
		csum_start = (void *)mld2q;
		if (!pg || !with_srcs)
			break;

		/* Second pass: fill in the sources counted above and
		 * consume one retransmit credit per included source.
		 */
		llqt_srcs = 0;
		hlist_for_each_entry(ent, &pg->src_list, node) {
			if (over_llqt == time_after(ent->timer.expires,
						    llqt) &&
			    ent->src_query_rexmit_cnt > 0) {
				mld2q->mld2q_srcs[llqt_srcs++] = ent->addr.src.ip6;
				ent->src_query_rexmit_cnt--;
				if (need_rexmit && ent->src_query_rexmit_cnt)
					*need_rexmit = true;
			}
		}
		/* Both passes must agree or the packet is malformed. */
		if (WARN_ON(llqt_srcs != ntohs(mld2q->mld2q_nsrcs))) {
			kfree_skb(skb);
			return NULL;
		}
		break;
	}

	if (WARN_ON(!csum || !csum_start)) {
		kfree_skb(skb);
		return NULL;
	}

	*csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, mld_hdr_size,
				IPPROTO_ICMPV6,
				csum_partial(csum_start, mld_hdr_size, 0));
	skb_put(skb, mld_hdr_size);
	__skb_pull(skb, sizeof(*eth));

out:
	return skb;
}
#endif
1110
br_multicast_alloc_query(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct net_bridge_port_group * pg,struct br_ip * ip_dst,struct br_ip * group,bool with_srcs,bool over_lmqt,u8 sflag,u8 * igmp_type,bool * need_rexmit)1111 static struct sk_buff *br_multicast_alloc_query(struct net_bridge_mcast *brmctx,
1112 struct net_bridge_mcast_port *pmctx,
1113 struct net_bridge_port_group *pg,
1114 struct br_ip *ip_dst,
1115 struct br_ip *group,
1116 bool with_srcs, bool over_lmqt,
1117 u8 sflag, u8 *igmp_type,
1118 bool *need_rexmit)
1119 {
1120 __be32 ip4_dst;
1121
1122 switch (group->proto) {
1123 case htons(ETH_P_IP):
1124 ip4_dst = ip_dst ? ip_dst->dst.ip4 : htonl(INADDR_ALLHOSTS_GROUP);
1125 return br_ip4_multicast_alloc_query(brmctx, pmctx, pg,
1126 ip4_dst, group->dst.ip4,
1127 with_srcs, over_lmqt,
1128 sflag, igmp_type,
1129 need_rexmit);
1130 #if IS_ENABLED(CONFIG_IPV6)
1131 case htons(ETH_P_IPV6): {
1132 struct in6_addr ip6_dst;
1133
1134 if (ip_dst)
1135 ip6_dst = ip_dst->dst.ip6;
1136 else
1137 ipv6_addr_set(&ip6_dst, htonl(0xff020000), 0, 0,
1138 htonl(1));
1139
1140 return br_ip6_multicast_alloc_query(brmctx, pmctx, pg,
1141 &ip6_dst, &group->dst.ip6,
1142 with_srcs, over_lmqt,
1143 sflag, igmp_type,
1144 need_rexmit);
1145 }
1146 #endif
1147 }
1148 return NULL;
1149 }
1150
/* Look up the mdb entry for @group, creating it if needed.
 * Called with multicast_lock held (uses GFP_ATOMIC).
 * Returns the entry or an ERR_PTR; never NULL.
 */
struct net_bridge_mdb_entry *br_multicast_new_group(struct net_bridge *br,
						    struct br_ip *group)
{
	struct net_bridge_mdb_entry *mp;
	int err;

	mp = br_mdb_ip_get(br, group);
	if (mp)
		return mp;

	if (atomic_read(&br->mdb_hash_tbl.nelems) >= br->hash_max) {
		/* Table is full: turn snooping off entirely (and tell
		 * offloading drivers) rather than silently dropping groups.
		 */
		br_mc_disabled_update(br->dev, false, NULL);
		br_opt_toggle(br, BROPT_MULTICAST_ENABLED, false);
		return ERR_PTR(-E2BIG);
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		return ERR_PTR(-ENOMEM);

	mp->br = br;
	mp->addr = *group;
	mp->mcast_gc.destroy = br_multicast_destroy_mdb_entry;
	timer_setup(&mp->timer, br_multicast_group_expired, 0);
	err = rhashtable_lookup_insert_fast(&br->mdb_hash_tbl, &mp->rhnode,
					    br_mdb_rht_params);
	if (err) {
		kfree(mp);
		mp = ERR_PTR(err);
	} else {
		hlist_add_head_rcu(&mp->mdb_node, &br->mdb_list);
	}

	return mp;
}
1186
/* Timer callback: a source entry's timer ran out.  In INCLUDE mode an
 * expired source is removed (and the whole port group if it was the last
 * one); in EXCLUDE mode the source merely stops being forwarded.
 */
static void br_multicast_group_src_expired(struct timer_list *t)
{
	struct net_bridge_group_src *src = from_timer(src, t, timer);
	struct net_bridge_port_group *pg;
	struct net_bridge *br = src->br;

	spin_lock(&br->multicast_lock);
	/* Bail if the entry was unlinked, the bridge is down, or the
	 * timer was re-armed after this callback was scheduled.
	 */
	if (hlist_unhashed(&src->node) || !netif_running(br->dev) ||
	    timer_pending(&src->timer))
		goto out;

	pg = src->pg;
	if (pg->filter_mode == MCAST_INCLUDE) {
		br_multicast_del_group_src(src, false);
		if (!hlist_empty(&pg->src_list))
			goto out;
		/* Last source gone: delete the port group itself. */
		br_multicast_find_del_pg(br, pg);
	} else {
		br_multicast_fwd_src_handle(src);
	}

out:
	spin_unlock(&br->multicast_lock);
}
1211
1212 struct net_bridge_group_src *
br_multicast_find_group_src(struct net_bridge_port_group * pg,struct br_ip * ip)1213 br_multicast_find_group_src(struct net_bridge_port_group *pg, struct br_ip *ip)
1214 {
1215 struct net_bridge_group_src *ent;
1216
1217 switch (ip->proto) {
1218 case htons(ETH_P_IP):
1219 hlist_for_each_entry(ent, &pg->src_list, node)
1220 if (ip->src.ip4 == ent->addr.src.ip4)
1221 return ent;
1222 break;
1223 #if IS_ENABLED(CONFIG_IPV6)
1224 case htons(ETH_P_IPV6):
1225 hlist_for_each_entry(ent, &pg->src_list, node)
1226 if (!ipv6_addr_cmp(&ent->addr.src.ip6, &ip->src.ip6))
1227 return ent;
1228 break;
1229 #endif
1230 }
1231
1232 return NULL;
1233 }
1234
/* Allocate and link a new source entry under @pg.
 * Called with multicast_lock held (uses GFP_ATOMIC).
 * Returns NULL when the per-group source limit is hit, the address is
 * invalid as a source, or allocation fails.
 */
static struct net_bridge_group_src *
br_multicast_new_group_src(struct net_bridge_port_group *pg, struct br_ip *src_ip)
{
	struct net_bridge_group_src *grp_src;

	if (unlikely(pg->src_ents >= PG_SRC_ENT_LIMIT))
		return NULL;

	/* Reject addresses that can never be valid unicast sources. */
	switch (src_ip->proto) {
	case htons(ETH_P_IP):
		if (ipv4_is_zeronet(src_ip->src.ip4) ||
		    ipv4_is_multicast(src_ip->src.ip4))
			return NULL;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		if (ipv6_addr_any(&src_ip->src.ip6) ||
		    ipv6_addr_is_multicast(&src_ip->src.ip6))
			return NULL;
		break;
#endif
	}

	grp_src = kzalloc(sizeof(*grp_src), GFP_ATOMIC);
	if (unlikely(!grp_src))
		return NULL;

	grp_src->pg = pg;
	grp_src->br = pg->key.port->br;
	grp_src->addr = *src_ip;
	grp_src->mcast_gc.destroy = br_multicast_destroy_group_src;
	timer_setup(&grp_src->timer, br_multicast_group_src_expired, 0);

	hlist_add_head_rcu(&grp_src->node, &pg->src_list);
	pg->src_ents++;

	return grp_src;
}
1273
/* Allocate a new port group for @group on @port and link it in front of
 * @next.  (S,G) entries are additionally inserted into the per-bridge
 * sg_port hash.  Called with multicast_lock held (uses GFP_ATOMIC).
 *
 * @src:         host MAC to match, or NULL for "any" (broadcast filter)
 * @filter_mode: MCAST_INCLUDE or MCAST_EXCLUDE
 * @rt_protocol: origin of the entry (e.g. RTPROT_KERNEL, RTPROT_STATIC)
 *
 * Returns the new port group or NULL on failure.
 */
struct net_bridge_port_group *br_multicast_new_port_group(
			struct net_bridge_port *port,
			struct br_ip *group,
			struct net_bridge_port_group __rcu *next,
			unsigned char flags,
			const unsigned char *src,
			u8 filter_mode,
			u8 rt_protocol)
{
	struct net_bridge_port_group *p;

	p = kzalloc(sizeof(*p), GFP_ATOMIC);
	if (unlikely(!p))
		return NULL;

	p->key.addr = *group;
	p->key.port = port;
	p->flags = flags;
	p->filter_mode = filter_mode;
	p->rt_protocol = rt_protocol;
	p->eht_host_tree = RB_ROOT;
	p->eht_set_tree = RB_ROOT;
	p->mcast_gc.destroy = br_multicast_destroy_port_group;
	INIT_HLIST_HEAD(&p->src_list);

	/* Only (S,G) entries go into the sg_port hash; (*,G) don't. */
	if (!br_multicast_is_star_g(group) &&
	    rhashtable_lookup_insert_fast(&port->br->sg_port_tbl, &p->rhnode,
					  br_sg_port_rht_params)) {
		kfree(p);
		return NULL;
	}

	rcu_assign_pointer(p->next, next);
	timer_setup(&p->timer, br_multicast_port_group_expired, 0);
	timer_setup(&p->rexmit_timer, br_multicast_port_group_rexmit, 0);
	hlist_add_head(&p->mglist, &port->mglist);

	if (src)
		memcpy(p->eth_addr, src, ETH_ALEN);
	else
		eth_broadcast_addr(p->eth_addr);

	return p;
}
1318
/* Mark the bridge device itself as a member of @mp's group and (re)arm
 * the membership timer.  L2 (non-IP) groups are permanent and keep no
 * timer.  @notify controls whether a netlink RTM_NEWMDB event is sent
 * on a state change.
 */
void br_multicast_host_join(const struct net_bridge_mcast *brmctx,
			    struct net_bridge_mdb_entry *mp, bool notify)
{
	bool newly_joined = !mp->host_joined;

	if (newly_joined) {
		mp->host_joined = true;
		if (br_multicast_is_star_g(&mp->addr))
			br_multicast_star_g_host_state(mp);
		if (notify)
			br_mdb_notify(mp->br->dev, mp, NULL, RTM_NEWMDB);
	}

	if (br_group_is_l2(&mp->addr))
		return;

	mod_timer(&mp->timer, jiffies + brmctx->multicast_membership_interval);
}
1335
/* Clear the bridge device's own membership in @mp's group; optionally
 * send an RTM_DELMDB notification.  No-op if the host was not joined.
 */
void br_multicast_host_leave(struct net_bridge_mdb_entry *mp, bool notify)
{
	if (!mp->host_joined)
		return;

	mp->host_joined = false;
	if (br_multicast_is_star_g(&mp->addr))
		br_multicast_star_g_host_state(mp);
	if (notify)
		br_mdb_notify(mp->br->dev, mp, NULL, RTM_DELMDB);
}
1347
/* Core join handling: add (or refresh) membership for @group.
 * Called with multicast_lock held.
 *
 * With a NULL @pmctx the join is for the bridge device itself (host
 * join); otherwise a port group is looked up or created for the port.
 * @igmpv2_mldv1 refreshes the group timer (v2/v1 reports have no
 * explicit source state to drive per-source timers).
 *
 * Returns the port group, NULL for host joins / filtered contexts, or
 * an ERR_PTR on failure.
 */
static struct net_bridge_port_group *
__br_multicast_add_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 const unsigned char *src,
			 u8 filter_mode,
			 bool igmpv2_mldv1,
			 bool blocked)
{
	struct net_bridge_port_group __rcu **pp;
	struct net_bridge_port_group *p = NULL;
	struct net_bridge_mdb_entry *mp;
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_multicast_new_group(brmctx->br, group);
	if (IS_ERR(mp))
		return ERR_CAST(mp);

	if (!pmctx) {
		br_multicast_host_join(brmctx, mp, true);
		goto out;
	}

	/* Walk the port list (sorted by descending port pointer) looking
	 * for an existing matching entry; stop at the insertion point.
	 */
	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		if (br_port_group_equal(p, pmctx->port, src))
			goto found;
		if ((unsigned long)p->key.port < (unsigned long)pmctx->port)
			break;
	}

	p = br_multicast_new_port_group(pmctx->port, group, *pp, 0, src,
					filter_mode, RTPROT_KERNEL);
	if (unlikely(!p)) {
		p = ERR_PTR(-ENOMEM);
		goto out;
	}
	rcu_assign_pointer(*pp, p);
	if (blocked)
		p->flags |= MDB_PG_FLAGS_BLOCKED;
	br_mdb_notify(brmctx->br->dev, mp, p, RTM_NEWMDB);

found:
	if (igmpv2_mldv1)
		mod_timer(&p->timer,
			  now + brmctx->multicast_membership_interval);

out:
	return p;
}
1402
/* Locked wrapper around __br_multicast_add_group() that converts the
 * result to an errno.  NULL (host join / filtered context) counts as
 * success.
 */
static int br_multicast_add_group(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct br_ip *group,
				  const unsigned char *src,
				  u8 filter_mode,
				  bool igmpv2_mldv1)
{
	struct net_bridge_port_group *pg;
	int err;

	spin_lock(&brmctx->br->multicast_lock);
	pg = __br_multicast_add_group(brmctx, pmctx, group, src, filter_mode,
				      igmpv2_mldv1, false);
	/* NULL is considered valid for host joined groups */
	err = PTR_ERR_OR_ZERO(pg);
	spin_unlock(&brmctx->br->multicast_lock);

	return err;
}
1422
/* Handle an IGMP join for IPv4 @group on @vid.  Link-local groups
 * (224.0.0.0/24) are always flooded and never tracked.  An IGMPv2
 * report implies EXCLUDE {} filter mode; IGMPv3 starts as INCLUDE.
 */
static int br_ip4_multicast_add_group(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      __be32 group,
				      __u16 vid,
				      const unsigned char *src,
				      bool igmpv2)
{
	struct br_ip br_group = {
		.dst.ip4 = group,
		.proto	 = htons(ETH_P_IP),
		.vid	 = vid,
	};

	if (ipv4_is_local_multicast(group))
		return 0;

	return br_multicast_add_group(brmctx, pmctx, &br_group, src,
				      igmpv2 ? MCAST_EXCLUDE : MCAST_INCLUDE,
				      igmpv2);
}
1445
1446 #if IS_ENABLED(CONFIG_IPV6)
br_ip6_multicast_add_group(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,const struct in6_addr * group,__u16 vid,const unsigned char * src,bool mldv1)1447 static int br_ip6_multicast_add_group(struct net_bridge_mcast *brmctx,
1448 struct net_bridge_mcast_port *pmctx,
1449 const struct in6_addr *group,
1450 __u16 vid,
1451 const unsigned char *src,
1452 bool mldv1)
1453 {
1454 struct br_ip br_group;
1455 u8 filter_mode;
1456
1457 if (ipv6_addr_is_ll_all_nodes(group))
1458 return 0;
1459
1460 memset(&br_group, 0, sizeof(br_group));
1461 br_group.dst.ip6 = *group;
1462 br_group.proto = htons(ETH_P_IPV6);
1463 br_group.vid = vid;
1464 filter_mode = mldv1 ? MCAST_EXCLUDE : MCAST_INCLUDE;
1465
1466 return br_multicast_add_group(brmctx, pmctx, &br_group, src,
1467 filter_mode, mldv1);
1468 }
1469 #endif
1470
/* Unlink a router-port list node.  Returns true if the node was actually
 * on a list (i.e. the caller should send a notification).
 */
static bool br_multicast_rport_del(struct hlist_node *rlist)
{
	if (hlist_unhashed(rlist))
		return false;

	hlist_del_init_rcu(rlist);
	return true;
}
1479
/* Remove the port from the IPv4 multicast-router list. */
static bool br_ip4_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
	return br_multicast_rport_del(&pmctx->ip4_rlist);
}
1484
/* Remove the port from the IPv6 multicast-router list; always false
 * (nothing to remove) when IPv6 is compiled out.
 */
static bool br_ip6_multicast_rport_del(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	return br_multicast_rport_del(&pmctx->ip6_rlist);
#else
	return false;
#endif
}
1493
/* Common body for the per-port router timers: drop the port from the
 * router list when its learned-router state times out.  Ports with
 * static router configuration (DISABLED/PERM) are never expired.
 */
static void br_multicast_router_expired(struct net_bridge_mcast_port *pmctx,
					struct timer_list *t,
					struct hlist_node *rlist)
{
	struct net_bridge *br = pmctx->port->br;
	bool del;

	spin_lock(&br->multicast_lock);
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    timer_pending(t))	/* re-armed after expiry was queued */
		goto out;

	del = br_multicast_rport_del(rlist);
	br_multicast_rport_del_notify(pmctx, del);
out:
	spin_unlock(&br->multicast_lock);
}
1512
/* Timer callback: IPv4 learned-router state on a port timed out. */
static void br_ip4_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip4_rlist);
}
1520
#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: IPv6 learned-router state on a port timed out. */
static void br_ip6_multicast_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_mc_router_timer);

	br_multicast_router_expired(pmctx, t, &pmctx->ip6_rlist);
}
#endif
1530
/* Propagate the bridge's own multicast-router state to offloading
 * switchdev drivers (deferred, best-effort).
 */
static void br_mc_router_state_change(struct net_bridge *p,
				      bool is_mc_router)
{
	struct switchdev_attr attr = {
		.orig_dev = p->dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
		.flags = SWITCHDEV_F_DEFER,
		.u.mrouter = is_mc_router,
	};

	switchdev_port_attr_set(p->dev, &attr, NULL);
}
1543
/* Common body for the bridge's own router timers: when neither family
 * still considers the bridge a router (and the state isn't statically
 * configured), tell switchdev the bridge stopped being a router.
 * Note: @timer is unused; the callbacks pass it for symmetry.
 */
static void br_multicast_local_router_expired(struct net_bridge_mcast *brmctx,
					      struct timer_list *timer)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (brmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    brmctx->multicast_router == MDB_RTR_TYPE_PERM ||
	    br_ip4_multicast_is_router(brmctx) ||
	    br_ip6_multicast_is_router(brmctx))
		goto out;

	br_mc_router_state_change(brmctx->br, false);
out:
	spin_unlock(&brmctx->br->multicast_lock);
}
1558
/* Timer callback: bridge's own IPv4 router state timed out. */
static void br_ip4_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
1566
#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: bridge's own IPv6 router state timed out. */
static void br_ip6_multicast_local_router_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_mc_router_timer);

	br_multicast_local_router_expired(brmctx, t);
}
#endif
1576
/* Common body for the other-querier timers: the foreign querier went
 * silent, so take over querying ourselves (if snooping is enabled and
 * the device is up).
 */
static void br_multicast_querier_expired(struct net_bridge_mcast *brmctx,
					 struct bridge_mcast_own_query *query)
{
	spin_lock(&brmctx->br->multicast_lock);
	if (!netif_running(brmctx->br->dev) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		goto out;

	br_multicast_start_querier(brmctx, query);

out:
	spin_unlock(&brmctx->br->multicast_lock);
}
1591
/* Timer callback: the foreign IPv4 querier timed out. */
static void br_ip4_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip4_own_query);
}
1599
#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: the foreign IPv6 querier timed out. */
static void br_ip6_multicast_querier_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_other_query.timer);

	br_multicast_querier_expired(brmctx, &brmctx->ip6_own_query);
}
#endif
1609
/* Intentionally empty: the delay timer only needs to *expire*; callers
 * test it with timer_pending() to gate sending queries.
 */
static void br_multicast_query_delay_expired(struct timer_list *t)
{
}
1613
/* Record the source address of our own query skb as the elected querier
 * address for the matching family.
 */
static void br_multicast_select_own_querier(struct net_bridge_mcast *brmctx,
					    struct br_ip *ip,
					    struct sk_buff *skb)
{
	if (ip->proto == htons(ETH_P_IP))
		brmctx->ip4_querier.addr.src.ip4 = ip_hdr(skb)->saddr;
#if IS_ENABLED(CONFIG_IPV6)
	else
		brmctx->ip6_querier.addr.src.ip6 = ipv6_hdr(skb)->saddr;
#endif
}
1625
/* Build and emit one query.  With @pmctx set the query is transmitted
 * out that port; otherwise it is looped back into the bridge as our own
 * querier traffic.
 *
 * When sending a source-specific query with the suppress flag set, a
 * second pass re-sends for the sources *under* the last-member query
 * time with suppression cleared (the two passes partition the source
 * list by timer expiry).
 */
static void __br_multicast_send_query(struct net_bridge_mcast *brmctx,
				      struct net_bridge_mcast_port *pmctx,
				      struct net_bridge_port_group *pg,
				      struct br_ip *ip_dst,
				      struct br_ip *group,
				      bool with_srcs,
				      u8 sflag,
				      bool *need_rexmit)
{
	/* first pass selects sources over lmqt iff suppression is on */
	bool over_lmqt = !!sflag;
	struct sk_buff *skb;
	u8 igmp_type;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

again_under_lmqt:
	skb = br_multicast_alloc_query(brmctx, pmctx, pg, ip_dst, group,
				       with_srcs, over_lmqt, sflag, &igmp_type,
				       need_rexmit);
	if (!skb)
		return;

	if (pmctx) {
		skb->dev = pmctx->port->dev;
		br_multicast_count(brmctx->br, pmctx->port, skb, igmp_type,
				   BR_MCAST_DIR_TX);
		NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT,
			dev_net(pmctx->port->dev), NULL, skb, NULL, skb->dev,
			br_dev_queue_push_xmit);

		if (over_lmqt && with_srcs && sflag) {
			over_lmqt = false;
			goto again_under_lmqt;
		}
	} else {
		br_multicast_select_own_querier(brmctx, group, skb);
		br_multicast_count(brmctx->br, NULL, skb, igmp_type,
				   BR_MCAST_DIR_RX);
		netif_rx(skb);
	}
}
1669
/* Lock-free consistent snapshot of the querier state, using the
 * seqcount to retry if a writer raced with us.
 */
static void br_multicast_read_querier(const struct bridge_mcast_querier *querier,
				      struct bridge_mcast_querier *dest)
{
	unsigned int seq;

	memset(dest, 0, sizeof(*dest));
	do {
		seq = read_seqcount_begin(&querier->seq);
		dest->port_ifidx = querier->port_ifidx;
		memcpy(&dest->addr, &querier->addr, sizeof(struct br_ip));
	} while (read_seqcount_retry(&querier->seq, seq));
}
1682
/* Publish a new elected querier (address + ingress ifindex) under the
 * seqcount write side.  @brmctx is currently unused but kept for
 * interface symmetry with the read path.
 */
static void br_multicast_update_querier(struct net_bridge_mcast *brmctx,
					struct bridge_mcast_querier *querier,
					int ifindex,
					struct br_ip *saddr)
{
	write_seqcount_begin(&querier->seq);
	querier->port_ifidx = ifindex;
	memcpy(&querier->addr, saddr, sizeof(*saddr));
	write_seqcount_end(&querier->seq);
}
1693
/* Send a general query for the family that @own_query belongs to and
 * re-arm the own-query timer (startup interval while still within the
 * startup query count, regular interval after).  Suppressed while a
 * foreign querier is active.
 */
static void br_multicast_send_query(struct net_bridge_mcast *brmctx,
				    struct net_bridge_mcast_port *pmctx,
				    struct bridge_mcast_own_query *own_query)
{
	struct bridge_mcast_other_query *other_query = NULL;
	struct bridge_mcast_querier *querier;
	struct br_ip br_group;
	unsigned long time;

	if (!br_multicast_ctx_should_use(brmctx, pmctx) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    !brmctx->multicast_querier)
		return;

	memset(&br_group.dst, 0, sizeof(br_group.dst));

	/* Identify the family by which own_query struct we were given. */
	if (pmctx ? (own_query == &pmctx->ip4_own_query) :
		    (own_query == &brmctx->ip4_own_query)) {
		querier = &brmctx->ip4_querier;
		other_query = &brmctx->ip4_other_query;
		br_group.proto = htons(ETH_P_IP);
#if IS_ENABLED(CONFIG_IPV6)
	} else {
		querier = &brmctx->ip6_querier;
		other_query = &brmctx->ip6_other_query;
		br_group.proto = htons(ETH_P_IPV6);
#endif
	}

	/* other_query stays NULL for IPv6 when IPv6 is compiled out */
	if (!other_query || timer_pending(&other_query->timer))
		return;

	/* we're about to select ourselves as querier */
	if (!pmctx && querier->port_ifidx) {
		struct br_ip zeroip = {};

		br_multicast_update_querier(brmctx, querier, 0, &zeroip);
	}

	__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &br_group, false,
				  0, NULL);

	time = jiffies;
	time += own_query->startup_sent < brmctx->multicast_startup_query_count ?
		brmctx->multicast_startup_query_interval :
		brmctx->multicast_query_interval;
	mod_timer(&own_query->timer, time);
}
1742
/* Common body for the per-port own-query timers: send the next query
 * for this port unless its STP state stopped multicast processing.
 */
static void
br_multicast_port_query_expired(struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_own_query *query)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	spin_lock(&br->multicast_lock);
	if (br_multicast_port_ctx_state_stopped(pmctx))
		goto out;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	/* Count startup queries so the faster startup interval is only
	 * used for the configured number of initial queries.
	 */
	if (query->startup_sent < brmctx->multicast_startup_query_count)
		query->startup_sent++;

	br_multicast_send_query(brmctx, pmctx, query);

out:
	spin_unlock(&br->multicast_lock);
}
1763
/* Timer callback: time to send the next IPv4 query on this port. */
static void br_ip4_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip4_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip4_own_query);
}
1771
#if IS_ENABLED(CONFIG_IPV6)
/* Timer callback: time to send the next IPv6 query on this port. */
static void br_ip6_multicast_port_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast_port *pmctx = from_timer(pmctx, t,
							 ip6_own_query.timer);

	br_multicast_port_query_expired(pmctx, &pmctx->ip6_own_query);
}
#endif
1781
/* Timer callback: retransmit pending group and group-and-source
 * specific queries for a port group (fast-leave handling), as long as
 * retransmit credits remain and no foreign querier is active.
 */
static void br_multicast_port_group_rexmit(struct timer_list *t)
{
	struct net_bridge_port_group *pg = from_timer(pg, t, rexmit_timer);
	struct bridge_mcast_other_query *other_query = NULL;
	struct net_bridge *br = pg->key.port->br;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	bool need_rexmit = false;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
	    !br_opt_get(br, BROPT_MULTICAST_ENABLED))
		goto out;

	pmctx = br_multicast_pg_to_port_ctx(pg);
	if (!pmctx)
		goto out;
	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!brmctx->multicast_querier)
		goto out;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	/* A foreign querier is responsible for queries while active. */
	if (!other_query || timer_pending(&other_query->timer))
		goto out;

	if (pg->grp_query_rexmit_cnt) {
		pg->grp_query_rexmit_cnt--;
		/* group-specific query, suppress flag set */
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 1, NULL);
	}
	/* group-and-source specific query; sets need_rexmit if any
	 * source still has credits left
	 */
	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 0, &need_rexmit);

	if (pg->grp_query_rexmit_cnt || need_rexmit)
		mod_timer(&pg->rexmit_timer, jiffies +
			  brmctx->multicast_last_member_interval);
out:
	spin_unlock(&br->multicast_lock);
}
1827
/* Tell offloading switchdev drivers whether multicast snooping is
 * enabled (@value true = enabled; the attribute carries the inverse,
 * "mc_disabled").  Deferred; returns switchdev's result.
 */
static int br_mc_disabled_update(struct net_device *dev, bool value,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
		.flags = SWITCHDEV_F_DEFER,
		.u.mc_disabled = !value,
	};

	return switchdev_port_attr_set(dev, &attr, extack);
}
1840
/* Initialize a per-port (or per-port-vlan, when @vlan != NULL) multicast
 * context: default router mode and the router/own-query timers.
 */
void br_multicast_port_ctx_init(struct net_bridge_port *port,
				struct net_bridge_vlan *vlan,
				struct net_bridge_mcast_port *pmctx)
{
	pmctx->port = port;
	pmctx->vlan = vlan;
	pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	timer_setup(&pmctx->ip4_mc_router_timer,
		    br_ip4_multicast_router_expired, 0);
	timer_setup(&pmctx->ip4_own_query.timer,
		    br_ip4_multicast_port_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&pmctx->ip6_mc_router_timer,
		    br_ip6_multicast_router_expired, 0);
	timer_setup(&pmctx->ip6_own_query.timer,
		    br_ip6_multicast_port_query_expired, 0);
#endif
}
1859
/* Tear down a per-port multicast context: synchronously stop the router
 * timers so no callback can run after the context is freed.
 */
void br_multicast_port_ctx_deinit(struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&pmctx->ip6_mc_router_timer);
#endif
	del_timer_sync(&pmctx->ip4_mc_router_timer);
}
1867
/* Per-port multicast setup when a port joins the bridge: init the port
 * context, sync the snooping state to switchdev (-EOPNOTSUPP from
 * drivers without offload is ignored) and allocate per-cpu stats.
 * Returns 0 or a negative errno.
 */
int br_multicast_add_port(struct net_bridge_port *port)
{
	int err;

	port->multicast_eht_hosts_limit = BR_MCAST_DEFAULT_EHT_HOSTS_LIMIT;
	br_multicast_port_ctx_init(port, NULL, &port->multicast_ctx);

	err = br_mc_disabled_update(port->dev,
				    br_opt_get(port->br,
					       BROPT_MULTICAST_ENABLED),
				    NULL);
	if (err && err != -EOPNOTSUPP)
		return err;

	port->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
	if (!port->mcast_stats)
		return -ENOMEM;

	return 0;
}
1888
/* Per-port multicast teardown when a port leaves the bridge: delete all
 * remaining port groups, run the deferred GC outside the lock, stop the
 * timers and free the stats.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	HLIST_HEAD(deleted_head);
	struct hlist_node *n;

	/* Take care of the remaining groups, only perm ones should be left */
	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, n, &port->mglist, mglist)
		br_multicast_find_del_pg(br, pg);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);
	br_multicast_gc(&deleted_head);
	br_multicast_port_ctx_deinit(&port->multicast_ctx);
	free_percpu(port->mcast_stats);
}
1906
/* Restart an own-query cycle: reset the startup counter and, if the
 * timer could be (re)claimed, fire it immediately.  try_to_del_timer_sync
 * may return -1 when the callback is running right now; in that case the
 * running callback will re-arm the timer itself.
 */
static void br_multicast_enable(struct bridge_mcast_own_query *query)
{
	query->startup_sent = 0;

	if (try_to_del_timer_sync(&query->timer) >= 0 ||
	    del_timer(&query->timer))
		mod_timer(&query->timer, jiffies);
}
1915
/* Enable multicast processing on a port context: kick the own-query
 * timers and, for statically configured router ports, re-add them to
 * the router lists.  Caller holds multicast_lock.
 */
static void __br_multicast_enable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge *br = pmctx->port->br;
	struct net_bridge_mcast *brmctx;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED) ||
	    !netif_running(br->dev))
		return;

	br_multicast_enable(&pmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_enable(&pmctx->ip6_own_query);
#endif
	if (pmctx->multicast_router == MDB_RTR_TYPE_PERM) {
		br_ip4_multicast_add_router(brmctx, pmctx);
		br_ip6_multicast_add_router(brmctx, pmctx);
	}
}
1935
/* Public wrapper: enable multicast on a port's default context while
 * holding the bridge multicast lock.
 */
void br_multicast_enable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;

	spin_lock_bh(&br->multicast_lock);
	__br_multicast_enable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&br->multicast_lock);
}
1944
/* Disable multicast processing for a port (or per-vlan port) context:
 * drop all non-permanent group memberships belonging to this context,
 * remove it from the router port lists and stop its timers.
 * Caller must hold the bridge multicast lock.
 */
static void __br_multicast_disable_port_ctx(struct net_bridge_mcast_port *pmctx)
{
	struct net_bridge_port_group *pg;
	struct hlist_node *n;
	bool del = false;

	/* For a vlan context only remove groups matching that vlan's vid;
	 * a port context removes all of the port's non-permanent groups.
	 */
	hlist_for_each_entry_safe(pg, n, &pmctx->port->mglist, mglist)
		if (!(pg->flags & MDB_PG_FLAGS_PERMANENT) &&
		    (!br_multicast_port_ctx_is_vlan(pmctx) ||
		     pg->key.addr.vid == pmctx->vlan->vid))
			br_multicast_find_del_pg(pmctx->port->br, pg);

	del |= br_ip4_multicast_rport_del(pmctx);
	del_timer(&pmctx->ip4_mc_router_timer);
	del_timer(&pmctx->ip4_own_query.timer);
	/* NOTE(review): br_ip6_multicast_rport_del() is called outside the
	 * CONFIG_IPV6 guard — presumably it is a stub when IPv6 is disabled;
	 * confirm against br_private.h.
	 */
	del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer(&pmctx->ip6_mc_router_timer);
	del_timer(&pmctx->ip6_own_query.timer);
#endif
	br_multicast_rport_del_notify(pmctx, del);
}
1967
/* Public wrapper: disable multicast on a port's default context while
 * holding the bridge multicast lock.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	spin_lock_bh(&port->br->multicast_lock);
	__br_multicast_disable_port_ctx(&port->multicast_ctx);
	spin_unlock_bh(&port->br->multicast_lock);
}
1974
/* Delete every source entry of @pg that carries BR_SGRP_F_DELETE.
 * Uses the _safe iterator because entries are unlinked during the walk.
 * Returns the number of entries deleted.
 */
static int __grp_src_delete_marked(struct net_bridge_port_group *pg)
{
	struct net_bridge_group_src *ent;
	struct hlist_node *tmp;
	int deleted = 0;

	hlist_for_each_entry_safe(ent, tmp, &pg->src_list, node)
		if (ent->flags & BR_SGRP_F_DELETE) {
			br_multicast_del_group_src(ent, false);
			deleted++;
		}

	return deleted;
}
1989
/* Rearm a source entry's timer and refresh its forwarding state, which
 * depends on whether the timer is pending.
 */
static void __grp_src_mod_timer(struct net_bridge_group_src *src,
				unsigned long expires)
{
	mod_timer(&src->timer, expires);
	br_multicast_fwd_src_handle(src);
}
1996
/* Send a group-and-source specific query for all sources of @pg marked
 * with BR_SGRP_F_SEND, lower their timers to the last-member query time
 * (LMQT) and schedule retransmissions.  Queries are only sent when we are
 * the active querier (no other querier's timer pending).
 */
static void __grp_src_query_marked_and_rexmit(struct net_bridge_mcast *brmctx,
					      struct net_bridge_mcast_port *pmctx,
					      struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	u32 lmqc = brmctx->multicast_last_member_count;
	unsigned long lmqt, lmi, now = jiffies;
	struct net_bridge_group_src *ent;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	lmqt = now + br_multicast_lmqt(brmctx);
	hlist_for_each_entry(ent, &pg->src_list, node) {
		if (ent->flags & BR_SGRP_F_SEND) {
			ent->flags &= ~BR_SGRP_F_SEND;
			/* Only lower the timer; a source already below LMQT
			 * is left alone (and gets no rexmit budget).
			 */
			if (ent->timer.expires > lmqt) {
				if (brmctx->multicast_querier &&
				    other_query &&
				    !timer_pending(&other_query->timer))
					ent->src_query_rexmit_cnt = lmqc;
				__grp_src_mod_timer(ent, lmqt);
			}
		}
	}

	if (!brmctx->multicast_querier ||
	    !other_query || timer_pending(&other_query->timer))
		return;

	__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
				  &pg->key.addr, true, 1, NULL);

	/* Schedule the next retransmission at the last member interval,
	 * only moving an already-pending rexmit timer earlier.
	 */
	lmi = now + brmctx->multicast_last_member_interval;
	if (!timer_pending(&pg->rexmit_timer) ||
	    time_after(pg->rexmit_timer.expires, lmi))
		mod_timer(&pg->rexmit_timer, lmi);
}
2043
/* Send a group-specific query for @pg (when we are the active querier),
 * set up its retransmission budget and, for EXCLUDE mode groups, lower
 * the group timer to the last-member query time.
 */
static void __grp_send_query_and_rexmit(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct net_bridge_port_group *pg)
{
	struct bridge_mcast_other_query *other_query = NULL;
	unsigned long now = jiffies, lmi;

	if (!netif_running(brmctx->br->dev) ||
	    !br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED))
		return;

	if (pg->key.addr.proto == htons(ETH_P_IP))
		other_query = &brmctx->ip4_other_query;
#if IS_ENABLED(CONFIG_IPV6)
	else
		other_query = &brmctx->ip6_other_query;
#endif

	if (brmctx->multicast_querier &&
	    other_query && !timer_pending(&other_query->timer)) {
		lmi = now + brmctx->multicast_last_member_interval;
		/* One query is sent below, hence count - 1 remain */
		pg->grp_query_rexmit_cnt = brmctx->multicast_last_member_count - 1;
		__br_multicast_send_query(brmctx, pmctx, pg, &pg->key.addr,
					  &pg->key.addr, false, 0, NULL);
		if (!timer_pending(&pg->rexmit_timer) ||
		    time_after(pg->rexmit_timer.expires, lmi))
			mod_timer(&pg->rexmit_timer, lmi);
	}

	if (pg->filter_mode == MCAST_EXCLUDE &&
	    (!timer_pending(&pg->timer) ||
	     time_after(pg->timer.expires, now + br_multicast_lmqt(brmctx))))
		mod_timer(&pg->timer, now + br_multicast_lmqt(brmctx));
}
2078
2079 /* State Msg type New state Actions
2080 * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI
2081 * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI
2082 * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI
2083 */
/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_IN (B)     INCLUDE (A+B)     (B)=GMI
 * INCLUDE (A)    ALLOW (B)     INCLUDE (A+B)     (B)=GMI
 * EXCLUDE (X,Y)  ALLOW (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *
 * Add every reported source to the group and refresh its timer to the
 * group membership interval.  Returns true if the set of sources (or EHT
 * state) changed and an mdb notification should be sent.
 */
static bool br_multicast_isinc_allow(const struct net_bridge_mcast *brmctx,
				     struct net_bridge_port_group *pg, void *h_addr,
				     void *srcs, u32 nsrcs, size_t addr_size,
				     int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		/* srcs is a packed array of nsrcs addresses of addr_size
		 * bytes each (IPv4 or IPv6).
		 */
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}

		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	return changed;
}
2116
2117 /* State Msg type New state Actions
2118 * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
2119 * Delete (A-B)
2120 * Group Timer=GMI
2121 */
/* State          Msg type      New state         Actions
 * INCLUDE (A)    IS_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Group Timer=GMI
 *
 * Transition an INCLUDE group to EXCLUDE: keep only sources also in the
 * report (clearing the DELETE mark), create the rest with zeroed timers
 * and drop everything still marked afterwards.
 */
static void __grp_src_isexc_incl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	struct br_ip src_ip;
	u32 src_idx;

	/* Tentatively mark all existing sources for deletion */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent)
			ent->flags &= ~BR_SGRP_F_DELETE;
		else
			ent = br_multicast_new_group_src(pg, &src_ip);
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
}
2152
2153 /* State Msg type New state Actions
2154 * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
2155 * Delete (X-A)
2156 * Delete (Y-A)
2157 * Group Timer=GMI
2158 */
/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  IS_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Group Timer=GMI
 *
 * Refresh an EXCLUDE group from an IS_EX report: keep reported sources,
 * give newly seen ones a GMI timer and delete sources absent from the
 * report.  Returns true if anything changed.
 */
static bool __grp_src_isexc_excl(const struct net_bridge_mcast *brmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size,
				 int grec_type)
{
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;
	u32 src_idx;

	/* Tentatively mark all existing sources for deletion */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				__grp_src_mod_timer(ent,
						    now + br_multicast_gmi(brmctx));
				changed = true;
			}
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;

	return changed;
}
2199
br_multicast_isexc(const struct net_bridge_mcast * brmctx,struct net_bridge_port_group * pg,void * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int grec_type)2200 static bool br_multicast_isexc(const struct net_bridge_mcast *brmctx,
2201 struct net_bridge_port_group *pg, void *h_addr,
2202 void *srcs, u32 nsrcs, size_t addr_size,
2203 int grec_type)
2204 {
2205 bool changed = false;
2206
2207 switch (pg->filter_mode) {
2208 case MCAST_INCLUDE:
2209 __grp_src_isexc_incl(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
2210 grec_type);
2211 br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
2212 changed = true;
2213 break;
2214 case MCAST_EXCLUDE:
2215 changed = __grp_src_isexc_excl(brmctx, pg, h_addr, srcs, nsrcs,
2216 addr_size, grec_type);
2217 break;
2218 }
2219
2220 pg->filter_mode = MCAST_EXCLUDE;
2221 mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));
2222
2223 return changed;
2224 }
2225
2226 /* State Msg type New state Actions
2227 * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI
2228 * Send Q(G,A-B)
2229 */
/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_IN (B)     INCLUDE (A+B)     (B)=GMI
 *                                                Send Q(G,A-B)
 *
 * Merge the reported sources into an INCLUDE group with GMI timers, and
 * query the sources that were not reported (A-B).  Returns true if the
 * source set changed.
 */
static bool __grp_src_toin_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* Tentatively mark all existing sources to be queried */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			/* Reported source: no need to query it */
			ent->flags &= ~BR_SGRP_F_SEND;
			to_send--;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2271
2272 /* State Msg type New state Actions
2273 * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
2274 * Send Q(G,X-A)
2275 * Send Q(G)
2276 */
/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_IN (A)     EXCLUDE (X+A,Y-A) (A)=GMI
 *                                                Send Q(G,X-A)
 *                                                Send Q(G)
 *
 * Refresh reported sources with GMI timers, query the active (timer
 * pending) sources that were not reported, and send a group query.
 * Returns true if the source set changed.
 */
static bool __grp_src_toin_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	u32 src_idx, to_send = pg->src_ents;
	struct net_bridge_group_src *ent;
	unsigned long now = jiffies;
	bool changed = false;
	struct br_ip src_ip;

	/* Only active sources (set X) are candidates for querying; timed
	 * out ones belong to the exclude set Y.
	 */
	hlist_for_each_entry(ent, &pg->src_list, node)
		if (timer_pending(&ent->timer))
			ent->flags |= BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			if (timer_pending(&ent->timer)) {
				ent->flags &= ~BR_SGRP_F_SEND;
				to_send--;
			}
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent)
				changed = true;
		}
		if (ent)
			__grp_src_mod_timer(ent, now + br_multicast_gmi(brmctx));
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	__grp_send_query_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2323
br_multicast_toin(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct net_bridge_port_group * pg,void * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int grec_type)2324 static bool br_multicast_toin(struct net_bridge_mcast *brmctx,
2325 struct net_bridge_mcast_port *pmctx,
2326 struct net_bridge_port_group *pg, void *h_addr,
2327 void *srcs, u32 nsrcs, size_t addr_size,
2328 int grec_type)
2329 {
2330 bool changed = false;
2331
2332 switch (pg->filter_mode) {
2333 case MCAST_INCLUDE:
2334 changed = __grp_src_toin_incl(brmctx, pmctx, pg, h_addr, srcs,
2335 nsrcs, addr_size, grec_type);
2336 break;
2337 case MCAST_EXCLUDE:
2338 changed = __grp_src_toin_excl(brmctx, pmctx, pg, h_addr, srcs,
2339 nsrcs, addr_size, grec_type);
2340 break;
2341 }
2342
2343 if (br_multicast_eht_should_del_pg(pg)) {
2344 pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
2345 br_multicast_find_del_pg(pg->key.port->br, pg);
2346 /* a notification has already been sent and we shouldn't
2347 * access pg after the delete so we have to return false
2348 */
2349 changed = false;
2350 }
2351
2352 return changed;
2353 }
2354
2355 /* State Msg type New state Actions
2356 * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
2357 * Delete (A-B)
2358 * Send Q(G,A*B)
2359 * Group Timer=GMI
2360 */
/* State          Msg type      New state         Actions
 * INCLUDE (A)    TO_EX (B)     EXCLUDE (A*B,B-A) (B-A)=0
 *                                                Delete (A-B)
 *                                                Send Q(G,A*B)
 *                                                Group Timer=GMI
 *
 * Transition an INCLUDE group to EXCLUDE: sources present in both sets
 * are kept and queried, unreported ones are deleted, and newly reported
 * ones are created with zeroed timers.
 */
static void __grp_src_toex_incl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	struct br_ip src_ip;

	/* Mark everything delete-candidate and not-to-query initially */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			/* In A*B: keep and query */
			ent->flags = (ent->flags & ~BR_SGRP_F_DELETE) |
				     BR_SGRP_F_SEND;
			to_send++;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
		}
		if (ent)
			br_multicast_fwd_src_handle(ent);
	}

	br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				grec_type);

	__grp_src_delete_marked(pg);
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);
}
2397
2398 /* State Msg type New state Actions
2399 * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
2400 * Delete (X-A)
2401 * Delete (Y-A)
2402 * Send Q(G,A-Y)
2403 * Group Timer=GMI
2404 */
/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  TO_EX (A)     EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
 *                                                Delete (X-A)
 *                                                Delete (Y-A)
 *                                                Send Q(G,A-Y)
 *                                                Group Timer=GMI
 *
 * Refresh an EXCLUDE group from a TO_EX report: keep reported sources
 * (new ones inherit the group timer), delete the rest, and query all
 * reported sources that are still active.  Returns true on change.
 */
static bool __grp_src_toex_excl(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct net_bridge_port_group *pg, void *h_addr,
				void *srcs, u32 nsrcs, size_t addr_size,
				int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	/* Mark everything delete-candidate and not-to-query initially */
	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags = (ent->flags & ~BR_SGRP_F_SEND) | BR_SGRP_F_DELETE;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags &= ~BR_SGRP_F_DELETE;
		} else {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* (A-X-Y) gets the current group timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		/* Query only active (timer pending) sources, i.e. A-Y */
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (__grp_src_delete_marked(pg))
		changed = true;
	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2450
br_multicast_toex(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct net_bridge_port_group * pg,void * h_addr,void * srcs,u32 nsrcs,size_t addr_size,int grec_type)2451 static bool br_multicast_toex(struct net_bridge_mcast *brmctx,
2452 struct net_bridge_mcast_port *pmctx,
2453 struct net_bridge_port_group *pg, void *h_addr,
2454 void *srcs, u32 nsrcs, size_t addr_size,
2455 int grec_type)
2456 {
2457 bool changed = false;
2458
2459 switch (pg->filter_mode) {
2460 case MCAST_INCLUDE:
2461 __grp_src_toex_incl(brmctx, pmctx, pg, h_addr, srcs, nsrcs,
2462 addr_size, grec_type);
2463 br_multicast_star_g_handle_mode(pg, MCAST_EXCLUDE);
2464 changed = true;
2465 break;
2466 case MCAST_EXCLUDE:
2467 changed = __grp_src_toex_excl(brmctx, pmctx, pg, h_addr, srcs,
2468 nsrcs, addr_size, grec_type);
2469 break;
2470 }
2471
2472 pg->filter_mode = MCAST_EXCLUDE;
2473 mod_timer(&pg->timer, jiffies + br_multicast_gmi(brmctx));
2474
2475 return changed;
2476 }
2477
2478 /* State Msg type New state Actions
2479 * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B)
2480 */
/* State          Msg type      New state         Actions
 * INCLUDE (A)    BLOCK (B)     INCLUDE (A)       Send Q(G,A*B)
 *
 * Query the sources we have that appear in the BLOCK report; the source
 * set itself is unchanged.  Returns true only if EHT state changed.
 */
static bool __grp_src_block_incl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (ent) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2514
2515 /* State Msg type New state Actions
2516 * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
2517 * Send Q(G,A-Y)
2518 */
/* State          Msg type      New state         Actions
 * EXCLUDE (X,Y)  BLOCK (A)     EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
 *                                                Send Q(G,A-Y)
 *
 * Create unknown blocked sources with the current group timer, then
 * query all blocked sources that are still active.  Returns true on
 * change.
 */
static bool __grp_src_block_excl(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct net_bridge_port_group *pg, void *h_addr,
				 void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	struct net_bridge_group_src *ent;
	u32 src_idx, to_send = 0;
	bool changed = false;
	struct br_ip src_ip;

	hlist_for_each_entry(ent, &pg->src_list, node)
		ent->flags &= ~BR_SGRP_F_SEND;

	memset(&src_ip, 0, sizeof(src_ip));
	src_ip.proto = pg->key.addr.proto;
	for (src_idx = 0; src_idx < nsrcs; src_idx++) {
		memcpy(&src_ip.src, srcs + (src_idx * addr_size), addr_size);
		ent = br_multicast_find_group_src(pg, &src_ip);
		if (!ent) {
			ent = br_multicast_new_group_src(pg, &src_ip);
			if (ent) {
				/* (A-X-Y) inherits the group timer */
				__grp_src_mod_timer(ent, pg->timer.expires);
				changed = true;
			}
		}
		/* Query only active (timer pending) sources, i.e. A-Y */
		if (ent && timer_pending(&ent->timer)) {
			ent->flags |= BR_SGRP_F_SEND;
			to_send++;
		}
	}

	if (br_multicast_eht_handle(brmctx, pg, h_addr, srcs, nsrcs, addr_size,
				    grec_type))
		changed = true;

	if (to_send)
		__grp_src_query_marked_and_rexmit(brmctx, pmctx, pg);

	return changed;
}
2559
/* Handle a BLOCK_OLD_SOURCES group record: dispatch on filter mode, then
 * delete the whole port group when an INCLUDE group has no sources left
 * or when EHT tracking says no hosts remain.  Returns true if a
 * notification should be sent; false after a delete because @pg must not
 * be accessed anymore.
 */
static bool br_multicast_block(struct net_bridge_mcast *brmctx,
			       struct net_bridge_mcast_port *pmctx,
			       struct net_bridge_port_group *pg, void *h_addr,
			       void *srcs, u32 nsrcs, size_t addr_size, int grec_type)
{
	bool changed = false;

	switch (pg->filter_mode) {
	case MCAST_INCLUDE:
		changed = __grp_src_block_incl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	case MCAST_EXCLUDE:
		changed = __grp_src_block_excl(brmctx, pmctx, pg, h_addr, srcs,
					       nsrcs, addr_size, grec_type);
		break;
	}

	if ((pg->filter_mode == MCAST_INCLUDE && hlist_empty(&pg->src_list)) ||
	    br_multicast_eht_should_del_pg(pg)) {
		/* Fast-leave flag only applies to the EHT-driven delete */
		if (br_multicast_eht_should_del_pg(pg))
			pg->flags |= MDB_PG_FLAGS_FAST_LEAVE;
		br_multicast_find_del_pg(pg->key.port->br, pg);
		/* a notification has already been sent and we shouldn't
		 * access pg after the delete so we have to return false
		 */
		changed = false;
	}

	return changed;
}
2591
2592 static struct net_bridge_port_group *
br_multicast_find_port(struct net_bridge_mdb_entry * mp,struct net_bridge_port * p,const unsigned char * src)2593 br_multicast_find_port(struct net_bridge_mdb_entry *mp,
2594 struct net_bridge_port *p,
2595 const unsigned char *src)
2596 {
2597 struct net_bridge *br __maybe_unused = mp->br;
2598 struct net_bridge_port_group *pg;
2599
2600 for (pg = mlock_dereference(mp->ports, br);
2601 pg;
2602 pg = mlock_dereference(pg->next, br))
2603 if (br_port_group_equal(pg, p, src))
2604 return pg;
2605
2606 return NULL;
2607 }
2608
/* Parse an IGMPv3 membership report and apply each group record to the
 * bridge's multicast state.  Each record is length-checked before use;
 * -EINVAL is returned on a truncated packet.  When the bridge is forced
 * to IGMPv2 mode (or there is no port context), records degrade to plain
 * join/leave handling.
 */
static int br_ip4_multicast_igmp3_report(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 struct sk_buff *skb,
					 u16 vid)
{
	bool igmpv2 = brmctx->multicast_igmp_version == 2;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	const unsigned char *src;
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i, len, num, type;
	__be32 group, *h_addr;
	bool changed = false;
	int err = 0;
	u16 nsrcs;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	/* len tracks how far into the packet we have validated so far */
	len = skb_transport_offset(skb) + sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		/* pull may reallocate skb->data, so recompute the pointer */
		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;
		nsrcs = ntohs(grec->grec_nsrcs);

		/* account for the source addresses (4 bytes each) */
		len += nsrcs * 4;
		if (!ip_mc_may_pull(skb, len))
			return -EINVAL;

		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record types are skipped, not errors */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if (nsrcs == 0 &&
		    (type == IGMPV3_CHANGE_TO_INCLUDE ||
		     type == IGMPV3_MODE_IS_INCLUDE)) {
			/* TO_IN{} / IS_IN{} with no sources is a leave in
			 * IGMPv2 compatibility mode
			 */
			if (!pmctx || igmpv2) {
				br_ip4_multicast_leave_group(brmctx, pmctx,
							     group, vid, src);
				continue;
			}
		} else {
			err = br_ip4_multicast_add_group(brmctx, pmctx, group,
							 vid, src, igmpv2);
			if (err)
				break;
		}

		/* Full IGMPv3 source-list processing only from here on */
		if (!pmctx || igmpv2)
			continue;

		spin_lock_bh(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip4_get(brmctx->br, group, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		/* reload grec and host addr */
		grec = (void *)(skb->data + len - sizeof(*grec) - (nsrcs * 4));
		h_addr = &ip_hdr(skb)->saddr;
		switch (type) {
		case IGMPV3_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src,
							   nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src,
						    nsrcs, sizeof(__be32), type);
			break;
		case IGMPV3_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src,
						     nsrcs, sizeof(__be32), type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&brmctx->br->multicast_lock);
	}

	return err;
}
2729
2730 #if IS_ENABLED(CONFIG_IPV6)
/* Parse an MLDv2 multicast listener report and apply each group record
 * to the bridge's multicast state.  Mirrors the IGMPv3 handler; every
 * record is bounds-checked before access and -EINVAL is returned on a
 * truncated packet.  When the bridge is forced to MLDv1 mode (or there
 * is no port context), records degrade to plain join/leave handling.
 */
static int br_ip6_multicast_mld2_report(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct sk_buff *skb,
					u16 vid)
{
	bool mldv1 = brmctx->multicast_mld_version == 1;
	struct net_bridge_mdb_entry *mdst;
	struct net_bridge_port_group *pg;
	unsigned int nsrcs_offset;
	struct mld2_report *mld2r;
	const unsigned char *src;
	struct in6_addr *h_addr;
	struct mld2_grec *grec;
	unsigned int grec_len;
	bool changed = false;
	int i, len, num;
	int err = 0;

	if (!ipv6_mc_may_pull(skb, sizeof(*mld2r)))
		return -EINVAL;

	mld2r = (struct mld2_report *)icmp6_hdr(skb);
	num = ntohs(mld2r->mld2r_ngrec);
	/* len tracks how far into the packet we have validated so far */
	len = skb_transport_offset(skb) + sizeof(*mld2r);

	for (i = 0; i < num; i++) {
		__be16 *_nsrcs, __nsrcs;
		u16 nsrcs;

		/* read nsrcs first so the record's full size is known
		 * before pulling it
		 */
		nsrcs_offset = len + offsetof(struct mld2_grec, grec_nsrcs);

		if (skb_transport_offset(skb) + ipv6_transport_len(skb) <
		    nsrcs_offset + sizeof(__nsrcs))
			return -EINVAL;

		_nsrcs = skb_header_pointer(skb, nsrcs_offset,
					    sizeof(__nsrcs), &__nsrcs);
		if (!_nsrcs)
			return -EINVAL;

		nsrcs = ntohs(*_nsrcs);
		/* header plus nsrcs in6_addr entries, overflow-checked */
		grec_len = struct_size(grec, grec_src, nsrcs);

		if (!ipv6_mc_may_pull(skb, len + grec_len))
			return -EINVAL;

		grec = (struct mld2_grec *)(skb->data + len);
		len += grec_len;

		switch (grec->grec_type) {
		case MLD2_MODE_IS_INCLUDE:
		case MLD2_MODE_IS_EXCLUDE:
		case MLD2_CHANGE_TO_INCLUDE:
		case MLD2_CHANGE_TO_EXCLUDE:
		case MLD2_ALLOW_NEW_SOURCES:
		case MLD2_BLOCK_OLD_SOURCES:
			break;

		default:
			/* unknown record types are skipped, not errors */
			continue;
		}

		src = eth_hdr(skb)->h_source;
		if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
		     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
		    nsrcs == 0) {
			/* TO_IN{} / IS_IN{} with no sources is a leave in
			 * MLDv1 compatibility mode
			 */
			if (!pmctx || mldv1) {
				br_ip6_multicast_leave_group(brmctx, pmctx,
							     &grec->grec_mca,
							     vid, src);
				continue;
			}
		} else {
			err = br_ip6_multicast_add_group(brmctx, pmctx,
							 &grec->grec_mca, vid,
							 src, mldv1);
			if (err)
				break;
		}

		/* Full MLDv2 source-list processing only from here on */
		if (!pmctx || mldv1)
			continue;

		spin_lock_bh(&brmctx->br->multicast_lock);
		if (!br_multicast_ctx_should_use(brmctx, pmctx))
			goto unlock_continue;

		mdst = br_mdb_ip6_get(brmctx->br, &grec->grec_mca, vid);
		if (!mdst)
			goto unlock_continue;
		pg = br_multicast_find_port(mdst, pmctx->port, src);
		if (!pg || (pg->flags & MDB_PG_FLAGS_PERMANENT))
			goto unlock_continue;
		h_addr = &ipv6_hdr(skb)->saddr;
		switch (grec->grec_type) {
		case MLD2_ALLOW_NEW_SOURCES:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_INCLUDE:
			changed = br_multicast_isinc_allow(brmctx, pg, h_addr,
							   grec->grec_src, nsrcs,
							   sizeof(struct in6_addr),
							   grec->grec_type);
			break;
		case MLD2_MODE_IS_EXCLUDE:
			changed = br_multicast_isexc(brmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		case MLD2_CHANGE_TO_INCLUDE:
			changed = br_multicast_toin(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_CHANGE_TO_EXCLUDE:
			changed = br_multicast_toex(brmctx, pmctx, pg, h_addr,
						    grec->grec_src, nsrcs,
						    sizeof(struct in6_addr),
						    grec->grec_type);
			break;
		case MLD2_BLOCK_OLD_SOURCES:
			changed = br_multicast_block(brmctx, pmctx, pg, h_addr,
						     grec->grec_src, nsrcs,
						     sizeof(struct in6_addr),
						     grec->grec_type);
			break;
		}
		if (changed)
			br_mdb_notify(brmctx->br->dev, mdst, pg, RTM_NEWMDB);
unlock_continue:
		spin_unlock_bh(&brmctx->br->multicast_lock);
	}

	return err;
}
2871 #endif
2872
/* Querier election: decide whether the querier at @saddr (seen on the
 * port behind @pmctx, or the bridge itself when @pmctx is NULL) should
 * become the selected querier.  A numerically lower (or equal) source
 * address wins; an unset IPv4 querier address always loses.  If neither
 * our own nor the other querier timer is running, any candidate is
 * accepted.  Returns true when the querier record was updated.
 */
static bool br_multicast_select_querier(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx,
					struct br_ip *saddr)
{
	/* ifindex 0 identifies the bridge device itself as the querier */
	int port_ifidx = pmctx ? pmctx->port->dev->ifindex : 0;
	struct timer_list *own_timer, *other_timer;
	struct bridge_mcast_querier *querier;

	switch (saddr->proto) {
	case htons(ETH_P_IP):
		querier = &brmctx->ip4_querier;
		own_timer = &brmctx->ip4_own_query.timer;
		other_timer = &brmctx->ip4_other_query.timer;
		if (!querier->addr.src.ip4 ||
		    ntohl(saddr->src.ip4) <= ntohl(querier->addr.src.ip4))
			goto update;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		querier = &brmctx->ip6_querier;
		own_timer = &brmctx->ip6_own_query.timer;
		other_timer = &brmctx->ip6_other_query.timer;
		if (ipv6_addr_cmp(&saddr->src.ip6, &querier->addr.src.ip6) <= 0)
			goto update;
		break;
#endif
	default:
		return false;
	}

	if (!timer_pending(own_timer) && !timer_pending(other_timer))
		goto update;

	return false;

update:
	br_multicast_update_querier(brmctx, querier, port_ifidx, saddr);

	return true;
}
2913
2914 static struct net_bridge_port *
__br_multicast_get_querier_port(struct net_bridge * br,const struct bridge_mcast_querier * querier)2915 __br_multicast_get_querier_port(struct net_bridge *br,
2916 const struct bridge_mcast_querier *querier)
2917 {
2918 int port_ifidx = READ_ONCE(querier->port_ifidx);
2919 struct net_bridge_port *p;
2920 struct net_device *dev;
2921
2922 if (port_ifidx == 0)
2923 return NULL;
2924
2925 dev = dev_get_by_index_rcu(dev_net(br->dev), port_ifidx);
2926 if (!dev)
2927 return NULL;
2928 p = br_port_get_rtnl_rcu(dev);
2929 if (!p || p->br != br)
2930 return NULL;
2931
2932 return p;
2933 }
2934
br_multicast_querier_state_size(void)2935 size_t br_multicast_querier_state_size(void)
2936 {
2937 return nla_total_size(0) + /* nest attribute */
2938 nla_total_size(sizeof(__be32)) + /* BRIDGE_QUERIER_IP_ADDRESS */
2939 nla_total_size(sizeof(int)) + /* BRIDGE_QUERIER_IP_PORT */
2940 nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IP_OTHER_TIMER */
2941 #if IS_ENABLED(CONFIG_IPV6)
2942 nla_total_size(sizeof(struct in6_addr)) + /* BRIDGE_QUERIER_IPV6_ADDRESS */
2943 nla_total_size(sizeof(int)) + /* BRIDGE_QUERIER_IPV6_PORT */
2944 nla_total_size_64bit(sizeof(u64)) + /* BRIDGE_QUERIER_IPV6_OTHER_TIMER */
2945 #endif
2946 0;
2947 }
2948
2949 /* protected by rtnl or rcu */
/* Dump the IPv4/IPv6 querier state into a nested netlink attribute.
 * Per the comment above, callers hold rtnl or rcu.  Returns 0 on success
 * (the nest is cancelled when nothing was dumped) or -EMSGSIZE when the
 * skb ran out of room.
 */
int br_multicast_dump_querier_state(struct sk_buff *skb,
				    const struct net_bridge_mcast *brmctx,
				    int nest_attr)
{
	struct bridge_mcast_querier querier = {};
	struct net_bridge_port *p;
	struct nlattr *nest;

	if (!br_opt_get(brmctx->br, BROPT_MULTICAST_ENABLED) ||
	    br_multicast_ctx_vlan_global_disabled(brmctx))
		return 0;

	nest = nla_nest_start(skb, nest_attr);
	if (!nest)
		return -EMSGSIZE;

	rcu_read_lock();
	/* skip IPv4 state unless we act as querier or another one is known */
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip4_other_query.timer))
		goto out_v6;

	br_multicast_read_querier(&brmctx->ip4_querier, &querier);
	if (nla_put_in_addr(skb, BRIDGE_QUERIER_IP_ADDRESS,
			    querier.addr.src.ip4)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	/* timer value and port are dumped only while the other-querier
	 * timer is actually running
	 */
	if (timer_pending(&brmctx->ip4_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IP_OTHER_TIMER,
			       br_timer_value(&brmctx->ip4_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IP_PORT, p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}

out_v6:
#if IS_ENABLED(CONFIG_IPV6)
	if (!brmctx->multicast_querier &&
	    !timer_pending(&brmctx->ip6_other_query.timer))
		goto out;

	br_multicast_read_querier(&brmctx->ip6_querier, &querier);
	if (nla_put_in6_addr(skb, BRIDGE_QUERIER_IPV6_ADDRESS,
			     &querier.addr.src.ip6)) {
		rcu_read_unlock();
		goto out_err;
	}

	p = __br_multicast_get_querier_port(brmctx->br, &querier);
	if (timer_pending(&brmctx->ip6_other_query.timer) &&
	    (nla_put_u64_64bit(skb, BRIDGE_QUERIER_IPV6_OTHER_TIMER,
			       br_timer_value(&brmctx->ip6_other_query.timer),
			       BRIDGE_QUERIER_PAD) ||
	     (p && nla_put_u32(skb, BRIDGE_QUERIER_IPV6_PORT,
			      p->dev->ifindex)))) {
		rcu_read_unlock();
		goto out_err;
	}
out:
#endif
	rcu_read_unlock();
	nla_nest_end(skb, nest);
	/* nothing was dumped - drop the empty nest attribute */
	if (!nla_len(nest))
		nla_nest_cancel(skb, nest);

	return 0;

out_err:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}
3024
/* (Re)arm the other-querier state for @query: when no other-querier timer
 * is pending yet, start the delay timer with @max_delay; then always
 * refresh the presence timer with the configured querier interval.
 */
static void
br_multicast_update_query_timer(struct net_bridge_mcast *brmctx,
				struct bridge_mcast_other_query *query,
				unsigned long max_delay)
{
	if (!timer_pending(&query->timer))
		mod_timer(&query->delay_timer, jiffies + max_delay);

	mod_timer(&query->timer, jiffies + brmctx->multicast_querier_interval);
}
3035
/* Propagate a port's multicast router state to switchdev drivers. */
static void br_port_mc_router_state_change(struct net_bridge_port *p,
					   bool is_mc_router)
{
	struct switchdev_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.orig_dev = p->dev;
	attr.id = SWITCHDEV_ATTR_ID_PORT_MROUTER;
	attr.flags = SWITCHDEV_F_DEFER;
	attr.u.mrouter = is_mc_router;

	switchdev_port_attr_set(p->dev, &attr, NULL);
}
3048
3049 static struct net_bridge_port *
br_multicast_rport_from_node(struct net_bridge_mcast * brmctx,struct hlist_head * mc_router_list,struct hlist_node * rlist)3050 br_multicast_rport_from_node(struct net_bridge_mcast *brmctx,
3051 struct hlist_head *mc_router_list,
3052 struct hlist_node *rlist)
3053 {
3054 struct net_bridge_mcast_port *pmctx;
3055
3056 #if IS_ENABLED(CONFIG_IPV6)
3057 if (mc_router_list == &brmctx->ip6_mc_router_list)
3058 pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
3059 ip6_rlist);
3060 else
3061 #endif
3062 pmctx = hlist_entry(rlist, struct net_bridge_mcast_port,
3063 ip4_rlist);
3064
3065 return pmctx->port;
3066 }
3067
3068 static struct hlist_node *
br_multicast_get_rport_slot(struct net_bridge_mcast * brmctx,struct net_bridge_port * port,struct hlist_head * mc_router_list)3069 br_multicast_get_rport_slot(struct net_bridge_mcast *brmctx,
3070 struct net_bridge_port *port,
3071 struct hlist_head *mc_router_list)
3072
3073 {
3074 struct hlist_node *slot = NULL;
3075 struct net_bridge_port *p;
3076 struct hlist_node *rlist;
3077
3078 hlist_for_each(rlist, mc_router_list) {
3079 p = br_multicast_rport_from_node(brmctx, mc_router_list, rlist);
3080
3081 if ((unsigned long)port >= (unsigned long)p)
3082 break;
3083
3084 slot = rlist;
3085 }
3086
3087 return slot;
3088 }
3089
br_multicast_no_router_otherpf(struct net_bridge_mcast_port * pmctx,struct hlist_node * rnode)3090 static bool br_multicast_no_router_otherpf(struct net_bridge_mcast_port *pmctx,
3091 struct hlist_node *rnode)
3092 {
3093 #if IS_ENABLED(CONFIG_IPV6)
3094 if (rnode != &pmctx->ip6_rlist)
3095 return hlist_unhashed(&pmctx->ip6_rlist);
3096 else
3097 return hlist_unhashed(&pmctx->ip4_rlist);
3098 #else
3099 return true;
3100 #endif
3101 }
3102
3103 /* Add port to router_list
3104 * list is maintained ordered by pointer value
3105 * and locked by br->multicast_lock and RCU
3106 */
br_multicast_add_router(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct hlist_node * rlist,struct hlist_head * mc_router_list)3107 static void br_multicast_add_router(struct net_bridge_mcast *brmctx,
3108 struct net_bridge_mcast_port *pmctx,
3109 struct hlist_node *rlist,
3110 struct hlist_head *mc_router_list)
3111 {
3112 struct hlist_node *slot;
3113
3114 if (!hlist_unhashed(rlist))
3115 return;
3116
3117 slot = br_multicast_get_rport_slot(brmctx, pmctx->port, mc_router_list);
3118
3119 if (slot)
3120 hlist_add_behind_rcu(rlist, slot);
3121 else
3122 hlist_add_head_rcu(rlist, mc_router_list);
3123
3124 /* For backwards compatibility for now, only notify if we
3125 * switched from no IPv4/IPv6 multicast router to a new
3126 * IPv4 or IPv6 multicast router.
3127 */
3128 if (br_multicast_no_router_otherpf(pmctx, rlist)) {
3129 br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_NEWMDB);
3130 br_port_mc_router_state_change(pmctx->port, true);
3131 }
3132 }
3133
3134 /* Add port to router_list
3135 * list is maintained ordered by pointer value
3136 * and locked by br->multicast_lock and RCU
3137 */
/* IPv4 wrapper: link the port into the context's IPv4 router list */
static void br_ip4_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip4_rlist,
				&brmctx->ip4_mc_router_list);
}
3144
3145 /* Add port to router_list
3146 * list is maintained ordered by pointer value
3147 * and locked by br->multicast_lock and RCU
3148 */
/* IPv6 wrapper: link the port into the context's IPv6 router list
 * (no-op when IPv6 is not built in)
 */
static void br_ip6_multicast_add_router(struct net_bridge_mcast *brmctx,
					struct net_bridge_mcast_port *pmctx)
{
#if IS_ENABLED(CONFIG_IPV6)
	br_multicast_add_router(brmctx, pmctx, &pmctx->ip6_rlist,
				&brmctx->ip6_mc_router_list);
#endif
}
3157
/* Mark the bridge itself (@pmctx == NULL) or a port as having a multicast
 * router present and (re)arm the matching router timer.
 */
static void br_multicast_mark_router(struct net_bridge_mcast *brmctx,
				     struct net_bridge_mcast_port *pmctx,
				     struct timer_list *timer,
				     struct hlist_node *rlist,
				     struct hlist_head *mc_router_list)
{
	unsigned long now = jiffies;

	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		return;

	if (!pmctx) {
		/* bridge-level router: only tracked in auto (temp query) mode */
		if (brmctx->multicast_router == MDB_RTR_TYPE_TEMP_QUERY) {
			/* notify only on the no-router -> router transition */
			if (!br_ip4_multicast_is_router(brmctx) &&
			    !br_ip6_multicast_is_router(brmctx))
				br_mc_router_state_change(brmctx->br, true);
			mod_timer(timer, now + brmctx->multicast_querier_interval);
		}
		return;
	}

	/* fixed per-port configurations are never changed by snooping */
	if (pmctx->multicast_router == MDB_RTR_TYPE_DISABLED ||
	    pmctx->multicast_router == MDB_RTR_TYPE_PERM)
		return;

	br_multicast_add_router(brmctx, pmctx, rlist, mc_router_list);
	mod_timer(timer, now + brmctx->multicast_querier_interval);
}
3186
br_ip4_multicast_mark_router(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx)3187 static void br_ip4_multicast_mark_router(struct net_bridge_mcast *brmctx,
3188 struct net_bridge_mcast_port *pmctx)
3189 {
3190 struct timer_list *timer = &brmctx->ip4_mc_router_timer;
3191 struct hlist_node *rlist = NULL;
3192
3193 if (pmctx) {
3194 timer = &pmctx->ip4_mc_router_timer;
3195 rlist = &pmctx->ip4_rlist;
3196 }
3197
3198 br_multicast_mark_router(brmctx, pmctx, timer, rlist,
3199 &brmctx->ip4_mc_router_list);
3200 }
3201
br_ip6_multicast_mark_router(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx)3202 static void br_ip6_multicast_mark_router(struct net_bridge_mcast *brmctx,
3203 struct net_bridge_mcast_port *pmctx)
3204 {
3205 #if IS_ENABLED(CONFIG_IPV6)
3206 struct timer_list *timer = &brmctx->ip6_mc_router_timer;
3207 struct hlist_node *rlist = NULL;
3208
3209 if (pmctx) {
3210 timer = &pmctx->ip6_mc_router_timer;
3211 rlist = &pmctx->ip6_rlist;
3212 }
3213
3214 br_multicast_mark_router(brmctx, pmctx, timer, rlist,
3215 &brmctx->ip6_mc_router_list);
3216 #endif
3217 }
3218
/* Handle a general IGMP query: run the querier election and, when the
 * sender wins, track it as the other querier and as a multicast router.
 */
static void
br_ip4_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (br_multicast_select_querier(brmctx, pmctx, saddr)) {
		br_multicast_update_query_timer(brmctx, query, max_delay);
		br_ip4_multicast_mark_router(brmctx, pmctx);
	}
}
3232
3233 #if IS_ENABLED(CONFIG_IPV6)
/* Handle a general MLD query: run the querier election and, when the
 * sender wins, track it as the other querier and as a multicast router.
 */
static void
br_ip6_multicast_query_received(struct net_bridge_mcast *brmctx,
				struct net_bridge_mcast_port *pmctx,
				struct bridge_mcast_other_query *query,
				struct br_ip *saddr,
				unsigned long max_delay)
{
	if (br_multicast_select_querier(brmctx, pmctx, saddr)) {
		br_multicast_update_query_timer(brmctx, query, max_delay);
		br_ip6_multicast_mark_router(brmctx, pmctx);
	}
}
3247 #endif
3248
/* Process an incoming IGMP query (v1/v2/v3).  General queries feed the
 * querier election; group-specific queries lower the group and port
 * membership timers so that non-responding entries expire sooner.
 */
static void br_ip4_multicast_query(struct net_bridge_mcast *brmctx,
				   struct net_bridge_mcast_port *pmctx,
				   struct sk_buff *skb,
				   u16 vid)
{
	unsigned int transport_len = ip_transport_len(skb);
	const struct iphdr *iph = ip_hdr(skb);
	struct igmphdr *ih = igmp_hdr(skb);
	struct net_bridge_mdb_entry *mp;
	struct igmpv3_query *ih3;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	__be32 group;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	group = ih->group;

	if (transport_len == sizeof(*ih)) {
		/* IGMPv1/v2-sized query */
		max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);

		if (!max_delay) {
			/* code 0 means an IGMPv1 query, which carries no max
			 * response time - use 10s and treat it as general
			 */
			max_delay = 10 * HZ;
			group = 0;
		}
	} else if (transport_len >= sizeof(*ih3)) {
		ih3 = igmpv3_query_hdr(skb);
		/* ignore queries with sources, and group-specific queries
		 * with the suppress (S) flag set in IGMPv3 mode
		 */
		if (ih3->nsrcs ||
		    (brmctx->multicast_igmp_version == 3 && group &&
		     ih3->suppress))
			goto out;

		max_delay = ih3->code ?
			    IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
	} else {
		goto out;
	}

	if (!group) {
		/* general query - run the querier election */
		saddr.proto = htons(ETH_P_IP);
		saddr.src.ip4 = iph->saddr;

		br_ip4_multicast_query_received(brmctx, pmctx,
						&brmctx->ip4_other_query,
						&saddr, max_delay);
		goto out;
	}

	/* group-specific query - lower the membership timers */
	mp = br_mdb_ip4_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;

	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		/* note: due to precedence the version/filter-mode condition
		 * only applies to the try_to_del_timer_sync() arm
		 */
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_igmp_version == 2 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
}
3328
3329 #if IS_ENABLED(CONFIG_IPV6)
/* Process an incoming MLD query (MLDv1/MLDv2); the IPv6 counterpart of
 * br_ip4_multicast_query().  Returns 0 on success or -EINVAL when the
 * packet is too short for its advertised type.
 */
static int br_ip6_multicast_query(struct net_bridge_mcast *brmctx,
				  struct net_bridge_mcast_port *pmctx,
				  struct sk_buff *skb,
				  u16 vid)
{
	unsigned int transport_len = ipv6_transport_len(skb);
	struct mld_msg *mld;
	struct net_bridge_mdb_entry *mp;
	struct mld2_query *mld2q;
	struct net_bridge_port_group *p;
	struct net_bridge_port_group __rcu **pp;
	struct br_ip saddr = {};
	unsigned long max_delay;
	unsigned long now = jiffies;
	unsigned int offset = skb_transport_offset(skb);
	const struct in6_addr *group = NULL;
	bool is_general_query;
	int err = 0;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	if (transport_len == sizeof(*mld)) {
		/* MLDv1-sized query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld))) {
			err = -EINVAL;
			goto out;
		}
		mld = (struct mld_msg *) icmp6_hdr(skb);
		max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay));
		if (max_delay)
			group = &mld->mld_mca;
	} else {
		/* MLDv2 query */
		if (!pskb_may_pull(skb, offset + sizeof(*mld2q))) {
			err = -EINVAL;
			goto out;
		}
		mld2q = (struct mld2_query *)icmp6_hdr(skb);
		if (!mld2q->mld2q_nsrcs)
			group = &mld2q->mld2q_mca;
		/* ignore group-specific queries with the suppress (S) flag
		 * set while in MLDv2 mode
		 */
		if (brmctx->multicast_mld_version == 2 &&
		    !ipv6_addr_any(&mld2q->mld2q_mca) &&
		    mld2q->mld2q_suppress)
			goto out;

		max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
	}

	is_general_query = group && ipv6_addr_any(group);

	if (is_general_query) {
		/* general query - run the querier election */
		saddr.proto = htons(ETH_P_IPV6);
		saddr.src.ip6 = ipv6_hdr(skb)->saddr;

		br_ip6_multicast_query_received(brmctx, pmctx,
						&brmctx->ip6_other_query,
						&saddr, max_delay);
		goto out;
	} else if (!group) {
		goto out;
	}

	/* group-specific query - lower the membership timers */
	mp = br_mdb_ip6_get(brmctx->br, group, vid);
	if (!mp)
		goto out;

	max_delay *= brmctx->multicast_last_member_count;
	if (mp->host_joined &&
	    (timer_pending(&mp->timer) ?
	     time_after(mp->timer.expires, now + max_delay) :
	     try_to_del_timer_sync(&mp->timer) >= 0))
		mod_timer(&mp->timer, now + max_delay);

	for (pp = &mp->ports;
	     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
	     pp = &p->next) {
		/* note: due to precedence the version/filter-mode condition
		 * only applies to the try_to_del_timer_sync() arm
		 */
		if (timer_pending(&p->timer) ?
		    time_after(p->timer.expires, now + max_delay) :
		    try_to_del_timer_sync(&p->timer) >= 0 &&
		    (brmctx->multicast_mld_version == 1 ||
		     p->filter_mode == MCAST_EXCLUDE))
			mod_timer(&p->timer, now + max_delay);
	}

out:
	spin_unlock(&brmctx->br->multicast_lock);
	return err;
}
3418 #endif
3419
/* Common leave handling for IGMP leave / MLD done messages.
 * With fast-leave the matching (non-permanent) port group is deleted
 * immediately; otherwise group/port timers are lowered - and, when we are
 * the querier, a last-member query is sent - so the entry expires quickly
 * unless another listener responds.
 */
static void
br_multicast_leave_group(struct net_bridge_mcast *brmctx,
			 struct net_bridge_mcast_port *pmctx,
			 struct br_ip *group,
			 struct bridge_mcast_other_query *other_query,
			 struct bridge_mcast_own_query *own_query,
			 const unsigned char *src)
{
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	spin_lock(&brmctx->br->multicast_lock);
	if (!br_multicast_ctx_should_use(brmctx, pmctx))
		goto out;

	mp = br_mdb_ip_get(brmctx->br, group);
	if (!mp)
		goto out;

	if (pmctx && (pmctx->port->flags & BR_MULTICAST_FAST_LEAVE)) {
		struct net_bridge_port_group __rcu **pp;

		/* fast leave: delete the matching port group right away,
		 * unless it is permanent
		 */
		for (pp = &mp->ports;
		     (p = mlock_dereference(*pp, brmctx->br)) != NULL;
		     pp = &p->next) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (p->flags & MDB_PG_FLAGS_PERMANENT)
				break;

			p->flags |= MDB_PG_FLAGS_FAST_LEAVE;
			br_multicast_del_pg(mp, p, pp);
		}
		goto out;
	}

	/* another querier is active - leave handling is its job */
	if (timer_pending(&other_query->timer))
		goto out;

	if (brmctx->multicast_querier) {
		/* we are the querier: send a last-member query and lower
		 * the matching port group timer
		 */
		__br_multicast_send_query(brmctx, pmctx, NULL, NULL, &mp->addr,
					  false, 0, NULL);

		time = jiffies + brmctx->multicast_last_member_count *
				 brmctx->multicast_last_member_interval;

		mod_timer(&own_query->timer, time);

		for (p = mlock_dereference(mp->ports, brmctx->br);
		     p != NULL && pmctx != NULL;
		     p = mlock_dereference(p->next, brmctx->br)) {
			if (!br_port_group_equal(p, pmctx->port, src))
				continue;

			if (!hlist_unhashed(&p->mglist) &&
			    (timer_pending(&p->timer) ?
			     time_after(p->timer.expires, time) :
			     try_to_del_timer_sync(&p->timer) >= 0)) {
				mod_timer(&p->timer, time);
			}

			break;
		}
	}

	now = jiffies;
	time = now + brmctx->multicast_last_member_count *
		     brmctx->multicast_last_member_interval;

	if (!pmctx) {
		/* host leave - only the group timer itself is lowered */
		if (mp->host_joined &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);
		}

		goto out;
	}

	/* port leave - lower the timer of the port's group entry */
	for (p = mlock_dereference(mp->ports, brmctx->br);
	     p != NULL;
	     p = mlock_dereference(p->next, brmctx->br)) {
		if (p->key.port != pmctx->port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);
		}

		break;
	}
out:
	spin_unlock(&brmctx->br->multicast_lock);
}
3521
/* IGMP leave: build the IPv4 group key and hand off to the
 * protocol-independent leave handler.
 */
static void br_ip4_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 __be32 group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct bridge_mcast_own_query *own_query;
	struct br_ip br_group;

	/* reserved link-local groups are not snooped */
	if (ipv4_is_local_multicast(group))
		return;

	/* memset: br_ip is used as a hash key, padding must be zeroed */
	memset(&br_group, 0, sizeof(br_group));
	br_group.proto = htons(ETH_P_IP);
	br_group.vid = vid;
	br_group.dst.ip4 = group;

	own_query = pmctx ? &pmctx->ip4_own_query : &brmctx->ip4_own_query;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip4_other_query,
				 own_query, src);
}
3545
3546 #if IS_ENABLED(CONFIG_IPV6)
/* MLD done: build the IPv6 group key and hand off to the
 * protocol-independent leave handler.
 */
static void br_ip6_multicast_leave_group(struct net_bridge_mcast *brmctx,
					 struct net_bridge_mcast_port *pmctx,
					 const struct in6_addr *group,
					 __u16 vid,
					 const unsigned char *src)
{
	struct bridge_mcast_own_query *own_query;
	struct br_ip br_group;

	/* the all-nodes link-local group is not snooped */
	if (ipv6_addr_is_ll_all_nodes(group))
		return;

	/* memset: br_ip is used as a hash key, padding must be zeroed */
	memset(&br_group, 0, sizeof(br_group));
	br_group.proto = htons(ETH_P_IPV6);
	br_group.vid = vid;
	br_group.dst.ip6 = *group;

	own_query = pmctx ? &pmctx->ip6_own_query : &brmctx->ip6_own_query;

	br_multicast_leave_group(brmctx, pmctx, &br_group,
				 &brmctx->ip6_other_query,
				 own_query, src);
}
3570 #endif
3571
br_multicast_err_count(const struct net_bridge * br,const struct net_bridge_port * p,__be16 proto)3572 static void br_multicast_err_count(const struct net_bridge *br,
3573 const struct net_bridge_port *p,
3574 __be16 proto)
3575 {
3576 struct bridge_mcast_stats __percpu *stats;
3577 struct bridge_mcast_stats *pstats;
3578
3579 if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
3580 return;
3581
3582 if (p)
3583 stats = p->mcast_stats;
3584 else
3585 stats = br->mcast_stats;
3586 if (WARN_ON(!stats))
3587 return;
3588
3589 pstats = this_cpu_ptr(stats);
3590
3591 u64_stats_update_begin(&pstats->syncp);
3592 switch (proto) {
3593 case htons(ETH_P_IP):
3594 pstats->mstats.igmp_parse_errors++;
3595 break;
3596 #if IS_ENABLED(CONFIG_IPV6)
3597 case htons(ETH_P_IPV6):
3598 pstats->mstats.mld_parse_errors++;
3599 break;
3600 #endif
3601 }
3602 u64_stats_update_end(&pstats->syncp);
3603 }
3604
br_multicast_pim(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,const struct sk_buff * skb)3605 static void br_multicast_pim(struct net_bridge_mcast *brmctx,
3606 struct net_bridge_mcast_port *pmctx,
3607 const struct sk_buff *skb)
3608 {
3609 unsigned int offset = skb_transport_offset(skb);
3610 struct pimhdr *pimhdr, _pimhdr;
3611
3612 pimhdr = skb_header_pointer(skb, offset, sizeof(_pimhdr), &_pimhdr);
3613 if (!pimhdr || pim_hdr_version(pimhdr) != PIM_VERSION ||
3614 pim_hdr_type(pimhdr) != PIM_TYPE_HELLO)
3615 return;
3616
3617 spin_lock(&brmctx->br->multicast_lock);
3618 br_ip4_multicast_mark_router(brmctx, pmctx);
3619 spin_unlock(&brmctx->br->multicast_lock);
3620 }
3621
br_ip4_multicast_mrd_rcv(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct sk_buff * skb)3622 static int br_ip4_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
3623 struct net_bridge_mcast_port *pmctx,
3624 struct sk_buff *skb)
3625 {
3626 if (ip_hdr(skb)->protocol != IPPROTO_IGMP ||
3627 igmp_hdr(skb)->type != IGMP_MRDISC_ADV)
3628 return -ENOMSG;
3629
3630 spin_lock(&brmctx->br->multicast_lock);
3631 br_ip4_multicast_mark_router(brmctx, pmctx);
3632 spin_unlock(&brmctx->br->multicast_lock);
3633
3634 return 0;
3635 }
3636
/* Snoop a received IPv4 multicast packet: dispatch IGMP reports, queries
 * and leaves, and handle PIM hello / multicast router discovery messages.
 * Returns 0 or a negative error from the IGMP handlers.
 */
static int br_multicast_ipv4_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct igmphdr *ih;
	int err;

	err = ip_mc_check_igmp(skb);

	if (err == -ENOMSG) {
		/* not an IGMP packet - may still be PIM or MRD */
		if (!ipv4_is_local_multicast(ip_hdr(skb)->daddr)) {
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		} else if (pim_ipv4_all_pim_routers(ip_hdr(skb)->daddr)) {
			if (ip_hdr(skb)->protocol == IPPROTO_PIM)
				br_multicast_pim(brmctx, pmctx, skb);
		} else if (ipv4_is_all_snoopers(ip_hdr(skb)->daddr)) {
			br_ip4_multicast_mrd_rcv(brmctx, pmctx, skb);
		}

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	ih = igmp_hdr(skb);
	src = eth_hdr(skb)->h_source;
	BR_INPUT_SKB_CB(skb)->igmp = ih->type;

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* membership reports are only forwarded to mrouter ports */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip4_multicast_add_group(brmctx, pmctx, ih->group, vid,
						 src, true);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_ip4_multicast_igmp3_report(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		br_ip4_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_ip4_multicast_leave_group(brmctx, pmctx, ih->group, vid, src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
3692
3693 #if IS_ENABLED(CONFIG_IPV6)
br_ip6_multicast_mrd_rcv(struct net_bridge_mcast * brmctx,struct net_bridge_mcast_port * pmctx,struct sk_buff * skb)3694 static void br_ip6_multicast_mrd_rcv(struct net_bridge_mcast *brmctx,
3695 struct net_bridge_mcast_port *pmctx,
3696 struct sk_buff *skb)
3697 {
3698 if (icmp6_hdr(skb)->icmp6_type != ICMPV6_MRDISC_ADV)
3699 return;
3700
3701 spin_lock(&brmctx->br->multicast_lock);
3702 br_ip6_multicast_mark_router(brmctx, pmctx);
3703 spin_unlock(&brmctx->br->multicast_lock);
3704 }
3705
/* Snoop a received IPv6 multicast packet: dispatch MLD reports, queries
 * and done messages, and handle multicast router discovery advertisements.
 * Returns 0 or a negative error from the MLD handlers.
 */
static int br_multicast_ipv6_rcv(struct net_bridge_mcast *brmctx,
				 struct net_bridge_mcast_port *pmctx,
				 struct sk_buff *skb,
				 u16 vid)
{
	struct net_bridge_port *p = pmctx ? pmctx->port : NULL;
	const unsigned char *src;
	struct mld_msg *mld;
	int err;

	err = ipv6_mc_check_mld(skb);

	if (err == -ENOMSG || err == -ENODATA) {
		/* not an MLD packet - may still be an MRD advertisement
		 * (only checked for -ENODATA; see ipv6_mc_check_mld())
		 */
		if (!ipv6_addr_is_ll_all_nodes(&ipv6_hdr(skb)->daddr))
			BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		if (err == -ENODATA &&
		    ipv6_addr_is_all_snoopers(&ipv6_hdr(skb)->daddr))
			br_ip6_multicast_mrd_rcv(brmctx, pmctx, skb);

		return 0;
	} else if (err < 0) {
		br_multicast_err_count(brmctx->br, p, skb->protocol);
		return err;
	}

	mld = (struct mld_msg *)skb_transport_header(skb);
	BR_INPUT_SKB_CB(skb)->igmp = mld->mld_type;

	switch (mld->mld_type) {
	case ICMPV6_MGM_REPORT:
		src = eth_hdr(skb)->h_source;
		/* membership reports are only forwarded to mrouter ports */
		BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
		err = br_ip6_multicast_add_group(brmctx, pmctx, &mld->mld_mca,
						 vid, src, true);
		break;
	case ICMPV6_MLD2_REPORT:
		err = br_ip6_multicast_mld2_report(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_QUERY:
		err = br_ip6_multicast_query(brmctx, pmctx, skb, vid);
		break;
	case ICMPV6_MGM_REDUCTION:
		src = eth_hdr(skb)->h_source;
		br_ip6_multicast_leave_group(brmctx, pmctx, &mld->mld_mca, vid,
					     src);
		break;
	}

	br_multicast_count(brmctx->br, p, skb, BR_INPUT_SKB_CB(skb)->igmp,
			   BR_MCAST_DIR_RX);

	return err;
}
3759 #endif
3760
/* Entry point for multicast snooping of a received packet.
 * When per-vlan snooping is enabled, *brmctx and *pmctx are redirected to
 * the vlan's multicast contexts before dispatching by protocol.
 * Returns 0 when nothing needed doing, otherwise the handler's result.
 */
int br_multicast_rcv(struct net_bridge_mcast **brmctx,
		     struct net_bridge_mcast_port **pmctx,
		     struct net_bridge_vlan *vlan,
		     struct sk_buff *skb, u16 vid)
{
	int ret = 0;

	BR_INPUT_SKB_CB(skb)->igmp = 0;
	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;

	if (!br_opt_get((*brmctx)->br, BROPT_MULTICAST_ENABLED))
		return 0;

	if (br_opt_get((*brmctx)->br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) && vlan) {
		const struct net_bridge_vlan *masterv;

		/* the vlan has the master flag set only when transmitting
		 * through the bridge device
		 */
		if (br_vlan_is_master(vlan)) {
			masterv = vlan;
			*brmctx = &vlan->br_mcast_ctx;
			*pmctx = NULL;
		} else {
			masterv = vlan->brvlan;
			*brmctx = &vlan->brvlan->br_mcast_ctx;
			*pmctx = &vlan->port_mcast_ctx;
		}

		/* multicast snooping is disabled for this vlan */
		if (!(masterv->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED))
			return 0;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ret = br_multicast_ipv4_rcv(*brmctx, *pmctx, skb, vid);
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		ret = br_multicast_ipv6_rcv(*brmctx, *pmctx, skb, vid);
		break;
#endif
	}

	return ret;
}
3807
br_multicast_query_expired(struct net_bridge_mcast * brmctx,struct bridge_mcast_own_query * query,struct bridge_mcast_querier * querier)3808 static void br_multicast_query_expired(struct net_bridge_mcast *brmctx,
3809 struct bridge_mcast_own_query *query,
3810 struct bridge_mcast_querier *querier)
3811 {
3812 spin_lock(&brmctx->br->multicast_lock);
3813 if (br_multicast_ctx_vlan_disabled(brmctx))
3814 goto out;
3815
3816 if (query->startup_sent < brmctx->multicast_startup_query_count)
3817 query->startup_sent++;
3818
3819 br_multicast_send_query(brmctx, NULL, query);
3820 out:
3821 spin_unlock(&brmctx->br->multicast_lock);
3822 }
3823
/* IPv4 own-query timer callback */
static void br_ip4_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip4_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip4_own_query,
				   &brmctx->ip4_querier);
}
3832
3833 #if IS_ENABLED(CONFIG_IPV6)
/* IPv6 own-query timer callback */
static void br_ip6_multicast_query_expired(struct timer_list *t)
{
	struct net_bridge_mcast *brmctx = from_timer(brmctx, t,
						     ip6_own_query.timer);

	br_multicast_query_expired(brmctx, &brmctx->ip6_own_query,
				   &brmctx->ip6_querier);
}
3842 #endif
3843
br_multicast_gc_work(struct work_struct * work)3844 static void br_multicast_gc_work(struct work_struct *work)
3845 {
3846 struct net_bridge *br = container_of(work, struct net_bridge,
3847 mcast_gc_work);
3848 HLIST_HEAD(deleted_head);
3849
3850 spin_lock_bh(&br->multicast_lock);
3851 hlist_move_list(&br->mcast_gc_list, &deleted_head);
3852 spin_unlock_bh(&br->multicast_lock);
3853
3854 br_multicast_gc(&deleted_head);
3855 }
3856
/* Initialize a multicast context (the bridge-global one or a per-vlan
 * one) with default protocol parameters and wire up all IGMP/MLD
 * timers. @vlan is NULL for the bridge-global context.
 */
void br_multicast_ctx_init(struct net_bridge *br,
			   struct net_bridge_vlan *vlan,
			   struct net_bridge_mcast *brmctx)
{
	brmctx->br = br;
	brmctx->vlan = vlan;
	brmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
	brmctx->multicast_last_member_count = 2;
	brmctx->multicast_startup_query_count = 2;

	/* default protocol intervals, in jiffies */
	brmctx->multicast_last_member_interval = HZ;
	brmctx->multicast_query_response_interval = 10 * HZ;
	brmctx->multicast_startup_query_interval = 125 * HZ / 4;
	brmctx->multicast_query_interval = 125 * HZ;
	brmctx->multicast_querier_interval = 255 * HZ;
	brmctx->multicast_membership_interval = 260 * HZ;

	/* querier seqcounts are associated with the bridge multicast lock */
	brmctx->ip4_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip4_querier.seq, &br->multicast_lock);
	brmctx->multicast_igmp_version = 2;
#if IS_ENABLED(CONFIG_IPV6)
	brmctx->multicast_mld_version = 1;
	brmctx->ip6_querier.port_ifidx = 0;
	seqcount_spinlock_init(&brmctx->ip6_querier.seq, &br->multicast_lock);
#endif

	timer_setup(&brmctx->ip4_mc_router_timer,
		    br_ip4_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip4_other_query.timer,
		    br_ip4_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip4_other_query.delay_timer,
		    br_multicast_query_delay_expired, 0);
	timer_setup(&brmctx->ip4_own_query.timer,
		    br_ip4_multicast_query_expired, 0);
#if IS_ENABLED(CONFIG_IPV6)
	timer_setup(&brmctx->ip6_mc_router_timer,
		    br_ip6_multicast_local_router_expired, 0);
	timer_setup(&brmctx->ip6_other_query.timer,
		    br_ip6_multicast_querier_expired, 0);
	timer_setup(&brmctx->ip6_other_query.delay_timer,
		    br_multicast_query_delay_expired, 0);
	timer_setup(&brmctx->ip6_own_query.timer,
		    br_ip6_multicast_query_expired, 0);
#endif
}
3902
/* Tear down a multicast context; currently this only stops its timers */
void br_multicast_ctx_deinit(struct net_bridge_mcast *brmctx)
{
	__br_multicast_stop(brmctx);
}
3907
/* One-time multicast initialization for a newly created bridge device */
void br_multicast_init(struct net_bridge *br)
{
	br->hash_max = BR_MULTICAST_DEFAULT_HASH_MAX;

	br_multicast_ctx_init(br, NULL, &br->multicast_ctx);

	/* snooping is on by default */
	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, true);
	br_opt_toggle(br, BROPT_HAS_IPV6_ADDR, true);

	spin_lock_init(&br->multicast_lock);
	INIT_HLIST_HEAD(&br->mdb_list);
	INIT_HLIST_HEAD(&br->mcast_gc_list);
	INIT_WORK(&br->mcast_gc_work, br_multicast_gc_work);
}
3922
br_ip4_multicast_join_snoopers(struct net_bridge * br)3923 static void br_ip4_multicast_join_snoopers(struct net_bridge *br)
3924 {
3925 struct in_device *in_dev = in_dev_get(br->dev);
3926
3927 if (!in_dev)
3928 return;
3929
3930 __ip_mc_inc_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
3931 in_dev_put(in_dev);
3932 }
3933
#if IS_ENABLED(CONFIG_IPV6)
/* Join the MLD all-snoopers group on the bridge device */
static void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	/* build ff02::6a, the all-snoopers link-local multicast address */
	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_inc(br->dev, &addr);
}
#else
/* no-op when IPv6 is compiled out */
static inline void br_ip6_multicast_join_snoopers(struct net_bridge *br)
{
}
#endif
3947
/* Join both the IGMP and MLD all-snoopers groups on the bridge device */
void br_multicast_join_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_join_snoopers(br);
	br_ip6_multicast_join_snoopers(br);
}
3953
/* Leave the IGMP all-snoopers group joined by the join path above;
 * the in_device is expected to still exist at this point, hence WARN_ON.
 */
static void br_ip4_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in_device *in_dev = in_dev_get(br->dev);

	if (WARN_ON(!in_dev))
		return;

	__ip_mc_dec_group(in_dev, htonl(INADDR_ALLSNOOPERS_GROUP), GFP_ATOMIC);
	in_dev_put(in_dev);
}
3964
#if IS_ENABLED(CONFIG_IPV6)
/* Leave the MLD all-snoopers group (ff02::6a) on the bridge device */
static void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
	struct in6_addr addr;

	/* same ff02::6a address as the join path */
	ipv6_addr_set(&addr, htonl(0xff020000), 0, 0, htonl(0x6a));
	ipv6_dev_mc_dec(br->dev, &addr);
}
#else
/* no-op when IPv6 is compiled out */
static inline void br_ip6_multicast_leave_snoopers(struct net_bridge *br)
{
}
#endif
3978
/* Leave both the IGMP and MLD all-snoopers groups on the bridge device */
void br_multicast_leave_snoopers(struct net_bridge *br)
{
	br_ip4_multicast_leave_snoopers(br);
	br_ip6_multicast_leave_snoopers(br);
}
3984
__br_multicast_open_query(struct net_bridge * br,struct bridge_mcast_own_query * query)3985 static void __br_multicast_open_query(struct net_bridge *br,
3986 struct bridge_mcast_own_query *query)
3987 {
3988 query->startup_sent = 0;
3989
3990 if (!br_opt_get(br, BROPT_MULTICAST_ENABLED))
3991 return;
3992
3993 mod_timer(&query->timer, jiffies);
3994 }
3995
/* (Re)start both the IPv4 and IPv6 own-query cycles of a context */
static void __br_multicast_open(struct net_bridge_mcast *brmctx)
{
	__br_multicast_open_query(brmctx->br, &brmctx->ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
	__br_multicast_open_query(brmctx->br, &brmctx->ip6_own_query);
#endif
}
4003
br_multicast_open(struct net_bridge * br)4004 void br_multicast_open(struct net_bridge *br)
4005 {
4006 ASSERT_RTNL();
4007
4008 if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
4009 struct net_bridge_vlan_group *vg;
4010 struct net_bridge_vlan *vlan;
4011
4012 vg = br_vlan_group(br);
4013 if (vg) {
4014 list_for_each_entry(vlan, &vg->vlan_list, vlist) {
4015 struct net_bridge_mcast *brmctx;
4016
4017 brmctx = &vlan->br_mcast_ctx;
4018 if (br_vlan_is_brentry(vlan) &&
4019 !br_multicast_ctx_vlan_disabled(brmctx))
4020 __br_multicast_open(&vlan->br_mcast_ctx);
4021 }
4022 }
4023 } else {
4024 __br_multicast_open(&br->multicast_ctx);
4025 }
4026 }
4027
/* Stop all timers of a multicast context, waiting for any running
 * timer handlers to finish (must not be called from atomic context).
 */
static void __br_multicast_stop(struct net_bridge_mcast *brmctx)
{
	del_timer_sync(&brmctx->ip4_mc_router_timer);
	del_timer_sync(&brmctx->ip4_other_query.timer);
	del_timer_sync(&brmctx->ip4_other_query.delay_timer);
	del_timer_sync(&brmctx->ip4_own_query.timer);
#if IS_ENABLED(CONFIG_IPV6)
	del_timer_sync(&brmctx->ip6_mc_router_timer);
	del_timer_sync(&brmctx->ip6_other_query.timer);
	del_timer_sync(&brmctx->ip6_other_query.delay_timer);
	del_timer_sync(&brmctx->ip6_own_query.timer);
#endif
}
4041
/* Enable or disable multicast processing for a single vlan, either a
 * master (bridge) vlan entry or a per-port one. The MCAST_ENABLED flag
 * is flipped under multicast_lock to synchronize with timers/packets.
 */
void br_multicast_toggle_one_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge *br;

	/* it's okay to check for the flag without the multicast lock because it
	 * can only change under RTNL -> multicast_lock, we need the latter to
	 * sync with timers and packets
	 */
	if (on == !!(vlan->priv_flags & BR_VLFLAG_MCAST_ENABLED))
		return;

	if (br_vlan_is_master(vlan)) {
		br = vlan->br;

		/* don't enable a bridge vlan that isn't a bridge entry or
		 * whose global mcast context is disabled
		 */
		if (!br_vlan_is_brentry(vlan) ||
		    (on &&
		     br_multicast_ctx_vlan_global_disabled(&vlan->br_mcast_ctx)))
			return;

		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		spin_unlock_bh(&br->multicast_lock);

		/* open/stop outside the lock; they manage the own-query timers */
		if (on)
			__br_multicast_open(&vlan->br_mcast_ctx);
		else
			__br_multicast_stop(&vlan->br_mcast_ctx);
	} else {
		struct net_bridge_mcast *brmctx;

		brmctx = br_multicast_port_ctx_get_global(&vlan->port_mcast_ctx);
		if (on && br_multicast_ctx_vlan_global_disabled(brmctx))
			return;

		br = vlan->port->br;
		spin_lock_bh(&br->multicast_lock);
		vlan->priv_flags ^= BR_VLFLAG_MCAST_ENABLED;
		if (on)
			__br_multicast_enable_port_ctx(&vlan->port_mcast_ctx);
		else
			__br_multicast_disable_port_ctx(&vlan->port_mcast_ctx);
		spin_unlock_bh(&br->multicast_lock);
	}
}
4086
/* Propagate a multicast toggle on a master (bridge) vlan to all the
 * matching per-port vlans, then to the bridge vlan entry itself.
 */
static void br_multicast_toggle_vlan(struct net_bridge_vlan *vlan, bool on)
{
	struct net_bridge_port *p;

	if (WARN_ON_ONCE(!br_vlan_is_master(vlan)))
		return;

	list_for_each_entry(p, &vlan->br->port_list, list) {
		struct net_bridge_vlan *vport;

		vport = br_vlan_find(nbp_vlan_group(p), vlan->vid);
		if (vport)
			br_multicast_toggle_one_vlan(vport, on);
	}

	if (br_vlan_is_brentry(vlan))
		br_multicast_toggle_one_vlan(vlan, on);
}
4106
/* Switch between per-vlan and bridge-global multicast snooping.
 * Requires vlan filtering when enabling. Returns 0 on success or
 * -EINVAL with an extack message otherwise.
 */
int br_multicast_toggle_vlan_snooping(struct net_bridge *br, bool on,
				      struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_vlan *vlan;
	struct net_bridge_port *p;

	if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED) == on)
		return 0;

	if (on && !br_opt_get(br, BROPT_VLAN_ENABLED)) {
		NL_SET_ERR_MSG_MOD(extack, "Cannot enable multicast vlan snooping with vlan filtering disabled");
		return -EINVAL;
	}

	vg = br_vlan_group(br);
	if (!vg)
		return 0;

	br_opt_toggle(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED, on);

	/* disable/enable non-vlan mcast contexts based on vlan snooping */
	if (on)
		__br_multicast_stop(&br->multicast_ctx);
	else
		__br_multicast_open(&br->multicast_ctx);
	list_for_each_entry(p, &br->port_list, list) {
		if (on)
			br_multicast_disable_port(p);
		else
			br_multicast_enable_port(p);
	}

	/* finally flip every vlan's multicast state to match */
	list_for_each_entry(vlan, &vg->vlan_list, vlist)
		br_multicast_toggle_vlan(vlan, on);

	return 0;
}
4145
/* Toggle the global multicast state of a vlan. Returns true if the
 * state changed, false if it was already in the requested state.
 */
bool br_multicast_toggle_global_vlan(struct net_bridge_vlan *vlan, bool on)
{
	bool enabled;

	ASSERT_RTNL();

	/* BR_VLFLAG_GLOBAL_MCAST_ENABLED relies on eventual consistency and
	 * requires only RTNL to change
	 */
	enabled = !!(vlan->priv_flags & BR_VLFLAG_GLOBAL_MCAST_ENABLED);
	if (on == enabled)
		return false;

	vlan->priv_flags ^= BR_VLFLAG_GLOBAL_MCAST_ENABLED;
	br_multicast_toggle_vlan(vlan, on);

	return true;
}
4161
br_multicast_stop(struct net_bridge * br)4162 void br_multicast_stop(struct net_bridge *br)
4163 {
4164 ASSERT_RTNL();
4165
4166 if (br_opt_get(br, BROPT_MCAST_VLAN_SNOOPING_ENABLED)) {
4167 struct net_bridge_vlan_group *vg;
4168 struct net_bridge_vlan *vlan;
4169
4170 vg = br_vlan_group(br);
4171 if (vg) {
4172 list_for_each_entry(vlan, &vg->vlan_list, vlist) {
4173 struct net_bridge_mcast *brmctx;
4174
4175 brmctx = &vlan->br_mcast_ctx;
4176 if (br_vlan_is_brentry(vlan) &&
4177 !br_multicast_ctx_vlan_disabled(brmctx))
4178 __br_multicast_stop(&vlan->br_mcast_ctx);
4179 }
4180 }
4181 } else {
4182 __br_multicast_stop(&br->multicast_ctx);
4183 }
4184 }
4185
/* Final multicast cleanup on bridge device deletion: flush all mdb
 * entries, run pending garbage collection, stop the global context's
 * timers and wait for outstanding RCU callbacks.
 */
void br_multicast_dev_del(struct net_bridge *br)
{
	struct net_bridge_mdb_entry *mp;
	HLIST_HEAD(deleted_head);
	struct hlist_node *tmp;

	spin_lock_bh(&br->multicast_lock);
	hlist_for_each_entry_safe(mp, tmp, &br->mdb_list, mdb_node)
		br_multicast_del_mdb_entry(mp);
	hlist_move_list(&br->mcast_gc_list, &deleted_head);
	spin_unlock_bh(&br->multicast_lock);

	br_multicast_ctx_deinit(&br->multicast_ctx);
	/* free outside the lock, then make sure no gc work is still queued */
	br_multicast_gc(&deleted_head);
	cancel_work_sync(&br->mcast_gc_work);

	rcu_barrier();
}
4204
/* Set the multicast router type for a bridge/vlan context. Only
 * DISABLED, PERM and TEMP_QUERY are accepted here; anything else
 * returns -EINVAL (port-level types go via
 * br_multicast_set_port_router()).
 */
int br_multicast_set_router(struct net_bridge_mcast *brmctx, unsigned long val)
{
	int err = -EINVAL;

	spin_lock_bh(&brmctx->br->multicast_lock);

	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
	case MDB_RTR_TYPE_PERM:
		/* fixed states don't depend on queries - stop router timers */
		br_mc_router_state_change(brmctx->br, val == MDB_RTR_TYPE_PERM);
		del_timer(&brmctx->ip4_mc_router_timer);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&brmctx->ip6_mc_router_timer);
#endif
		brmctx->multicast_router = val;
		err = 0;
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		/* only notify a state change when leaving a non-query state */
		if (brmctx->multicast_router != MDB_RTR_TYPE_TEMP_QUERY)
			br_mc_router_state_change(brmctx->br, false);
		brmctx->multicast_router = val;
		err = 0;
		break;
	}

	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}
4234
/* Notify userspace that a port stopped being a multicast router, but
 * only once it is gone from both the IPv4 and IPv6 router lists.
 */
static void
br_multicast_rport_del_notify(struct net_bridge_mcast_port *pmctx, bool deleted)
{
	if (!deleted)
		return;

	/* For backwards compatibility for now, only notify if there is
	 * no multicast router anymore for both IPv4 and IPv6.
	 */
	if (!hlist_unhashed(&pmctx->ip4_rlist))
		return;
#if IS_ENABLED(CONFIG_IPV6)
	if (!hlist_unhashed(&pmctx->ip6_rlist))
		return;
#endif

	br_rtr_notify(pmctx->port->br->dev, pmctx, RTM_DELMDB);
	br_port_mc_router_state_change(pmctx->port, false);

	/* don't allow timer refresh */
	if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP)
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
}
4258
/* Set the multicast router type of a port (or per-port vlan) context:
 * update the router lists/timers accordingly and emit deletion
 * notifications where needed. Returns 0 on success, -EINVAL for an
 * unknown type.
 */
int br_multicast_set_port_router(struct net_bridge_mcast_port *pmctx,
				 unsigned long val)
{
	struct net_bridge_mcast *brmctx;
	unsigned long now = jiffies;
	int err = -EINVAL;
	bool del = false;

	brmctx = br_multicast_port_ctx_get_global(pmctx);
	spin_lock_bh(&brmctx->br->multicast_lock);
	if (pmctx->multicast_router == val) {
		/* Refresh the temp router port timer */
		if (pmctx->multicast_router == MDB_RTR_TYPE_TEMP) {
			mod_timer(&pmctx->ip4_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#if IS_ENABLED(CONFIG_IPV6)
			mod_timer(&pmctx->ip6_mc_router_timer,
				  now + brmctx->multicast_querier_interval);
#endif
		}
		err = 0;
		goto unlock;
	}
	switch (val) {
	case MDB_RTR_TYPE_DISABLED:
		/* remove from router lists and stop the timers */
		pmctx->multicast_router = MDB_RTR_TYPE_DISABLED;
		del |= br_ip4_multicast_rport_del(pmctx);
		del_timer(&pmctx->ip4_mc_router_timer);
		del |= br_ip6_multicast_rport_del(pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_TEMP_QUERY:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP_QUERY;
		del |= br_ip4_multicast_rport_del(pmctx);
		del |= br_ip6_multicast_rport_del(pmctx);
		br_multicast_rport_del_notify(pmctx, del);
		break;
	case MDB_RTR_TYPE_PERM:
		/* permanent router port - add without a timer */
		pmctx->multicast_router = MDB_RTR_TYPE_PERM;
		del_timer(&pmctx->ip4_mc_router_timer);
		br_ip4_multicast_add_router(brmctx, pmctx);
#if IS_ENABLED(CONFIG_IPV6)
		del_timer(&pmctx->ip6_mc_router_timer);
#endif
		br_ip6_multicast_add_router(brmctx, pmctx);
		break;
	case MDB_RTR_TYPE_TEMP:
		pmctx->multicast_router = MDB_RTR_TYPE_TEMP;
		br_ip4_multicast_mark_router(brmctx, pmctx);
		br_ip6_multicast_mark_router(brmctx, pmctx);
		break;
	default:
		goto unlock;
	}
	err = 0;
unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return err;
}
4322
/* Set the multicast router type of a vlan, dispatching to the bridge
 * context for a master vlan and to the port context otherwise.
 */
int br_multicast_set_vlan_router(struct net_bridge_vlan *v, u8 mcast_router)
{
	if (br_vlan_is_master(v))
		return br_multicast_set_router(&v->br_mcast_ctx, mcast_router);

	return br_multicast_set_port_router(&v->port_mcast_ctx, mcast_router);
}
4335
/* Kick off our own querier for @brmctx and re-enable the matching
 * per-port own queries. For a vlan context the per-port vlan contexts
 * are used instead of the plain port contexts.
 */
static void br_multicast_start_querier(struct net_bridge_mcast *brmctx,
				       struct bridge_mcast_own_query *query)
{
	struct net_bridge_port *port;

	/* don't start if the context type doesn't match the snooping mode */
	if (!br_multicast_ctx_matches_vlan_snooping(brmctx))
		return;

	__br_multicast_open_query(brmctx->br, query);

	rcu_read_lock();
	list_for_each_entry_rcu(port, &brmctx->br->port_list, list) {
		struct bridge_mcast_own_query *ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
		struct bridge_mcast_own_query *ip6_own_query;
#endif

		if (br_multicast_port_ctx_state_stopped(&port->multicast_ctx))
			continue;

		if (br_multicast_ctx_is_vlan(brmctx)) {
			struct net_bridge_vlan *vlan;

			/* pick the port vlan context matching this vlan ctx */
			vlan = br_vlan_find(nbp_vlan_group_rcu(port),
					    brmctx->vlan->vid);
			if (!vlan ||
			    br_multicast_port_ctx_state_stopped(&vlan->port_mcast_ctx))
				continue;

			ip4_own_query = &vlan->port_mcast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &vlan->port_mcast_ctx.ip6_own_query;
#endif
		} else {
			ip4_own_query = &port->multicast_ctx.ip4_own_query;
#if IS_ENABLED(CONFIG_IPV6)
			ip6_own_query = &port->multicast_ctx.ip6_own_query;
#endif
		}

		/* enable whichever family's query we were asked to start */
		if (query == &brmctx->ip4_own_query)
			br_multicast_enable(ip4_own_query);
#if IS_ENABLED(CONFIG_IPV6)
		else
			br_multicast_enable(ip6_own_query);
#endif
	}
	rcu_read_unlock();
}
4385
/* Enable or disable multicast snooping on the bridge. Joining/leaving
 * the all-snoopers groups happens after multicast_lock is dropped (see
 * the comment below for why).
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val,
			struct netlink_ext_ack *extack)
{
	struct net_bridge_port *port;
	bool change_snoopers = false;
	int err = 0;

	spin_lock_bh(&br->multicast_lock);
	if (!!br_opt_get(br, BROPT_MULTICAST_ENABLED) == !!val)
		goto unlock;

	/* switchdev may veto the change; -EOPNOTSUPP just means no offload */
	err = br_mc_disabled_update(br->dev, val, extack);
	if (err == -EOPNOTSUPP)
		err = 0;
	if (err)
		goto unlock;

	br_opt_toggle(br, BROPT_MULTICAST_ENABLED, !!val);
	if (!br_opt_get(br, BROPT_MULTICAST_ENABLED)) {
		change_snoopers = true;
		goto unlock;
	}

	if (!netif_running(br->dev))
		goto unlock;

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list)
		__br_multicast_enable_port_ctx(&port->multicast_ctx);

	change_snoopers = true;

unlock:
	spin_unlock_bh(&br->multicast_lock);

	/* br_multicast_join_snoopers has the potential to cause
	 * an MLD Report/Leave to be delivered to br_multicast_rcv,
	 * which would in turn call br_multicast_add_group, which would
	 * attempt to acquire multicast_lock. This function should be
	 * called after the lock has been released to avoid deadlocks on
	 * multicast_lock.
	 *
	 * br_multicast_leave_snoopers does not have the problem since
	 * br_multicast_rcv first checks BROPT_MULTICAST_ENABLED, and
	 * returns without calling br_multicast_ipv4/6_rcv if it's not
	 * enabled. Moved both functions out just for symmetry.
	 */
	if (change_snoopers) {
		if (br_opt_get(br, BROPT_MULTICAST_ENABLED))
			br_multicast_join_snoopers(br);
		else
			br_multicast_leave_snoopers(br);
	}

	return err;
}
4442
br_multicast_enabled(const struct net_device * dev)4443 bool br_multicast_enabled(const struct net_device *dev)
4444 {
4445 struct net_bridge *br = netdev_priv(dev);
4446
4447 return !!br_opt_get(br, BROPT_MULTICAST_ENABLED);
4448 }
4449 EXPORT_SYMBOL_GPL(br_multicast_enabled);
4450
br_multicast_router(const struct net_device * dev)4451 bool br_multicast_router(const struct net_device *dev)
4452 {
4453 struct net_bridge *br = netdev_priv(dev);
4454 bool is_router;
4455
4456 spin_lock_bh(&br->multicast_lock);
4457 is_router = br_multicast_is_router(&br->multicast_ctx, NULL);
4458 spin_unlock_bh(&br->multicast_lock);
4459 return is_router;
4460 }
4461 EXPORT_SYMBOL_GPL(br_multicast_router);
4462
/* Enable or disable acting as an IGMP/MLD querier for this context.
 * Enabling arms the other-querier delay timers (unless another querier
 * is already known) and starts our own queries. Always returns 0.
 */
int br_multicast_set_querier(struct net_bridge_mcast *brmctx, unsigned long val)
{
	unsigned long max_delay;

	val = !!val;

	spin_lock_bh(&brmctx->br->multicast_lock);
	if (brmctx->multicast_querier == val)
		goto unlock;

	/* WRITE_ONCE pairs with lockless readers of multicast_querier */
	WRITE_ONCE(brmctx->multicast_querier, val);
	if (!val)
		goto unlock;

	max_delay = brmctx->multicast_query_response_interval;

	/* only delay-start if no other querier is currently active */
	if (!timer_pending(&brmctx->ip4_other_query.timer))
		mod_timer(&brmctx->ip4_other_query.delay_timer,
			  jiffies + max_delay);

	br_multicast_start_querier(brmctx, &brmctx->ip4_own_query);

#if IS_ENABLED(CONFIG_IPV6)
	if (!timer_pending(&brmctx->ip6_other_query.timer))
		mod_timer(&brmctx->ip6_other_query.delay_timer,
			  jiffies + max_delay);

	br_multicast_start_querier(brmctx, &brmctx->ip6_own_query);
#endif

unlock:
	spin_unlock_bh(&brmctx->br->multicast_lock);

	return 0;
}
4498
br_multicast_set_igmp_version(struct net_bridge_mcast * brmctx,unsigned long val)4499 int br_multicast_set_igmp_version(struct net_bridge_mcast *brmctx,
4500 unsigned long val)
4501 {
4502 /* Currently we support only version 2 and 3 */
4503 switch (val) {
4504 case 2:
4505 case 3:
4506 break;
4507 default:
4508 return -EINVAL;
4509 }
4510
4511 spin_lock_bh(&brmctx->br->multicast_lock);
4512 brmctx->multicast_igmp_version = val;
4513 spin_unlock_bh(&brmctx->br->multicast_lock);
4514
4515 return 0;
4516 }
4517
4518 #if IS_ENABLED(CONFIG_IPV6)
br_multicast_set_mld_version(struct net_bridge_mcast * brmctx,unsigned long val)4519 int br_multicast_set_mld_version(struct net_bridge_mcast *brmctx,
4520 unsigned long val)
4521 {
4522 /* Currently we support version 1 and 2 */
4523 switch (val) {
4524 case 1:
4525 case 2:
4526 break;
4527 default:
4528 return -EINVAL;
4529 }
4530
4531 spin_lock_bh(&brmctx->br->multicast_lock);
4532 brmctx->multicast_mld_version = val;
4533 spin_unlock_bh(&brmctx->br->multicast_lock);
4534
4535 return 0;
4536 }
4537 #endif
4538
/* Set the general query interval (given in clock_t units), clamping it
 * to the enforced minimum and logging when the clamp kicks in.
 */
void br_multicast_set_query_intvl(struct net_bridge_mcast *brmctx,
				  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_QUERY_INTVL_MIN;
	}

	brmctx->multicast_query_interval = intvl_jiffies;
}
4554
/* Set the startup query interval (given in clock_t units), clamping it
 * to the enforced minimum and logging when the clamp kicks in.
 */
void br_multicast_set_startup_query_intvl(struct net_bridge_mcast *brmctx,
					  unsigned long val)
{
	unsigned long intvl_jiffies = clock_t_to_jiffies(val);

	if (intvl_jiffies < BR_MULTICAST_STARTUP_QUERY_INTVL_MIN) {
		br_info(brmctx->br,
			"trying to set multicast startup query interval below minimum, setting to %lu (%ums)\n",
			jiffies_to_clock_t(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN),
			jiffies_to_msecs(BR_MULTICAST_STARTUP_QUERY_INTVL_MIN));
		intvl_jiffies = BR_MULTICAST_STARTUP_QUERY_INTVL_MIN;
	}

	brmctx->multicast_startup_query_interval = intvl_jiffies;
}
4570
/**
 * br_multicast_list_adjacent - Returns snooped multicast addresses
 * @dev: The bridge port adjacent to which to retrieve addresses
 * @br_ip_list: The list to store found, snooped multicast IP addresses in
 *
 * Creates a list of IP addresses (struct br_ip_list) sensed by the multicast
 * snooping feature on all bridge ports of dev's bridge device, excluding
 * the addresses from dev itself.
 *
 * Returns the number of items added to br_ip_list.
 *
 * Notes:
 * - br_ip_list needs to be initialized by caller
 * - br_ip_list might contain duplicates in the end
 *   (needs to be taken care of by caller)
 * - br_ip_list needs to be freed by caller
 */
int br_multicast_list_adjacent(struct net_device *dev,
			       struct list_head *br_ip_list)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct net_bridge_port_group *group;
	struct br_ip_list *entry;
	int count = 0;

	rcu_read_lock();
	if (!br_ip_list || !netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	list_for_each_entry_rcu(port, &br->port_list, list) {
		/* skip the port we were asked about */
		if (!port->dev || port->dev == dev)
			continue;

		hlist_for_each_entry_rcu(group, &port->mglist, mglist) {
			entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
			if (!entry)
				/* allocation failure: return the partial list
				 * collected so far
				 */
				goto unlock;

			entry->addr = group->key.addr;
			list_add(&entry->list, br_ip_list);
			count++;
		}
	}

unlock:
	rcu_read_unlock();
	return count;
}
EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
4627
/**
 * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
 * @dev: The bridge port providing the bridge on which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a valid querier exists anywhere on the bridged link layer.
 * Otherwise returns false.
 */
bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
{
	struct net_bridge *br;
	struct net_bridge_port *port;
	struct ethhdr eth;
	bool ret = false;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;

	/* build a dummy ethernet header carrying only the protocol so
	 * br_multicast_querier_exists() can pick the right family
	 */
	memset(&eth, 0, sizeof(eth));
	eth.h_proto = htons(proto);

	ret = br_multicast_querier_exists(&br->multicast_ctx, &eth, NULL);

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
4664
/**
 * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
 * @dev: The bridge port adjacent to which to check for a querier
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a selected querier is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast *brmctx;
	struct net_bridge *br;
	struct net_bridge_port *port;
	bool ret = false;
	int port_ifidx;

	rcu_read_lock();
	if (!netif_is_bridge_port(dev))
		goto unlock;

	port = br_port_get_rcu(dev);
	if (!port || !port->br)
		goto unlock;

	br = port->br;
	brmctx = &br->multicast_ctx;

	/* a querier counts only if its timer is running and it was not
	 * seen on the port we were asked about
	 */
	switch (proto) {
	case ETH_P_IP:
		port_ifidx = brmctx->ip4_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip4_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		port_ifidx = brmctx->ip6_querier.port_ifidx;
		if (!timer_pending(&brmctx->ip6_other_query.timer) ||
		    port_ifidx == port->dev->ifindex)
			goto unlock;
		break;
#endif
	default:
		goto unlock;
	}

	ret = true;
unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_querier_adjacent);
4718
/**
 * br_multicast_has_router_adjacent - Checks for a router behind a bridge port
 * @dev: The bridge port adjacent to which to check for a multicast router
 * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
 *
 * Checks whether the given interface has a bridge on top and if so returns
 * true if a multicast router is behind one of the other ports of this
 * bridge. Otherwise returns false.
 */
bool br_multicast_has_router_adjacent(struct net_device *dev, int proto)
{
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mcast *brmctx;
	struct net_bridge_port *port;
	bool ret = false;

	rcu_read_lock();
	port = br_port_get_check_rcu(dev);
	if (!port)
		goto unlock;

	brmctx = &port->br->multicast_ctx;
	switch (proto) {
	case ETH_P_IP:
		/* any router entry on a port other than @dev counts */
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip4_mc_router_list,
					 ip4_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case ETH_P_IPV6:
		hlist_for_each_entry_rcu(pmctx, &brmctx->ip6_mc_router_list,
					 ip6_rlist) {
			if (pmctx->port == port)
				continue;

			ret = true;
			goto unlock;
		}
		break;
#endif
	default:
		/* when compiled without IPv6 support, be conservative and
		 * always assume presence of an IPv6 multicast router
		 */
		ret = true;
	}

unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(br_multicast_has_router_adjacent);
4776
/* Account one IGMP/MLD packet of @type in direction @dir into the
 * per-cpu statistics. Query packets are further classified by version
 * based on their transport payload length (and, for IGMP, the code
 * field: a fixed-size query with code 0 is v1, otherwise v2).
 */
static void br_mcast_stats_add(struct bridge_mcast_stats __percpu *stats,
			       const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats *pstats = this_cpu_ptr(stats);
	__be16 proto = skb->protocol;
	unsigned int t_len;

	u64_stats_update_begin(&pstats->syncp);
	switch (proto) {
	case htons(ETH_P_IP):
		/* transport payload length = total length - IP header */
		t_len = ntohs(ip_hdr(skb)->tot_len) - ip_hdrlen(skb);
		switch (type) {
		case IGMP_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v1reports[dir]++;
			break;
		case IGMPV2_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v2reports[dir]++;
			break;
		case IGMPV3_HOST_MEMBERSHIP_REPORT:
			pstats->mstats.igmp_v3reports[dir]++;
			break;
		case IGMP_HOST_MEMBERSHIP_QUERY:
			/* v3 queries are larger than the fixed igmphdr */
			if (t_len != sizeof(struct igmphdr)) {
				pstats->mstats.igmp_v3queries[dir]++;
			} else {
				unsigned int offset = skb_transport_offset(skb);
				struct igmphdr *ih, _ihdr;

				ih = skb_header_pointer(skb, offset,
							sizeof(_ihdr), &_ihdr);
				if (!ih)
					break;
				if (!ih->code)
					pstats->mstats.igmp_v1queries[dir]++;
				else
					pstats->mstats.igmp_v2queries[dir]++;
			}
			break;
		case IGMP_HOST_LEAVE_MESSAGE:
			pstats->mstats.igmp_leaves[dir]++;
			break;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		/* payload after any extension headers */
		t_len = ntohs(ipv6_hdr(skb)->payload_len) +
			sizeof(struct ipv6hdr);
		t_len -= skb_network_header_len(skb);
		switch (type) {
		case ICMPV6_MGM_REPORT:
			pstats->mstats.mld_v1reports[dir]++;
			break;
		case ICMPV6_MLD2_REPORT:
			pstats->mstats.mld_v2reports[dir]++;
			break;
		case ICMPV6_MGM_QUERY:
			/* MLDv2 queries are larger than the fixed mld_msg */
			if (t_len != sizeof(struct mld_msg))
				pstats->mstats.mld_v2queries[dir]++;
			else
				pstats->mstats.mld_v1queries[dir]++;
			break;
		case ICMPV6_MGM_REDUCTION:
			pstats->mstats.mld_leaves[dir]++;
			break;
		}
		break;
#endif /* CONFIG_IPV6 */
	}
	u64_stats_update_end(&pstats->syncp);
}
4847
/* Account one IGMP/MLD packet against the right counter block.
 *
 * @p:    source/destination port, or NULL for the bridge device itself
 * @type: IGMP/ICMPv6 message type; zero means "not a multicast control
 *        packet" (it can't be set when multicast is disabled)
 * @dir:  BR_MCAST_DIR_RX or BR_MCAST_DIR_TX
 */
void br_multicast_count(struct net_bridge *br,
			const struct net_bridge_port *p,
			const struct sk_buff *skb, u8 type, u8 dir)
{
	struct bridge_mcast_stats __percpu *stats;

	if (!type)
		return;
	if (!br_opt_get(br, BROPT_MULTICAST_STATS_ENABLED))
		return;

	/* per-port counters when a port is given, bridge-global otherwise */
	stats = p ? p->mcast_stats : br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	br_mcast_stats_add(stats, skb, type, dir);
}
4867
br_multicast_init_stats(struct net_bridge * br)4868 int br_multicast_init_stats(struct net_bridge *br)
4869 {
4870 br->mcast_stats = netdev_alloc_pcpu_stats(struct bridge_mcast_stats);
4871 if (!br->mcast_stats)
4872 return -ENOMEM;
4873
4874 return 0;
4875 }
4876
/* Release the per-cpu statistics allocated by br_multicast_init_stats(). */
void br_multicast_uninit_stats(struct net_bridge *br)
{
	free_percpu(br->mcast_stats);
}
4881
4882 /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */
mcast_stats_add_dir(u64 * dst,u64 * src)4883 static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src)
4884 {
4885 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX];
4886 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
4887 }
4888
/* Sum the multicast statistics of @p (or of the bridge itself when @p
 * is NULL) over all possible CPUs into @dest.  @dest is zeroed first,
 * so it is all-zero if the stats pointer is unexpectedly missing.
 */
void br_multicast_get_stats(const struct net_bridge *br,
			    const struct net_bridge_port *p,
			    struct br_mcast_stats *dest)
{
	struct bridge_mcast_stats __percpu *stats;
	struct br_mcast_stats tdst;
	int i;

	memset(dest, 0, sizeof(*dest));
	if (p)
		stats = p->mcast_stats;
	else
		stats = br->mcast_stats;
	if (WARN_ON(!stats))
		return;

	/* accumulate into a local struct and copy out once at the end */
	memset(&tdst, 0, sizeof(tdst));
	for_each_possible_cpu(i) {
		struct bridge_mcast_stats *cpu_stats = per_cpu_ptr(stats, i);
		struct br_mcast_stats temp;
		unsigned int start;

		/* snapshot this CPU's counters; retry if a writer was
		 * concurrently inside its u64_stats update section
		 */
		do {
			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
			memcpy(&temp, &cpu_stats->mstats, sizeof(temp));
		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));

		mcast_stats_add_dir(tdst.igmp_v1queries, temp.igmp_v1queries);
		mcast_stats_add_dir(tdst.igmp_v2queries, temp.igmp_v2queries);
		mcast_stats_add_dir(tdst.igmp_v3queries, temp.igmp_v3queries);
		mcast_stats_add_dir(tdst.igmp_leaves, temp.igmp_leaves);
		mcast_stats_add_dir(tdst.igmp_v1reports, temp.igmp_v1reports);
		mcast_stats_add_dir(tdst.igmp_v2reports, temp.igmp_v2reports);
		mcast_stats_add_dir(tdst.igmp_v3reports, temp.igmp_v3reports);
		tdst.igmp_parse_errors += temp.igmp_parse_errors;

		mcast_stats_add_dir(tdst.mld_v1queries, temp.mld_v1queries);
		mcast_stats_add_dir(tdst.mld_v2queries, temp.mld_v2queries);
		mcast_stats_add_dir(tdst.mld_leaves, temp.mld_leaves);
		mcast_stats_add_dir(tdst.mld_v1reports, temp.mld_v1reports);
		mcast_stats_add_dir(tdst.mld_v2reports, temp.mld_v2reports);
		tdst.mld_parse_errors += temp.mld_parse_errors;
	}
	memcpy(dest, &tdst, sizeof(*dest));
}
4934
br_mdb_hash_init(struct net_bridge * br)4935 int br_mdb_hash_init(struct net_bridge *br)
4936 {
4937 int err;
4938
4939 err = rhashtable_init(&br->sg_port_tbl, &br_sg_port_rht_params);
4940 if (err)
4941 return err;
4942
4943 err = rhashtable_init(&br->mdb_hash_tbl, &br_mdb_rht_params);
4944 if (err) {
4945 rhashtable_destroy(&br->sg_port_tbl);
4946 return err;
4947 }
4948
4949 return 0;
4950 }
4951
/* Tear down both multicast hash tables created by br_mdb_hash_init(). */
void br_mdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->sg_port_tbl);
	rhashtable_destroy(&br->mdb_hash_tbl);
}
4957