1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2014-2020  B.A.T.M.A.N. contributors:
3  *
4  * Linus Lüssing
5  */
6 
7 #include "multicast.h"
8 #include "main.h"
9 
10 #include <linux/atomic.h>
11 #include <linux/bitops.h>
12 #include <linux/bug.h>
13 #include <linux/byteorder/generic.h>
14 #include <linux/errno.h>
15 #include <linux/etherdevice.h>
16 #include <linux/gfp.h>
17 #include <linux/icmpv6.h>
18 #include <linux/if_bridge.h>
19 #include <linux/if_ether.h>
20 #include <linux/igmp.h>
21 #include <linux/in.h>
22 #include <linux/in6.h>
23 #include <linux/inetdevice.h>
24 #include <linux/ip.h>
25 #include <linux/ipv6.h>
26 #include <linux/jiffies.h>
27 #include <linux/kernel.h>
28 #include <linux/kref.h>
29 #include <linux/list.h>
30 #include <linux/lockdep.h>
31 #include <linux/netdevice.h>
32 #include <linux/netlink.h>
33 #include <linux/printk.h>
34 #include <linux/rculist.h>
35 #include <linux/rcupdate.h>
36 #include <linux/seq_file.h>
37 #include <linux/skbuff.h>
38 #include <linux/slab.h>
39 #include <linux/spinlock.h>
40 #include <linux/stddef.h>
41 #include <linux/string.h>
42 #include <linux/types.h>
43 #include <linux/workqueue.h>
44 #include <net/addrconf.h>
45 #include <net/genetlink.h>
46 #include <net/if_inet6.h>
47 #include <net/ip.h>
48 #include <net/ipv6.h>
49 #include <net/netlink.h>
50 #include <net/sock.h>
51 #include <uapi/linux/batadv_packet.h>
52 #include <uapi/linux/batman_adv.h>
53 
54 #include "bridge_loop_avoidance.h"
55 #include "hard-interface.h"
56 #include "hash.h"
57 #include "log.h"
58 #include "netlink.h"
59 #include "send.h"
60 #include "soft-interface.h"
61 #include "translation-table.h"
62 #include "tvlv.h"
63 
64 static void batadv_mcast_mla_update(struct work_struct *work);
65 
66 /**
67  * batadv_mcast_start_timer() - schedule the multicast periodic worker
68  * @bat_priv: the bat priv with all the soft interface information
69  */
70 static void batadv_mcast_start_timer(struct batadv_priv *bat_priv)
71 {
72 	queue_delayed_work(batadv_event_workqueue, &bat_priv->mcast.work,
73 			   msecs_to_jiffies(BATADV_MCAST_WORK_PERIOD));
74 }
75 
76 /**
77  * batadv_mcast_get_bridge() - get the bridge on top of the softif if it exists
78  * @soft_iface: netdev struct of the mesh interface
79  *
80  * If the given soft interface has a bridge on top then the refcount
81  * of the corresponding net device is increased.
82  *
83  * Return: NULL if no such bridge exists. Otherwise the net device of the
84  * bridge.
85  */
86 static struct net_device *batadv_mcast_get_bridge(struct net_device *soft_iface)
87 {
88 	struct net_device *upper = soft_iface;
89 
90 	rcu_read_lock();
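	/* walk up the chain of master devices until a bridge (IFF_EBRIDGE)
	 * or the top of the device stack is reached
	 */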
91 	do {
92 		upper = netdev_master_upper_dev_get_rcu(upper);
93 	} while (upper && !(upper->priv_flags & IFF_EBRIDGE));
94 
95 	if (upper)
96 		dev_hold(upper);
97 	rcu_read_unlock();
98 
99 	return upper;
100 }
101 
102 /**
103  * batadv_mcast_mla_rtr_flags_softif_get_ipv4() - get mcast router flags from
104  *  node for IPv4
105  * @dev: the interface to check
106  *
107  * Checks the presence of an IPv4 multicast router on this node.
108  *
109  * Caller needs to hold rcu read lock.
110  *
111  * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR4 otherwise.
112  */
113 static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv4(struct net_device *dev)
114 {
115 	struct in_device *in_dev = __in_dev_get_rcu(dev);
116 
117 	if (in_dev && IN_DEV_MFORWARD(in_dev))
118 		return BATADV_NO_FLAGS;
119 	else
120 		return BATADV_MCAST_WANT_NO_RTR4;
121 }
122 
123 /**
124  * batadv_mcast_mla_rtr_flags_softif_get_ipv6() - get mcast router flags from
125  *  node for IPv6
126  * @dev: the interface to check
127  *
128  * Checks the presence of an IPv6 multicast router on this node.
129  *
130  * Caller needs to hold rcu read lock.
131  *
132  * Return: BATADV_NO_FLAGS if present, BATADV_MCAST_WANT_NO_RTR6 otherwise.
133  */
134 #if IS_ENABLED(CONFIG_IPV6_MROUTE)
135 static u8 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
136 {
137 	struct inet6_dev *in6_dev = __in6_dev_get(dev);
138 
139 	if (in6_dev && atomic_read(&in6_dev->cnf.mc_forwarding))
140 		return BATADV_NO_FLAGS;
141 	else
142 		return BATADV_MCAST_WANT_NO_RTR6;
143 }
144 #else
145 static inline u8
146 batadv_mcast_mla_rtr_flags_softif_get_ipv6(struct net_device *dev)
147 {
148 	return BATADV_MCAST_WANT_NO_RTR6;
149 }
150 #endif
151 
152 /**
153  * batadv_mcast_mla_rtr_flags_softif_get() - get mcast router flags from node
154  * @bat_priv: the bat priv with all the soft interface information
155  * @bridge: bridge interface on top of the soft_iface if present,
156  *  otherwise pass NULL
157  *
158  * Checks the presence of IPv4 and IPv6 multicast routers on this
159  * node.
160  *
161  * Return:
162  *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
163  *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
164  *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
165  *	The former two OR'd: no multicast router is present
166  */
167 static u8 batadv_mcast_mla_rtr_flags_softif_get(struct batadv_priv *bat_priv,
168 						struct net_device *bridge)
169 {
170 	struct net_device *dev = bridge ? bridge : bat_priv->soft_iface;
171 	u8 flags = BATADV_NO_FLAGS;
172 
173 	rcu_read_lock();
174 
175 	flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv4(dev);
176 	flags |= batadv_mcast_mla_rtr_flags_softif_get_ipv6(dev);
177 
178 	rcu_read_unlock();
179 
180 	return flags;
181 }
182 
183 /**
184  * batadv_mcast_mla_rtr_flags_bridge_get() - get mcast router flags from bridge
185  * @bat_priv: the bat priv with all the soft interface information
186  * @bridge: bridge interface on top of the soft_iface if present,
187  *  otherwise pass NULL
188  *
189  * Checks the presence of IPv4 and IPv6 multicast routers behind a bridge.
190  *
191  * Return:
192  *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
193  *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
194  *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
195  *	The former two OR'd: no multicast router is present
196  */
197 #if IS_ENABLED(CONFIG_IPV6)
198 static u8 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
199 						struct net_device *bridge)
200 {
201 	struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
202 	struct net_device *dev = bat_priv->soft_iface;
203 	struct br_ip_list *br_ip_entry, *tmp;
204 	u8 flags = BATADV_MCAST_WANT_NO_RTR6;
205 	int ret;
206 
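	/* flags starts with only WANT_NO_RTR6 set: bridge snooping cannot
	 * detect IPv4 multicast routers (see below), so an IPv4 router
	 * behind the bridge always has to be assumed
	 */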
207 	if (!bridge)
208 		return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
209 
210 	/* TODO: ask the bridge if a multicast router is present (the bridge
211 	 * is capable of performing proper RFC4286 multicast router
212 	 * discovery) instead of searching for a ff02::2 listener here
213 	 */
214 	ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
215 	if (ret < 0)
216 		return BATADV_NO_FLAGS;
217 
218 	list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
219 		/* the bridge snooping does not maintain IPv4 link-local
220 		 * addresses - therefore we won't find any IPv4 multicast router
221 		 * address here, only IPv6 ones
222 		 */
223 		if (br_ip_entry->addr.proto == htons(ETH_P_IPV6) &&
224 		    ipv6_addr_is_ll_all_routers(&br_ip_entry->addr.dst.ip6))
225 			flags &= ~BATADV_MCAST_WANT_NO_RTR6;
226 
227 		list_del(&br_ip_entry->list);
228 		kfree(br_ip_entry);
229 	}
230 
231 	return flags;
232 }
233 #else
234 static inline u8
235 batadv_mcast_mla_rtr_flags_bridge_get(struct batadv_priv *bat_priv,
236 				      struct net_device *bridge)
237 {
238 	if (bridge)
239 		return BATADV_NO_FLAGS;
240 	else
241 		return BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
242 }
243 #endif
244 
245 /**
246  * batadv_mcast_mla_rtr_flags_get() - get multicast router flags
247  * @bat_priv: the bat priv with all the soft interface information
248  * @bridge: bridge interface on top of the soft_iface if present,
249  *  otherwise pass NULL
250  *
251  * Checks the presence of IPv4 and IPv6 multicast routers on this
252  * node or behind its bridge.
253  *
254  * Return:
255  *	BATADV_NO_FLAGS: Both an IPv4 and an IPv6 multicast router are present
256  *	BATADV_MCAST_WANT_NO_RTR4: No IPv4 multicast router is present
257  *	BATADV_MCAST_WANT_NO_RTR6: No IPv6 multicast router is present
258  *	The former two OR'd: no multicast router is present
259  */
260 static u8 batadv_mcast_mla_rtr_flags_get(struct batadv_priv *bat_priv,
261 					 struct net_device *bridge)
262 {
263 	u8 flags = BATADV_MCAST_WANT_NO_RTR4 | BATADV_MCAST_WANT_NO_RTR6;
264 
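	/* combine by AND: a WANT_NO_RTR flag only survives if both the check
	 * on this node and the check towards the bridge ruled out a matching
	 * multicast router
	 */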
265 	flags &= batadv_mcast_mla_rtr_flags_softif_get(bat_priv, bridge);
266 	flags &= batadv_mcast_mla_rtr_flags_bridge_get(bat_priv, bridge);
267 
268 	return flags;
269 }
270 
271 /**
272  * batadv_mcast_mla_flags_get() - get the new multicast flags
273  * @bat_priv: the bat priv with all the soft interface information
274  *
275  * Return: A set of flags for the current/next TVLV, querier and
276  * bridge state.
277  */
278 static struct batadv_mcast_mla_flags
279 batadv_mcast_mla_flags_get(struct batadv_priv *bat_priv)
280 {
281 	struct net_device *dev = bat_priv->soft_iface;
282 	struct batadv_mcast_querier_state *qr4, *qr6;
283 	struct batadv_mcast_mla_flags mla_flags;
284 	struct net_device *bridge;
285 
286 	bridge = batadv_mcast_get_bridge(dev);
287 
288 	memset(&mla_flags, 0, sizeof(mla_flags));
289 	mla_flags.enabled = 1;
290 	mla_flags.tvlv_flags |= batadv_mcast_mla_rtr_flags_get(bat_priv,
291 							       bridge);
292 
293 	if (!bridge)
294 		return mla_flags;
295 
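	/* only the bridge's existence matters from here on; the querier
	 * checks below operate on the soft interface (the bridge port), so
	 * the bridge reference can be dropped already
	 */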
296 	dev_put(bridge);
297 
298 	mla_flags.bridged = 1;
299 	qr4 = &mla_flags.querier_ipv4;
300 	qr6 = &mla_flags.querier_ipv6;
301 
302 	if (!IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING))
303 		pr_warn_once("No bridge IGMP snooping compiled - multicast optimizations disabled\n");
304 
305 	qr4->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IP);
306 	qr4->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IP);
307 
308 	qr6->exists = br_multicast_has_querier_anywhere(dev, ETH_P_IPV6);
309 	qr6->shadowing = br_multicast_has_querier_adjacent(dev, ETH_P_IPV6);
310 
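	/* link-local listeners behind the bridge are not snoopable
	 * (RFC4541), so always request all unsnoopable multicast traffic
	 * from the other nodes when bridged
	 */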
311 	mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_UNSNOOPABLES;
312 
313 	/* 1) If no querier exists at all, then multicast listeners on
314 	 *    our local TT clients behind the bridge will keep silent.
315 	 * 2) If the selected querier is on one of our local TT clients,
316 	 *    behind the bridge, then this querier might shadow multicast
317 	 *    listeners on our local TT clients, behind this bridge.
318 	 *
319 	 * In both cases, we will signal to other batman nodes that
320 	 * we need all multicast traffic of the corresponding protocol.
321 	 */
322 	if (!qr4->exists || qr4->shadowing) {
323 		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV4;
324 		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR4;
325 	}
326 
327 	if (!qr6->exists || qr6->shadowing) {
328 		mla_flags.tvlv_flags |= BATADV_MCAST_WANT_ALL_IPV6;
329 		mla_flags.tvlv_flags &= ~BATADV_MCAST_WANT_NO_RTR6;
330 	}
331 
332 	return mla_flags;
333 }
334 
335 /**
336  * batadv_mcast_mla_is_duplicate() - check whether an address is in a list
337  * @mcast_addr: the multicast address to check
338  * @mcast_list: the list with multicast addresses to search in
339  *
340  * Return: true if the given address is already in the given list.
341  * Otherwise returns false.
342  */
343 static bool batadv_mcast_mla_is_duplicate(u8 *mcast_addr,
344 					  struct hlist_head *mcast_list)
345 {
346 	struct batadv_hw_addr *mcast_entry;
347 
348 	hlist_for_each_entry(mcast_entry, mcast_list, list)
349 		if (batadv_compare_eth(mcast_entry->addr, mcast_addr))
350 			return true;
351 
352 	return false;
353 }
354 
355 /**
356  * batadv_mcast_mla_softif_get_ipv4() - get softif IPv4 multicast listeners
357  * @dev: the device to collect multicast addresses from
358  * @mcast_list: a list to put found addresses into
359  * @flags: flags indicating the new multicast state
360  *
361  * Collects multicast addresses of IPv4 multicast listeners residing
362  * on this kernel on the given soft interface, dev, in
363  * the given mcast_list. In general, multicast listeners provided by
364  * your multicast receiving applications run directly on this node.
365  *
366  * Return: -ENOMEM on memory allocation error or the number of
367  * items added to the mcast_list otherwise.
368  */
369 static int
370 batadv_mcast_mla_softif_get_ipv4(struct net_device *dev,
371 				 struct hlist_head *mcast_list,
372 				 struct batadv_mcast_mla_flags *flags)
373 {
374 	struct batadv_hw_addr *new;
375 	struct in_device *in_dev;
376 	u8 mcast_addr[ETH_ALEN];
377 	struct ip_mc_list *pmc;
378 	int ret = 0;
379 
380 	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
381 		return 0;
382 
383 	rcu_read_lock();
384 
385 	in_dev = __in_dev_get_rcu(dev);
386 	if (!in_dev) {
387 		rcu_read_unlock();
388 		return 0;
389 	}
390 
391 	for (pmc = rcu_dereference(in_dev->mc_list); pmc;
392 	     pmc = rcu_dereference(pmc->next_rcu)) {
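		/* skip groups already covered by a coarser TVLV flag:
		 * link-local groups when WANT_ALL_UNSNOOPABLES is announced
		 * and routable groups while a multicast router is present
		 * (WANT_NO_RTR4 unset)
		 */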
393 		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
394 		    ipv4_is_local_multicast(pmc->multiaddr))
395 			continue;
396 
397 		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
398 		    !ipv4_is_local_multicast(pmc->multiaddr))
399 			continue;
400 
401 		ip_eth_mc_map(pmc->multiaddr, mcast_addr);
402 
403 		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
404 			continue;
405 
406 		new = kmalloc(sizeof(*new), GFP_ATOMIC);
407 		if (!new) {
408 			ret = -ENOMEM;
409 			break;
410 		}
411 
412 		ether_addr_copy(new->addr, mcast_addr);
413 		hlist_add_head(&new->list, mcast_list);
414 		ret++;
415 	}
416 	rcu_read_unlock();
417 
418 	return ret;
419 }
420 
421 /**
422  * batadv_mcast_mla_softif_get_ipv6() - get softif IPv6 multicast listeners
423  * @dev: the device to collect multicast addresses from
424  * @mcast_list: a list to put found addresses into
425  * @flags: flags indicating the new multicast state
426  *
427  * Collects multicast addresses of IPv6 multicast listeners residing
428  * on this kernel on the given soft interface, dev, in
429  * the given mcast_list. In general, multicast listeners provided by
430  * your multicast receiving applications run directly on this node.
431  *
432  * Return: -ENOMEM on memory allocation error or the number of
433  * items added to the mcast_list otherwise.
434  */
435 #if IS_ENABLED(CONFIG_IPV6)
436 static int
437 batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
438 				 struct hlist_head *mcast_list,
439 				 struct batadv_mcast_mla_flags *flags)
440 {
441 	struct batadv_hw_addr *new;
442 	struct inet6_dev *in6_dev;
443 	u8 mcast_addr[ETH_ALEN];
444 	struct ifmcaddr6 *pmc6;
445 	int ret = 0;
446 
447 	if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
448 		return 0;
449 
450 	rcu_read_lock();
451 
452 	in6_dev = __in6_dev_get(dev);
453 	if (!in6_dev) {
454 		rcu_read_unlock();
455 		return 0;
456 	}
457 
458 	read_lock_bh(&in6_dev->lock);
459 	for (pmc6 = in6_dev->mc_list; pmc6; pmc6 = pmc6->next) {
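		/* skip groups with a scope below link-local as well as
		 * groups already covered by a coarser TVLV flag: ff02::1
		 * when WANT_ALL_UNSNOOPABLES is announced and routable
		 * groups while a multicast router is present
		 * (WANT_NO_RTR6 unset)
		 */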
460 		if (IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) <
461 		    IPV6_ADDR_SCOPE_LINKLOCAL)
462 			continue;
463 
464 		if (flags->tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
465 		    ipv6_addr_is_ll_all_nodes(&pmc6->mca_addr))
466 			continue;
467 
468 		if (!(flags->tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
469 		    IPV6_ADDR_MC_SCOPE(&pmc6->mca_addr) >
470 		    IPV6_ADDR_SCOPE_LINKLOCAL)
471 			continue;
472 
473 		ipv6_eth_mc_map(&pmc6->mca_addr, mcast_addr);
474 
475 		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
476 			continue;
477 
478 		new = kmalloc(sizeof(*new), GFP_ATOMIC);
479 		if (!new) {
480 			ret = -ENOMEM;
481 			break;
482 		}
483 
484 		ether_addr_copy(new->addr, mcast_addr);
485 		hlist_add_head(&new->list, mcast_list);
486 		ret++;
487 	}
488 	read_unlock_bh(&in6_dev->lock);
489 	rcu_read_unlock();
490 
491 	return ret;
492 }
493 #else
494 static inline int
495 batadv_mcast_mla_softif_get_ipv6(struct net_device *dev,
496 				 struct hlist_head *mcast_list,
497 				 struct batadv_mcast_mla_flags *flags)
498 {
499 	return 0;
500 }
501 #endif
502 
503 /**
504  * batadv_mcast_mla_softif_get() - get softif multicast listeners
505  * @dev: the device to collect multicast addresses from
506  * @mcast_list: a list to put found addresses into
507  * @flags: flags indicating the new multicast state
508  *
509  * Collects multicast addresses of multicast listeners residing
510  * on this kernel on the given soft interface, dev, in
511  * the given mcast_list. In general, multicast listeners provided by
512  * your multicast receiving applications run directly on this node.
513  *
514  * If there is a bridge interface on top of dev, collect from that one
515  * instead. Just like with IP addresses and routes, multicast listeners
516  * will(/should) register to the bridge interface instead of an
517  * enslaved bat0.
518  *
519  * Return: -ENOMEM on memory allocation error or the number of
520  * items added to the mcast_list otherwise.
521  */
522 static int
523 batadv_mcast_mla_softif_get(struct net_device *dev,
524 			    struct hlist_head *mcast_list,
525 			    struct batadv_mcast_mla_flags *flags)
526 {
527 	struct net_device *bridge = batadv_mcast_get_bridge(dev);
528 	int ret4, ret6 = 0;
529 
530 	if (bridge)
531 		dev = bridge;
532 
533 	ret4 = batadv_mcast_mla_softif_get_ipv4(dev, mcast_list, flags);
534 	if (ret4 < 0)
535 		goto out;
536 
537 	ret6 = batadv_mcast_mla_softif_get_ipv6(dev, mcast_list, flags);
538 	if (ret6 < 0) {
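		/* return only the IPv6 error code; IPv4 entries already
		 * collected stay on mcast_list and are freed by the caller
		 */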
539 		ret4 = 0;
540 		goto out;
541 	}
542 
543 out:
544 	if (bridge)
545 		dev_put(bridge);
546 
547 	return ret4 + ret6;
548 }
549 
550 /**
551  * batadv_mcast_mla_br_addr_cpy() - copy a bridge multicast address
552  * @dst: destination to write to - a multicast MAC address
553  * @src: source to read from - a multicast IP address
554  *
555  * Converts a given multicast IPv4/IPv6 address from a bridge
556  * to its matching multicast MAC address and copies it into the given
557  * destination buffer.
558  *
559  * Caller needs to make sure the destination buffer can hold
560  * at least ETH_ALEN bytes.
561  */
562 static void batadv_mcast_mla_br_addr_cpy(char *dst, const struct br_ip *src)
563 {
564 	if (src->proto == htons(ETH_P_IP))
565 		ip_eth_mc_map(src->dst.ip4, dst);
566 #if IS_ENABLED(CONFIG_IPV6)
567 	else if (src->proto == htons(ETH_P_IPV6))
568 		ipv6_eth_mc_map(&src->dst.ip6, dst);
569 #endif
570 	else
571 		eth_zero_addr(dst);
572 }
573 
574 /**
575  * batadv_mcast_mla_bridge_get() - get bridged-in multicast listeners
576  * @dev: a bridge slave whose bridge to collect multicast addresses from
577  * @mcast_list: a list to put found addresses into
578  * @flags: flags indicating the new multicast state
579  *
580  * Collects multicast addresses of multicast listeners residing
581  * on foreign, non-mesh devices which we gave access to our mesh via
582  * a bridge on top of the given soft interface, dev, in the given
583  * mcast_list.
584  *
585  * Return: -ENOMEM on memory allocation error or the number of
586  * items added to the mcast_list otherwise.
587  */
588 static int batadv_mcast_mla_bridge_get(struct net_device *dev,
589 				       struct hlist_head *mcast_list,
590 				       struct batadv_mcast_mla_flags *flags)
591 {
592 	struct list_head bridge_mcast_list = LIST_HEAD_INIT(bridge_mcast_list);
593 	struct br_ip_list *br_ip_entry, *tmp;
594 	u8 tvlv_flags = flags->tvlv_flags;
595 	struct batadv_hw_addr *new;
596 	u8 mcast_addr[ETH_ALEN];
597 	int ret;
598 
599 	/* we don't need to detect these devices/listeners, the IGMP/MLD
600 	 * snooping code of the Linux bridge already does that for us
601 	 */
602 	ret = br_multicast_list_adjacent(dev, &bridge_mcast_list);
603 	if (ret < 0)
604 		goto out;
605 
606 	list_for_each_entry(br_ip_entry, &bridge_mcast_list, list) {
607 		if (br_ip_entry->addr.proto == htons(ETH_P_IP)) {
608 			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV4)
609 				continue;
610 
611 			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
612 			    ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
613 				continue;
614 
615 			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR4) &&
616 			    !ipv4_is_local_multicast(br_ip_entry->addr.dst.ip4))
617 				continue;
618 		}
619 
620 #if IS_ENABLED(CONFIG_IPV6)
621 		if (br_ip_entry->addr.proto == htons(ETH_P_IPV6)) {
622 			if (tvlv_flags & BATADV_MCAST_WANT_ALL_IPV6)
623 				continue;
624 
625 			if (tvlv_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
626 			    ipv6_addr_is_ll_all_nodes(&br_ip_entry->addr.dst.ip6))
627 				continue;
628 
629 			if (!(tvlv_flags & BATADV_MCAST_WANT_NO_RTR6) &&
630 			    IPV6_ADDR_MC_SCOPE(&br_ip_entry->addr.dst.ip6) >
631 			    IPV6_ADDR_SCOPE_LINKLOCAL)
632 				continue;
633 		}
634 #endif
635 
636 		batadv_mcast_mla_br_addr_cpy(mcast_addr, &br_ip_entry->addr);
637 		if (batadv_mcast_mla_is_duplicate(mcast_addr, mcast_list))
638 			continue;
639 
640 		new = kmalloc(sizeof(*new), GFP_ATOMIC);
641 		if (!new) {
642 			ret = -ENOMEM;
643 			break;
644 		}
645 
646 		ether_addr_copy(new->addr, mcast_addr);
647 		hlist_add_head(&new->list, mcast_list);
648 	}
649 
650 out:
651 	list_for_each_entry_safe(br_ip_entry, tmp, &bridge_mcast_list, list) {
652 		list_del(&br_ip_entry->list);
653 		kfree(br_ip_entry);
654 	}
655 
656 	return ret;
657 }
658 
659 /**
660  * batadv_mcast_mla_list_free() - free a list of multicast addresses
661  * @mcast_list: the list to free
662  *
663  * Removes and frees all items in the given mcast_list.
664  */
665 static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
666 {
667 	struct batadv_hw_addr *mcast_entry;
668 	struct hlist_node *tmp;
669 
670 	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
671 		hlist_del(&mcast_entry->list);
672 		kfree(mcast_entry);
673 	}
674 }
675 
676 /**
677  * batadv_mcast_mla_tt_retract() - clean up multicast listener announcements
678  * @bat_priv: the bat priv with all the soft interface information
679  * @mcast_list: a list of addresses which should _not_ be removed
680  *
681  * Retracts the announcement of any multicast listener from the
682  * translation table except the ones listed in the given mcast_list.
683  *
684  * If mcast_list is NULL then all are retracted.
685  */
686 static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
687 					struct hlist_head *mcast_list)
688 {
689 	struct batadv_hw_addr *mcast_entry;
690 	struct hlist_node *tmp;
691 
692 	hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
693 				  list) {
694 		if (mcast_list &&
695 		    batadv_mcast_mla_is_duplicate(mcast_entry->addr,
696 						  mcast_list))
697 			continue;
698 
699 		batadv_tt_local_remove(bat_priv, mcast_entry->addr,
700 				       BATADV_NO_FLAGS,
701 				       "mcast TT outdated", false);
702 
703 		hlist_del(&mcast_entry->list);
704 		kfree(mcast_entry);
705 	}
706 }
707 
708 /**
709  * batadv_mcast_mla_tt_add() - add multicast listener announcements
710  * @bat_priv: the bat priv with all the soft interface information
711  * @mcast_list: a list of addresses which are going to get added
712  *
713  * Adds multicast listener announcements from the given mcast_list to the
714  * translation table if they have not been added yet.
715  */
716 static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
717 				    struct hlist_head *mcast_list)
718 {
719 	struct batadv_hw_addr *mcast_entry;
720 	struct hlist_node *tmp;
721 
722 	if (!mcast_list)
723 		return;
724 
725 	hlist_for_each_entry_safe(mcast_entry, tmp, mcast_list, list) {
726 		if (batadv_mcast_mla_is_duplicate(mcast_entry->addr,
727 						  &bat_priv->mcast.mla_list))
728 			continue;
729 
730 		if (!batadv_tt_local_add(bat_priv->soft_iface,
731 					 mcast_entry->addr, BATADV_NO_FLAGS,
732 					 BATADV_NULL_IFINDEX, BATADV_NO_MARK))
733 			continue;
734 
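		/* move the entry from the temporary mcast_list to the
		 * permanent mla_list so the caller's list cleanup does not
		 * free it
		 */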
735 		hlist_del(&mcast_entry->list);
736 		hlist_add_head(&mcast_entry->list, &bat_priv->mcast.mla_list);
737 	}
738 }
739 
740 /**
741  * batadv_mcast_querier_log() - debug output regarding the querier status on
742  *  link
743  * @bat_priv: the bat priv with all the soft interface information
744  * @str_proto: a string for the querier protocol (e.g. "IGMP" or "MLD")
745  * @old_state: the previous querier state on our link
746  * @new_state: the new querier state on our link
747  *
748  * Outputs debug messages to the logging facility with log level 'mcast'
749  * regarding changes to the querier status on the link which are relevant
750  * to our multicast optimizations.
751  *
752  * Usually this is about whether a querier appeared or vanished in
753  * our mesh or whether the querier is in the suboptimal position of being
754  * behind our local bridge segment: Snooping switches will directly
755  * forward listener reports to the querier, therefore batman-adv and
756  * the bridge will potentially not see these listeners - the querier is
757  * potentially shadowing listeners from us then.
758  *
759  * This is only interesting for nodes with a bridge on top of their
760  * soft interface.
761  */
762 static void
763 batadv_mcast_querier_log(struct batadv_priv *bat_priv, char *str_proto,
764 			 struct batadv_mcast_querier_state *old_state,
765 			 struct batadv_mcast_querier_state *new_state)
766 {
767 	if (!old_state->exists && new_state->exists)
768 		batadv_info(bat_priv->soft_iface, "%s Querier appeared\n",
769 			    str_proto);
770 	else if (old_state->exists && !new_state->exists)
771 		batadv_info(bat_priv->soft_iface,
772 			    "%s Querier disappeared - multicast optimizations disabled\n",
773 			    str_proto);
774 	else if (!bat_priv->mcast.mla_flags.bridged && !new_state->exists)
775 		batadv_info(bat_priv->soft_iface,
776 			    "No %s Querier present - multicast optimizations disabled\n",
777 			    str_proto);
778 
779 	if (new_state->exists) {
780 		if ((!old_state->shadowing && new_state->shadowing) ||
781 		    (!old_state->exists && new_state->shadowing))
782 			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
783 				   "%s Querier is behind our bridged segment: Might shadow listeners\n",
784 				   str_proto);
785 		else if (old_state->shadowing && !new_state->shadowing)
786 			batadv_dbg(BATADV_DBG_MCAST, bat_priv,
787 				   "%s Querier is not behind our bridged segment\n",
788 				   str_proto);
789 	}
790 }
791 
792 /**
793  * batadv_mcast_bridge_log() - debug output for topology changes in bridged
794  *  setups
795  * @bat_priv: the bat priv with all the soft interface information
796  * @new_flags: flags indicating the new multicast state
797  *
798  * If no bridges are ever used on this node, then this function does nothing.
799  *
800  * Otherwise this function outputs debug information to the 'mcast' log level
801  * which might be relevant to our multicast optimizations.
802  *
803  * More precisely, it outputs information when a bridge interface is added or
804  * removed from a soft interface. And when a bridge is present, it further
805  * outputs information about the querier state which is relevant for the
806  * multicast flags this node is going to set.
807  */
808 static void
809 batadv_mcast_bridge_log(struct batadv_priv *bat_priv,
810 			struct batadv_mcast_mla_flags *new_flags)
811 {
812 	struct batadv_mcast_mla_flags *old_flags = &bat_priv->mcast.mla_flags;
813 
814 	if (!old_flags->bridged && new_flags->bridged)
815 		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
816 			   "Bridge added: Setting Unsnoopables(U)-flag\n");
817 	else if (old_flags->bridged && !new_flags->bridged)
818 		batadv_dbg(BATADV_DBG_MCAST, bat_priv,
819 			   "Bridge removed: Unsetting Unsnoopables(U)-flag\n");
820 
821 	if (new_flags->bridged) {
822 		batadv_mcast_querier_log(bat_priv, "IGMP",
823 					 &old_flags->querier_ipv4,
824 					 &new_flags->querier_ipv4);
825 		batadv_mcast_querier_log(bat_priv, "MLD",
826 					 &old_flags->querier_ipv6,
827 					 &new_flags->querier_ipv6);
828 	}
829 }
830 
831 /**
832  * batadv_mcast_flags_log() - output debug information about mcast flag changes
833  * @bat_priv: the bat priv with all the soft interface information
834  * @flags: TVLV flags indicating the new multicast state
835  *
836  * Whenever the multicast TVLV flags this node announces change, this function
837  * should be used to notify userspace about the change.
838  */
839 static void batadv_mcast_flags_log(struct batadv_priv *bat_priv, u8 flags)
840 {
841 	bool old_enabled = bat_priv->mcast.mla_flags.enabled;
842 	u8 old_flags = bat_priv->mcast.mla_flags.tvlv_flags;
843 	char str_old_flags[] = "[.... . ]";
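	/* the placeholder above is sized to hold the longest possible
	 * expansion of the flags format used below
	 */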
844 
845 	sprintf(str_old_flags, "[%c%c%c%s%s]",
846 		(old_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
847 		(old_flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
848 		(old_flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
849 		!(old_flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
850 		!(old_flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
851 
852 	batadv_dbg(BATADV_DBG_MCAST, bat_priv,
853 		   "Changing multicast flags from '%s' to '[%c%c%c%s%s]'\n",
854 		   old_enabled ? str_old_flags : "<undefined>",
855 		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
856 		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
857 		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
858 		   !(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
859 		   !(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
860 }
861 
862 /**
863  * batadv_mcast_mla_flags_update() - update multicast flags
864  * @bat_priv: the bat priv with all the soft interface information
865  * @flags: flags indicating the new multicast state
866  *
867  * Updates the own multicast tvlv with our current multicast related settings,
868  * capabilities and inabilities.
869  */
870 static void
871 batadv_mcast_mla_flags_update(struct batadv_priv *bat_priv,
872 			      struct batadv_mcast_mla_flags *flags)
873 {
874 	struct batadv_tvlv_mcast_data mcast_data;
875 
876 	if (!memcmp(flags, &bat_priv->mcast.mla_flags, sizeof(*flags)))
877 		return;
878 
879 	batadv_mcast_bridge_log(bat_priv, flags);
880 	batadv_mcast_flags_log(bat_priv, flags->tvlv_flags);
881 
882 	mcast_data.flags = flags->tvlv_flags;
883 	memset(mcast_data.reserved, 0, sizeof(mcast_data.reserved));
884 
885 	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_MCAST, 2,
886 				       &mcast_data, sizeof(mcast_data));
887 
888 	bat_priv->mcast.mla_flags = *flags;
889 }
890 
891 /**
892  * __batadv_mcast_mla_update() - update the own MLAs
893  * @bat_priv: the bat priv with all the soft interface information
894  *
895  * Updates the own multicast listener announcements in the translation
896  * table as well as the own, announced multicast tvlv container.
897  *
898  * Note that non-conflicting reads and writes to bat_priv->mcast.mla_list
899  * in batadv_mcast_mla_tt_retract() and batadv_mcast_mla_tt_add() are
900  * ensured by the non-parallel execution of the worker this function
901  * belongs to.
902  */
903 static void __batadv_mcast_mla_update(struct batadv_priv *bat_priv)
904 {
905 	struct net_device *soft_iface = bat_priv->soft_iface;
906 	struct hlist_head mcast_list = HLIST_HEAD_INIT;
907 	struct batadv_mcast_mla_flags flags;
908 	int ret;
909 
910 	flags = batadv_mcast_mla_flags_get(bat_priv);
911 
912 	ret = batadv_mcast_mla_softif_get(soft_iface, &mcast_list, &flags);
913 	if (ret < 0)
914 		goto out;
915 
916 	ret = batadv_mcast_mla_bridge_get(soft_iface, &mcast_list, &flags);
917 	if (ret < 0)
918 		goto out;
919 
920 	spin_lock(&bat_priv->mcast.mla_lock);
921 	batadv_mcast_mla_tt_retract(bat_priv, &mcast_list);
922 	batadv_mcast_mla_tt_add(bat_priv, &mcast_list);
923 	batadv_mcast_mla_flags_update(bat_priv, &flags);
924 	spin_unlock(&bat_priv->mcast.mla_lock);
925 
926 out:
927 	batadv_mcast_mla_list_free(&mcast_list);
928 }
929 
930 /**
931  * batadv_mcast_mla_update() - update the own MLAs
932  * @work: kernel work struct
933  *
934  * Updates the own multicast listener announcements in the translation
935  * table as well as the own, announced multicast tvlv container.
936  *
937  * In the end, reschedules the work timer.
938  */
939 static void batadv_mcast_mla_update(struct work_struct *work)
940 {
941 	struct delayed_work *delayed_work;
942 	struct batadv_priv_mcast *priv_mcast;
943 	struct batadv_priv *bat_priv;
944 
945 	delayed_work = to_delayed_work(work);
946 	priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
947 	bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);
948 
949 	__batadv_mcast_mla_update(bat_priv);
950 	batadv_mcast_start_timer(bat_priv);
951 }
952 
953 /**
954  * batadv_mcast_is_report_ipv4() - check for IGMP reports
955  * @skb: the ethernet frame destined for the mesh
956  *
957  * This call might reallocate skb data.
958  *
959  * Checks whether the given frame is a valid IGMP report.
960  *
961  * Return: If so then true, otherwise false.
962  */
963 static bool batadv_mcast_is_report_ipv4(struct sk_buff *skb)
964 {
965 	if (ip_mc_check_igmp(skb) < 0)
966 		return false;
967 
968 	switch (igmp_hdr(skb)->type) {
969 	case IGMP_HOST_MEMBERSHIP_REPORT:
970 	case IGMPV2_HOST_MEMBERSHIP_REPORT:
971 	case IGMPV3_HOST_MEMBERSHIP_REPORT:
972 		return true;
973 	}
974 
975 	return false;
976 }
977 
978 /**
979  * batadv_mcast_forw_mode_check_ipv4() - check for optimized forwarding
980  *  potential
981  * @bat_priv: the bat priv with all the soft interface information
982  * @skb: the IPv4 packet to check
983  * @is_unsnoopable: stores whether the destination is snoopable
984  * @is_routable: stores whether the destination is routable
985  *
986  * Checks whether the given IPv4 packet has the potential to be forwarded with a
987  * mode more optimal than classic flooding.
988  *
989  * Return: If so then 0. Otherwise -EINVAL or -ENOMEM in case of memory
990  * allocation failure.
991  */
992 static int batadv_mcast_forw_mode_check_ipv4(struct batadv_priv *bat_priv,
993 					     struct sk_buff *skb,
994 					     bool *is_unsnoopable,
995 					     int *is_routable)
996 {
997 	struct iphdr *iphdr;
998 
999 	/* We might fail due to out-of-memory -> drop it */
1000 	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*iphdr)))
1001 		return -ENOMEM;
1002 
1003 	if (batadv_mcast_is_report_ipv4(skb))
1004 		return -EINVAL;
1005 
1006 	iphdr = ip_hdr(skb);
1007 
1008 	/* link-local multicast listeners behind a bridge are
1009 	 * not snoopable (see RFC4541, section 2.1.2.2)
1010 	 */
1011 	if (ipv4_is_local_multicast(iphdr->daddr))
1012 		*is_unsnoopable = true;
1013 	else
1014 		*is_routable = ETH_P_IP;
1015 
1016 	return 0;
1017 }
1018 
1019 /**
1020  * batadv_mcast_is_report_ipv6() - check for MLD reports
1021  * @skb: the ethernet frame destined for the mesh
1022  *
1023  * This call might reallocate skb data.
1024  *
1025  * Checks whether the given frame is a valid MLD report.
1026  *
1027  * Return: If so then true, otherwise false.
1028  */
1029 static bool batadv_mcast_is_report_ipv6(struct sk_buff *skb)
1030 {
1031 	if (ipv6_mc_check_mld(skb) < 0)
1032 		return false;
1033 
1034 	switch (icmp6_hdr(skb)->icmp6_type) {
1035 	case ICMPV6_MGM_REPORT:
1036 	case ICMPV6_MLD2_REPORT:
1037 		return true;
1038 	}
1039 
1040 	return false;
1041 }
1042 
1043 /**
1044  * batadv_mcast_forw_mode_check_ipv6() - check for optimized forwarding
1045  *  potential
1046  * @bat_priv: the bat priv with all the soft interface information
1047  * @skb: the IPv6 packet to check
1048  * @is_unsnoopable: stores whether the destination is snoopable
1049  * @is_routable: stores whether the destination is routable
1050  *
1051  * Checks whether the given IPv6 packet has the potential to be forwarded with a
1052  * mode more optimal than classic flooding.
1053  *
1054  * Return: If so then 0. Otherwise -EINVAL or -ENOMEM if we are out of memory
1055  */
1056 static int batadv_mcast_forw_mode_check_ipv6(struct batadv_priv *bat_priv,
1057 					     struct sk_buff *skb,
1058 					     bool *is_unsnoopable,
1059 					     int *is_routable)
1060 {
1061 	struct ipv6hdr *ip6hdr;
1062 
1063 	/* We might fail due to out-of-memory -> drop it */
1064 	if (!pskb_may_pull(skb, sizeof(struct ethhdr) + sizeof(*ip6hdr)))
1065 		return -ENOMEM;
1066 
1067 	if (batadv_mcast_is_report_ipv6(skb))
1068 		return -EINVAL;
1069 
1070 	ip6hdr = ipv6_hdr(skb);
1071 
1072 	if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) < IPV6_ADDR_SCOPE_LINKLOCAL)
1073 		return -EINVAL;
1074 
1075 	/* link-local-all-nodes multicast listeners behind a bridge are
1076 	 * not snoopable (see RFC4541, section 3, paragraph 3)
1077 	 */
1078 	if (ipv6_addr_is_ll_all_nodes(&ip6hdr->daddr))
1079 		*is_unsnoopable = true;
1080 	else if (IPV6_ADDR_MC_SCOPE(&ip6hdr->daddr) > IPV6_ADDR_SCOPE_LINKLOCAL)
1081 		*is_routable = ETH_P_IPV6;
1082 
1083 	return 0;
1084 }
1085 
1086 /**
1087  * batadv_mcast_forw_mode_check() - check for optimized forwarding potential
1088  * @bat_priv: the bat priv with all the soft interface information
1089  * @skb: the multicast frame to check
1090  * @is_unsnoopable: stores whether the destination is snoopable
1091  * @is_routable: stores whether the destination is routable
1092  *
1093  * Checks whether the given multicast ethernet frame has the potential to be
1094  * forwarded with a mode more optimal than classic flooding.
1095  *
1096  * Return: If so then 0. Otherwise -EINVAL or -ENOMEM if we are out of memory
1097  */
1098 static int batadv_mcast_forw_mode_check(struct batadv_priv *bat_priv,
1099 					struct sk_buff *skb,
1100 					bool *is_unsnoopable,
1101 					int *is_routable)
1102 {
1103 	struct ethhdr *ethhdr = eth_hdr(skb);
1104 
1105 	if (!atomic_read(&bat_priv->multicast_mode))
1106 		return -EINVAL;
1107 
1108 	switch (ntohs(ethhdr->h_proto)) {
1109 	case ETH_P_IP:
1110 		return batadv_mcast_forw_mode_check_ipv4(bat_priv, skb,
1111 							 is_unsnoopable,
1112 							 is_routable);
1113 	case ETH_P_IPV6:
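		/* without IPv6 support no MLD listener state is tracked -
		 * let the caller fall back to classic flooding
		 */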
1114 		if (!IS_ENABLED(CONFIG_IPV6))
1115 			return -EINVAL;
1116 
1117 		return batadv_mcast_forw_mode_check_ipv6(bat_priv, skb,
1118 							 is_unsnoopable,
1119 							 is_routable);
1120 	default:
1121 		return -EINVAL;
1122 	}
1123 }
1124 
1125 /**
1126  * batadv_mcast_forw_want_all_ip_count() - count nodes with unspecific mcast
1127  *  interest
1128  * @bat_priv: the bat priv with all the soft interface information
1129  * @ethhdr: ethernet header of a packet
1130  *
1131  * Return: the number of nodes which want all IPv4 multicast traffic if the
1132  * given ethhdr is from an IPv4 packet or the number of nodes which want all
1133  * IPv6 traffic if it matches an IPv6 packet.
1134  */
1135 static int batadv_mcast_forw_want_all_ip_count(struct batadv_priv *bat_priv,
1136 					       struct ethhdr *ethhdr)
1137 {
1138 	switch (ntohs(ethhdr->h_proto)) {
1139 	case ETH_P_IP:
1140 		return atomic_read(&bat_priv->mcast.num_want_all_ipv4);
1141 	case ETH_P_IPV6:
1142 		return atomic_read(&bat_priv->mcast.num_want_all_ipv6);
1143 	default:
1144 		/* we shouldn't be here... */
1145 		return 0;
1146 	}
1147 }
1148 
1149 /**
1150  * batadv_mcast_forw_rtr_count() - count nodes with a multicast router
1151  * @bat_priv: the bat priv with all the soft interface information
1152  * @protocol: the ethernet protocol type to count multicast routers for
1153  *
1154  * Return: the number of nodes which want all routable IPv4 multicast traffic
1155  * if the protocol is ETH_P_IP or the number of nodes which want all routable
1156  * IPv6 traffic if the protocol is ETH_P_IPV6. Otherwise returns 0.
1157  */
1158 
1159 static int batadv_mcast_forw_rtr_count(struct batadv_priv *bat_priv,
1160 				       int protocol)
1161 {
1162 	switch (protocol) {
1163 	case ETH_P_IP:
1164 		return atomic_read(&bat_priv->mcast.num_want_all_rtr4);
1165 	case ETH_P_IPV6:
1166 		return atomic_read(&bat_priv->mcast.num_want_all_rtr6);
1167 	default:
1168 		return 0;
1169 	}
1170 }
1171 
1172 /**
1173  * batadv_mcast_forw_tt_node_get() - get a multicast tt node
1174  * @bat_priv: the bat priv with all the soft interface information
1175  * @ethhdr: the ether header containing the multicast destination
1176  *
1177  * Return: an orig_node matching the multicast address provided by ethhdr
1178  * via a translation table lookup. This increases the returned nodes refcount.
1179  */
1180 static struct batadv_orig_node *
1181 batadv_mcast_forw_tt_node_get(struct batadv_priv *bat_priv,
1182 			      struct ethhdr *ethhdr)
1183 {
1184 	return batadv_transtable_search(bat_priv, NULL, ethhdr->h_dest,
1185 					BATADV_NO_FLAGS);
1186 }
1187 
1188 /**
1189  * batadv_mcast_forw_ipv4_node_get() - get a node with an ipv4 flag
1190  * @bat_priv: the bat priv with all the soft interface information
1191  *
1192  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 flag set and
1193  * increases its refcount.
1194  */
1195 static struct batadv_orig_node *
1196 batadv_mcast_forw_ipv4_node_get(struct batadv_priv *bat_priv)
1197 {
1198 	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
1199 
1200 	rcu_read_lock();
1201 	hlist_for_each_entry_rcu(tmp_orig_node,
1202 				 &bat_priv->mcast.want_all_ipv4_list,
1203 				 mcast_want_all_ipv4_node) {
1204 		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
1205 			continue;
1206 
1207 		orig_node = tmp_orig_node;
1208 		break;
1209 	}
1210 	rcu_read_unlock();
1211 
1212 	return orig_node;
1213 }
1214 
1215 /**
1216  * batadv_mcast_forw_ipv6_node_get() - get a node with an ipv6 flag
1217  * @bat_priv: the bat priv with all the soft interface information
1218  *
1219  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV6 flag set
1220  * and increases its refcount.
1221  */
1222 static struct batadv_orig_node *
1223 batadv_mcast_forw_ipv6_node_get(struct batadv_priv *bat_priv)
1224 {
1225 	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
1226 
1227 	rcu_read_lock();
1228 	hlist_for_each_entry_rcu(tmp_orig_node,
1229 				 &bat_priv->mcast.want_all_ipv6_list,
1230 				 mcast_want_all_ipv6_node) {
1231 		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
1232 			continue;
1233 
1234 		orig_node = tmp_orig_node;
1235 		break;
1236 	}
1237 	rcu_read_unlock();
1238 
1239 	return orig_node;
1240 }
1241 
1242 /**
1243  * batadv_mcast_forw_ip_node_get() - get a node with an ipv4/ipv6 flag
1244  * @bat_priv: the bat priv with all the soft interface information
1245  * @ethhdr: an ethernet header to determine the protocol family from
1246  *
1247  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_IPV4 or
1248  * BATADV_MCAST_WANT_ALL_IPV6 flag, depending on the provided ethhdr, set and
1249  * increases its refcount.
1250  */
1251 static struct batadv_orig_node *
1252 batadv_mcast_forw_ip_node_get(struct batadv_priv *bat_priv,
1253 			      struct ethhdr *ethhdr)
1254 {
1255 	switch (ntohs(ethhdr->h_proto)) {
1256 	case ETH_P_IP:
1257 		return batadv_mcast_forw_ipv4_node_get(bat_priv);
1258 	case ETH_P_IPV6:
1259 		return batadv_mcast_forw_ipv6_node_get(bat_priv);
1260 	default:
1261 		/* we shouldn't be here... */
1262 		return NULL;
1263 	}
1264 }
1265 
1266 /**
1267  * batadv_mcast_forw_unsnoop_node_get() - get a node with an unsnoopable flag
1268  * @bat_priv: the bat priv with all the soft interface information
1269  *
1270  * Return: an orig_node which has the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag
1271  * set and increases its refcount.
1272  */
1273 static struct batadv_orig_node *
1274 batadv_mcast_forw_unsnoop_node_get(struct batadv_priv *bat_priv)
1275 {
1276 	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
1277 
1278 	rcu_read_lock();
1279 	hlist_for_each_entry_rcu(tmp_orig_node,
1280 				 &bat_priv->mcast.want_all_unsnoopables_list,
1281 				 mcast_want_all_unsnoopables_node) {
1282 		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
1283 			continue;
1284 
1285 		orig_node = tmp_orig_node;
1286 		break;
1287 	}
1288 	rcu_read_unlock();
1289 
1290 	return orig_node;
1291 }
1292 
1293 /**
1294  * batadv_mcast_forw_rtr4_node_get() - get a node with an ipv4 mcast router flag
1295  * @bat_priv: the bat priv with all the soft interface information
1296  *
1297  * Return: an orig_node which has the BATADV_MCAST_WANT_NO_RTR4 flag unset and
1298  * increases its refcount.
1299  */
1300 static struct batadv_orig_node *
1301 batadv_mcast_forw_rtr4_node_get(struct batadv_priv *bat_priv)
1302 {
1303 	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
1304 
1305 	rcu_read_lock();
1306 	hlist_for_each_entry_rcu(tmp_orig_node,
1307 				 &bat_priv->mcast.want_all_rtr4_list,
1308 				 mcast_want_all_rtr4_node) {
1309 		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
1310 			continue;
1311 
1312 		orig_node = tmp_orig_node;
1313 		break;
1314 	}
1315 	rcu_read_unlock();
1316 
1317 	return orig_node;
1318 }
1319 
1320 /**
1321  * batadv_mcast_forw_rtr6_node_get() - get a node with an ipv6 mcast router flag
1322  * @bat_priv: the bat priv with all the soft interface information
1323  *
1324  * Return: an orig_node which has the BATADV_MCAST_WANT_NO_RTR6 flag unset
1325  * and increases its refcount.
1326  */
1327 static struct batadv_orig_node *
1328 batadv_mcast_forw_rtr6_node_get(struct batadv_priv *bat_priv)
1329 {
1330 	struct batadv_orig_node *tmp_orig_node, *orig_node = NULL;
1331 
1332 	rcu_read_lock();
1333 	hlist_for_each_entry_rcu(tmp_orig_node,
1334 				 &bat_priv->mcast.want_all_rtr6_list,
1335 				 mcast_want_all_rtr6_node) {
1336 		if (!kref_get_unless_zero(&tmp_orig_node->refcount))
1337 			continue;
1338 
1339 		orig_node = tmp_orig_node;
1340 		break;
1341 	}
1342 	rcu_read_unlock();
1343 
1344 	return orig_node;
1345 }
1346 
1347 /**
1348  * batadv_mcast_forw_rtr_node_get() - get a node with an ipv4/ipv6 router flag
1349  * @bat_priv: the bat priv with all the soft interface information
1350  * @ethhdr: an ethernet header to determine the protocol family from
1351  *
1352  * Return: an orig_node which has no BATADV_MCAST_WANT_NO_RTR4 or
1353  * BATADV_MCAST_WANT_NO_RTR6 flag, depending on the provided ethhdr, set and
1354  * increases its refcount.
1355  */
1356 static struct batadv_orig_node *
1357 batadv_mcast_forw_rtr_node_get(struct batadv_priv *bat_priv,
1358 			       struct ethhdr *ethhdr)
1359 {
1360 	switch (ntohs(ethhdr->h_proto)) {
1361 	case ETH_P_IP:
1362 		return batadv_mcast_forw_rtr4_node_get(bat_priv);
1363 	case ETH_P_IPV6:
1364 		return batadv_mcast_forw_rtr6_node_get(bat_priv);
1365 	default:
1366 		/* we shouldn't be here... */
1367 		return NULL;
1368 	}
1369 }
1370 
1371 /**
1372  * batadv_mcast_forw_mode() - check on how to forward a multicast packet
1373  * @bat_priv: the bat priv with all the soft interface information
1374  * @skb: The multicast packet to check
1375  * @orig: an originator to be set to forward the skb to
1376  * @is_routable: stores whether the destination is routable
1377  *
1378  * Return: the forwarding mode as enum batadv_forw_mode and in case of
1379  * BATADV_FORW_SINGLE set the orig to the single originator the skb
1380  * should be forwarded to.
1381  */
1382 enum batadv_forw_mode
1383 batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb,
1384 		       struct batadv_orig_node **orig, int *is_routable)
1385 {
1386 	int ret, tt_count, ip_count, unsnoop_count, total_count;
1387 	bool is_unsnoopable = false;
1388 	unsigned int mcast_fanout;
1389 	struct ethhdr *ethhdr;
1390 	int rtr_count = 0;
1391 
1392 	ret = batadv_mcast_forw_mode_check(bat_priv, skb, &is_unsnoopable,
1393 					   is_routable);
1394 	if (ret == -ENOMEM)
1395 		return BATADV_FORW_NONE;
1396 	else if (ret < 0)
1397 		return BATADV_FORW_ALL;
1398 
1399 	ethhdr = eth_hdr(skb);
1400 
1401 	tt_count = batadv_tt_global_hash_count(bat_priv, ethhdr->h_dest,
1402 					       BATADV_NO_FLAGS);
1403 	ip_count = batadv_mcast_forw_want_all_ip_count(bat_priv, ethhdr);
1404 	unsnoop_count = !is_unsnoopable ? 0 :
1405 			atomic_read(&bat_priv->mcast.num_want_all_unsnoopables);
1406 	rtr_count = batadv_mcast_forw_rtr_count(bat_priv, *is_routable);
1407 
1408 	total_count = tt_count + ip_count + unsnoop_count + rtr_count;
1409 
1410 	switch (total_count) {
1411 	case 1:
1412 		if (tt_count)
1413 			*orig = batadv_mcast_forw_tt_node_get(bat_priv, ethhdr);
1414 		else if (ip_count)
1415 			*orig = batadv_mcast_forw_ip_node_get(bat_priv, ethhdr);
1416 		else if (unsnoop_count)
1417 			*orig = batadv_mcast_forw_unsnoop_node_get(bat_priv);
1418 		else if (rtr_count)
1419 			*orig = batadv_mcast_forw_rtr_node_get(bat_priv,
1420 							       ethhdr);
1421 
1422 		if (*orig)
1423 			return BATADV_FORW_SINGLE;
1424 
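		/* getting the orig_node may fail, e.g. if it vanished in the
		 * meantime - then behave like the no-destination case
		 */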
1425 		fallthrough;
1426 	case 0:
1427 		return BATADV_FORW_NONE;
1428 	default:
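		/* multiple destinations: send individual unicast copies only
		 * if nothing requires classic flooding (no unsnoopable
		 * listeners) and the count stays within the configured
		 * multicast_fanout
		 */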
1429 		mcast_fanout = atomic_read(&bat_priv->multicast_fanout);
1430 
1431 		if (!unsnoop_count && total_count <= mcast_fanout)
1432 			return BATADV_FORW_SOME;
1433 	}
1434 
1435 	return BATADV_FORW_ALL;
1436 }
1437 
1438 /**
1439  * batadv_mcast_forw_send_orig() - send a multicast packet to an originator
1440  * @bat_priv: the bat priv with all the soft interface information
1441  * @skb: the multicast packet to send
1442  * @vid: the vlan identifier
1443  * @orig_node: the originator to send the packet to
1444  *
1445  * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
1446  */
1447 int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv,
1448 				struct sk_buff *skb,
1449 				unsigned short vid,
1450 				struct batadv_orig_node *orig_node)
1451 {
1452 	/* Avoid sending multicast-in-unicast packets to other BLA
1453 	 * gateways - they already got the frame from the LAN side
1454 	 * we share with them.
1455 	 * TODO: Refactor to take BLA into account earlier, to avoid
1456 	 * reducing the mcast_fanout count.
1457 	 */
1458 	if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) {
1459 		dev_kfree_skb(skb);
1460 		return NET_XMIT_SUCCESS;
1461 	}
1462 
1463 	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
1464 				       orig_node, vid);
1465 }
1466 
1467 /**
1468  * batadv_mcast_forw_tt() - forwards a packet to multicast listeners
1469  * @bat_priv: the bat priv with all the soft interface information
1470  * @skb: the multicast packet to transmit
1471  * @vid: the vlan identifier
1472  *
1473  * Sends copies of a frame with multicast destination to any multicast
1474  * listener registered in the translation table. A transmission is performed
1475  * via a batman-adv unicast packet for each such destination node.
1476  *
1477  * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1478  * otherwise.
1479  */
1480 static int
1481 batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb,
1482 		     unsigned short vid)
1483 {
1484 	int ret = NET_XMIT_SUCCESS;
1485 	struct sk_buff *newskb;
1486 
1487 	struct batadv_tt_orig_list_entry *orig_entry;
1488 
1489 	struct batadv_tt_global_entry *tt_global;
1490 	const u8 *addr = eth_hdr(skb)->h_dest;
1491 
1492 	tt_global = batadv_tt_global_hash_find(bat_priv, addr, vid);
1493 	if (!tt_global)
1494 		goto out;
1495 
1496 	rcu_read_lock();
1497 	hlist_for_each_entry_rcu(orig_entry, &tt_global->orig_list, list) {
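		/* each destination needs its own copy - the unicast send
		 * path consumes the skb
		 */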
1498 		newskb = skb_copy(skb, GFP_ATOMIC);
1499 		if (!newskb) {
1500 			ret = NET_XMIT_DROP;
1501 			break;
1502 		}
1503 
1504 		batadv_mcast_forw_send_orig(bat_priv, newskb, vid,
1505 					    orig_entry->orig_node);
1506 	}
1507 	rcu_read_unlock();
1508 
1509 	batadv_tt_global_entry_put(tt_global);
1510 
1511 out:
1512 	return ret;
1513 }
1514 
1515 /**
1516  * batadv_mcast_forw_want_all_ipv4() - forward to nodes with want-all-ipv4
1517  * @bat_priv: the bat priv with all the soft interface information
1518  * @skb: the multicast packet to transmit
1519  * @vid: the vlan identifier
1520  *
1521  * Sends copies of a frame with multicast destination to any node with a
1522  * BATADV_MCAST_WANT_ALL_IPV4 flag set. A transmission is performed via a
1523  * batman-adv unicast packet for each such destination node.
1524  *
1525  * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1526  * otherwise.
1527  */
1528 static int
1529 batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv,
1530 				struct sk_buff *skb, unsigned short vid)
1531 {
1532 	struct batadv_orig_node *orig_node;
1533 	int ret = NET_XMIT_SUCCESS;
1534 	struct sk_buff *newskb;
1535 
1536 	rcu_read_lock();
1537 	hlist_for_each_entry_rcu(orig_node,
1538 				 &bat_priv->mcast.want_all_ipv4_list,
1539 				 mcast_want_all_ipv4_node) {
1540 		newskb = skb_copy(skb, GFP_ATOMIC);
1541 		if (!newskb) {
1542 			ret = NET_XMIT_DROP;
1543 			break;
1544 		}
1545 
1546 		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1547 	}
1548 	rcu_read_unlock();
1549 	return ret;
1550 }
1551 
1552 /**
1553  * batadv_mcast_forw_want_all_ipv6() - forward to nodes with want-all-ipv6
1554  * @bat_priv: the bat priv with all the soft interface information
1555  * @skb: The multicast packet to transmit
1556  * @vid: the vlan identifier
1557  *
1558  * Sends copies of a frame with multicast destination to any node with a
1559  * BATADV_MCAST_WANT_ALL_IPV6 flag set. A transmission is performed via a
1560  * batman-adv unicast packet for each such destination node.
1561  *
1562  * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1563  * otherwise.
1564  */
1565 static int
1566 batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv,
1567 				struct sk_buff *skb, unsigned short vid)
1568 {
1569 	struct batadv_orig_node *orig_node;
1570 	int ret = NET_XMIT_SUCCESS;
1571 	struct sk_buff *newskb;
1572 
1573 	rcu_read_lock();
1574 	hlist_for_each_entry_rcu(orig_node,
1575 				 &bat_priv->mcast.want_all_ipv6_list,
1576 				 mcast_want_all_ipv6_node) {
1577 		newskb = skb_copy(skb, GFP_ATOMIC);
1578 		if (!newskb) {
1579 			ret = NET_XMIT_DROP;
1580 			break;
1581 		}
1582 
1583 		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1584 	}
1585 	rcu_read_unlock();
1586 	return ret;
1587 }
1588 
1589 /**
1590  * batadv_mcast_forw_want_all() - forward packet to nodes in a want-all list
1591  * @bat_priv: the bat priv with all the soft interface information
1592  * @skb: the multicast packet to transmit
1593  * @vid: the vlan identifier
1594  *
1595  * Sends copies of a frame with multicast destination to any node with a
1596  * BATADV_MCAST_WANT_ALL_IPV4 or BATADV_MCAST_WANT_ALL_IPV6 flag set. A
1597  * transmission is performed via a batman-adv unicast packet for each such
1598  * destination node.
1599  *
1600  * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1601  * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1602  */
1603 static int
1604 batadv_mcast_forw_want_all(struct batadv_priv *bat_priv,
1605 			   struct sk_buff *skb, unsigned short vid)
1606 {
1607 	switch (ntohs(eth_hdr(skb)->h_proto)) {
1608 	case ETH_P_IP:
1609 		return batadv_mcast_forw_want_all_ipv4(bat_priv, skb, vid);
1610 	case ETH_P_IPV6:
1611 		return batadv_mcast_forw_want_all_ipv6(bat_priv, skb, vid);
1612 	default:
1613 		/* we shouldn't be here... */
1614 		return NET_XMIT_DROP;
1615 	}
1616 }
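
/* Note: batadv_mcast_forw_want_all_ipv4() and batadv_mcast_forw_want_all_ipv6()
 * are structurally identical - walk the respective want-all list under RCU,
 * skb_copy() per originator and hand the copy to batadv_mcast_forw_send_orig().
 * They only differ in the hlist and hlist_node member being traversed; the
 * ethertype switch above merely selects the matching list for the frame.
 */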
1617 
1618 /**
1619  * batadv_mcast_forw_want_all_rtr4() - forward to nodes with want-all-rtr4
1620  * @bat_priv: the bat priv with all the soft interface information
1621  * @skb: the multicast packet to transmit
1622  * @vid: the vlan identifier
1623  *
1624  * Sends copies of a frame with multicast destination to any node with a
1625  * BATADV_MCAST_WANT_NO_RTR4 flag unset. A transmission is performed via a
1626  * batman-adv unicast packet for each such destination node.
1627  *
1628  * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1629  * otherwise.
1630  */
1631 static int
1632 batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv,
1633 				struct sk_buff *skb, unsigned short vid)
1634 {
1635 	struct batadv_orig_node *orig_node;
1636 	int ret = NET_XMIT_SUCCESS;
1637 	struct sk_buff *newskb;
1638 
1639 	rcu_read_lock();
1640 	hlist_for_each_entry_rcu(orig_node,
1641 				 &bat_priv->mcast.want_all_rtr4_list,
1642 				 mcast_want_all_rtr4_node) {
1643 		newskb = skb_copy(skb, GFP_ATOMIC);
1644 		if (!newskb) {
1645 			ret = NET_XMIT_DROP;
1646 			break;
1647 		}
1648 
1649 		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1650 	}
1651 	rcu_read_unlock();
1652 	return ret;
1653 }
1654 
1655 /**
1656  * batadv_mcast_forw_want_all_rtr6() - forward to nodes with want-all-rtr6
1657  * @bat_priv: the bat priv with all the soft interface information
1658  * @skb: The multicast packet to transmit
1659  * @vid: the vlan identifier
1660  *
1661  * Sends copies of a frame with multicast destination to any node with a
1662  * BATADV_MCAST_WANT_NO_RTR6 flag unset. A transmission is performed via a
1663  * batman-adv unicast packet for each such destination node.
1664  *
1665  * Return: NET_XMIT_DROP on memory allocation failure, NET_XMIT_SUCCESS
1666  * otherwise.
1667  */
1668 static int
1669 batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv,
1670 				struct sk_buff *skb, unsigned short vid)
1671 {
1672 	struct batadv_orig_node *orig_node;
1673 	int ret = NET_XMIT_SUCCESS;
1674 	struct sk_buff *newskb;
1675 
1676 	rcu_read_lock();
1677 	hlist_for_each_entry_rcu(orig_node,
1678 				 &bat_priv->mcast.want_all_rtr6_list,
1679 				 mcast_want_all_rtr6_node) {
1680 		newskb = skb_copy(skb, GFP_ATOMIC);
1681 		if (!newskb) {
1682 			ret = NET_XMIT_DROP;
1683 			break;
1684 		}
1685 
1686 		batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node);
1687 	}
1688 	rcu_read_unlock();
1689 	return ret;
1690 }
1691 
1692 /**
1693  * batadv_mcast_forw_want_rtr() - forward packet to nodes in a want-all-rtr list
1694  * @bat_priv: the bat priv with all the soft interface information
1695  * @skb: the multicast packet to transmit
1696  * @vid: the vlan identifier
1697  *
1698  * Sends copies of a frame with multicast destination to any node with a
1699  * BATADV_MCAST_WANT_NO_RTR4 or BATADV_MCAST_WANT_NO_RTR6 flag unset. A
1700  * transmission is performed via a batman-adv unicast packet for each such
1701  * destination node.
1702  *
1703  * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1704  * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1705  */
1706 static int
1707 batadv_mcast_forw_want_rtr(struct batadv_priv *bat_priv,
1708 			   struct sk_buff *skb, unsigned short vid)
1709 {
1710 	switch (ntohs(eth_hdr(skb)->h_proto)) {
1711 	case ETH_P_IP:
1712 		return batadv_mcast_forw_want_all_rtr4(bat_priv, skb, vid);
1713 	case ETH_P_IPV6:
1714 		return batadv_mcast_forw_want_all_rtr6(bat_priv, skb, vid);
1715 	default:
1716 		/* we shouldn't be here... */
1717 		return NET_XMIT_DROP;
1718 	}
1719 }
1720 
1721 /**
1722  * batadv_mcast_forw_send() - send packet to any detected multicast recipient
1723  * @bat_priv: the bat priv with all the soft interface information
1724  * @skb: the multicast packet to transmit
1725  * @vid: the vlan identifier
1726  * @is_routable: stores whether the destination is routable
1727  *
1728  * Sends copies of a frame with multicast destination to any node that signaled
1729  * interest in it, that is, either via the translation table or the corresponding
1730  * want-all flags. A transmission is performed via a batman-adv unicast packet
1731  * for each such destination node.
1732  *
1733  * The given skb is consumed/freed.
1734  *
1735  * Return: NET_XMIT_DROP on memory allocation failure or if the protocol family
1736  * is neither IPv4 nor IPv6. NET_XMIT_SUCCESS otherwise.
1737  */
1738 int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb,
1739 			   unsigned short vid, int is_routable)
1740 {
1741 	int ret;
1742 
1743 	ret = batadv_mcast_forw_tt(bat_priv, skb, vid);
1744 	if (ret != NET_XMIT_SUCCESS) {
1745 		kfree_skb(skb);
1746 		return ret;
1747 	}
1748 
1749 	ret = batadv_mcast_forw_want_all(bat_priv, skb, vid);
1750 	if (ret != NET_XMIT_SUCCESS) {
1751 		kfree_skb(skb);
1752 		return ret;
1753 	}
1754 
1755 	if (!is_routable)
1756 		goto skip_mc_router;
1757 
1758 	ret = batadv_mcast_forw_want_rtr(bat_priv, skb, vid);
1759 	if (ret != NET_XMIT_SUCCESS) {
1760 		kfree_skb(skb);
1761 		return ret;
1762 	}
1763 
1764 skip_mc_router:
1765 	consume_skb(skb);
1766 	return ret;
1767 }
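
/* Illustrative sketch (not part of batman-adv): a minimal example of how a
 * transmit path could hand a multicast frame to batadv_mcast_forw_send().
 * The helper name is hypothetical, and the real soft-interface code first
 * decides whether multicast-to-unicast may be used for the frame at all.
 * Note that the skb is consumed by the call in every case:
 *
 *	static netdev_tx_t my_mcast_xmit(struct batadv_priv *bat_priv,
 *					 struct sk_buff *skb,
 *					 unsigned short vid, int is_routable)
 *	{
 *		int ret;
 *
 *		ret = batadv_mcast_forw_send(bat_priv, skb, vid, is_routable);
 *		if (ret != NET_XMIT_SUCCESS)
 *			pr_debug("mcast-to-unicast transmission failed\n");
 *
 *		return NETDEV_TX_OK;
 *	}
 */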
1768 
1769 /**
1770  * batadv_mcast_want_unsnoop_update() - update unsnoop counter and list
1771  * @bat_priv: the bat priv with all the soft interface information
1772  * @orig: the orig_node whose multicast state might have changed
1773  * @mcast_flags: flags indicating the new multicast state
1774  *
1775  * If the BATADV_MCAST_WANT_ALL_UNSNOOPABLES flag of this originator,
1776  * orig, has toggled then this method updates the counter and the list
1777  * accordingly.
1778  *
1779  * Caller needs to hold orig->mcast_handler_lock.
1780  */
1781 static void batadv_mcast_want_unsnoop_update(struct batadv_priv *bat_priv,
1782 					     struct batadv_orig_node *orig,
1783 					     u8 mcast_flags)
1784 {
1785 	struct hlist_node *node = &orig->mcast_want_all_unsnoopables_node;
1786 	struct hlist_head *head = &bat_priv->mcast.want_all_unsnoopables_list;
1787 
1788 	lockdep_assert_held(&orig->mcast_handler_lock);
1789 
1790 	/* switched from flag unset to set */
1791 	if (mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES &&
1792 	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)) {
1793 		atomic_inc(&bat_priv->mcast.num_want_all_unsnoopables);
1794 
1795 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1796 		/* flag checks above + mcast_handler_lock prevent this */
1797 		WARN_ON(!hlist_unhashed(node));
1798 
1799 		hlist_add_head_rcu(node, head);
1800 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1801 	/* switched from flag set to unset */
1802 	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) &&
1803 		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) {
1804 		atomic_dec(&bat_priv->mcast.num_want_all_unsnoopables);
1805 
1806 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1807 		/* flag checks above + mcast_handler_lock prevent this */
1808 		WARN_ON(hlist_unhashed(node));
1809 
1810 		hlist_del_init_rcu(node);
1811 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1812 	}
1813 }
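
/* Note: this and the four batadv_mcast_want_*_update() helpers below share
 * one scheme: compare the new mcast_flags against orig->mcast_flags (which
 * still holds the previous value at this point) and, on a transition, adjust
 * the matching atomic counter and RCU list under want_lists_lock.
 * batadv_mcast_tvlv_ogm_handler() writes the new value to orig->mcast_flags
 * only after all helpers have run.
 */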
1814 
1815 /**
1816  * batadv_mcast_want_ipv4_update() - update want-all-ipv4 counter and list
1817  * @bat_priv: the bat priv with all the soft interface information
1818  * @orig: the orig_node whose multicast state might have changed
1819  * @mcast_flags: flags indicating the new multicast state
1820  *
1821  * If the BATADV_MCAST_WANT_ALL_IPV4 flag of this originator, orig, has
1822  * toggled then this method updates the counter and the list accordingly.
1823  *
1824  * Caller needs to hold orig->mcast_handler_lock.
1825  */
1826 static void batadv_mcast_want_ipv4_update(struct batadv_priv *bat_priv,
1827 					  struct batadv_orig_node *orig,
1828 					  u8 mcast_flags)
1829 {
1830 	struct hlist_node *node = &orig->mcast_want_all_ipv4_node;
1831 	struct hlist_head *head = &bat_priv->mcast.want_all_ipv4_list;
1832 
1833 	lockdep_assert_held(&orig->mcast_handler_lock);
1834 
1835 	/* switched from flag unset to set */
1836 	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4 &&
1837 	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)) {
1838 		atomic_inc(&bat_priv->mcast.num_want_all_ipv4);
1839 
1840 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1841 		/* flag checks above + mcast_handler_lock prevent this */
1842 		WARN_ON(!hlist_unhashed(node));
1843 
1844 		hlist_add_head_rcu(node, head);
1845 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1846 	/* switched from flag set to unset */
1847 	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) &&
1848 		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV4) {
1849 		atomic_dec(&bat_priv->mcast.num_want_all_ipv4);
1850 
1851 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1852 		/* flag checks above + mcast_handler_lock prevent this */
1853 		WARN_ON(hlist_unhashed(node));
1854 
1855 		hlist_del_init_rcu(node);
1856 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1857 	}
1858 }
1859 
1860 /**
1861  * batadv_mcast_want_ipv6_update() - update want-all-ipv6 counter and list
1862  * @bat_priv: the bat priv with all the soft interface information
1863  * @orig: the orig_node whose multicast state might have changed
1864  * @mcast_flags: flags indicating the new multicast state
1865  *
1866  * If the BATADV_MCAST_WANT_ALL_IPV6 flag of this originator, orig, has
1867  * toggled then this method updates the counter and the list accordingly.
1868  *
1869  * Caller needs to hold orig->mcast_handler_lock.
1870  */
1871 static void batadv_mcast_want_ipv6_update(struct batadv_priv *bat_priv,
1872 					  struct batadv_orig_node *orig,
1873 					  u8 mcast_flags)
1874 {
1875 	struct hlist_node *node = &orig->mcast_want_all_ipv6_node;
1876 	struct hlist_head *head = &bat_priv->mcast.want_all_ipv6_list;
1877 
1878 	lockdep_assert_held(&orig->mcast_handler_lock);
1879 
1880 	/* switched from flag unset to set */
1881 	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6 &&
1882 	    !(orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)) {
1883 		atomic_inc(&bat_priv->mcast.num_want_all_ipv6);
1884 
1885 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1886 		/* flag checks above + mcast_handler_lock prevent this */
1887 		WARN_ON(!hlist_unhashed(node));
1888 
1889 		hlist_add_head_rcu(node, head);
1890 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1891 	/* switched from flag set to unset */
1892 	} else if (!(mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) &&
1893 		   orig->mcast_flags & BATADV_MCAST_WANT_ALL_IPV6) {
1894 		atomic_dec(&bat_priv->mcast.num_want_all_ipv6);
1895 
1896 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1897 		/* flag checks above + mcast_handler_lock prevent this */
1898 		WARN_ON(hlist_unhashed(node));
1899 
1900 		hlist_del_init_rcu(node);
1901 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1902 	}
1903 }
1904 
1905 /**
1906  * batadv_mcast_want_rtr4_update() - update want-all-rtr4 counter and list
1907  * @bat_priv: the bat priv with all the soft interface information
1908  * @orig: the orig_node whose multicast state might have changed
1909  * @mcast_flags: flags indicating the new multicast state
1910  *
1911  * If the BATADV_MCAST_WANT_NO_RTR4 flag of this originator, orig, has
1912  * toggled then this method updates the counter and the list accordingly.
1913  *
1914  * Caller needs to hold orig->mcast_handler_lock.
1915  */
1916 static void batadv_mcast_want_rtr4_update(struct batadv_priv *bat_priv,
1917 					  struct batadv_orig_node *orig,
1918 					  u8 mcast_flags)
1919 {
1920 	struct hlist_node *node = &orig->mcast_want_all_rtr4_node;
1921 	struct hlist_head *head = &bat_priv->mcast.want_all_rtr4_list;
1922 
1923 	lockdep_assert_held(&orig->mcast_handler_lock);
1924 
1925 	/* switched from flag set to unset */
1926 	if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR4) &&
1927 	    orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4) {
1928 		atomic_inc(&bat_priv->mcast.num_want_all_rtr4);
1929 
1930 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1931 		/* flag checks above + mcast_handler_lock prevent this */
1932 		WARN_ON(!hlist_unhashed(node));
1933 
1934 		hlist_add_head_rcu(node, head);
1935 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1936 	/* switched from flag unset to set */
1937 	} else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR4 &&
1938 		   !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR4)) {
1939 		atomic_dec(&bat_priv->mcast.num_want_all_rtr4);
1940 
1941 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1942 		/* flag checks above + mcast_handler_lock prevent this */
1943 		WARN_ON(hlist_unhashed(node));
1944 
1945 		hlist_del_init_rcu(node);
1946 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1947 	}
1948 }
1949 
1950 /**
1951  * batadv_mcast_want_rtr6_update() - update want-all-rtr6 counter and list
1952  * @bat_priv: the bat priv with all the soft interface information
1953  * @orig: the orig_node whose multicast state might have changed
1954  * @mcast_flags: flags indicating the new multicast state
1955  *
1956  * If the BATADV_MCAST_WANT_NO_RTR6 flag of this originator, orig, has
1957  * toggled then this method updates the counter and the list accordingly.
1958  *
1959  * Caller needs to hold orig->mcast_handler_lock.
1960  */
1961 static void batadv_mcast_want_rtr6_update(struct batadv_priv *bat_priv,
1962 					  struct batadv_orig_node *orig,
1963 					  u8 mcast_flags)
1964 {
1965 	struct hlist_node *node = &orig->mcast_want_all_rtr6_node;
1966 	struct hlist_head *head = &bat_priv->mcast.want_all_rtr6_list;
1967 
1968 	lockdep_assert_held(&orig->mcast_handler_lock);
1969 
1970 	/* switched from flag set to unset */
1971 	if (!(mcast_flags & BATADV_MCAST_WANT_NO_RTR6) &&
1972 	    orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6) {
1973 		atomic_inc(&bat_priv->mcast.num_want_all_rtr6);
1974 
1975 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1976 		/* flag checks above + mcast_handler_lock prevent this */
1977 		WARN_ON(!hlist_unhashed(node));
1978 
1979 		hlist_add_head_rcu(node, head);
1980 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1981 	/* switched from flag unset to set */
1982 	} else if (mcast_flags & BATADV_MCAST_WANT_NO_RTR6 &&
1983 		   !(orig->mcast_flags & BATADV_MCAST_WANT_NO_RTR6)) {
1984 		atomic_dec(&bat_priv->mcast.num_want_all_rtr6);
1985 
1986 		spin_lock_bh(&bat_priv->mcast.want_lists_lock);
1987 		/* flag checks above + mcast_handler_lock prevent this */
1988 		WARN_ON(hlist_unhashed(node));
1989 
1990 		hlist_del_init_rcu(node);
1991 		spin_unlock_bh(&bat_priv->mcast.want_lists_lock);
1992 	}
1993 }
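
/* Note: for the two RTR variants the transitions are inverted compared to
 * the want-all helpers above: an originator is put on the want-all-rtr list
 * (and the counter increased) when BATADV_MCAST_WANT_NO_RTR4/6 gets cleared,
 * i.e. when it still wants routable multicast of that protocol family, and
 * removed again once the flag is set.
 */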
1994 
1995 /**
1996  * batadv_mcast_tvlv_flags_get() - get multicast flags from an OGM TVLV
1997  * @enabled: whether the originator has multicast TVLV support enabled
1998  * @tvlv_value: tvlv buffer containing the multicast flags
1999  * @tvlv_value_len: tvlv buffer length
2000  *
2001  * Return: multicast flags for the given tvlv buffer
2002  */
2003 static u8
2004 batadv_mcast_tvlv_flags_get(bool enabled, void *tvlv_value, u16 tvlv_value_len)
2005 {
2006 	u8 mcast_flags = BATADV_NO_FLAGS;
2007 
2008 	if (enabled && tvlv_value && tvlv_value_len >= sizeof(mcast_flags))
2009 		mcast_flags = *(u8 *)tvlv_value;
2010 
2011 	if (!enabled) {
2012 		mcast_flags |= BATADV_MCAST_WANT_ALL_IPV4;
2013 		mcast_flags |= BATADV_MCAST_WANT_ALL_IPV6;
2014 	}
2015 
2016 	/* remove redundant flags to avoid sending duplicate packets later */
2017 	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV4)
2018 		mcast_flags |= BATADV_MCAST_WANT_NO_RTR4;
2019 
2020 	if (mcast_flags & BATADV_MCAST_WANT_ALL_IPV6)
2021 		mcast_flags |= BATADV_MCAST_WANT_NO_RTR6;
2022 
2023 	return mcast_flags;
2024 }
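
/* Worked example: for an originator without multicast TVLV support
 * (enabled == false) the function above yields
 *
 *	BATADV_MCAST_WANT_ALL_IPV4 | BATADV_MCAST_WANT_ALL_IPV6 |
 *	BATADV_MCAST_WANT_NO_RTR4  | BATADV_MCAST_WANT_NO_RTR6
 *
 * so such a node ends up on the want-all lists but not on the want-all-rtr
 * lists - the want-all lists already deliver every multicast packet to it,
 * routable or not, which is exactly the duplication the RTR flags avoid.
 */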
2025 
2026 /**
2027  * batadv_mcast_tvlv_ogm_handler() - process incoming multicast tvlv container
2028  * @bat_priv: the bat priv with all the soft interface information
2029  * @orig: the orig_node of the ogm
2030  * @flags: flags indicating the tvlv state (see batadv_tvlv_handler_flags)
2031  * @tvlv_value: tvlv buffer containing the multicast data
2032  * @tvlv_value_len: tvlv buffer length
2033  */
2034 static void batadv_mcast_tvlv_ogm_handler(struct batadv_priv *bat_priv,
2035 					  struct batadv_orig_node *orig,
2036 					  u8 flags,
2037 					  void *tvlv_value,
2038 					  u16 tvlv_value_len)
2039 {
2040 	bool orig_mcast_enabled = !(flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
2041 	u8 mcast_flags;
2042 
2043 	mcast_flags = batadv_mcast_tvlv_flags_get(orig_mcast_enabled,
2044 						  tvlv_value, tvlv_value_len);
2045 
2046 	spin_lock_bh(&orig->mcast_handler_lock);
2047 
2048 	if (orig_mcast_enabled &&
2049 	    !test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
2050 		set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
2051 	} else if (!orig_mcast_enabled &&
2052 		   test_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities)) {
2053 		clear_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capabilities);
2054 	}
2055 
2056 	set_bit(BATADV_ORIG_CAPA_HAS_MCAST, &orig->capa_initialized);
2057 
2058 	batadv_mcast_want_unsnoop_update(bat_priv, orig, mcast_flags);
2059 	batadv_mcast_want_ipv4_update(bat_priv, orig, mcast_flags);
2060 	batadv_mcast_want_ipv6_update(bat_priv, orig, mcast_flags);
2061 	batadv_mcast_want_rtr4_update(bat_priv, orig, mcast_flags);
2062 	batadv_mcast_want_rtr6_update(bat_priv, orig, mcast_flags);
2063 
2064 	orig->mcast_flags = mcast_flags;
2065 	spin_unlock_bh(&orig->mcast_handler_lock);
2066 }
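
/* Note: capa_initialized records that at least one OGM was evaluated for
 * this originator (with or without a multicast TVLV attached), while the
 * capabilities bit tracks whether the TVLV was actually present. The
 * debugfs and netlink dumps below use the former to skip unknown nodes and
 * the latter to tell "no multicast support" apart from a real flags value.
 */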
2067 
2068 /**
2069  * batadv_mcast_init() - initialize the multicast optimizations structures
2070  * @bat_priv: the bat priv with all the soft interface information
2071  */
2072 void batadv_mcast_init(struct batadv_priv *bat_priv)
2073 {
2074 	batadv_tvlv_handler_register(bat_priv, batadv_mcast_tvlv_ogm_handler,
2075 				     NULL, BATADV_TVLV_MCAST, 2,
2076 				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
2077 
2078 	INIT_DELAYED_WORK(&bat_priv->mcast.work, batadv_mcast_mla_update);
2079 	batadv_mcast_start_timer(bat_priv);
2080 }
2081 
2082 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
2083 /**
2084  * batadv_mcast_flags_print_header() - print own mcast flags to debugfs table
2085  * @bat_priv: the bat priv with all the soft interface information
2086  * @seq: debugfs table seq_file struct
2087  *
2088  * Prints our own multicast flags to the debugfs table specified via
2089  * @seq, together with the bridge and querier state that explains in
2090  * more detail why those flags are set.
2091  */
2092 static void batadv_mcast_flags_print_header(struct batadv_priv *bat_priv,
2093 					    struct seq_file *seq)
2094 {
2095 	struct batadv_mcast_mla_flags *mla_flags = &bat_priv->mcast.mla_flags;
2096 	char querier4, querier6, shadowing4, shadowing6;
2097 	bool bridged = mla_flags->bridged;
2098 	u8 flags = mla_flags->tvlv_flags;
2099 
2100 	if (bridged) {
2101 		querier4 = mla_flags->querier_ipv4.exists ? '.' : '4';
2102 		querier6 = mla_flags->querier_ipv6.exists ? '.' : '6';
2103 		shadowing4 = mla_flags->querier_ipv4.shadowing ? '4' : '.';
2104 		shadowing6 = mla_flags->querier_ipv6.shadowing ? '6' : '.';
2105 	} else {
2106 		querier4 = '?';
2107 		querier6 = '?';
2108 		shadowing4 = '?';
2109 		shadowing6 = '?';
2110 	}
2111 
2112 	seq_printf(seq, "Multicast flags (own flags: [%c%c%c%s%s])\n",
2113 		   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES) ? 'U' : '.',
2114 		   (flags & BATADV_MCAST_WANT_ALL_IPV4) ? '4' : '.',
2115 		   (flags & BATADV_MCAST_WANT_ALL_IPV6) ? '6' : '.',
2116 		   !(flags & BATADV_MCAST_WANT_NO_RTR4) ? "R4" : ". ",
2117 		   !(flags & BATADV_MCAST_WANT_NO_RTR6) ? "R6" : ". ");
2118 	seq_printf(seq, "* Bridged [U]\t\t\t\t%c\n", bridged ? 'U' : '.');
2119 	seq_printf(seq, "* No IGMP/MLD Querier [4/6]:\t\t%c/%c\n",
2120 		   querier4, querier6);
2121 	seq_printf(seq, "* Shadowing IGMP/MLD Querier [4/6]:\t%c/%c\n",
2122 		   shadowing4, shadowing6);
2123 	seq_puts(seq, "-------------------------------------------\n");
2124 	seq_printf(seq, "       %-10s %s\n", "Originator", "Flags");
2125 }
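
/* Legend for the flags column printed here and per originator in
 * batadv_mcast_flags_seq_print_text():
 *	'U'  - wants all unsnoopable multicast traffic
 *	'4'  - wants all IPv4 multicast traffic
 *	'6'  - wants all IPv6 multicast traffic
 *	"R4" - wants routable IPv4 multicast traffic (WANT_NO_RTR4 unset)
 *	"R6" - wants routable IPv6 multicast traffic (WANT_NO_RTR6 unset)
 * A '?' in the querier/shadowing rows means "not applicable" because the
 * soft interface is not bridged.
 */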
2126 
2127 /**
2128  * batadv_mcast_flags_seq_print_text() - print the mcast flags of other nodes
2129  * @seq: seq file to print on
2130  * @offset: not used
2131  *
2132  * This prints a table of (primary) originators and their corresponding
2133  * multicast flags, including (in the header) our own.
2134  *
2135  * Return: always 0
2136  */
2137 int batadv_mcast_flags_seq_print_text(struct seq_file *seq, void *offset)
2138 {
2139 	struct net_device *net_dev = (struct net_device *)seq->private;
2140 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
2141 	struct batadv_hard_iface *primary_if;
2142 	struct batadv_hashtable *hash = bat_priv->orig_hash;
2143 	struct batadv_orig_node *orig_node;
2144 	struct hlist_head *head;
2145 	u8 flags;
2146 	u32 i;
2147 
2148 	primary_if = batadv_seq_print_text_primary_if_get(seq);
2149 	if (!primary_if)
2150 		return 0;
2151 
2152 	batadv_mcast_flags_print_header(bat_priv, seq);
2153 
2154 	for (i = 0; i < hash->size; i++) {
2155 		head = &hash->table[i];
2156 
2157 		rcu_read_lock();
2158 		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
2159 			if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
2160 				      &orig_node->capa_initialized))
2161 				continue;
2162 
2163 			if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
2164 				      &orig_node->capabilities)) {
2165 				seq_printf(seq, "%pM -\n", orig_node->orig);
2166 				continue;
2167 			}
2168 
2169 			flags = orig_node->mcast_flags;
2170 
2171 			seq_printf(seq, "%pM [%c%c%c%s%s]\n", orig_node->orig,
2172 				   (flags & BATADV_MCAST_WANT_ALL_UNSNOOPABLES)
2173 				   ? 'U' : '.',
2174 				   (flags & BATADV_MCAST_WANT_ALL_IPV4)
2175 				   ? '4' : '.',
2176 				   (flags & BATADV_MCAST_WANT_ALL_IPV6)
2177 				   ? '6' : '.',
2178 				   !(flags & BATADV_MCAST_WANT_NO_RTR4)
2179 				   ? "R4" : ". ",
2180 				   !(flags & BATADV_MCAST_WANT_NO_RTR6)
2181 				   ? "R6" : ". ");
2182 		}
2183 		rcu_read_unlock();
2184 	}
2185 
2186 	batadv_hardif_put(primary_if);
2187 
2188 	return 0;
2189 }
2190 #endif
2191 
2192 /**
2193  * batadv_mcast_mesh_info_put() - put multicast info into a netlink message
2194  * @msg: buffer for the message
2195  * @bat_priv: the bat priv with all the soft interface information
2196  *
2197  * Return: 0 or error code.
2198  */
2199 int batadv_mcast_mesh_info_put(struct sk_buff *msg,
2200 			       struct batadv_priv *bat_priv)
2201 {
2202 	u32 flags = bat_priv->mcast.mla_flags.tvlv_flags;
2203 	u32 flags_priv = BATADV_NO_FLAGS;
2204 
2205 	if (bat_priv->mcast.mla_flags.bridged) {
2206 		flags_priv |= BATADV_MCAST_FLAGS_BRIDGED;
2207 
2208 		if (bat_priv->mcast.mla_flags.querier_ipv4.exists)
2209 			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_EXISTS;
2210 		if (bat_priv->mcast.mla_flags.querier_ipv6.exists)
2211 			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_EXISTS;
2212 		if (bat_priv->mcast.mla_flags.querier_ipv4.shadowing)
2213 			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV4_SHADOWING;
2214 		if (bat_priv->mcast.mla_flags.querier_ipv6.shadowing)
2215 			flags_priv |= BATADV_MCAST_FLAGS_QUERIER_IPV6_SHADOWING;
2216 	}
2217 
2218 	if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS, flags) ||
2219 	    nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS_PRIV, flags_priv))
2220 		return -EMSGSIZE;
2221 
2222 	return 0;
2223 }
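
/* Note: BATADV_ATTR_MCAST_FLAGS carries the flags we announce in our own
 * multicast TVLV, while BATADV_ATTR_MCAST_FLAGS_PRIV exports purely local
 * state (bridged, IGMP/MLD querier presence and shadowing) that influences
 * those flags but is not part of the TVLV itself.
 */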
2224 
2225 /**
2226  * batadv_mcast_flags_dump_entry() - dump one entry of the multicast flags table
2227  *  to a netlink socket
2228  * @msg: buffer for the message
2229  * @portid: netlink port
2230  * @cb: Control block containing additional options
2231  * @orig_node: originator to dump the multicast flags of
2232  *
2233  * Return: 0 or error code.
2234  */
2235 static int
2236 batadv_mcast_flags_dump_entry(struct sk_buff *msg, u32 portid,
2237 			      struct netlink_callback *cb,
2238 			      struct batadv_orig_node *orig_node)
2239 {
2240 	void *hdr;
2241 
2242 	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
2243 			  &batadv_netlink_family, NLM_F_MULTI,
2244 			  BATADV_CMD_GET_MCAST_FLAGS);
2245 	if (!hdr)
2246 		return -ENOBUFS;
2247 
2248 	genl_dump_check_consistent(cb, hdr);
2249 
2250 	if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
2251 		    orig_node->orig)) {
2252 		genlmsg_cancel(msg, hdr);
2253 		return -EMSGSIZE;
2254 	}
2255 
2256 	if (test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
2257 		     &orig_node->capabilities)) {
2258 		if (nla_put_u32(msg, BATADV_ATTR_MCAST_FLAGS,
2259 				orig_node->mcast_flags)) {
2260 			genlmsg_cancel(msg, hdr);
2261 			return -EMSGSIZE;
2262 		}
2263 	}
2264 
2265 	genlmsg_end(msg, hdr);
2266 	return 0;
2267 }
2268 
2269 /**
2270  * batadv_mcast_flags_dump_bucket() - dump one bucket of the multicast flags
2271  *  table to a netlink socket
2272  * @msg: buffer for the message
2273  * @portid: netlink port
2274  * @cb: Control block containing additional options
2275  * @hash: hash to dump
2276  * @bucket: bucket index to dump
2277  * @idx_skip: How many entries to skip
2278  *
2279  * Return: 0 or error code.
2280  */
2281 static int
2282 batadv_mcast_flags_dump_bucket(struct sk_buff *msg, u32 portid,
2283 			       struct netlink_callback *cb,
2284 			       struct batadv_hashtable *hash,
2285 			       unsigned int bucket, long *idx_skip)
2286 {
2287 	struct batadv_orig_node *orig_node;
2288 	long idx = 0;
2289 
2290 	spin_lock_bh(&hash->list_locks[bucket]);
2291 	cb->seq = atomic_read(&hash->generation) << 1 | 1;
2292 
2293 	hlist_for_each_entry(orig_node, &hash->table[bucket], hash_entry) {
2294 		if (!test_bit(BATADV_ORIG_CAPA_HAS_MCAST,
2295 			      &orig_node->capa_initialized))
2296 			continue;
2297 
2298 		if (idx < *idx_skip)
2299 			goto skip;
2300 
2301 		if (batadv_mcast_flags_dump_entry(msg, portid, cb, orig_node)) {
2302 			spin_unlock_bh(&hash->list_locks[bucket]);
2303 			*idx_skip = idx;
2304 
2305 			return -EMSGSIZE;
2306 		}
2307 
2308 skip:
2309 		idx++;
2310 	}
2311 	spin_unlock_bh(&hash->list_locks[bucket]);
2312 
2313 	return 0;
2314 }
2315 
2316 /**
2317  * __batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
2318  * @msg: buffer for the message
2319  * @portid: netlink port
2320  * @cb: Control block containing additional options
2321  * @bat_priv: the bat priv with all the soft interface information
2322  * @bucket: current bucket to dump
2323  * @idx: index in current bucket to the next entry to dump
2324  *
2325  * Return: 0 or error code.
2326  */
2327 static int
2328 __batadv_mcast_flags_dump(struct sk_buff *msg, u32 portid,
2329 			  struct netlink_callback *cb,
2330 			  struct batadv_priv *bat_priv, long *bucket, long *idx)
2331 {
2332 	struct batadv_hashtable *hash = bat_priv->orig_hash;
2333 	long bucket_tmp = *bucket;
2334 	long idx_tmp = *idx;
2335 
2336 	while (bucket_tmp < hash->size) {
2337 		if (batadv_mcast_flags_dump_bucket(msg, portid, cb, hash,
2338 						   bucket_tmp, &idx_tmp))
2339 			break;
2340 
2341 		bucket_tmp++;
2342 		idx_tmp = 0;
2343 	}
2344 
2345 	*bucket = bucket_tmp;
2346 	*idx = idx_tmp;
2347 
2348 	return msg->len;
2349 }
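
/* Note: the dump is resumable - batadv_mcast_flags_dump() below maps
 * cb->args[0] and cb->args[1] to the bucket and in-bucket index, so a dump
 * that ran out of message space (-EMSGSIZE from the entry dumper) continues
 * where it left off on the next netlink callback. The generation number
 * folded into cb->seq lets the netlink core flag an inconsistent dump if
 * the hash changed in between.
 */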
2350 
2351 /**
2352  * batadv_mcast_netlink_get_primary() - get primary interface from netlink
2353  *  callback
2354  * @cb: netlink callback structure
2355  * @primary_if: the primary interface pointer to return the result in
2356  *
2357  * Return: 0 or error code.
2358  */
2359 static int
2360 batadv_mcast_netlink_get_primary(struct netlink_callback *cb,
2361 				 struct batadv_hard_iface **primary_if)
2362 {
2363 	struct batadv_hard_iface *hard_iface = NULL;
2364 	struct net *net = sock_net(cb->skb->sk);
2365 	struct net_device *soft_iface;
2366 	struct batadv_priv *bat_priv;
2367 	int ifindex;
2368 	int ret = 0;
2369 
2370 	ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
2371 	if (!ifindex)
2372 		return -EINVAL;
2373 
2374 	soft_iface = dev_get_by_index(net, ifindex);
2375 	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
2376 		ret = -ENODEV;
2377 		goto out;
2378 	}
2379 
2380 	bat_priv = netdev_priv(soft_iface);
2381 
2382 	hard_iface = batadv_primary_if_get_selected(bat_priv);
2383 	if (!hard_iface || hard_iface->if_status != BATADV_IF_ACTIVE) {
2384 		ret = -ENOENT;
2385 		goto out;
2386 	}
2387 
2388 out:
2389 	if (soft_iface)
2390 		dev_put(soft_iface);
2391 
2392 	if (!ret && primary_if)
2393 		*primary_if = hard_iface;
2394 	else if (hard_iface)
2395 		batadv_hardif_put(hard_iface);
2396 
2397 	return ret;
2398 }
2399 
2400 /**
2401  * batadv_mcast_flags_dump() - dump multicast flags table to a netlink socket
2402  * @msg: buffer for the message
2403  * @cb: callback structure containing arguments
2404  *
2405  * Return: message length.
2406  */
2407 int batadv_mcast_flags_dump(struct sk_buff *msg, struct netlink_callback *cb)
2408 {
2409 	struct batadv_hard_iface *primary_if = NULL;
2410 	int portid = NETLINK_CB(cb->skb).portid;
2411 	struct batadv_priv *bat_priv;
2412 	long *bucket = &cb->args[0];
2413 	long *idx = &cb->args[1];
2414 	int ret;
2415 
2416 	ret = batadv_mcast_netlink_get_primary(cb, &primary_if);
2417 	if (ret)
2418 		return ret;
2419 
2420 	bat_priv = netdev_priv(primary_if->soft_iface);
2421 	ret = __batadv_mcast_flags_dump(msg, portid, cb, bat_priv, bucket, idx);
2422 
2423 	batadv_hardif_put(primary_if);
2424 	return ret;
2425 }
2426 
2427 /**
2428  * batadv_mcast_free() - free the multicast optimizations structures
2429  * @bat_priv: the bat priv with all the soft interface information
2430  */
2431 void batadv_mcast_free(struct batadv_priv *bat_priv)
2432 {
2433 	cancel_delayed_work_sync(&bat_priv->mcast.work);
2434 
2435 	batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
2436 	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_MCAST, 2);
2437 
2438 	/* safe to call outside the worker, as the worker was canceled above */
2439 	batadv_mcast_mla_tt_retract(bat_priv, NULL);
2440 }
2441 
2442 /**
2443  * batadv_mcast_purge_orig() - reset originator global mcast state modifications
2444  * @orig: the originator which is going to get purged
2445  */
2446 void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
2447 {
2448 	struct batadv_priv *bat_priv = orig->bat_priv;
2449 
2450 	spin_lock_bh(&orig->mcast_handler_lock);
2451 
2452 	batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
2453 	batadv_mcast_want_ipv4_update(bat_priv, orig, BATADV_NO_FLAGS);
2454 	batadv_mcast_want_ipv6_update(bat_priv, orig, BATADV_NO_FLAGS);
2455 	batadv_mcast_want_rtr4_update(bat_priv, orig,
2456 				      BATADV_MCAST_WANT_NO_RTR4);
2457 	batadv_mcast_want_rtr6_update(bat_priv, orig,
2458 				      BATADV_MCAST_WANT_NO_RTR6);
2459 
2460 	spin_unlock_bh(&orig->mcast_handler_lock);
2461 }
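
/* Note: the purge passes the "no interest" value for each flag group:
 * BATADV_NO_FLAGS clears the unsnoopables/IPv4/IPv6 want-all state, while
 * the RTR helpers need BATADV_MCAST_WANT_NO_RTR4/6 set instead, because for
 * them an *unset* flag is what keeps an originator on the list.
 */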
2462