1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2011-2020  B.A.T.M.A.N. contributors:
3  *
4  * Simon Wunderlich
5  */
6 
7 #include "bridge_loop_avoidance.h"
8 #include "main.h"
9 
10 #include <linux/atomic.h>
11 #include <linux/byteorder/generic.h>
12 #include <linux/compiler.h>
13 #include <linux/crc16.h>
14 #include <linux/errno.h>
15 #include <linux/etherdevice.h>
16 #include <linux/gfp.h>
17 #include <linux/if_arp.h>
18 #include <linux/if_ether.h>
19 #include <linux/if_vlan.h>
20 #include <linux/jhash.h>
21 #include <linux/jiffies.h>
22 #include <linux/kernel.h>
23 #include <linux/kref.h>
24 #include <linux/list.h>
25 #include <linux/lockdep.h>
26 #include <linux/netdevice.h>
27 #include <linux/netlink.h>
28 #include <linux/preempt.h>
29 #include <linux/rculist.h>
30 #include <linux/rcupdate.h>
31 #include <linux/seq_file.h>
32 #include <linux/skbuff.h>
33 #include <linux/slab.h>
34 #include <linux/spinlock.h>
35 #include <linux/stddef.h>
36 #include <linux/string.h>
37 #include <linux/workqueue.h>
38 #include <net/arp.h>
39 #include <net/genetlink.h>
40 #include <net/netlink.h>
41 #include <net/sock.h>
42 #include <uapi/linux/batadv_packet.h>
43 #include <uapi/linux/batman_adv.h>
44 
45 #include "hard-interface.h"
46 #include "hash.h"
47 #include "log.h"
48 #include "netlink.h"
49 #include "originator.h"
50 #include "soft-interface.h"
51 #include "translation-table.h"
52 
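/*
 * First four bytes of the special address carried in the ARP sender HW
 * field of ANNOUNCE frames; the remaining two bytes are filled with the
 * backbone CRC in network byte order by batadv_bla_send_announce() and
 * checked again in batadv_handle_announce().
 */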
53 static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
54 
55 static void batadv_bla_periodic_work(struct work_struct *work);
56 static void
57 batadv_bla_send_announce(struct batadv_priv *bat_priv,
58 			 struct batadv_bla_backbone_gw *backbone_gw);
59 
60 /**
61  * batadv_choose_claim() - choose the right bucket for a claim.
62  * @data: data to hash
63  * @size: size of the hash table
64  *
65  * Return: the hash index of the claim
66  */
67 static inline u32 batadv_choose_claim(const void *data, u32 size)
68 {
69 	struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
70 	u32 hash = 0;
71 
72 	hash = jhash(&claim->addr, sizeof(claim->addr), hash);
73 	hash = jhash(&claim->vid, sizeof(claim->vid), hash);
74 
75 	return hash % size;
76 }
77 
78 /**
79  * batadv_choose_backbone_gw() - choose the right bucket for a backbone gateway.
80  * @data: data to hash
81  * @size: size of the hash table
82  *
83  * Return: the hash index of the backbone gateway
84  */
85 static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
86 {
87 	const struct batadv_bla_backbone_gw *gw;
88 	u32 hash = 0;
89 
90 	gw = (struct batadv_bla_backbone_gw *)data;
91 	hash = jhash(&gw->orig, sizeof(gw->orig), hash);
92 	hash = jhash(&gw->vid, sizeof(gw->vid), hash);
93 
94 	return hash % size;
95 }
96 
97 /**
98  * batadv_compare_backbone_gw() - compare address and vid of two backbone gws
99  * @node: list node of the first entry to compare
100  * @data2: pointer to the second backbone gateway
101  *
102  * Return: true if the backbones have the same data, false otherwise
103  */
104 static bool batadv_compare_backbone_gw(const struct hlist_node *node,
105 				       const void *data2)
106 {
107 	const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
108 					 hash_entry);
109 	const struct batadv_bla_backbone_gw *gw1 = data1;
110 	const struct batadv_bla_backbone_gw *gw2 = data2;
111 
112 	if (!batadv_compare_eth(gw1->orig, gw2->orig))
113 		return false;
114 
115 	if (gw1->vid != gw2->vid)
116 		return false;
117 
118 	return true;
119 }
120 
121 /**
122  * batadv_compare_claim() - compare address and vid of two claims
123  * @node: list node of the first entry to compare
124  * @data2: pointer to the second claim
125  *
126  * Return: true if the claims have the same data, false otherwise
127  */
128 static bool batadv_compare_claim(const struct hlist_node *node,
129 				 const void *data2)
130 {
131 	const void *data1 = container_of(node, struct batadv_bla_claim,
132 					 hash_entry);
133 	const struct batadv_bla_claim *cl1 = data1;
134 	const struct batadv_bla_claim *cl2 = data2;
135 
136 	if (!batadv_compare_eth(cl1->addr, cl2->addr))
137 		return false;
138 
139 	if (cl1->vid != cl2->vid)
140 		return false;
141 
142 	return true;
143 }
144 
145 /**
146  * batadv_backbone_gw_release() - release backbone gw from lists and queue for
147  *  free after rcu grace period
148  * @ref: kref pointer of the backbone gw
149  */
150 static void batadv_backbone_gw_release(struct kref *ref)
151 {
152 	struct batadv_bla_backbone_gw *backbone_gw;
153 
154 	backbone_gw = container_of(ref, struct batadv_bla_backbone_gw,
155 				   refcount);
156 
157 	kfree_rcu(backbone_gw, rcu);
158 }
159 
160 /**
161  * batadv_backbone_gw_put() - decrement the backbone gw refcounter and possibly
162  *  release it
163  * @backbone_gw: backbone gateway to be free'd
164  */
165 static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
166 {
167 	if (!backbone_gw)
168 		return;
169 
170 	kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
171 }
172 
173 /**
174  * batadv_claim_release() - release claim from lists and queue for free after
175  *  rcu grace period
176  * @ref: kref pointer of the claim
177  */
178 static void batadv_claim_release(struct kref *ref)
179 {
180 	struct batadv_bla_claim *claim;
181 	struct batadv_bla_backbone_gw *old_backbone_gw;
182 
183 	claim = container_of(ref, struct batadv_bla_claim, refcount);
184 
185 	spin_lock_bh(&claim->backbone_lock);
186 	old_backbone_gw = claim->backbone_gw;
187 	claim->backbone_gw = NULL;
188 	spin_unlock_bh(&claim->backbone_lock);
189 
190 	spin_lock_bh(&old_backbone_gw->crc_lock);
191 	old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
192 	spin_unlock_bh(&old_backbone_gw->crc_lock);
193 
194 	batadv_backbone_gw_put(old_backbone_gw);
195 
196 	kfree_rcu(claim, rcu);
197 }
198 
199 /**
200  * batadv_claim_put() - decrement the claim refcounter and possibly release it
201  * @claim: claim to be free'd
202  */
203 static void batadv_claim_put(struct batadv_bla_claim *claim)
204 {
205 	if (!claim)
206 		return;
207 
208 	kref_put(&claim->refcount, batadv_claim_release);
209 }
210 
211 /**
212  * batadv_claim_hash_find() - looks for a claim in the claim hash
213  * @bat_priv: the bat priv with all the soft interface information
214  * @data: search data (may be local/static data)
215  *
216  * Return: claim if found or NULL otherwise.
217  */
218 static struct batadv_bla_claim *
219 batadv_claim_hash_find(struct batadv_priv *bat_priv,
220 		       struct batadv_bla_claim *data)
221 {
222 	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
223 	struct hlist_head *head;
224 	struct batadv_bla_claim *claim;
225 	struct batadv_bla_claim *claim_tmp = NULL;
226 	int index;
227 
228 	if (!hash)
229 		return NULL;
230 
231 	index = batadv_choose_claim(data, hash->size);
232 	head = &hash->table[index];
233 
234 	rcu_read_lock();
235 	hlist_for_each_entry_rcu(claim, head, hash_entry) {
236 		if (!batadv_compare_claim(&claim->hash_entry, data))
237 			continue;
238 
239 		if (!kref_get_unless_zero(&claim->refcount))
240 			continue;
241 
242 		claim_tmp = claim;
243 		break;
244 	}
245 	rcu_read_unlock();
246 
247 	return claim_tmp;
248 }
249 
250 /**
251  * batadv_backbone_hash_find() - looks for a backbone gateway in the hash
252  * @bat_priv: the bat priv with all the soft interface information
253  * @addr: the address of the originator
254  * @vid: the VLAN ID
255  *
256  * Return: backbone gateway if found or NULL otherwise
257  */
258 static struct batadv_bla_backbone_gw *
259 batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
260 			  unsigned short vid)
261 {
262 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
263 	struct hlist_head *head;
264 	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
265 	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
266 	int index;
267 
268 	if (!hash)
269 		return NULL;
270 
271 	ether_addr_copy(search_entry.orig, addr);
272 	search_entry.vid = vid;
273 
274 	index = batadv_choose_backbone_gw(&search_entry, hash->size);
275 	head = &hash->table[index];
276 
277 	rcu_read_lock();
278 	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
279 		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
280 						&search_entry))
281 			continue;
282 
283 		if (!kref_get_unless_zero(&backbone_gw->refcount))
284 			continue;
285 
286 		backbone_gw_tmp = backbone_gw;
287 		break;
288 	}
289 	rcu_read_unlock();
290 
291 	return backbone_gw_tmp;
292 }
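
/* Both hash lookups above follow the same pattern: build a search key on
 * the stack (only the address and vid fields are filled in), pick the
 * bucket with the matching batadv_choose_*() helper, then walk the bucket
 * under rcu_read_lock() and take a reference via kref_get_unless_zero()
 * so the entry cannot be freed while the caller still uses it.
 */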
293 
294 /**
295  * batadv_bla_del_backbone_claims() - delete all claims for a backbone
296  * @backbone_gw: backbone gateway where the claims should be removed
297  */
298 static void
299 batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
300 {
301 	struct batadv_hashtable *hash;
302 	struct hlist_node *node_tmp;
303 	struct hlist_head *head;
304 	struct batadv_bla_claim *claim;
305 	int i;
306 	spinlock_t *list_lock;	/* protects write access to the hash lists */
307 
308 	hash = backbone_gw->bat_priv->bla.claim_hash;
309 	if (!hash)
310 		return;
311 
312 	for (i = 0; i < hash->size; i++) {
313 		head = &hash->table[i];
314 		list_lock = &hash->list_locks[i];
315 
316 		spin_lock_bh(list_lock);
317 		hlist_for_each_entry_safe(claim, node_tmp,
318 					  head, hash_entry) {
319 			if (claim->backbone_gw != backbone_gw)
320 				continue;
321 
322 			batadv_claim_put(claim);
323 			hlist_del_rcu(&claim->hash_entry);
324 		}
325 		spin_unlock_bh(list_lock);
326 	}
327 
328 	/* all claims gone, initialize CRC */
329 	spin_lock_bh(&backbone_gw->crc_lock);
330 	backbone_gw->crc = BATADV_BLA_CRC_INIT;
331 	spin_unlock_bh(&backbone_gw->crc_lock);
332 }
333 
334 /**
335  * batadv_bla_send_claim() - sends a claim frame according to the provided info
336  * @bat_priv: the bat priv with all the soft interface information
337  * @mac: the mac address to be announced within the claim
338  * @vid: the VLAN ID
339  * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
340  */
341 static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
342 				  unsigned short vid, int claimtype)
343 {
344 	struct sk_buff *skb;
345 	struct ethhdr *ethhdr;
346 	struct batadv_hard_iface *primary_if;
347 	struct net_device *soft_iface;
348 	u8 *hw_src;
349 	struct batadv_bla_claim_dst local_claim_dest;
350 	__be32 zeroip = 0;
351 
352 	primary_if = batadv_primary_if_get_selected(bat_priv);
353 	if (!primary_if)
354 		return;
355 
356 	memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
357 	       sizeof(local_claim_dest));
358 	local_claim_dest.type = claimtype;
359 
360 	soft_iface = primary_if->soft_iface;
361 
362 	skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
363 			 /* IP DST: 0.0.0.0 */
364 			 zeroip,
365 			 primary_if->soft_iface,
366 			 /* IP SRC: 0.0.0.0 */
367 			 zeroip,
368 			 /* Ethernet DST: Broadcast */
369 			 NULL,
370 			 /* Ethernet SRC/HW SRC:  originator mac */
371 			 primary_if->net_dev->dev_addr,
372 			 /* HW DST: FF:43:05:XX:YY:YY
373 			  * with XX   = claim type
374 			  * and YY:YY = group id
375 			  */
376 			 (u8 *)&local_claim_dest);
377 
378 	if (!skb)
379 		goto out;
380 
381 	ethhdr = (struct ethhdr *)skb->data;
382 	hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
383 
384 	/* now we pretend that the client would have sent this ... */
385 	switch (claimtype) {
386 	case BATADV_CLAIM_TYPE_CLAIM:
387 		/* normal claim frame
388 		 * set Ethernet SRC to the client's mac
389 		 */
390 		ether_addr_copy(ethhdr->h_source, mac);
391 		batadv_dbg(BATADV_DBG_BLA, bat_priv,
392 			   "%s(): CLAIM %pM on vid %d\n", __func__, mac,
393 			   batadv_print_vid(vid));
394 		break;
395 	case BATADV_CLAIM_TYPE_UNCLAIM:
396 		/* unclaim frame
397 		 * set HW SRC to the client's mac
398 		 */
399 		ether_addr_copy(hw_src, mac);
400 		batadv_dbg(BATADV_DBG_BLA, bat_priv,
401 			   "%s(): UNCLAIM %pM on vid %d\n", __func__, mac,
402 			   batadv_print_vid(vid));
403 		break;
404 	case BATADV_CLAIM_TYPE_ANNOUNCE:
405 		/* announcement frame
406 		 * set HW SRC to the special mac containing the crc
407 		 */
408 		ether_addr_copy(hw_src, mac);
409 		batadv_dbg(BATADV_DBG_BLA, bat_priv,
410 			   "%s(): ANNOUNCE of %pM on vid %d\n", __func__,
411 			   ethhdr->h_source, batadv_print_vid(vid));
412 		break;
413 	case BATADV_CLAIM_TYPE_REQUEST:
414 		/* request frame
415 		 * set HW SRC and header destination to the receiving backbone
416 		 * gw's mac
417 		 */
418 		ether_addr_copy(hw_src, mac);
419 		ether_addr_copy(ethhdr->h_dest, mac);
420 		batadv_dbg(BATADV_DBG_BLA, bat_priv,
421 			   "%s(): REQUEST of %pM to %pM on vid %d\n", __func__,
422 			   ethhdr->h_source, ethhdr->h_dest,
423 			   batadv_print_vid(vid));
424 		break;
425 	case BATADV_CLAIM_TYPE_LOOPDETECT:
426 		ether_addr_copy(ethhdr->h_source, mac);
427 		batadv_dbg(BATADV_DBG_BLA, bat_priv,
428 			   "%s(): LOOPDETECT of %pM to %pM on vid %d\n",
429 			   __func__, ethhdr->h_source, ethhdr->h_dest,
430 			   batadv_print_vid(vid));
431 
432 		break;
433 	}
434 
435 	if (vid & BATADV_VLAN_HAS_TAG) {
436 		skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
437 				      vid & VLAN_VID_MASK);
438 		if (!skb)
439 			goto out;
440 	}
441 
442 	skb_reset_mac_header(skb);
443 	skb->protocol = eth_type_trans(skb, soft_iface);
444 	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
445 	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
446 			   skb->len + ETH_HLEN);
447 
448 	if (in_interrupt())
449 		netif_rx(skb);
450 	else
451 		netif_rx_ni(skb);
452 out:
453 	if (primary_if)
454 		batadv_hardif_put(primary_if);
455 }
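
/* The claim frame built above is an ARP reply with both IP addresses set
 * to 0.0.0.0, a broadcast Ethernet destination and the ARP target HW
 * address set to ff:43:05:XX:YY:YY, where XX is the claim type and YY:YY
 * the claim group id. Only the Ethernet source and the ARP sender HW
 * address (hw_src) differ between the claim types, as set up in the
 * switch statement above. The finished frame is then fed back into the
 * soft interface via netif_rx() as if a client had sent it.
 */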
456 
457 /**
458  * batadv_bla_loopdetect_report() - worker for reporting the loop
459  * @work: work queue item
460  *
461  * Throws an uevent, as the loopdetect check function can't do that itself
462  * since the kernel may sleep while throwing uevents.
463  */
464 static void batadv_bla_loopdetect_report(struct work_struct *work)
465 {
466 	struct batadv_bla_backbone_gw *backbone_gw;
467 	struct batadv_priv *bat_priv;
468 	char vid_str[6] = { '\0' };
469 
470 	backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
471 				   report_work);
472 	bat_priv = backbone_gw->bat_priv;
473 
474 	batadv_info(bat_priv->soft_iface,
475 		    "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
476 		    batadv_print_vid(backbone_gw->vid));
477 	snprintf(vid_str, sizeof(vid_str), "%d",
478 		 batadv_print_vid(backbone_gw->vid));
479 	vid_str[sizeof(vid_str) - 1] = 0;
480 
481 	batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
482 			    vid_str);
483 
484 	batadv_backbone_gw_put(backbone_gw);
485 }
486 
487 /**
488  * batadv_bla_get_backbone_gw() - finds or creates a backbone gateway
489  * @bat_priv: the bat priv with all the soft interface information
490  * @orig: the mac address of the originator
491  * @vid: the VLAN ID
492  * @own_backbone: set if the requested backbone is local
493  *
494  * Return: the (possibly created) backbone gateway or NULL on error
495  */
496 static struct batadv_bla_backbone_gw *
497 batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
498 			   unsigned short vid, bool own_backbone)
499 {
500 	struct batadv_bla_backbone_gw *entry;
501 	struct batadv_orig_node *orig_node;
502 	int hash_added;
503 
504 	entry = batadv_backbone_hash_find(bat_priv, orig, vid);
505 
506 	if (entry)
507 		return entry;
508 
509 	batadv_dbg(BATADV_DBG_BLA, bat_priv,
510 		   "%s(): not found (%pM, %d), creating new entry\n", __func__,
511 		   orig, batadv_print_vid(vid));
512 
513 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
514 	if (!entry)
515 		return NULL;
516 
517 	entry->vid = vid;
518 	entry->lasttime = jiffies;
519 	entry->crc = BATADV_BLA_CRC_INIT;
520 	entry->bat_priv = bat_priv;
521 	spin_lock_init(&entry->crc_lock);
522 	atomic_set(&entry->request_sent, 0);
523 	atomic_set(&entry->wait_periods, 0);
524 	ether_addr_copy(entry->orig, orig);
525 	INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
526 	kref_init(&entry->refcount);
527 
528 	kref_get(&entry->refcount);
529 	hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
530 				     batadv_compare_backbone_gw,
531 				     batadv_choose_backbone_gw, entry,
532 				     &entry->hash_entry);
533 
534 	if (unlikely(hash_added != 0)) {
535 		/* hash failed, free the structure */
536 		kfree(entry);
537 		return NULL;
538 	}
539 
540 	/* this is a gateway now, remove any TT entry on this VLAN */
541 	orig_node = batadv_orig_hash_find(bat_priv, orig);
542 	if (orig_node) {
543 		batadv_tt_global_del_orig(bat_priv, orig_node, vid,
544 					  "became a backbone gateway");
545 		batadv_orig_node_put(orig_node);
546 	}
547 
548 	if (own_backbone) {
549 		batadv_bla_send_announce(bat_priv, entry);
550 
551 		/* this will be decreased in the worker thread */
552 		atomic_inc(&entry->request_sent);
553 		atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
554 		atomic_inc(&bat_priv->bla.num_requests);
555 	}
556 
557 	return entry;
558 }
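
/* Note the extra kref_get() before batadv_hash_add() above: one reference
 * is owned by the backbone hash, the other is returned to the caller. For
 * our own backbone (own_backbone == true), request_sent/num_requests are
 * raised to give bridge forward delays and bla group forming some grace
 * time; batadv_bla_periodic_work() resets them once wait_periods has
 * expired.
 */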
559 
560 /**
561  * batadv_bla_update_own_backbone_gw() - updates our own backbone gw for a VLAN
562  * @bat_priv: the bat priv with all the soft interface information
563  * @primary_if: the selected primary interface
564  * @vid: VLAN identifier
565  *
566  * Update or add our own backbone gw to make sure we announce
567  * ourselves wherever we receive other backbone gws.
568  */
569 static void
570 batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
571 				  struct batadv_hard_iface *primary_if,
572 				  unsigned short vid)
573 {
574 	struct batadv_bla_backbone_gw *backbone_gw;
575 
576 	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
577 						 primary_if->net_dev->dev_addr,
578 						 vid, true);
579 	if (unlikely(!backbone_gw))
580 		return;
581 
582 	backbone_gw->lasttime = jiffies;
583 	batadv_backbone_gw_put(backbone_gw);
584 }
585 
586 /**
587  * batadv_bla_answer_request() - answer a bla request by sending own claims
588  * @bat_priv: the bat priv with all the soft interface information
589  * @primary_if: interface where the request came on
590  * @vid: the vid where the request came on
591  *
592  * Repeat all of our own claims, and finally send an ANNOUNCE frame
593  * to allow the requester another check if the CRC is correct now.
594  */
595 static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
596 				      struct batadv_hard_iface *primary_if,
597 				      unsigned short vid)
598 {
599 	struct hlist_head *head;
600 	struct batadv_hashtable *hash;
601 	struct batadv_bla_claim *claim;
602 	struct batadv_bla_backbone_gw *backbone_gw;
603 	int i;
604 
605 	batadv_dbg(BATADV_DBG_BLA, bat_priv,
606 		   "%s(): received a claim request, send all of our own claims again\n",
607 		   __func__);
608 
609 	backbone_gw = batadv_backbone_hash_find(bat_priv,
610 						primary_if->net_dev->dev_addr,
611 						vid);
612 	if (!backbone_gw)
613 		return;
614 
615 	hash = bat_priv->bla.claim_hash;
616 	for (i = 0; i < hash->size; i++) {
617 		head = &hash->table[i];
618 
619 		rcu_read_lock();
620 		hlist_for_each_entry_rcu(claim, head, hash_entry) {
621 			/* only own claims are interesting */
622 			if (claim->backbone_gw != backbone_gw)
623 				continue;
624 
625 			batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
626 					      BATADV_CLAIM_TYPE_CLAIM);
627 		}
628 		rcu_read_unlock();
629 	}
630 
631 	/* finally, send an announcement frame */
632 	batadv_bla_send_announce(bat_priv, backbone_gw);
633 	batadv_backbone_gw_put(backbone_gw);
634 }
635 
636 /**
637  * batadv_bla_send_request() - send a request to repeat claims
638  * @backbone_gw: the backbone gateway from whom we are out of sync
639  *
640  * When the crc is wrong, ask the backbone gateway for a full table update.
641  * After the request, it will repeat all of its own claims and finally
642  * send an announcement claim with which we can check again.
643  */
644 static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
645 {
646 	/* first, remove all old entries */
647 	batadv_bla_del_backbone_claims(backbone_gw);
648 
649 	batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
650 		   "Sending REQUEST to %pM\n", backbone_gw->orig);
651 
652 	/* send request */
653 	batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
654 			      backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);
655 
656 	/* no local broadcasts should be sent or received, for now. */
657 	if (!atomic_read(&backbone_gw->request_sent)) {
658 		atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
659 		atomic_set(&backbone_gw->request_sent, 1);
660 	}
661 }
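
/* num_requests stays raised until the CRC is in sync again: it is
 * decremented when a matching ANNOUNCE arrives (batadv_handle_announce()),
 * when the backbone gw is purged (batadv_bla_purge_backbone_gw()), or when
 * wait_periods runs out in batadv_bla_periodic_work().
 */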
662 
663 /**
664  * batadv_bla_send_announce() - Send an announcement frame
665  * @bat_priv: the bat priv with all the soft interface information
666  * @backbone_gw: our backbone gateway which should be announced
667  */
668 static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
669 				     struct batadv_bla_backbone_gw *backbone_gw)
670 {
671 	u8 mac[ETH_ALEN];
672 	__be16 crc;
673 
674 	memcpy(mac, batadv_announce_mac, 4);
675 	spin_lock_bh(&backbone_gw->crc_lock);
676 	crc = htons(backbone_gw->crc);
677 	spin_unlock_bh(&backbone_gw->crc_lock);
678 	memcpy(&mac[4], &crc, 2);
679 
680 	batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
681 			      BATADV_CLAIM_TYPE_ANNOUNCE);
682 }
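
/* The resulting ANNOUNCE therefore carries 43:05:43:05:CC:CC in the ARP
 * sender HW field, where CC:CC is the current backbone CRC in network
 * byte order. Receivers extract and compare it in batadv_handle_announce()
 * and answer with a REQUEST if their local view of the CRC differs.
 */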
683 
684 /**
685  * batadv_bla_add_claim() - Adds a claim in the claim hash
686  * @bat_priv: the bat priv with all the soft interface information
687  * @mac: the mac address of the claim
688  * @vid: the VLAN ID of the frame
689  * @backbone_gw: the backbone gateway which claims it
690  */
691 static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
692 				 const u8 *mac, const unsigned short vid,
693 				 struct batadv_bla_backbone_gw *backbone_gw)
694 {
695 	struct batadv_bla_backbone_gw *old_backbone_gw;
696 	struct batadv_bla_claim *claim;
697 	struct batadv_bla_claim search_claim;
698 	bool remove_crc = false;
699 	int hash_added;
700 
701 	ether_addr_copy(search_claim.addr, mac);
702 	search_claim.vid = vid;
703 	claim = batadv_claim_hash_find(bat_priv, &search_claim);
704 
705 	/* create a new claim entry if it does not exist yet. */
706 	if (!claim) {
707 		claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
708 		if (!claim)
709 			return;
710 
711 		ether_addr_copy(claim->addr, mac);
712 		spin_lock_init(&claim->backbone_lock);
713 		claim->vid = vid;
714 		claim->lasttime = jiffies;
715 		kref_get(&backbone_gw->refcount);
716 		claim->backbone_gw = backbone_gw;
717 		kref_init(&claim->refcount);
718 
719 		batadv_dbg(BATADV_DBG_BLA, bat_priv,
720 			   "%s(): adding new entry %pM, vid %d to hash ...\n",
721 			   __func__, mac, batadv_print_vid(vid));
722 
723 		kref_get(&claim->refcount);
724 		hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
725 					     batadv_compare_claim,
726 					     batadv_choose_claim, claim,
727 					     &claim->hash_entry);
728 
729 		if (unlikely(hash_added != 0)) {
730 			/* only local changes happened. */
731 			kfree(claim);
732 			return;
733 		}
734 	} else {
735 		claim->lasttime = jiffies;
736 		if (claim->backbone_gw == backbone_gw)
737 			/* no need to register a new backbone */
738 			goto claim_free_ref;
739 
740 		batadv_dbg(BATADV_DBG_BLA, bat_priv,
741 			   "%s(): changing ownership for %pM, vid %d to gw %pM\n",
742 			   __func__, mac, batadv_print_vid(vid),
743 			   backbone_gw->orig);
744 
745 		remove_crc = true;
746 	}
747 
748 	/* replace backbone_gw atomically and adjust reference counters */
749 	spin_lock_bh(&claim->backbone_lock);
750 	old_backbone_gw = claim->backbone_gw;
751 	kref_get(&backbone_gw->refcount);
752 	claim->backbone_gw = backbone_gw;
753 	spin_unlock_bh(&claim->backbone_lock);
754 
755 	if (remove_crc) {
756 		/* remove claim address from old backbone_gw */
757 		spin_lock_bh(&old_backbone_gw->crc_lock);
758 		old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
759 		spin_unlock_bh(&old_backbone_gw->crc_lock);
760 	}
761 
762 	batadv_backbone_gw_put(old_backbone_gw);
763 
764 	/* add claim address to new backbone_gw */
765 	spin_lock_bh(&backbone_gw->crc_lock);
766 	backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
767 	spin_unlock_bh(&backbone_gw->crc_lock);
768 	backbone_gw->lasttime = jiffies;
769 
770 claim_free_ref:
771 	batadv_claim_put(claim);
772 }
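
/* The backbone CRC is maintained incrementally as the XOR of crc16() over
 * all claimed client addresses, conceptually:
 *
 *	crc = BATADV_BLA_CRC_INIT;
 *	for each claim owned by this backbone_gw:
 *		crc ^= crc16(0, claim->addr, ETH_ALEN);
 *
 * so adding, deleting or re-owning a single claim only XORs that client's
 * crc16 in or out (see also batadv_claim_release() and
 * batadv_bla_del_backbone_claims()) instead of recomputing the whole
 * table.
 */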
773 
774 /**
775  * batadv_bla_claim_get_backbone_gw() - Get valid reference for backbone_gw of
776  *  claim
777  * @claim: claim whose backbone_gw should be returned
778  *
779  * Return: valid reference to claim::backbone_gw
780  */
781 static struct batadv_bla_backbone_gw *
782 batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
783 {
784 	struct batadv_bla_backbone_gw *backbone_gw;
785 
786 	spin_lock_bh(&claim->backbone_lock);
787 	backbone_gw = claim->backbone_gw;
788 	kref_get(&backbone_gw->refcount);
789 	spin_unlock_bh(&claim->backbone_lock);
790 
791 	return backbone_gw;
792 }
793 
794 /**
795  * batadv_bla_del_claim() - delete a claim from the claim hash
796  * @bat_priv: the bat priv with all the soft interface information
797  * @mac: mac address of the claim to be removed
798  * @vid: VLAN id for the claim to be removed
799  */
800 static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
801 				 const u8 *mac, const unsigned short vid)
802 {
803 	struct batadv_bla_claim search_claim, *claim;
804 	struct batadv_bla_claim *claim_removed_entry;
805 	struct hlist_node *claim_removed_node;
806 
807 	ether_addr_copy(search_claim.addr, mac);
808 	search_claim.vid = vid;
809 	claim = batadv_claim_hash_find(bat_priv, &search_claim);
810 	if (!claim)
811 		return;
812 
813 	batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
814 		   mac, batadv_print_vid(vid));
815 
816 	claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
817 						batadv_compare_claim,
818 						batadv_choose_claim, claim);
819 	if (!claim_removed_node)
820 		goto free_claim;
821 
822 	/* reference from the hash is gone */
823 	claim_removed_entry = hlist_entry(claim_removed_node,
824 					  struct batadv_bla_claim, hash_entry);
825 	batadv_claim_put(claim_removed_entry);
826 
827 free_claim:
828 	/* don't need the reference from hash_find() anymore */
829 	batadv_claim_put(claim);
830 }
831 
832 /**
833  * batadv_handle_announce() - check for ANNOUNCE frame
834  * @bat_priv: the bat priv with all the soft interface information
835  * @an_addr: announcement mac address (ARP Sender HW address)
836  * @backbone_addr: originator address of the sender (Ethernet source MAC)
837  * @vid: the VLAN ID of the frame
838  *
839  * Return: true if handled
840  */
841 static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
842 				   u8 *backbone_addr, unsigned short vid)
843 {
844 	struct batadv_bla_backbone_gw *backbone_gw;
845 	u16 backbone_crc, crc;
846 
847 	if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
848 		return false;
849 
850 	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
851 						 false);
852 
853 	if (unlikely(!backbone_gw))
854 		return true;
855 
856 	/* handle as ANNOUNCE frame */
857 	backbone_gw->lasttime = jiffies;
858 	crc = ntohs(*((__force __be16 *)(&an_addr[4])));
859 
860 	batadv_dbg(BATADV_DBG_BLA, bat_priv,
861 		   "%s(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
862 		   __func__, batadv_print_vid(vid), backbone_gw->orig, crc);
863 
864 	spin_lock_bh(&backbone_gw->crc_lock);
865 	backbone_crc = backbone_gw->crc;
866 	spin_unlock_bh(&backbone_gw->crc_lock);
867 
868 	if (backbone_crc != crc) {
869 		batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
870 			   "%s(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
871 			   __func__, backbone_gw->orig,
872 			   batadv_print_vid(backbone_gw->vid),
873 			   backbone_crc, crc);
874 
875 		batadv_bla_send_request(backbone_gw);
876 	} else {
877 		/* if we have sent a request and the crc was OK,
878 		 * we can allow traffic again.
879 		 */
880 		if (atomic_read(&backbone_gw->request_sent)) {
881 			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
882 			atomic_set(&backbone_gw->request_sent, 0);
883 		}
884 	}
885 
886 	batadv_backbone_gw_put(backbone_gw);
887 	return true;
888 }
889 
890 /**
891  * batadv_handle_request() - check for REQUEST frame
892  * @bat_priv: the bat priv with all the soft interface information
893  * @primary_if: the primary hard interface of this batman soft interface
894  * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
895  * @ethhdr: ethernet header of a packet
896  * @vid: the VLAN ID of the frame
897  *
898  * Return: true if handled
899  */
900 static bool batadv_handle_request(struct batadv_priv *bat_priv,
901 				  struct batadv_hard_iface *primary_if,
902 				  u8 *backbone_addr, struct ethhdr *ethhdr,
903 				  unsigned short vid)
904 {
905 	/* check for REQUEST frame */
906 	if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
907 		return false;
908 
909 	/* sanity check, this should not happen on a normal switch,
910 	 * we ignore it in this case.
911 	 */
912 	if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
913 		return true;
914 
915 	batadv_dbg(BATADV_DBG_BLA, bat_priv,
916 		   "%s(): REQUEST vid %d (sent by %pM)...\n",
917 		   __func__, batadv_print_vid(vid), ethhdr->h_source);
918 
919 	batadv_bla_answer_request(bat_priv, primary_if, vid);
920 	return true;
921 }
922 
923 /**
924  * batadv_handle_unclaim() - check for UNCLAIM frame
925  * @bat_priv: the bat priv with all the soft interface information
926  * @primary_if: the primary hard interface of this batman soft interface
927  * @backbone_addr: originator address of the backbone (Ethernet source)
928  * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
929  * @vid: the VLAN ID of the frame
930  *
931  * Return: true if handled
932  */
933 static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
934 				  struct batadv_hard_iface *primary_if,
935 				  u8 *backbone_addr, u8 *claim_addr,
936 				  unsigned short vid)
937 {
938 	struct batadv_bla_backbone_gw *backbone_gw;
939 
940 	/* unclaim in any case if it is our own */
941 	if (primary_if && batadv_compare_eth(backbone_addr,
942 					     primary_if->net_dev->dev_addr))
943 		batadv_bla_send_claim(bat_priv, claim_addr, vid,
944 				      BATADV_CLAIM_TYPE_UNCLAIM);
945 
946 	backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
947 
948 	if (!backbone_gw)
949 		return true;
950 
951 	/* this must be an UNCLAIM frame */
952 	batadv_dbg(BATADV_DBG_BLA, bat_priv,
953 		   "%s(): UNCLAIM %pM on vid %d (sent by %pM)...\n", __func__,
954 		   claim_addr, batadv_print_vid(vid), backbone_gw->orig);
955 
956 	batadv_bla_del_claim(bat_priv, claim_addr, vid);
957 	batadv_backbone_gw_put(backbone_gw);
958 	return true;
959 }
960 
961 /**
962  * batadv_handle_claim() - check for CLAIM frame
963  * @bat_priv: the bat priv with all the soft interface information
964  * @primary_if: the primary hard interface of this batman soft interface
965  * @backbone_addr: originator address of the backbone (Ethernet Source)
966  * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
967  * @vid: the VLAN ID of the frame
968  *
969  * Return: true if handled
970  */
971 static bool batadv_handle_claim(struct batadv_priv *bat_priv,
972 				struct batadv_hard_iface *primary_if,
973 				u8 *backbone_addr, u8 *claim_addr,
974 				unsigned short vid)
975 {
976 	struct batadv_bla_backbone_gw *backbone_gw;
977 
978 	/* register the gateway if not yet available, and add the claim. */
979 
980 	backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
981 						 false);
982 
983 	if (unlikely(!backbone_gw))
984 		return true;
985 
986 	/* this must be a CLAIM frame */
987 	batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
988 	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
989 		batadv_bla_send_claim(bat_priv, claim_addr, vid,
990 				      BATADV_CLAIM_TYPE_CLAIM);
991 
992 	/* TODO: we could call something like tt_local_del() here. */
993 
994 	batadv_backbone_gw_put(backbone_gw);
995 	return true;
996 }
997 
998 /**
999  * batadv_check_claim_group() - check for claim group membership
1000  * @bat_priv: the bat priv with all the soft interface information
1001  * @primary_if: the primary interface of this batman interface
1002  * @hw_src: the Hardware source in the ARP Header
1003  * @hw_dst: the Hardware destination in the ARP Header
1004  * @ethhdr: pointer to the Ethernet header of the claim frame
1005  *
1006  * Checks if it is a claim packet and if it is in the same claim group.
1007  * This function also applies the group ID of the sender
1008  * if it is in the same mesh.
1009  *
1010  * Return:
1011  *	2  - if it is a claim packet and on the same group
1012  *	1  - if it is a claim packet from another group
1013  *	0  - if it is not a claim packet
1014  */
1015 static int batadv_check_claim_group(struct batadv_priv *bat_priv,
1016 				    struct batadv_hard_iface *primary_if,
1017 				    u8 *hw_src, u8 *hw_dst,
1018 				    struct ethhdr *ethhdr)
1019 {
1020 	u8 *backbone_addr;
1021 	struct batadv_orig_node *orig_node;
1022 	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
1023 
1024 	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
1025 	bla_dst_own = &bat_priv->bla.claim_dest;
1026 
1027 	/* if announcement packet, use the source,
1028 	 * otherwise assume it is in the hw_src
1029 	 */
1030 	switch (bla_dst->type) {
1031 	case BATADV_CLAIM_TYPE_CLAIM:
1032 		backbone_addr = hw_src;
1033 		break;
1034 	case BATADV_CLAIM_TYPE_REQUEST:
1035 	case BATADV_CLAIM_TYPE_ANNOUNCE:
1036 	case BATADV_CLAIM_TYPE_UNCLAIM:
1037 		backbone_addr = ethhdr->h_source;
1038 		break;
1039 	default:
1040 		return 0;
1041 	}
1042 
1043 	/* don't accept claim frames from ourselves */
1044 	if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
1045 		return 0;
1046 
1047 	/* if it's already the same group, it is fine. */
1048 	if (bla_dst->group == bla_dst_own->group)
1049 		return 2;
1050 
1051 	/* lets see if this originator is in our mesh */
1052 	orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);
1053 
1054 	/* don't accept claims from gateways which are not in
1055 	 * the same mesh or group.
1056 	 */
1057 	if (!orig_node)
1058 		return 1;
1059 
1060 	/* if our mesh friend's mac is bigger, use it for ourselves. */
1061 	if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
1062 		batadv_dbg(BATADV_DBG_BLA, bat_priv,
1063 			   "taking other backbones claim group: %#.4x\n",
1064 			   ntohs(bla_dst->group));
1065 		bla_dst_own->group = bla_dst->group;
1066 	}
1067 
1068 	batadv_orig_node_put(orig_node);
1069 
1070 	return 2;
1071 }
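
/* The group id itself is crc16() of the primary interface MAC address
 * (see batadv_bla_init() and batadv_bla_update_orig_address()). When
 * backbone gateways of two previously separate meshes meet, the
 * comparison above makes each node adopt the numerically larger group id,
 * so the backbones eventually converge on a single claim group.
 */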
1072 
1073 /**
1074  * batadv_bla_process_claim() - Check if this is a claim frame, and process it
1075  * @bat_priv: the bat priv with all the soft interface information
1076  * @primary_if: the primary hard interface of this batman soft interface
1077  * @skb: the frame to be checked
1078  *
1079  * Return: true if it was a claim frame, otherwise return false to
1080  * tell the callee that it can use the frame on its own.
1081  */
1082 static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
1083 				     struct batadv_hard_iface *primary_if,
1084 				     struct sk_buff *skb)
1085 {
1086 	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
1087 	u8 *hw_src, *hw_dst;
1088 	struct vlan_hdr *vhdr, vhdr_buf;
1089 	struct ethhdr *ethhdr;
1090 	struct arphdr *arphdr;
1091 	unsigned short vid;
1092 	int vlan_depth = 0;
1093 	__be16 proto;
1094 	int headlen;
1095 	int ret;
1096 
1097 	vid = batadv_get_vid(skb, 0);
1098 	ethhdr = eth_hdr(skb);
1099 
1100 	proto = ethhdr->h_proto;
1101 	headlen = ETH_HLEN;
1102 	if (vid & BATADV_VLAN_HAS_TAG) {
1103 		/* Traverse the VLAN/Ethertypes.
1104 		 *
1105 		 * At this point it is known that the first protocol is a VLAN
1106 		 * header, so start checking at the encapsulated protocol.
1107 		 *
1108 		 * The depth of the VLAN headers is recorded to drop BLA claim
1109 		 * frames encapsulated into multiple VLAN headers (QinQ).
1110 		 */
1111 		do {
1112 			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
1113 						  &vhdr_buf);
1114 			if (!vhdr)
1115 				return false;
1116 
1117 			proto = vhdr->h_vlan_encapsulated_proto;
1118 			headlen += VLAN_HLEN;
1119 			vlan_depth++;
1120 		} while (proto == htons(ETH_P_8021Q));
1121 	}
1122 
1123 	if (proto != htons(ETH_P_ARP))
1124 		return false; /* not a claim frame */
1125 
1126 	/* this must be an ARP frame. Check if it is a claim. */
1127 
1128 	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
1129 		return false;
1130 
1131 	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
1132 	ethhdr = eth_hdr(skb);
1133 	arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);
1134 
1135 	/* Check whether the ARP frame carries valid
1136 	 * IP information
1137 	 */
1138 	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
1139 		return false;
1140 	if (arphdr->ar_pro != htons(ETH_P_IP))
1141 		return false;
1142 	if (arphdr->ar_hln != ETH_ALEN)
1143 		return false;
1144 	if (arphdr->ar_pln != 4)
1145 		return false;
1146 
1147 	hw_src = (u8 *)arphdr + sizeof(struct arphdr);
1148 	hw_dst = hw_src + ETH_ALEN + 4;
1149 	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
1150 	bla_dst_own = &bat_priv->bla.claim_dest;
1151 
1152 	/* check if it is a claim frame in general */
1153 	if (memcmp(bla_dst->magic, bla_dst_own->magic,
1154 		   sizeof(bla_dst->magic)) != 0)
1155 		return false;
1156 
1157 	/* check if there is a claim frame encapsulated deeper in (QinQ) and
1158 	 * drop that, as this is not supported by BLA but should also not be
1159 	 * sent via the mesh.
1160 	 */
1161 	if (vlan_depth > 1)
1162 		return true;
1163 
1164 	/* Let the loopdetect frames onto the mesh in any case. */
1165 	if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
1166 		return false;
1167 
1168 	/* check if it is a claim frame. */
1169 	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
1170 				       ethhdr);
1171 	if (ret == 1)
1172 		batadv_dbg(BATADV_DBG_BLA, bat_priv,
1173 			   "%s(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
1174 			   __func__, ethhdr->h_source, batadv_print_vid(vid),
1175 			   hw_src, hw_dst);
1176 
1177 	if (ret < 2)
1178 		return !!ret;
1179 
1180 	/* become a backbone gw ourselves on this vlan if not happened yet */
1181 	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1182 
1183 	/* check for the different types of claim frames ... */
1184 	switch (bla_dst->type) {
1185 	case BATADV_CLAIM_TYPE_CLAIM:
1186 		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
1187 					ethhdr->h_source, vid))
1188 			return true;
1189 		break;
1190 	case BATADV_CLAIM_TYPE_UNCLAIM:
1191 		if (batadv_handle_unclaim(bat_priv, primary_if,
1192 					  ethhdr->h_source, hw_src, vid))
1193 			return true;
1194 		break;
1195 
1196 	case BATADV_CLAIM_TYPE_ANNOUNCE:
1197 		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
1198 					   vid))
1199 			return true;
1200 		break;
1201 	case BATADV_CLAIM_TYPE_REQUEST:
1202 		if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
1203 					  vid))
1204 			return true;
1205 		break;
1206 	}
1207 
1208 	batadv_dbg(BATADV_DBG_BLA, bat_priv,
1209 		   "%s(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
1210 		   __func__, ethhdr->h_source, batadv_print_vid(vid), hw_src,
1211 		   hw_dst);
1212 	return true;
1213 }
1214 
1215 /**
1216  * batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or
1217  *  immediately
1218  * @bat_priv: the bat priv with all the soft interface information
1219  * @now: whether the whole hash shall be wiped now
1220  *
1221  * Check when we last heard from other nodes, and remove them in case of
1222  * a time out, or clean all backbone gws if now is set.
1223  */
1224 static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
1225 {
1226 	struct batadv_bla_backbone_gw *backbone_gw;
1227 	struct hlist_node *node_tmp;
1228 	struct hlist_head *head;
1229 	struct batadv_hashtable *hash;
1230 	spinlock_t *list_lock;	/* protects write access to the hash lists */
1231 	int i;
1232 
1233 	hash = bat_priv->bla.backbone_hash;
1234 	if (!hash)
1235 		return;
1236 
1237 	for (i = 0; i < hash->size; i++) {
1238 		head = &hash->table[i];
1239 		list_lock = &hash->list_locks[i];
1240 
1241 		spin_lock_bh(list_lock);
1242 		hlist_for_each_entry_safe(backbone_gw, node_tmp,
1243 					  head, hash_entry) {
1244 			if (now)
1245 				goto purge_now;
1246 			if (!batadv_has_timed_out(backbone_gw->lasttime,
1247 						  BATADV_BLA_BACKBONE_TIMEOUT))
1248 				continue;
1249 
1250 			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
1251 				   "%s(): backbone gw %pM timed out\n",
1252 				   __func__, backbone_gw->orig);
1253 
1254 purge_now:
1255 			/* don't wait for the pending request anymore */
1256 			if (atomic_read(&backbone_gw->request_sent))
1257 				atomic_dec(&bat_priv->bla.num_requests);
1258 
1259 			batadv_bla_del_backbone_claims(backbone_gw);
1260 
1261 			hlist_del_rcu(&backbone_gw->hash_entry);
1262 			batadv_backbone_gw_put(backbone_gw);
1263 		}
1264 		spin_unlock_bh(list_lock);
1265 	}
1266 }
1267 
1268 /**
1269  * batadv_bla_purge_claims() - Remove claims after a timeout or immediately
1270  * @bat_priv: the bat priv with all the soft interface information
1271  * @primary_if: the selected primary interface, may be NULL if now is set
1272  * @now: whether the whole hash shall be wiped now
1273  *
1274  * Check when we last heard from our own claims, and remove them in case of
1275  * a time out, or clean all claims if now is set
1276  */
1277 static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
1278 				    struct batadv_hard_iface *primary_if,
1279 				    int now)
1280 {
1281 	struct batadv_bla_backbone_gw *backbone_gw;
1282 	struct batadv_bla_claim *claim;
1283 	struct hlist_head *head;
1284 	struct batadv_hashtable *hash;
1285 	int i;
1286 
1287 	hash = bat_priv->bla.claim_hash;
1288 	if (!hash)
1289 		return;
1290 
1291 	for (i = 0; i < hash->size; i++) {
1292 		head = &hash->table[i];
1293 
1294 		rcu_read_lock();
1295 		hlist_for_each_entry_rcu(claim, head, hash_entry) {
1296 			backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1297 			if (now)
1298 				goto purge_now;
1299 
1300 			if (!batadv_compare_eth(backbone_gw->orig,
1301 						primary_if->net_dev->dev_addr))
1302 				goto skip;
1303 
1304 			if (!batadv_has_timed_out(claim->lasttime,
1305 						  BATADV_BLA_CLAIM_TIMEOUT))
1306 				goto skip;
1307 
1308 			batadv_dbg(BATADV_DBG_BLA, bat_priv,
1309 				   "%s(): timed out.\n", __func__);
1310 
1311 purge_now:
1312 			batadv_dbg(BATADV_DBG_BLA, bat_priv,
1313 				   "%s(): %pM, vid %d\n", __func__,
1314 				   claim->addr, claim->vid);
1315 
1316 			batadv_handle_unclaim(bat_priv, primary_if,
1317 					      backbone_gw->orig,
1318 					      claim->addr, claim->vid);
1319 skip:
1320 			batadv_backbone_gw_put(backbone_gw);
1321 		}
1322 		rcu_read_unlock();
1323 	}
1324 }
1325 
1326 /**
1327  * batadv_bla_update_orig_address() - Update the backbone gateways when our own
1328  *  originator address changes
1329  * @bat_priv: the bat priv with all the soft interface information
1330  * @primary_if: the new selected primary_if
1331  * @oldif: the old primary interface, may be NULL
1332  */
1333 void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1334 				    struct batadv_hard_iface *primary_if,
1335 				    struct batadv_hard_iface *oldif)
1336 {
1337 	struct batadv_bla_backbone_gw *backbone_gw;
1338 	struct hlist_head *head;
1339 	struct batadv_hashtable *hash;
1340 	__be16 group;
1341 	int i;
1342 
1343 	/* reset bridge loop avoidance group id */
1344 	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
1345 	bat_priv->bla.claim_dest.group = group;
1346 
1347 	/* purge everything when bridge loop avoidance is turned off */
1348 	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1349 		oldif = NULL;
1350 
1351 	if (!oldif) {
1352 		batadv_bla_purge_claims(bat_priv, NULL, 1);
1353 		batadv_bla_purge_backbone_gw(bat_priv, 1);
1354 		return;
1355 	}
1356 
1357 	hash = bat_priv->bla.backbone_hash;
1358 	if (!hash)
1359 		return;
1360 
1361 	for (i = 0; i < hash->size; i++) {
1362 		head = &hash->table[i];
1363 
1364 		rcu_read_lock();
1365 		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1366 			/* own orig still holds the old value. */
1367 			if (!batadv_compare_eth(backbone_gw->orig,
1368 						oldif->net_dev->dev_addr))
1369 				continue;
1370 
1371 			ether_addr_copy(backbone_gw->orig,
1372 					primary_if->net_dev->dev_addr);
1373 			/* send an announce frame so others will ask for our
1374 			 * claims and update their tables.
1375 			 */
1376 			batadv_bla_send_announce(bat_priv, backbone_gw);
1377 		}
1378 		rcu_read_unlock();
1379 	}
1380 }
1381 
1382 /**
1383  * batadv_bla_send_loopdetect() - send a loopdetect frame
1384  * @bat_priv: the bat priv with all the soft interface information
1385  * @backbone_gw: the backbone gateway for which a loop should be detected
1386  *
1387  * To detect loops that the bridge loop avoidance can't handle, send a loop
1388  * detection packet on the backbone. Unlike other BLA frames, this frame will
1389  * be allowed on the mesh by other nodes. If it is received on the mesh, this
1390  * indicates that there is a loop.
1391  */
1392 static void
1393 batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
1394 			   struct batadv_bla_backbone_gw *backbone_gw)
1395 {
1396 	batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
1397 		   backbone_gw->vid);
1398 	batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
1399 			      backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
1400 }
1401 
1402 /**
1403  * batadv_bla_status_update() - purge bla interfaces if necessary
1404  * @net_dev: the soft interface net device
1405  */
1406 void batadv_bla_status_update(struct net_device *net_dev)
1407 {
1408 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
1409 	struct batadv_hard_iface *primary_if;
1410 
1411 	primary_if = batadv_primary_if_get_selected(bat_priv);
1412 	if (!primary_if)
1413 		return;
1414 
1415 	/* this function already purges everything when bla is disabled,
1416 	 * so just call that one.
1417 	 */
1418 	batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
1419 	batadv_hardif_put(primary_if);
1420 }
1421 
1422 /**
1423  * batadv_bla_periodic_work() - performs periodic bla work
1424  * @work: kernel work struct
1425  *
1426  * periodic work to do:
1427  *  * purge structures when they are too old
1428  *  * send announcements
1429  */
1430 static void batadv_bla_periodic_work(struct work_struct *work)
1431 {
1432 	struct delayed_work *delayed_work;
1433 	struct batadv_priv *bat_priv;
1434 	struct batadv_priv_bla *priv_bla;
1435 	struct hlist_head *head;
1436 	struct batadv_bla_backbone_gw *backbone_gw;
1437 	struct batadv_hashtable *hash;
1438 	struct batadv_hard_iface *primary_if;
1439 	bool send_loopdetect = false;
1440 	int i;
1441 
1442 	delayed_work = to_delayed_work(work);
1443 	priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
1444 	bat_priv = container_of(priv_bla, struct batadv_priv, bla);
1445 	primary_if = batadv_primary_if_get_selected(bat_priv);
1446 	if (!primary_if)
1447 		goto out;
1448 
1449 	batadv_bla_purge_claims(bat_priv, primary_if, 0);
1450 	batadv_bla_purge_backbone_gw(bat_priv, 0);
1451 
1452 	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1453 		goto out;
1454 
1455 	if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
1456 		/* set a new random mac address for the next bridge loop
1457 		 * detection frames. Set the locally administered bit to avoid
1458 		 * collisions with users' mac addresses.
1459 		 */
1460 		eth_random_addr(bat_priv->bla.loopdetect_addr);
1461 		bat_priv->bla.loopdetect_addr[0] = 0xba;
1462 		bat_priv->bla.loopdetect_addr[1] = 0xbe;
1463 		bat_priv->bla.loopdetect_lasttime = jiffies;
1464 		atomic_set(&bat_priv->bla.loopdetect_next,
1465 			   BATADV_BLA_LOOPDETECT_PERIODS);
1466 
1467 		/* mark for sending loop detect on all VLANs */
1468 		send_loopdetect = true;
1469 	}
1470 
1471 	hash = bat_priv->bla.backbone_hash;
1472 	if (!hash)
1473 		goto out;
1474 
1475 	for (i = 0; i < hash->size; i++) {
1476 		head = &hash->table[i];
1477 
1478 		rcu_read_lock();
1479 		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1480 			if (!batadv_compare_eth(backbone_gw->orig,
1481 						primary_if->net_dev->dev_addr))
1482 				continue;
1483 
1484 			backbone_gw->lasttime = jiffies;
1485 
1486 			batadv_bla_send_announce(bat_priv, backbone_gw);
1487 			if (send_loopdetect)
1488 				batadv_bla_send_loopdetect(bat_priv,
1489 							   backbone_gw);
1490 
1491 			/* request_sent is only set after creation to avoid
1492 			 * problems when we are not yet known as backbone gw
1493 			 * in the backbone.
1494 			 *
1495 			 * We can reset this now after we waited some periods
1496 			 * to give bridge forward delays and bla group forming
1497 			 * some grace time.
1498 			 */
1499 
1500 			if (atomic_read(&backbone_gw->request_sent) == 0)
1501 				continue;
1502 
1503 			if (!atomic_dec_and_test(&backbone_gw->wait_periods))
1504 				continue;
1505 
1506 			atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
1507 			atomic_set(&backbone_gw->request_sent, 0);
1508 		}
1509 		rcu_read_unlock();
1510 	}
1511 out:
1512 	if (primary_if)
1513 		batadv_hardif_put(primary_if);
1514 
1515 	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
1516 			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1517 }
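
/* Loop detection piggybacks on this worker: every
 * BATADV_BLA_LOOPDETECT_PERIODS periods a fresh random address with the
 * ba:be prefix is generated and sent as a LOOPDETECT claim on each VLAN
 * for which we are a backbone gw. If such a frame comes back through the
 * mesh, the loop is reported asynchronously via
 * batadv_bla_loopdetect_report().
 */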
1518 
1519 /* The claim and backbone hashes receive the same lock class key because they
1520  * are initialized by hash_new with the same key. Reinitialize them with two
1521  * different keys to allow nested locking without generating lockdep
1522  * warnings.
1523  */
1524 static struct lock_class_key batadv_claim_hash_lock_class_key;
1525 static struct lock_class_key batadv_backbone_hash_lock_class_key;
1526 
1527 /**
1528  * batadv_bla_init() - initialize all bla structures
1529  * @bat_priv: the bat priv with all the soft interface information
1530  *
1531  * Return: 0 on success, < 0 on error.
1532  */
1533 int batadv_bla_init(struct batadv_priv *bat_priv)
1534 {
1535 	int i;
1536 	u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
1537 	struct batadv_hard_iface *primary_if;
1538 	u16 crc;
1539 	unsigned long entrytime;
1540 
1541 	spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
1542 
1543 	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
1544 
1545 	/* setting claim destination address */
1546 	memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
1547 	bat_priv->bla.claim_dest.type = 0;
1548 	primary_if = batadv_primary_if_get_selected(bat_priv);
1549 	if (primary_if) {
1550 		crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
1551 		bat_priv->bla.claim_dest.group = htons(crc);
1552 		batadv_hardif_put(primary_if);
1553 	} else {
1554 		bat_priv->bla.claim_dest.group = 0; /* will be set later */
1555 	}
1556 
1557 	/* initialize the duplicate list */
1558 	entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
1559 	for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
1560 		bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
1561 	bat_priv->bla.bcast_duplist_curr = 0;
1562 
1563 	atomic_set(&bat_priv->bla.loopdetect_next,
1564 		   BATADV_BLA_LOOPDETECT_PERIODS);
1565 
1566 	if (bat_priv->bla.claim_hash)
1567 		return 0;
1568 
1569 	bat_priv->bla.claim_hash = batadv_hash_new(128);
1570 	if (!bat_priv->bla.claim_hash)
1571 		return -ENOMEM;
1572 
1573 	bat_priv->bla.backbone_hash = batadv_hash_new(32);
1574 	if (!bat_priv->bla.backbone_hash) {
1575 		batadv_hash_destroy(bat_priv->bla.claim_hash);
1576 		return -ENOMEM;
1577 	}
1578 
1579 	batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
1580 				   &batadv_claim_hash_lock_class_key);
1581 	batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
1582 				   &batadv_backbone_hash_lock_class_key);
1583 
1584 	batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
1585 
1586 	INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
1587 
1588 	queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
1589 			   msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1590 	return 0;
1591 }
1592 
1593 /**
1594  * batadv_bla_check_duplist() - Check if a frame is in the broadcast duplicate list
1595  * @bat_priv: the bat priv with all the soft interface information
1596  * @skb: contains the multicast packet to be checked
1597  * @payload_ptr: pointer to position inside the head buffer of the skb
1598  *  marking the start of the data to be CRC'ed
1599  * @orig: originator mac address, NULL if unknown
1600  *
1601  * Check if it is on our broadcast list. Another gateway might have sent the
1602  * same packet because it is connected to the same backbone, so we have to
1603  * remove this duplicate.
1604  *
1605  * This is performed by checking the CRC, which will tell us
1606  * with a good chance that it is the same packet. If it is furthermore
1607  * sent by another host, drop it. We allow equal packets from
1608  * the same host, however, as this might be intended.
1609  *
1610  * Return: true if a packet is in the duplicate list, false otherwise.
1611  */
1612 static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv,
1613 				     struct sk_buff *skb, u8 *payload_ptr,
1614 				     const u8 *orig)
1615 {
1616 	struct batadv_bcast_duplist_entry *entry;
1617 	bool ret = false;
1618 	int i, curr;
1619 	__be32 crc;
1620 
1621 	/* calculate the crc ... */
1622 	crc = batadv_skb_crc32(skb, payload_ptr);
1623 
1624 	spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
1625 
1626 	for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
1627 		curr = (bat_priv->bla.bcast_duplist_curr + i);
1628 		curr %= BATADV_DUPLIST_SIZE;
1629 		entry = &bat_priv->bla.bcast_duplist[curr];
1630 
1631 		/* we can stop searching if the entry is too old;
1632 		 * later entries will be even older
1633 		 */
1634 		if (batadv_has_timed_out(entry->entrytime,
1635 					 BATADV_DUPLIST_TIMEOUT))
1636 			break;
1637 
1638 		if (entry->crc != crc)
1639 			continue;
1640 
1641 		/* are the originators both known and not anonymous? */
1642 		if (orig && !is_zero_ether_addr(orig) &&
1643 		    !is_zero_ether_addr(entry->orig)) {
1644 			/* If known, check if the new frame came from
1645 			 * the same originator:
1646 			 * We are safe to take identical frames from the
1647 			 * same orig, if known, as duplications in
1648 			 * the mesh are detected via the (orig, seqno) pair.
1649 			 * So we can be a bit more liberal here and allow
1650 			 * identical frames from the same orig which the source
1651 			 * host might have sent multiple times on purpose.
1652 			 */
1653 			if (batadv_compare_eth(entry->orig, orig))
1654 				continue;
1655 		}
1656 
1657 		/* this entry seems to match: same crc, not too old,
1658 		 * and from another gw. therefore return true to forbid it.
1659 		 */
1660 		ret = true;
1661 		goto out;
1662 	}
1663 	/* not found, add a new entry (overwrite the oldest entry)
1664 	 * and allow it, it's the first occurrence.
1665 	 */
1666 	curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
1667 	curr %= BATADV_DUPLIST_SIZE;
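	/* the slot right behind the current head is the oldest one and gets
	 * recycled; e.g. with a 16 entry list and bcast_duplist_curr == 0,
	 * (0 + 16 - 1) % 16 = 15, so slot 15 is overwritten and becomes the
	 * new head below
	 */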
1668 	entry = &bat_priv->bla.bcast_duplist[curr];
1669 	entry->crc = crc;
1670 	entry->entrytime = jiffies;
1671 
1672 	/* known originator */
1673 	if (orig)
1674 		ether_addr_copy(entry->orig, orig);
1675 	/* anonymous originator */
1676 	else
1677 		eth_zero_addr(entry->orig);
1678 
1679 	bat_priv->bla.bcast_duplist_curr = curr;
1680 
1681 out:
1682 	spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);
1683 
1684 	return ret;
1685 }
1686 
1687 /**
1688  * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast duplicate list.
1689  * @bat_priv: the bat priv with all the soft interface information
1690  * @skb: contains the multicast packet to be checked, decapsulated from a
1691  *  unicast_packet
1692  *
1693  * Check if it is on our broadcast list. Another gateway might have sent the
1694  * same packet because it is connected to the same backbone, so we have to
1695  * remove this duplicate.
1696  *
1697  * Return: true if a packet is in the duplicate list, false otherwise.
1698  */
1699 static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv,
1700 					   struct sk_buff *skb)
1701 {
1702 	return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL);
1703 }
1704 
1705 /**
1706  * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast duplicate list.
1707  * @bat_priv: the bat priv with all the soft interface information
1708  * @skb: contains the bcast_packet to be checked
1709  *
1710  * Check if it is on our broadcast list. Another gateway might have sent the
1711  * same packet because it is connected to the same backbone, so we have to
1712  * remove this duplicate.
1713  *
1714  * Return: true if a packet is in the duplicate list, false otherwise.
1715  */
1716 bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
1717 				    struct sk_buff *skb)
1718 {
1719 	struct batadv_bcast_packet *bcast_packet;
1720 	u8 *payload_ptr;
1721 
1722 	bcast_packet = (struct batadv_bcast_packet *)skb->data;
1723 	payload_ptr = (u8 *)(bcast_packet + 1);
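	/* bcast_packet + 1 points just past the broadcast header, so the CRC
	 * used for the duplicate check only covers the encapsulated Ethernet
	 * frame, not the batman-adv header with its per-sender fields
	 */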
1724 
1725 	return batadv_bla_check_duplist(bat_priv, skb, payload_ptr,
1726 					bcast_packet->orig);
1727 }
1728 
1729 /**
1730  * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
1731  *  the VLAN identified by vid.
1732  * @bat_priv: the bat priv with all the soft interface information
1733  * @orig: originator mac address
1734  * @vid: VLAN identifier
1735  *
1736  * Return: true if orig is a backbone for this vid, false otherwise.
1737  */
1738 bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
1739 				    unsigned short vid)
1740 {
1741 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1742 	struct hlist_head *head;
1743 	struct batadv_bla_backbone_gw *backbone_gw;
1744 	int i;
1745 
1746 	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1747 		return false;
1748 
1749 	if (!hash)
1750 		return false;
1751 
1752 	for (i = 0; i < hash->size; i++) {
1753 		head = &hash->table[i];
1754 
1755 		rcu_read_lock();
1756 		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1757 			if (batadv_compare_eth(backbone_gw->orig, orig) &&
1758 			    backbone_gw->vid == vid) {
1759 				rcu_read_unlock();
1760 				return true;
1761 			}
1762 		}
1763 		rcu_read_unlock();
1764 	}
1765 
1766 	return false;
1767 }
1768 
1769 /**
1770  * batadv_bla_is_backbone_gw() - check if originator is a backbone gw for a VLAN
1771  * @skb: the frame to be checked
1772  * @orig_node: the orig_node of the frame
1773  * @hdr_size: length of the batman-adv header preceding the Ethernet frame
1774  *
1775  * Return: true if the orig_node is also a gateway on the soft interface,
1776  * otherwise it returns false.
1777  */
1778 bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
1779 			       struct batadv_orig_node *orig_node, int hdr_size)
1780 {
1781 	struct batadv_bla_backbone_gw *backbone_gw;
1782 	unsigned short vid;
1783 
1784 	if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
1785 		return false;
1786 
1787 	/* first, find out the vid. */
1788 	if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
1789 		return false;
1790 
1791 	vid = batadv_get_vid(skb, hdr_size);
1792 
1793 	/* see if this originator is a backbone gw for this VLAN */
1794 	backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
1795 						orig_node->orig, vid);
1796 	if (!backbone_gw)
1797 		return false;
1798 
1799 	batadv_backbone_gw_put(backbone_gw);
1800 	return true;
1801 }
1802 
1803 /**
1804  * batadv_bla_free() - free all bla structures
1805  * @bat_priv: the bat priv with all the soft interface information
1806  *
1807  * for softinterface free or module unload
1808  */
1809 void batadv_bla_free(struct batadv_priv *bat_priv)
1810 {
1811 	struct batadv_hard_iface *primary_if;
1812 
1813 	cancel_delayed_work_sync(&bat_priv->bla.work);
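	/* stop the periodic worker first so it can neither re-arm itself nor
	 * touch the hashes while they are purged and destroyed below
	 */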
1814 	primary_if = batadv_primary_if_get_selected(bat_priv);
1815 
1816 	if (bat_priv->bla.claim_hash) {
1817 		batadv_bla_purge_claims(bat_priv, primary_if, 1);
1818 		batadv_hash_destroy(bat_priv->bla.claim_hash);
1819 		bat_priv->bla.claim_hash = NULL;
1820 	}
1821 	if (bat_priv->bla.backbone_hash) {
1822 		batadv_bla_purge_backbone_gw(bat_priv, 1);
1823 		batadv_hash_destroy(bat_priv->bla.backbone_hash);
1824 		bat_priv->bla.backbone_hash = NULL;
1825 	}
1826 	if (primary_if)
1827 		batadv_hardif_put(primary_if);
1828 }
1829 
1830 /**
1831  * batadv_bla_loopdetect_check() - check and handle a detected loop
1832  * @bat_priv: the bat priv with all the soft interface information
1833  * @skb: the packet to check
1834  * @primary_if: interface where the request came on
1835  * @vid: the VLAN ID of the frame
1836  *
1837  * Checks if this packet is a loop detect frame which has been sent by us,
1838  * throws an uevent and logs the event if that is the case.
1839  *
1840  * Return: true if it is a loop detect frame which is to be dropped, false
1841  * otherwise.
1842  */
1843 static bool
1844 batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
1845 			    struct batadv_hard_iface *primary_if,
1846 			    unsigned short vid)
1847 {
1848 	struct batadv_bla_backbone_gw *backbone_gw;
1849 	struct ethhdr *ethhdr;
1850 	bool ret;
1851 
1852 	ethhdr = eth_hdr(skb);
1853 
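	/* bla.loopdetect_addr is a random MAC address used as the source of
	 * the loop detection frames sent by us periodically; seeing such a
	 * frame come back from the mesh indicates that the LAN and the mesh
	 * are connected in a loop
	 */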
1854 	/* Only check for the MAC address and skip more checks here for
1855 	 * performance reasons - this function is on the hotpath, after all.
1856 	 */
1857 	if (!batadv_compare_eth(ethhdr->h_source,
1858 				bat_priv->bla.loopdetect_addr))
1859 		return false;
1860 
1861 	/* If the packet came in too late, don't forward it on the mesh
1862 	 * but don't consider it a loop either. It might be a coincidence.
1863 	 */
1864 	if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
1865 				 BATADV_BLA_LOOPDETECT_TIMEOUT))
1866 		return true;
1867 
1868 	backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
1869 						 primary_if->net_dev->dev_addr,
1870 						 vid, true);
1871 	if (unlikely(!backbone_gw))
1872 		return true;
1873 
1874 	ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
1875 
1876 	/* backbone_gw is unreferenced in the report work function
1877 	 * if queue_work() call was successful
1878 	 */
1879 	if (!ret)
1880 		batadv_backbone_gw_put(backbone_gw);
1881 
1882 	return true;
1883 }
1884 
1885 /**
1886  * batadv_bla_rx() - check packets coming from the mesh.
1887  * @bat_priv: the bat priv with all the soft interface information
1888  * @skb: the frame to be checked
1889  * @vid: the VLAN ID of the frame
1890  * @packet_type: the batman packet type this frame came in
1891  *
1892  * batadv_bla_rx avoidance checks whether:
1893  *  * we have to race for a claim
1894  *  * the frame is allowed on the LAN
1895  *
1896  * In these cases, the skb is further handled by this function.
1897  *
1898  * Return: true if handled, otherwise it returns false and the caller shall
1899  * further process the skb.
1900  */
1901 bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1902 		   unsigned short vid, int packet_type)
1903 {
1904 	struct batadv_bla_backbone_gw *backbone_gw;
1905 	struct ethhdr *ethhdr;
1906 	struct batadv_bla_claim search_claim, *claim = NULL;
1907 	struct batadv_hard_iface *primary_if;
1908 	bool own_claim;
1909 	bool ret;
1910 
1911 	ethhdr = eth_hdr(skb);
1912 
1913 	primary_if = batadv_primary_if_get_selected(bat_priv);
1914 	if (!primary_if)
1915 		goto handled;
1916 
1917 	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1918 		goto allow;
1919 
1920 	if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
1921 		goto handled;
1922 
1923 	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1924 		/* don't allow multicast packets while requests are in flight */
1925 		if (is_multicast_ether_addr(ethhdr->h_dest))
1926 			/* Both broadcast flooding or multicast-via-unicasts
1927 			 * delivery might send to multiple backbone gateways
1928 			 * sharing the same LAN and therefore need to coordinate
1929 			 * which backbone gateway forwards into the LAN,
1930 			 * by claiming the payload source address.
1931 			 *
1932 			 * Broadcast flooding and multicast-via-unicasts
1933 			 * delivery use the following two batman packet types.
1934 			 * Note: explicitly exclude BATADV_UNICAST_4ADDR,
1935 			 * as the DHCP gateway feature will send explicitly
1936 			 * to only one BLA gateway, so the claiming process
1937 			 * should be avoided there.
1938 			 */
1939 			if (packet_type == BATADV_BCAST ||
1940 			    packet_type == BATADV_UNICAST)
1941 				goto handled;
1942 
1943 	/* potential duplicates from foreign BLA backbone gateways via
1944 	 * multicast-in-unicast packets
1945 	 */
1946 	if (is_multicast_ether_addr(ethhdr->h_dest) &&
1947 	    packet_type == BATADV_UNICAST &&
1948 	    batadv_bla_check_ucast_duplist(bat_priv, skb))
1949 		goto handled;
1950 
1951 	ether_addr_copy(search_claim.addr, ethhdr->h_source);
1952 	search_claim.vid = vid;
1953 	claim = batadv_claim_hash_find(bat_priv, &search_claim);
1954 
1955 	if (!claim) {
1956 		/* no claim exists yet, claim it for us!
1957 		 * (possible optimization: race for the claim)
1958 		 */
1959 
1960 		batadv_dbg(BATADV_DBG_BLA, bat_priv,
1961 			   "%s(): Unclaimed MAC %pM found. Claim it. Local: %s\n",
1962 			   __func__, ethhdr->h_source,
1963 			   batadv_is_my_client(bat_priv,
1964 					       ethhdr->h_source, vid) ?
1965 			   "yes" : "no");
1966 		batadv_handle_claim(bat_priv, primary_if,
1967 				    primary_if->net_dev->dev_addr,
1968 				    ethhdr->h_source, vid);
1969 		goto allow;
1970 	}
1971 
1972 	/* if it is our own claim ... */
1973 	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1974 	own_claim = batadv_compare_eth(backbone_gw->orig,
1975 				       primary_if->net_dev->dev_addr);
1976 	batadv_backbone_gw_put(backbone_gw);
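	/* the claim records which backbone gateway on the shared LAN is
	 * responsible for this client; own_claim is true if that gateway's
	 * originator address is our own primary MAC
	 */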
1977 
1978 	if (own_claim) {
1979 		/* ... allow it in any case */
1980 		claim->lasttime = jiffies;
1981 		goto allow;
1982 	}
1983 
1984 	/* if it is a multicast ... */
1985 	if (is_multicast_ether_addr(ethhdr->h_dest) &&
1986 	    (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) {
1987 		/* ... drop it. the responsible gateway is in charge.
1988 		 *
1989 		 * We need to check packet type because with the gateway
1990 		 * feature, broadcasts (like DHCP requests) may be sent
1991 		 * using a unicast 4 address packet type. See comment above.
1992 		 */
1993 		goto handled;
1994 	} else {
1995 		/* seems the client considers us its best gateway.
1996 		 * send a claim and update the claim table
1997 		 * immediately.
1998 		 */
1999 		batadv_handle_claim(bat_priv, primary_if,
2000 				    primary_if->net_dev->dev_addr,
2001 				    ethhdr->h_source, vid);
2002 		goto allow;
2003 	}
2004 allow:
2005 	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
2006 	ret = false;
2007 	goto out;
2008 
2009 handled:
2010 	kfree_skb(skb);
2011 	ret = true;
2012 
2013 out:
2014 	if (primary_if)
2015 		batadv_hardif_put(primary_if);
2016 	if (claim)
2017 		batadv_claim_put(claim);
2018 	return ret;
2019 }
2020 
2021 /**
2022  * batadv_bla_tx() - check packets going into the mesh
2023  * @bat_priv: the bat priv with all the soft interface information
2024  * @skb: the frame to be checked
2025  * @vid: the VLAN ID of the frame
2026  *
2027  * batadv_bla_tx checks if:
2028  *  * a claim was received which has to be processed
2029  *  * the frame is allowed on the mesh
2030  *
2031  * in these cases, the skb is further handled by this function.
2032  *
2033  * This call might reallocate skb data.
2034  *
2035  * Return: true if handled, otherwise it returns false and the caller shall
2036  * further process the skb.
2037  */
2038 bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
2039 		   unsigned short vid)
2040 {
2041 	struct ethhdr *ethhdr;
2042 	struct batadv_bla_claim search_claim, *claim = NULL;
2043 	struct batadv_bla_backbone_gw *backbone_gw;
2044 	struct batadv_hard_iface *primary_if;
2045 	bool client_roamed;
2046 	bool ret = false;
2047 
2048 	primary_if = batadv_primary_if_get_selected(bat_priv);
2049 	if (!primary_if)
2050 		goto out;
2051 
2052 	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
2053 		goto allow;
2054 
2055 	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
2056 		goto handled;
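	/* batadv_bla_process_claim() returns true if the frame was one of the
	 * BLA management frames exchanged between backbone gateways; such
	 * frames are handled here and not transmitted as regular payload
	 */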
2057 
2058 	ethhdr = eth_hdr(skb);
2059 
2060 	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
2061 		/* don't allow broadcasts while requests are in flight */
2062 		if (is_multicast_ether_addr(ethhdr->h_dest))
2063 			goto handled;
2064 
2065 	ether_addr_copy(search_claim.addr, ethhdr->h_source);
2066 	search_claim.vid = vid;
2067 
2068 	claim = batadv_claim_hash_find(bat_priv, &search_claim);
2069 
2070 	/* if no claim exists, allow it. */
2071 	if (!claim)
2072 		goto allow;
2073 
2074 	/* check if we are responsible. */
2075 	backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
2076 	client_roamed = batadv_compare_eth(backbone_gw->orig,
2077 					   primary_if->net_dev->dev_addr);
2078 	batadv_backbone_gw_put(backbone_gw);
2079 
2080 	if (client_roamed) {
2081 		/* if yes, the client has roamed and we have
2082 		 * to unclaim it.
2083 		 */
2084 		if (batadv_has_timed_out(claim->lasttime, 100)) {
2085 			/* only unclaim if the last claim entry is
2086 			 * older than 100 ms to make sure we really
2087 			 * have a roaming client here.
2088 			 */
2089 			batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Roaming client %pM detected. Unclaim it.\n",
2090 				   __func__, ethhdr->h_source);
2091 			batadv_handle_unclaim(bat_priv, primary_if,
2092 					      primary_if->net_dev->dev_addr,
2093 					      ethhdr->h_source, vid);
2094 			goto allow;
2095 		} else {
2096 			batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Race for claim %pM detected. Drop packet.\n",
2097 				   __func__, ethhdr->h_source);
2098 			goto handled;
2099 		}
2100 	}
2101 
2102 	/* check if it is a multicast/broadcast frame */
2103 	if (is_multicast_ether_addr(ethhdr->h_dest)) {
2104 		/* drop it. the responsible gateway has forwarded it into
2105 		 * the backbone network.
2106 		 */
2107 		goto handled;
2108 	} else {
2109 		/* we must allow it. at least if we are
2110 		 * responsible for the DESTINATION.
2111 		 */
2112 		goto allow;
2113 	}
2114 allow:
2115 	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
2116 	ret = false;
2117 	goto out;
2118 handled:
2119 	ret = true;
2120 out:
2121 	if (primary_if)
2122 		batadv_hardif_put(primary_if);
2123 	if (claim)
2124 		batadv_claim_put(claim);
2125 	return ret;
2126 }
2127 
2128 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
2129 /**
2130  * batadv_bla_claim_table_seq_print_text() - print the claim table in a seq file
2131  * @seq: seq file to print on
2132  * @offset: not used
2133  *
2134  * Return: always 0
2135  */
2136 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
2137 {
2138 	struct net_device *net_dev = (struct net_device *)seq->private;
2139 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
2140 	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
2141 	struct batadv_bla_backbone_gw *backbone_gw;
2142 	struct batadv_bla_claim *claim;
2143 	struct batadv_hard_iface *primary_if;
2144 	struct hlist_head *head;
2145 	u16 backbone_crc;
2146 	u32 i;
2147 	bool is_own;
2148 	u8 *primary_addr;
2149 
2150 	primary_if = batadv_seq_print_text_primary_if_get(seq);
2151 	if (!primary_if)
2152 		goto out;
2153 
2154 	primary_addr = primary_if->net_dev->dev_addr;
2155 	seq_printf(seq,
2156 		   "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
2157 		   net_dev->name, primary_addr,
2158 		   ntohs(bat_priv->bla.claim_dest.group));
2159 	seq_puts(seq,
2160 		 "   Client               VID      Originator        [o] (CRC   )\n");
2161 	for (i = 0; i < hash->size; i++) {
2162 		head = &hash->table[i];
2163 
2164 		rcu_read_lock();
2165 		hlist_for_each_entry_rcu(claim, head, hash_entry) {
2166 			backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
2167 
2168 			is_own = batadv_compare_eth(backbone_gw->orig,
2169 						    primary_addr);
2170 
2171 			spin_lock_bh(&backbone_gw->crc_lock);
2172 			backbone_crc = backbone_gw->crc;
2173 			spin_unlock_bh(&backbone_gw->crc_lock);
2174 			seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
2175 				   claim->addr, batadv_print_vid(claim->vid),
2176 				   backbone_gw->orig,
2177 				   (is_own ? 'x' : ' '),
2178 				   backbone_crc);
2179 
2180 			batadv_backbone_gw_put(backbone_gw);
2181 		}
2182 		rcu_read_unlock();
2183 	}
2184 out:
2185 	if (primary_if)
2186 		batadv_hardif_put(primary_if);
2187 	return 0;
2188 }
2189 #endif
2190 
2191 /**
2192  * batadv_bla_claim_dump_entry() - dump one entry of the claim table
2193  * to a netlink socket
2194  * @msg: buffer for the message
2195  * @portid: netlink port
2196  * @cb: Control block containing additional options
2197  * @primary_if: primary interface
2198  * @claim: entry to dump
2199  *
2200  * Return: 0 or error code.
2201  */
2202 static int
2203 batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
2204 			    struct netlink_callback *cb,
2205 			    struct batadv_hard_iface *primary_if,
2206 			    struct batadv_bla_claim *claim)
2207 {
2208 	u8 *primary_addr = primary_if->net_dev->dev_addr;
2209 	u16 backbone_crc;
2210 	bool is_own;
2211 	void *hdr;
2212 	int ret = -EINVAL;
2213 
2214 	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
2215 			  &batadv_netlink_family, NLM_F_MULTI,
2216 			  BATADV_CMD_GET_BLA_CLAIM);
2217 	if (!hdr) {
2218 		ret = -ENOBUFS;
2219 		goto out;
2220 	}
2221 
2222 	genl_dump_check_consistent(cb, hdr);
2223 
2224 	is_own = batadv_compare_eth(claim->backbone_gw->orig,
2225 				    primary_addr);
2226 
2227 	spin_lock_bh(&claim->backbone_gw->crc_lock);
2228 	backbone_crc = claim->backbone_gw->crc;
2229 	spin_unlock_bh(&claim->backbone_gw->crc_lock);
2230 
2231 	if (is_own)
2232 		if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
2233 			genlmsg_cancel(msg, hdr);
2234 			goto out;
2235 		}
2236 
2237 	if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
2238 	    nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
2239 	    nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
2240 		    claim->backbone_gw->orig) ||
2241 	    nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
2242 			backbone_crc)) {
2243 		genlmsg_cancel(msg, hdr);
2244 		goto out;
2245 	}
2246 
2247 	genlmsg_end(msg, hdr);
2248 	ret = 0;
2249 
2250 out:
2251 	return ret;
2252 }
2253 
2254 /**
2255  * batadv_bla_claim_dump_bucket() - dump one bucket of the claim table
2256  * to a netlink socket
2257  * @msg: buffer for the message
2258  * @portid: netlink port
2259  * @cb: Control block containing additional options
2260  * @primary_if: primary interface
2261  * @hash: hash to dump
2262  * @bucket: bucket index to dump
2263  * @idx_skip: How many entries to skip
2264  *
2265  * Return: 0 if the bucket was dumped completely, a non-zero error code otherwise.
2266  */
2267 static int
2268 batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid,
2269 			     struct netlink_callback *cb,
2270 			     struct batadv_hard_iface *primary_if,
2271 			     struct batadv_hashtable *hash, unsigned int bucket,
2272 			     int *idx_skip)
2273 {
2274 	struct batadv_bla_claim *claim;
2275 	int idx = 0;
2276 	int ret = 0;
2277 
2278 	spin_lock_bh(&hash->list_locks[bucket]);
2279 	cb->seq = atomic_read(&hash->generation) << 1 | 1;
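	/* derive a non-zero netlink sequence number from the hash generation
	 * so genl_dump_check_consistent() can flag the dump as interrupted
	 * (NLM_F_DUMP_INTR) if the table changes between dump passes
	 */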
2280 
2281 	hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) {
2282 		if (idx++ < *idx_skip)
2283 			continue;
2284 
2285 		ret = batadv_bla_claim_dump_entry(msg, portid, cb,
2286 						  primary_if, claim);
2287 		if (ret) {
2288 			*idx_skip = idx - 1;
2289 			goto unlock;
2290 		}
2291 	}
2292 
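	/* whole bucket dumped: reset the skip counter so that the next bucket
	 * starts from its first entry
	 */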
2293 	*idx_skip = 0;
2294 unlock:
2295 	spin_unlock_bh(&hash->list_locks[bucket]);
2296 	return ret;
2297 }
2298 
2299 /**
2300  * batadv_bla_claim_dump() - dump claim table to a netlink socket
2301  * @msg: buffer for the message
2302  * @cb: callback structure containing arguments
2303  *
2304  * Return: length of the message on success, a negative error code otherwise.
2305  */
2306 int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
2307 {
2308 	struct batadv_hard_iface *primary_if = NULL;
2309 	int portid = NETLINK_CB(cb->skb).portid;
2310 	struct net *net = sock_net(cb->skb->sk);
2311 	struct net_device *soft_iface;
2312 	struct batadv_hashtable *hash;
2313 	struct batadv_priv *bat_priv;
2314 	int bucket = cb->args[0];
2315 	int idx = cb->args[1];
2316 	int ifindex;
2317 	int ret = 0;
2318 
2319 	ifindex = batadv_netlink_get_ifindex(cb->nlh,
2320 					     BATADV_ATTR_MESH_IFINDEX);
2321 	if (!ifindex)
2322 		return -EINVAL;
2323 
2324 	soft_iface = dev_get_by_index(net, ifindex);
2325 	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
2326 		ret = -ENODEV;
2327 		goto out;
2328 	}
2329 
2330 	bat_priv = netdev_priv(soft_iface);
2331 	hash = bat_priv->bla.claim_hash;
2332 
2333 	primary_if = batadv_primary_if_get_selected(bat_priv);
2334 	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
2335 		ret = -ENOENT;
2336 		goto out;
2337 	}
2338 
2339 	while (bucket < hash->size) {
2340 		if (batadv_bla_claim_dump_bucket(msg, portid, cb, primary_if,
2341 						 hash, bucket, &idx))
2342 			break;
2343 		bucket++;
2344 	}
2345 
2346 	cb->args[0] = bucket;
2347 	cb->args[1] = idx;
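	/* remember bucket and entry index in the callback so a follow-up dump
	 * call continues exactly where this message ran out of space
	 */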
2348 
2349 	ret = msg->len;
2350 
2351 out:
2352 	if (primary_if)
2353 		batadv_hardif_put(primary_if);
2354 
2355 	if (soft_iface)
2356 		dev_put(soft_iface);
2357 
2358 	return ret;
2359 }
2360 
2361 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
2362 /**
2363  * batadv_bla_backbone_table_seq_print_text() - print the backbone table in a
2364  *  seq file
2365  * @seq: seq file to print on
2366  * @offset: not used
2367  *
2368  * Return: always 0
2369  */
2370 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
2371 {
2372 	struct net_device *net_dev = (struct net_device *)seq->private;
2373 	struct batadv_priv *bat_priv = netdev_priv(net_dev);
2374 	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
2375 	struct batadv_bla_backbone_gw *backbone_gw;
2376 	struct batadv_hard_iface *primary_if;
2377 	struct hlist_head *head;
2378 	int secs, msecs;
2379 	u16 backbone_crc;
2380 	u32 i;
2381 	bool is_own;
2382 	u8 *primary_addr;
2383 
2384 	primary_if = batadv_seq_print_text_primary_if_get(seq);
2385 	if (!primary_if)
2386 		goto out;
2387 
2388 	primary_addr = primary_if->net_dev->dev_addr;
2389 	seq_printf(seq,
2390 		   "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
2391 		   net_dev->name, primary_addr,
2392 		   ntohs(bat_priv->bla.claim_dest.group));
2393 	seq_puts(seq, "   Originator           VID   last seen (CRC   )\n");
2394 	for (i = 0; i < hash->size; i++) {
2395 		head = &hash->table[i];
2396 
2397 		rcu_read_lock();
2398 		hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
2399 			msecs = jiffies_to_msecs(jiffies -
2400 						 backbone_gw->lasttime);
2401 			secs = msecs / 1000;
2402 			msecs = msecs % 1000;
2403 
2404 			is_own = batadv_compare_eth(backbone_gw->orig,
2405 						    primary_addr);
2406 			if (is_own)
2407 				continue;
2408 
2409 			spin_lock_bh(&backbone_gw->crc_lock);
2410 			backbone_crc = backbone_gw->crc;
2411 			spin_unlock_bh(&backbone_gw->crc_lock);
2412 
2413 			seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
2414 				   backbone_gw->orig,
2415 				   batadv_print_vid(backbone_gw->vid), secs,
2416 				   msecs, backbone_crc);
2417 		}
2418 		rcu_read_unlock();
2419 	}
2420 out:
2421 	if (primary_if)
2422 		batadv_hardif_put(primary_if);
2423 	return 0;
2424 }
2425 #endif
2426 
2427 /**
2428  * batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
2429  *  netlink socket
2430  * @msg: buffer for the message
2431  * @portid: netlink port
2432  * @cb: Control block containing additional options
2433  * @primary_if: primary interface
2434  * @backbone_gw: entry to dump
2435  *
2436  * Return: 0 or error code.
2437  */
2438 static int
2439 batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid,
2440 			       struct netlink_callback *cb,
2441 			       struct batadv_hard_iface *primary_if,
2442 			       struct batadv_bla_backbone_gw *backbone_gw)
2443 {
2444 	u8 *primary_addr = primary_if->net_dev->dev_addr;
2445 	u16 backbone_crc;
2446 	bool is_own;
2447 	int msecs;
2448 	void *hdr;
2449 	int ret = -EINVAL;
2450 
2451 	hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
2452 			  &batadv_netlink_family, NLM_F_MULTI,
2453 			  BATADV_CMD_GET_BLA_BACKBONE);
2454 	if (!hdr) {
2455 		ret = -ENOBUFS;
2456 		goto out;
2457 	}
2458 
2459 	genl_dump_check_consistent(cb, hdr);
2460 
2461 	is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);
2462 
2463 	spin_lock_bh(&backbone_gw->crc_lock);
2464 	backbone_crc = backbone_gw->crc;
2465 	spin_unlock_bh(&backbone_gw->crc_lock);
2466 
2467 	msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime);
2468 
2469 	if (is_own)
2470 		if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
2471 			genlmsg_cancel(msg, hdr);
2472 			goto out;
2473 		}
2474 
2475 	if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
2476 		    backbone_gw->orig) ||
2477 	    nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) ||
2478 	    nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
2479 			backbone_crc) ||
2480 	    nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
2481 		genlmsg_cancel(msg, hdr);
2482 		goto out;
2483 	}
2484 
2485 	genlmsg_end(msg, hdr);
2486 	ret = 0;
2487 
2488 out:
2489 	return ret;
2490 }
2491 
2492 /**
2493  * batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table to
2494  *  a netlink socket
2495  * @msg: buffer for the message
2496  * @portid: netlink port
2497  * @cb: Control block containing additional options
2498  * @primary_if: primary interface
2499  * @hash: hash to dump
2500  * @bucket: bucket index to dump
2501  * @idx_skip: How many entries to skip
2502  *
2503  * Return: 0 if the bucket was dumped completely, a non-zero error code otherwise.
2504  */
2505 static int
2506 batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid,
2507 				struct netlink_callback *cb,
2508 				struct batadv_hard_iface *primary_if,
2509 				struct batadv_hashtable *hash,
2510 				unsigned int bucket, int *idx_skip)
2511 {
2512 	struct batadv_bla_backbone_gw *backbone_gw;
2513 	int idx = 0;
2514 	int ret = 0;
2515 
2516 	spin_lock_bh(&hash->list_locks[bucket]);
2517 	cb->seq = atomic_read(&hash->generation) << 1 | 1;
2518 
2519 	hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) {
2520 		if (idx++ < *idx_skip)
2521 			continue;
2522 
2523 		ret = batadv_bla_backbone_dump_entry(msg, portid, cb,
2524 						     primary_if, backbone_gw);
2525 		if (ret) {
2526 			*idx_skip = idx - 1;
2527 			goto unlock;
2528 		}
2529 	}
2530 
2531 	*idx_skip = 0;
2532 unlock:
2533 	spin_unlock_bh(&hash->list_locks[bucket]);
2534 	return ret;
2535 }
2536 
2537 /**
2538  * batadv_bla_backbone_dump() - dump backbone table to a netlink socket
2539  * @msg: buffer for the message
2540  * @cb: callback structure containing arguments
2541  *
2542  * Return: length of the message on success, a negative error code otherwise.
2543  */
2544 int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
2545 {
2546 	struct batadv_hard_iface *primary_if = NULL;
2547 	int portid = NETLINK_CB(cb->skb).portid;
2548 	struct net *net = sock_net(cb->skb->sk);
2549 	struct net_device *soft_iface;
2550 	struct batadv_hashtable *hash;
2551 	struct batadv_priv *bat_priv;
2552 	int bucket = cb->args[0];
2553 	int idx = cb->args[1];
2554 	int ifindex;
2555 	int ret = 0;
2556 
2557 	ifindex = batadv_netlink_get_ifindex(cb->nlh,
2558 					     BATADV_ATTR_MESH_IFINDEX);
2559 	if (!ifindex)
2560 		return -EINVAL;
2561 
2562 	soft_iface = dev_get_by_index(net, ifindex);
2563 	if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
2564 		ret = -ENODEV;
2565 		goto out;
2566 	}
2567 
2568 	bat_priv = netdev_priv(soft_iface);
2569 	hash = bat_priv->bla.backbone_hash;
2570 
2571 	primary_if = batadv_primary_if_get_selected(bat_priv);
2572 	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
2573 		ret = -ENOENT;
2574 		goto out;
2575 	}
2576 
2577 	while (bucket < hash->size) {
2578 		if (batadv_bla_backbone_dump_bucket(msg, portid, cb, primary_if,
2579 						    hash, bucket, &idx))
2580 			break;
2581 		bucket++;
2582 	}
2583 
2584 	cb->args[0] = bucket;
2585 	cb->args[1] = idx;
2586 
2587 	ret = msg->len;
2588 
2589 out:
2590 	if (primary_if)
2591 		batadv_hardif_put(primary_if);
2592 
2593 	if (soft_iface)
2594 		dev_put(soft_iface);
2595 
2596 	return ret;
2597 }
2598 
2599 #ifdef CONFIG_BATMAN_ADV_DAT
2600 /**
2601  * batadv_bla_check_claim() - check if address is claimed
2602  *
2603  * @bat_priv: the bat priv with all the soft interface information
2604  * @addr: mac address of which the claim status is checked
2605  * @vid: the VLAN ID
2606  *
2607  * Checks whether this address is claimed by the local device itself.
2608  *
2609  * Return: true if bla is disabled or the mac is claimed by the device,
2610  * false if the address is already claimed by another gateway
2611  */
2612 bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
2613 			    u8 *addr, unsigned short vid)
2614 {
2615 	struct batadv_bla_claim search_claim;
2616 	struct batadv_bla_claim *claim = NULL;
2617 	struct batadv_hard_iface *primary_if = NULL;
2618 	bool ret = true;
2619 
2620 	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
2621 		return ret;
2622 
2623 	primary_if = batadv_primary_if_get_selected(bat_priv);
2624 	if (!primary_if)
2625 		return ret;
2626 
2627 	/* First look if the mac address is claimed */
2628 	ether_addr_copy(search_claim.addr, addr);
2629 	search_claim.vid = vid;
2630 
2631 	claim = batadv_claim_hash_find(bat_priv, &search_claim);
2632 
2633 	/* If there is a claim and we are not owner of the claim,
2634 	 * return false.
2635 	 */
2636 	if (claim) {
2637 		if (!batadv_compare_eth(claim->backbone_gw->orig,
2638 					primary_if->net_dev->dev_addr))
2639 			ret = false;
2640 		batadv_claim_put(claim);
2641 	}
2642 
2643 	batadv_hardif_put(primary_if);
2644 	return ret;
2645 }
2646 #endif
2647