1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2011-2019 B.A.T.M.A.N. contributors:
3 *
4 * Simon Wunderlich
5 */
6
7 #include "bridge_loop_avoidance.h"
8 #include "main.h"
9
10 #include <linux/atomic.h>
11 #include <linux/byteorder/generic.h>
12 #include <linux/compiler.h>
13 #include <linux/crc16.h>
14 #include <linux/errno.h>
15 #include <linux/etherdevice.h>
16 #include <linux/gfp.h>
17 #include <linux/if_arp.h>
18 #include <linux/if_ether.h>
19 #include <linux/if_vlan.h>
20 #include <linux/jhash.h>
21 #include <linux/jiffies.h>
22 #include <linux/kernel.h>
23 #include <linux/kref.h>
24 #include <linux/list.h>
25 #include <linux/lockdep.h>
26 #include <linux/netdevice.h>
27 #include <linux/netlink.h>
28 #include <linux/rculist.h>
29 #include <linux/rcupdate.h>
30 #include <linux/seq_file.h>
31 #include <linux/skbuff.h>
32 #include <linux/slab.h>
33 #include <linux/spinlock.h>
34 #include <linux/stddef.h>
35 #include <linux/string.h>
36 #include <linux/workqueue.h>
37 #include <net/arp.h>
38 #include <net/genetlink.h>
39 #include <net/netlink.h>
40 #include <net/sock.h>
41 #include <uapi/linux/batadv_packet.h>
42 #include <uapi/linux/batman_adv.h>
43
44 #include "hard-interface.h"
45 #include "hash.h"
46 #include "log.h"
47 #include "netlink.h"
48 #include "originator.h"
49 #include "soft-interface.h"
50 #include "translation-table.h"
51
52 static const u8 batadv_announce_mac[4] = {0x43, 0x05, 0x43, 0x05};
53
54 static void batadv_bla_periodic_work(struct work_struct *work);
55 static void
56 batadv_bla_send_announce(struct batadv_priv *bat_priv,
57 struct batadv_bla_backbone_gw *backbone_gw);
58
59 /**
60 * batadv_choose_claim() - choose the right bucket for a claim.
61 * @data: data to hash
62 * @size: size of the hash table
63 *
64 * Return: the hash index of the claim
65 */
66 static inline u32 batadv_choose_claim(const void *data, u32 size)
67 {
68 struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
69 u32 hash = 0;
70
71 hash = jhash(&claim->addr, sizeof(claim->addr), hash);
72 hash = jhash(&claim->vid, sizeof(claim->vid), hash);
73
74 return hash % size;
75 }
76
77 /**
78 * batadv_choose_backbone_gw() - choose the right bucket for a backbone gateway.
79 * @data: data to hash
80 * @size: size of the hash table
81 *
82 * Return: the hash index of the backbone gateway
83 */
84 static inline u32 batadv_choose_backbone_gw(const void *data, u32 size)
85 {
86 const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data;
87 u32 hash = 0;
88
89 hash = jhash(&claim->addr, sizeof(claim->addr), hash);
90 hash = jhash(&claim->vid, sizeof(claim->vid), hash);
91
92 return hash % size;
93 }
94
95 /**
96 * batadv_compare_backbone_gw() - compare address and vid of two backbone gws
97 * @node: list node of the first entry to compare
98 * @data2: pointer to the second backbone gateway
99 *
100 * Return: true if the backbones have the same data, false otherwise
101 */
102 static bool batadv_compare_backbone_gw(const struct hlist_node *node,
103 const void *data2)
104 {
105 const void *data1 = container_of(node, struct batadv_bla_backbone_gw,
106 hash_entry);
107 const struct batadv_bla_backbone_gw *gw1 = data1;
108 const struct batadv_bla_backbone_gw *gw2 = data2;
109
110 if (!batadv_compare_eth(gw1->orig, gw2->orig))
111 return false;
112
113 if (gw1->vid != gw2->vid)
114 return false;
115
116 return true;
117 }
118
119 /**
120 * batadv_compare_claim() - compare address and vid of two claims
121 * @node: list node of the first entry to compare
122 * @data2: pointer to the second claim
123 *
124 * Return: true if the claims have the same data, false otherwise
125 */
126 static bool batadv_compare_claim(const struct hlist_node *node,
127 const void *data2)
128 {
129 const void *data1 = container_of(node, struct batadv_bla_claim,
130 hash_entry);
131 const struct batadv_bla_claim *cl1 = data1;
132 const struct batadv_bla_claim *cl2 = data2;
133
134 if (!batadv_compare_eth(cl1->addr, cl2->addr))
135 return false;
136
137 if (cl1->vid != cl2->vid)
138 return false;
139
140 return true;
141 }
142
143 /**
144 * batadv_backbone_gw_release() - release backbone gw from lists and queue for
145 * free after rcu grace period
146 * @ref: kref pointer of the backbone gw
147 */
148 static void batadv_backbone_gw_release(struct kref *ref)
149 {
150 struct batadv_bla_backbone_gw *backbone_gw;
151
152 backbone_gw = container_of(ref, struct batadv_bla_backbone_gw,
153 refcount);
154
155 kfree_rcu(backbone_gw, rcu);
156 }
157
158 /**
159 * batadv_backbone_gw_put() - decrement the backbone gw refcounter and possibly
160 * release it
161 * @backbone_gw: backbone gateway to be free'd
162 */
163 static void batadv_backbone_gw_put(struct batadv_bla_backbone_gw *backbone_gw)
164 {
165 kref_put(&backbone_gw->refcount, batadv_backbone_gw_release);
166 }
167
168 /**
169 * batadv_claim_release() - release claim from lists and queue for free after
170 * rcu grace period
171 * @ref: kref pointer of the claim
172 */
173 static void batadv_claim_release(struct kref *ref)
174 {
175 struct batadv_bla_claim *claim;
176 struct batadv_bla_backbone_gw *old_backbone_gw;
177
178 claim = container_of(ref, struct batadv_bla_claim, refcount);
179
180 spin_lock_bh(&claim->backbone_lock);
181 old_backbone_gw = claim->backbone_gw;
182 claim->backbone_gw = NULL;
183 spin_unlock_bh(&claim->backbone_lock);
184
185 spin_lock_bh(&old_backbone_gw->crc_lock);
186 old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
187 spin_unlock_bh(&old_backbone_gw->crc_lock);
188
189 batadv_backbone_gw_put(old_backbone_gw);
190
191 kfree_rcu(claim, rcu);
192 }
193
194 /**
195 * batadv_claim_put() - decrement the claim refcounter and possibly release it
196 * @claim: claim to be free'd
197 */
198 static void batadv_claim_put(struct batadv_bla_claim *claim)
199 {
200 kref_put(&claim->refcount, batadv_claim_release);
201 }
202
203 /**
204 * batadv_claim_hash_find() - looks for a claim in the claim hash
205 * @bat_priv: the bat priv with all the soft interface information
206 * @data: search data (may be local/static data)
207 *
208 * Return: claim if found or NULL otherwise.
209 */
210 static struct batadv_bla_claim *
211 batadv_claim_hash_find(struct batadv_priv *bat_priv,
212 struct batadv_bla_claim *data)
213 {
214 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
215 struct hlist_head *head;
216 struct batadv_bla_claim *claim;
217 struct batadv_bla_claim *claim_tmp = NULL;
218 int index;
219
220 if (!hash)
221 return NULL;
222
223 index = batadv_choose_claim(data, hash->size);
224 head = &hash->table[index];
225
226 rcu_read_lock();
227 hlist_for_each_entry_rcu(claim, head, hash_entry) {
228 if (!batadv_compare_claim(&claim->hash_entry, data))
229 continue;
230
231 if (!kref_get_unless_zero(&claim->refcount))
232 continue;
233
234 claim_tmp = claim;
235 break;
236 }
237 rcu_read_unlock();
238
239 return claim_tmp;
240 }
241
242 /**
243 * batadv_backbone_hash_find() - looks for a backbone gateway in the hash
244 * @bat_priv: the bat priv with all the soft interface information
245 * @addr: the address of the originator
246 * @vid: the VLAN ID
247 *
248 * Return: backbone gateway if found or NULL otherwise
249 */
250 static struct batadv_bla_backbone_gw *
251 batadv_backbone_hash_find(struct batadv_priv *bat_priv, u8 *addr,
252 unsigned short vid)
253 {
254 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
255 struct hlist_head *head;
256 struct batadv_bla_backbone_gw search_entry, *backbone_gw;
257 struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
258 int index;
259
260 if (!hash)
261 return NULL;
262
263 ether_addr_copy(search_entry.orig, addr);
264 search_entry.vid = vid;
265
266 index = batadv_choose_backbone_gw(&search_entry, hash->size);
267 head = &hash->table[index];
268
269 rcu_read_lock();
270 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
271 if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
272 &search_entry))
273 continue;
274
275 if (!kref_get_unless_zero(&backbone_gw->refcount))
276 continue;
277
278 backbone_gw_tmp = backbone_gw;
279 break;
280 }
281 rcu_read_unlock();
282
283 return backbone_gw_tmp;
284 }
285
286 /**
287 * batadv_bla_del_backbone_claims() - delete all claims for a backbone
288 * @backbone_gw: backbone gateway where the claims should be removed
289 */
290 static void
291 batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
292 {
293 struct batadv_hashtable *hash;
294 struct hlist_node *node_tmp;
295 struct hlist_head *head;
296 struct batadv_bla_claim *claim;
297 int i;
298 spinlock_t *list_lock; /* protects write access to the hash lists */
299
300 hash = backbone_gw->bat_priv->bla.claim_hash;
301 if (!hash)
302 return;
303
304 for (i = 0; i < hash->size; i++) {
305 head = &hash->table[i];
306 list_lock = &hash->list_locks[i];
307
308 spin_lock_bh(list_lock);
309 hlist_for_each_entry_safe(claim, node_tmp,
310 head, hash_entry) {
311 if (claim->backbone_gw != backbone_gw)
312 continue;
313
314 batadv_claim_put(claim);
315 hlist_del_rcu(&claim->hash_entry);
316 }
317 spin_unlock_bh(list_lock);
318 }
319
320 /* all claims gone, initialize CRC */
321 spin_lock_bh(&backbone_gw->crc_lock);
322 backbone_gw->crc = BATADV_BLA_CRC_INIT;
323 spin_unlock_bh(&backbone_gw->crc_lock);
324 }
325
326 /**
327 * batadv_bla_send_claim() - sends a claim frame according to the provided info
328 * @bat_priv: the bat priv with all the soft interface information
329 * @mac: the mac address to be announced within the claim
330 * @vid: the VLAN ID
331 * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...)
332 */
333 static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
334 unsigned short vid, int claimtype)
335 {
336 struct sk_buff *skb;
337 struct ethhdr *ethhdr;
338 struct batadv_hard_iface *primary_if;
339 struct net_device *soft_iface;
340 u8 *hw_src;
341 struct batadv_bla_claim_dst local_claim_dest;
342 __be32 zeroip = 0;
343
344 primary_if = batadv_primary_if_get_selected(bat_priv);
345 if (!primary_if)
346 return;
347
348 memcpy(&local_claim_dest, &bat_priv->bla.claim_dest,
349 sizeof(local_claim_dest));
350 local_claim_dest.type = claimtype;
351
352 soft_iface = primary_if->soft_iface;
353
354 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
355 /* IP DST: 0.0.0.0 */
356 zeroip,
357 primary_if->soft_iface,
358 /* IP SRC: 0.0.0.0 */
359 zeroip,
360 /* Ethernet DST: Broadcast */
361 NULL,
362 /* Ethernet SRC/HW SRC: originator mac */
363 primary_if->net_dev->dev_addr,
364 /* HW DST: FF:43:05:XX:YY:YY
365 * with XX = claim type
366 * and YY:YY = group id
367 */
368 (u8 *)&local_claim_dest);
369
370 if (!skb)
371 goto out;
372
373 ethhdr = (struct ethhdr *)skb->data;
374 hw_src = (u8 *)ethhdr + ETH_HLEN + sizeof(struct arphdr);
375
376 /* now we pretend that the client would have sent this ... */
377 switch (claimtype) {
378 case BATADV_CLAIM_TYPE_CLAIM:
379 /* normal claim frame
380 * set Ethernet SRC to the client's mac
381 */
382 ether_addr_copy(ethhdr->h_source, mac);
383 batadv_dbg(BATADV_DBG_BLA, bat_priv,
384 "%s(): CLAIM %pM on vid %d\n", __func__, mac,
385 batadv_print_vid(vid));
386 break;
387 case BATADV_CLAIM_TYPE_UNCLAIM:
388 /* unclaim frame
389 * set HW SRC to the client's mac
390 */
391 ether_addr_copy(hw_src, mac);
392 batadv_dbg(BATADV_DBG_BLA, bat_priv,
393 "%s(): UNCLAIM %pM on vid %d\n", __func__, mac,
394 batadv_print_vid(vid));
395 break;
396 case BATADV_CLAIM_TYPE_ANNOUNCE:
397 /* announcement frame
398 * set HW SRC to the special mac containing the crc
399 */
400 ether_addr_copy(hw_src, mac);
401 batadv_dbg(BATADV_DBG_BLA, bat_priv,
402 "%s(): ANNOUNCE of %pM on vid %d\n", __func__,
403 ethhdr->h_source, batadv_print_vid(vid));
404 break;
405 case BATADV_CLAIM_TYPE_REQUEST:
406 /* request frame
407 * set HW SRC and header destination to the receiving backbone
408 * gw's mac
409 */
410 ether_addr_copy(hw_src, mac);
411 ether_addr_copy(ethhdr->h_dest, mac);
412 batadv_dbg(BATADV_DBG_BLA, bat_priv,
413 "%s(): REQUEST of %pM to %pM on vid %d\n", __func__,
414 ethhdr->h_source, ethhdr->h_dest,
415 batadv_print_vid(vid));
416 break;
417 case BATADV_CLAIM_TYPE_LOOPDETECT:
418 ether_addr_copy(ethhdr->h_source, mac);
419 batadv_dbg(BATADV_DBG_BLA, bat_priv,
420 "%s(): LOOPDETECT of %pM to %pM on vid %d\n",
421 __func__, ethhdr->h_source, ethhdr->h_dest,
422 batadv_print_vid(vid));
423
424 break;
425 }
426
427 if (vid & BATADV_VLAN_HAS_TAG) {
428 skb = vlan_insert_tag(skb, htons(ETH_P_8021Q),
429 vid & VLAN_VID_MASK);
430 if (!skb)
431 goto out;
432 }
433
434 skb_reset_mac_header(skb);
435 skb->protocol = eth_type_trans(skb, soft_iface);
436 batadv_inc_counter(bat_priv, BATADV_CNT_RX);
437 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
438 skb->len + ETH_HLEN);
439
440 netif_rx(skb);
441 out:
442 if (primary_if)
443 batadv_hardif_put(primary_if);
444 }
445
446 /**
447 * batadv_bla_loopdetect_report() - worker for reporting the loop
448 * @work: work queue item
449 *
450 * Throws an uevent, as the loopdetect check function can't do that itself
451 * since the kernel may sleep while throwing uevents.
452 */
453 static void batadv_bla_loopdetect_report(struct work_struct *work)
454 {
455 struct batadv_bla_backbone_gw *backbone_gw;
456 struct batadv_priv *bat_priv;
457 char vid_str[6] = { '\0' };
458
459 backbone_gw = container_of(work, struct batadv_bla_backbone_gw,
460 report_work);
461 bat_priv = backbone_gw->bat_priv;
462
463 batadv_info(bat_priv->soft_iface,
464 "Possible loop on VLAN %d detected which can't be handled by BLA - please check your network setup!\n",
465 batadv_print_vid(backbone_gw->vid));
466 snprintf(vid_str, sizeof(vid_str), "%d",
467 batadv_print_vid(backbone_gw->vid));
468 vid_str[sizeof(vid_str) - 1] = 0;
469
470 batadv_throw_uevent(bat_priv, BATADV_UEV_BLA, BATADV_UEV_LOOPDETECT,
471 vid_str);
472
473 batadv_backbone_gw_put(backbone_gw);
474 }
475
476 /**
477 * batadv_bla_get_backbone_gw() - finds or creates a backbone gateway
478 * @bat_priv: the bat priv with all the soft interface information
479 * @orig: the mac address of the originator
480 * @vid: the VLAN ID
481 * @own_backbone: set if the requested backbone is local
482 *
483 * Return: the (possibly created) backbone gateway or NULL on error
484 */
485 static struct batadv_bla_backbone_gw *
486 batadv_bla_get_backbone_gw(struct batadv_priv *bat_priv, u8 *orig,
487 unsigned short vid, bool own_backbone)
488 {
489 struct batadv_bla_backbone_gw *entry;
490 struct batadv_orig_node *orig_node;
491 int hash_added;
492
493 entry = batadv_backbone_hash_find(bat_priv, orig, vid);
494
495 if (entry)
496 return entry;
497
498 batadv_dbg(BATADV_DBG_BLA, bat_priv,
499 "%s(): not found (%pM, %d), creating new entry\n", __func__,
500 orig, batadv_print_vid(vid));
501
502 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
503 if (!entry)
504 return NULL;
505
506 entry->vid = vid;
507 entry->lasttime = jiffies;
508 entry->crc = BATADV_BLA_CRC_INIT;
509 entry->bat_priv = bat_priv;
510 spin_lock_init(&entry->crc_lock);
511 atomic_set(&entry->request_sent, 0);
512 atomic_set(&entry->wait_periods, 0);
513 ether_addr_copy(entry->orig, orig);
514 INIT_WORK(&entry->report_work, batadv_bla_loopdetect_report);
515 kref_init(&entry->refcount);
516
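/* the reference taken below is owned by the backbone hash table and is
 * dropped again via batadv_backbone_gw_put() when the entry is removed
 * from the hash
 */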
517 kref_get(&entry->refcount);
518 hash_added = batadv_hash_add(bat_priv->bla.backbone_hash,
519 batadv_compare_backbone_gw,
520 batadv_choose_backbone_gw, entry,
521 &entry->hash_entry);
522
523 if (unlikely(hash_added != 0)) {
524 /* hash failed, free the structure */
525 kfree(entry);
526 return NULL;
527 }
528
529 /* this is a gateway now, remove any TT entry on this VLAN */
530 orig_node = batadv_orig_hash_find(bat_priv, orig);
531 if (orig_node) {
532 batadv_tt_global_del_orig(bat_priv, orig_node, vid,
533 "became a backbone gateway");
534 batadv_orig_node_put(orig_node);
535 }
536
537 if (own_backbone) {
538 batadv_bla_send_announce(bat_priv, entry);
539
540 /* this will be decreased in the worker thread */
541 atomic_inc(&entry->request_sent);
542 atomic_set(&entry->wait_periods, BATADV_BLA_WAIT_PERIODS);
543 atomic_inc(&bat_priv->bla.num_requests);
544 }
545
546 return entry;
547 }
548
549 /**
550 * batadv_bla_update_own_backbone_gw() - updates the own backbone gw for a VLAN
551 * @bat_priv: the bat priv with all the soft interface information
552 * @primary_if: the selected primary interface
553 * @vid: VLAN identifier
554 *
555 * update or add our own backbone gw to make sure we announce
556 * where we receive other backbone gws
557 */
558 static void
559 batadv_bla_update_own_backbone_gw(struct batadv_priv *bat_priv,
560 struct batadv_hard_iface *primary_if,
561 unsigned short vid)
562 {
563 struct batadv_bla_backbone_gw *backbone_gw;
564
565 backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
566 primary_if->net_dev->dev_addr,
567 vid, true);
568 if (unlikely(!backbone_gw))
569 return;
570
571 backbone_gw->lasttime = jiffies;
572 batadv_backbone_gw_put(backbone_gw);
573 }
574
575 /**
576 * batadv_bla_answer_request() - answer a bla request by sending own claims
577 * @bat_priv: the bat priv with all the soft interface information
578 * @primary_if: interface where the request came on
579 * @vid: the vid where the request came on
580 *
581 * Repeat all of our own claims, and finally send an ANNOUNCE frame
582 * to allow the requester to check again whether the CRC is correct now.
583 */
584 static void batadv_bla_answer_request(struct batadv_priv *bat_priv,
585 struct batadv_hard_iface *primary_if,
586 unsigned short vid)
587 {
588 struct hlist_head *head;
589 struct batadv_hashtable *hash;
590 struct batadv_bla_claim *claim;
591 struct batadv_bla_backbone_gw *backbone_gw;
592 int i;
593
594 batadv_dbg(BATADV_DBG_BLA, bat_priv,
595 "%s(): received a claim request, send all of our own claims again\n",
596 __func__);
597
598 backbone_gw = batadv_backbone_hash_find(bat_priv,
599 primary_if->net_dev->dev_addr,
600 vid);
601 if (!backbone_gw)
602 return;
603
604 hash = bat_priv->bla.claim_hash;
605 for (i = 0; i < hash->size; i++) {
606 head = &hash->table[i];
607
608 rcu_read_lock();
609 hlist_for_each_entry_rcu(claim, head, hash_entry) {
610 /* only own claims are interesting */
611 if (claim->backbone_gw != backbone_gw)
612 continue;
613
614 batadv_bla_send_claim(bat_priv, claim->addr, claim->vid,
615 BATADV_CLAIM_TYPE_CLAIM);
616 }
617 rcu_read_unlock();
618 }
619
620 /* finally, send an announcement frame */
621 batadv_bla_send_announce(bat_priv, backbone_gw);
622 batadv_backbone_gw_put(backbone_gw);
623 }
624
625 /**
626 * batadv_bla_send_request() - send a request to repeat claims
627 * @backbone_gw: the backbone gateway with which we are out of sync
628 *
629 * When the crc is wrong, ask the backbone gateway for a full table update.
630 * After the request, it will repeat all of its own claims and finally
631 * send an announcement claim with which we can check again.
632 */
633 static void batadv_bla_send_request(struct batadv_bla_backbone_gw *backbone_gw)
634 {
635 /* first, remove all old entries */
636 batadv_bla_del_backbone_claims(backbone_gw);
637
638 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
639 "Sending REQUEST to %pM\n", backbone_gw->orig);
640
641 /* send request */
642 batadv_bla_send_claim(backbone_gw->bat_priv, backbone_gw->orig,
643 backbone_gw->vid, BATADV_CLAIM_TYPE_REQUEST);
644
645 /* no local broadcasts should be sent or received, for now. */
646 if (!atomic_read(&backbone_gw->request_sent)) {
647 atomic_inc(&backbone_gw->bat_priv->bla.num_requests);
648 atomic_set(&backbone_gw->request_sent, 1);
649 }
650 }
651
652 /**
653 * batadv_bla_send_announce() - Send an announcement frame
654 * @bat_priv: the bat priv with all the soft interface information
655 * @backbone_gw: our backbone gateway which should be announced
656 */
657 static void batadv_bla_send_announce(struct batadv_priv *bat_priv,
658 struct batadv_bla_backbone_gw *backbone_gw)
659 {
660 u8 mac[ETH_ALEN];
661 __be16 crc;
662
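/* the announced HW SRC is 43:05:43:05 followed by the 16-bit claim CRC
 * of this backbone gateway (network byte order)
 */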
663 memcpy(mac, batadv_announce_mac, 4);
664 spin_lock_bh(&backbone_gw->crc_lock);
665 crc = htons(backbone_gw->crc);
666 spin_unlock_bh(&backbone_gw->crc_lock);
667 memcpy(&mac[4], &crc, 2);
668
669 batadv_bla_send_claim(bat_priv, mac, backbone_gw->vid,
670 BATADV_CLAIM_TYPE_ANNOUNCE);
671 }
672
673 /**
674 * batadv_bla_add_claim() - Adds a claim in the claim hash
675 * @bat_priv: the bat priv with all the soft interface information
676 * @mac: the mac address of the claim
677 * @vid: the VLAN ID of the frame
678 * @backbone_gw: the backbone gateway which claims it
679 */
680 static void batadv_bla_add_claim(struct batadv_priv *bat_priv,
681 const u8 *mac, const unsigned short vid,
682 struct batadv_bla_backbone_gw *backbone_gw)
683 {
684 struct batadv_bla_backbone_gw *old_backbone_gw;
685 struct batadv_bla_claim *claim;
686 struct batadv_bla_claim search_claim;
687 bool remove_crc = false;
688 int hash_added;
689
690 ether_addr_copy(search_claim.addr, mac);
691 search_claim.vid = vid;
692 claim = batadv_claim_hash_find(bat_priv, &search_claim);
693
694 /* create a new claim entry if it does not exist yet. */
695 if (!claim) {
696 claim = kzalloc(sizeof(*claim), GFP_ATOMIC);
697 if (!claim)
698 return;
699
700 ether_addr_copy(claim->addr, mac);
701 spin_lock_init(&claim->backbone_lock);
702 claim->vid = vid;
703 claim->lasttime = jiffies;
704 kref_get(&backbone_gw->refcount);
705 claim->backbone_gw = backbone_gw;
706 kref_init(&claim->refcount);
707
708 batadv_dbg(BATADV_DBG_BLA, bat_priv,
709 "%s(): adding new entry %pM, vid %d to hash ...\n",
710 __func__, mac, batadv_print_vid(vid));
711
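/* the additional reference taken below is owned by the claim hash table
 * and released again when the claim is removed from the hash
 */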
712 kref_get(&claim->refcount);
713 hash_added = batadv_hash_add(bat_priv->bla.claim_hash,
714 batadv_compare_claim,
715 batadv_choose_claim, claim,
716 &claim->hash_entry);
717
718 if (unlikely(hash_added != 0)) {
719 /* only local changes happened. */
720 kfree(claim);
721 return;
722 }
723 } else {
724 claim->lasttime = jiffies;
725 if (claim->backbone_gw == backbone_gw)
726 /* no need to register a new backbone */
727 goto claim_free_ref;
728
729 batadv_dbg(BATADV_DBG_BLA, bat_priv,
730 "%s(): changing ownership for %pM, vid %d to gw %pM\n",
731 __func__, mac, batadv_print_vid(vid),
732 backbone_gw->orig);
733
734 remove_crc = true;
735 }
736
737 /* replace backbone_gw atomically and adjust reference counters */
738 spin_lock_bh(&claim->backbone_lock);
739 old_backbone_gw = claim->backbone_gw;
740 kref_get(&backbone_gw->refcount);
741 claim->backbone_gw = backbone_gw;
742 spin_unlock_bh(&claim->backbone_lock);
743
744 if (remove_crc) {
745 /* remove claim address from old backbone_gw */
746 spin_lock_bh(&old_backbone_gw->crc_lock);
747 old_backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
748 spin_unlock_bh(&old_backbone_gw->crc_lock);
749 }
750
751 batadv_backbone_gw_put(old_backbone_gw);
752
753 /* add claim address to new backbone_gw */
754 spin_lock_bh(&backbone_gw->crc_lock);
755 backbone_gw->crc ^= crc16(0, claim->addr, ETH_ALEN);
756 spin_unlock_bh(&backbone_gw->crc_lock);
757 backbone_gw->lasttime = jiffies;
758
759 claim_free_ref:
760 batadv_claim_put(claim);
761 }
762
763 /**
764 * batadv_bla_claim_get_backbone_gw() - Get valid reference for backbone_gw of
765 * claim
766 * @claim: claim whose backbone_gw should be returned
767 *
768 * Return: valid reference to claim::backbone_gw
769 */
770 static struct batadv_bla_backbone_gw *
771 batadv_bla_claim_get_backbone_gw(struct batadv_bla_claim *claim)
772 {
773 struct batadv_bla_backbone_gw *backbone_gw;
774
775 spin_lock_bh(&claim->backbone_lock);
776 backbone_gw = claim->backbone_gw;
777 kref_get(&backbone_gw->refcount);
778 spin_unlock_bh(&claim->backbone_lock);
779
780 return backbone_gw;
781 }
782
783 /**
784 * batadv_bla_del_claim() - delete a claim from the claim hash
785 * @bat_priv: the bat priv with all the soft interface information
786 * @mac: mac address of the claim to be removed
787 * @vid: VLAN id for the claim to be removed
788 */
789 static void batadv_bla_del_claim(struct batadv_priv *bat_priv,
790 const u8 *mac, const unsigned short vid)
791 {
792 struct batadv_bla_claim search_claim, *claim;
793 struct batadv_bla_claim *claim_removed_entry;
794 struct hlist_node *claim_removed_node;
795
796 ether_addr_copy(search_claim.addr, mac);
797 search_claim.vid = vid;
798 claim = batadv_claim_hash_find(bat_priv, &search_claim);
799 if (!claim)
800 return;
801
802 batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): %pM, vid %d\n", __func__,
803 mac, batadv_print_vid(vid));
804
805 claim_removed_node = batadv_hash_remove(bat_priv->bla.claim_hash,
806 batadv_compare_claim,
807 batadv_choose_claim, claim);
808 if (!claim_removed_node)
809 goto free_claim;
810
811 /* reference from the hash is gone */
812 claim_removed_entry = hlist_entry(claim_removed_node,
813 struct batadv_bla_claim, hash_entry);
814 batadv_claim_put(claim_removed_entry);
815
816 free_claim:
817 /* don't need the reference from hash_find() anymore */
818 batadv_claim_put(claim);
819 }
820
821 /**
822 * batadv_handle_announce() - check for ANNOUNCE frame
823 * @bat_priv: the bat priv with all the soft interface information
824 * @an_addr: announcement mac address (ARP Sender HW address)
825 * @backbone_addr: originator address of the sender (Ethernet source MAC)
826 * @vid: the VLAN ID of the frame
827 *
828 * Return: true if handled
829 */
830 static bool batadv_handle_announce(struct batadv_priv *bat_priv, u8 *an_addr,
831 u8 *backbone_addr, unsigned short vid)
832 {
833 struct batadv_bla_backbone_gw *backbone_gw;
834 u16 backbone_crc, crc;
835
836 if (memcmp(an_addr, batadv_announce_mac, 4) != 0)
837 return false;
838
839 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
840 false);
841
842 if (unlikely(!backbone_gw))
843 return true;
844
845 /* handle as ANNOUNCE frame */
846 backbone_gw->lasttime = jiffies;
847 crc = ntohs(*((__be16 *)(&an_addr[4])));
848
849 batadv_dbg(BATADV_DBG_BLA, bat_priv,
850 "%s(): ANNOUNCE vid %d (sent by %pM)... CRC = %#.4x\n",
851 __func__, batadv_print_vid(vid), backbone_gw->orig, crc);
852
853 spin_lock_bh(&backbone_gw->crc_lock);
854 backbone_crc = backbone_gw->crc;
855 spin_unlock_bh(&backbone_gw->crc_lock);
856
857 if (backbone_crc != crc) {
858 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
859 "%s(): CRC FAILED for %pM/%d (my = %#.4x, sent = %#.4x)\n",
860 __func__, backbone_gw->orig,
861 batadv_print_vid(backbone_gw->vid),
862 backbone_crc, crc);
863
864 batadv_bla_send_request(backbone_gw);
865 } else {
866 /* if we have sent a request and the crc was OK,
867 * we can allow traffic again.
868 */
869 if (atomic_read(&backbone_gw->request_sent)) {
870 atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
871 atomic_set(&backbone_gw->request_sent, 0);
872 }
873 }
874
875 batadv_backbone_gw_put(backbone_gw);
876 return true;
877 }
878
879 /**
880 * batadv_handle_request() - check for REQUEST frame
881 * @bat_priv: the bat priv with all the soft interface information
882 * @primary_if: the primary hard interface of this batman soft interface
883 * @backbone_addr: backbone address to be requested (ARP sender HW MAC)
884 * @ethhdr: ethernet header of a packet
885 * @vid: the VLAN ID of the frame
886 *
887 * Return: true if handled
888 */
889 static bool batadv_handle_request(struct batadv_priv *bat_priv,
890 struct batadv_hard_iface *primary_if,
891 u8 *backbone_addr, struct ethhdr *ethhdr,
892 unsigned short vid)
893 {
894 /* check for REQUEST frame */
895 if (!batadv_compare_eth(backbone_addr, ethhdr->h_dest))
896 return false;
897
898 /* sanity check, this should not happen on a normal switch,
899 * we ignore it in this case.
900 */
901 if (!batadv_compare_eth(ethhdr->h_dest, primary_if->net_dev->dev_addr))
902 return true;
903
904 batadv_dbg(BATADV_DBG_BLA, bat_priv,
905 "%s(): REQUEST vid %d (sent by %pM)...\n",
906 __func__, batadv_print_vid(vid), ethhdr->h_source);
907
908 batadv_bla_answer_request(bat_priv, primary_if, vid);
909 return true;
910 }
911
912 /**
913 * batadv_handle_unclaim() - check for UNCLAIM frame
914 * @bat_priv: the bat priv with all the soft interface information
915 * @primary_if: the primary hard interface of this batman soft interface
916 * @backbone_addr: originator address of the backbone (Ethernet source)
917 * @claim_addr: Client to be unclaimed (ARP sender HW MAC)
918 * @vid: the VLAN ID of the frame
919 *
920 * Return: true if handled
921 */
922 static bool batadv_handle_unclaim(struct batadv_priv *bat_priv,
923 struct batadv_hard_iface *primary_if,
924 u8 *backbone_addr, u8 *claim_addr,
925 unsigned short vid)
926 {
927 struct batadv_bla_backbone_gw *backbone_gw;
928
929 /* unclaim in any case if it is our own */
930 if (primary_if && batadv_compare_eth(backbone_addr,
931 primary_if->net_dev->dev_addr))
932 batadv_bla_send_claim(bat_priv, claim_addr, vid,
933 BATADV_CLAIM_TYPE_UNCLAIM);
934
935 backbone_gw = batadv_backbone_hash_find(bat_priv, backbone_addr, vid);
936
937 if (!backbone_gw)
938 return true;
939
940 /* this must be an UNCLAIM frame */
941 batadv_dbg(BATADV_DBG_BLA, bat_priv,
942 "%s(): UNCLAIM %pM on vid %d (sent by %pM)...\n", __func__,
943 claim_addr, batadv_print_vid(vid), backbone_gw->orig);
944
945 batadv_bla_del_claim(bat_priv, claim_addr, vid);
946 batadv_backbone_gw_put(backbone_gw);
947 return true;
948 }
949
950 /**
951 * batadv_handle_claim() - check for CLAIM frame
952 * @bat_priv: the bat priv with all the soft interface information
953 * @primary_if: the primary hard interface of this batman soft interface
954 * @backbone_addr: originator address of the backbone (Ethernet Source)
955 * @claim_addr: client mac address to be claimed (ARP sender HW MAC)
956 * @vid: the VLAN ID of the frame
957 *
958 * Return: true if handled
959 */
960 static bool batadv_handle_claim(struct batadv_priv *bat_priv,
961 struct batadv_hard_iface *primary_if,
962 u8 *backbone_addr, u8 *claim_addr,
963 unsigned short vid)
964 {
965 struct batadv_bla_backbone_gw *backbone_gw;
966
967 /* register the gateway if not yet available, and add the claim. */
968
969 backbone_gw = batadv_bla_get_backbone_gw(bat_priv, backbone_addr, vid,
970 false);
971
972 if (unlikely(!backbone_gw))
973 return true;
974
975 /* this must be a CLAIM frame */
976 batadv_bla_add_claim(bat_priv, claim_addr, vid, backbone_gw);
977 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
978 batadv_bla_send_claim(bat_priv, claim_addr, vid,
979 BATADV_CLAIM_TYPE_CLAIM);
980
981 /* TODO: we could call something like tt_local_del() here. */
982
983 batadv_backbone_gw_put(backbone_gw);
984 return true;
985 }
986
987 /**
988 * batadv_check_claim_group() - check for claim group membership
989 * @bat_priv: the bat priv with all the soft interface information
990 * @primary_if: the primary interface of this batman interface
991 * @hw_src: the Hardware source in the ARP Header
992 * @hw_dst: the Hardware destination in the ARP Header
993 * @ethhdr: pointer to the Ethernet header of the claim frame
994 *
995 * Checks if it is a claim packet and if it is on the same group.
996 * This function also applies the group ID of the sender
997 * if it is in the same mesh.
998 *
999 * Return:
1000 * 2 - if it is a claim packet and on the same group
1001 * 1 - if it is a claim packet from another group
1002 * 0 - if it is not a claim packet
1003 */
1004 static int batadv_check_claim_group(struct batadv_priv *bat_priv,
1005 struct batadv_hard_iface *primary_if,
1006 u8 *hw_src, u8 *hw_dst,
1007 struct ethhdr *ethhdr)
1008 {
1009 u8 *backbone_addr;
1010 struct batadv_orig_node *orig_node;
1011 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
1012
1013 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
1014 bla_dst_own = &bat_priv->bla.claim_dest;
1015
1016 /* if announcement packet, use the source,
1017 * otherwise assume it is in the hw_src
1018 */
1019 switch (bla_dst->type) {
1020 case BATADV_CLAIM_TYPE_CLAIM:
1021 backbone_addr = hw_src;
1022 break;
1023 case BATADV_CLAIM_TYPE_REQUEST:
1024 case BATADV_CLAIM_TYPE_ANNOUNCE:
1025 case BATADV_CLAIM_TYPE_UNCLAIM:
1026 backbone_addr = ethhdr->h_source;
1027 break;
1028 default:
1029 return 0;
1030 }
1031
1032 /* don't accept claim frames from ourselves */
1033 if (batadv_compare_eth(backbone_addr, primary_if->net_dev->dev_addr))
1034 return 0;
1035
1036 /* if it's already the same group, it is fine. */
1037 if (bla_dst->group == bla_dst_own->group)
1038 return 2;
1039
1040 /* let's see if this originator is in our mesh */
1041 orig_node = batadv_orig_hash_find(bat_priv, backbone_addr);
1042
1043 /* don't accept claims from gateways which are not in
1044 * the same mesh or group.
1045 */
1046 if (!orig_node)
1047 return 1;
1048
1049 /* if our mesh friend's mac is bigger, use it for ourselves. */
1050 if (ntohs(bla_dst->group) > ntohs(bla_dst_own->group)) {
1051 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1052 "taking other backbones claim group: %#.4x\n",
1053 ntohs(bla_dst->group));
1054 bla_dst_own->group = bla_dst->group;
1055 }
1056
1057 batadv_orig_node_put(orig_node);
1058
1059 return 2;
1060 }
1061
1062 /**
1063 * batadv_bla_process_claim() - Check if this is a claim frame, and process it
1064 * @bat_priv: the bat priv with all the soft interface information
1065 * @primary_if: the primary hard interface of this batman soft interface
1066 * @skb: the frame to be checked
1067 *
1068 * Return: true if it was a claim frame, otherwise return false to
1069 * tell the callee that it can use the frame on its own.
1070 */
1071 static bool batadv_bla_process_claim(struct batadv_priv *bat_priv,
1072 struct batadv_hard_iface *primary_if,
1073 struct sk_buff *skb)
1074 {
1075 struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
1076 u8 *hw_src, *hw_dst;
1077 struct vlan_hdr *vhdr, vhdr_buf;
1078 struct ethhdr *ethhdr;
1079 struct arphdr *arphdr;
1080 unsigned short vid;
1081 int vlan_depth = 0;
1082 __be16 proto;
1083 int headlen;
1084 int ret;
1085
1086 vid = batadv_get_vid(skb, 0);
1087 ethhdr = eth_hdr(skb);
1088
1089 proto = ethhdr->h_proto;
1090 headlen = ETH_HLEN;
1091 if (vid & BATADV_VLAN_HAS_TAG) {
1092 /* Traverse the VLAN/Ethertypes.
1093 *
1094 * At this point it is known that the first protocol is a VLAN
1095 * header, so start checking at the encapsulated protocol.
1096 *
1097 * The depth of the VLAN headers is recorded to drop BLA claim
1098 * frames encapsulated into multiple VLAN headers (QinQ).
1099 */
1100 do {
1101 vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
1102 &vhdr_buf);
1103 if (!vhdr)
1104 return false;
1105
1106 proto = vhdr->h_vlan_encapsulated_proto;
1107 headlen += VLAN_HLEN;
1108 vlan_depth++;
1109 } while (proto == htons(ETH_P_8021Q));
1110 }
1111
1112 if (proto != htons(ETH_P_ARP))
1113 return false; /* not a claim frame */
1114
1115 /* this must be an ARP frame. check if it is a claim. */
1116
1117 if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
1118 return false;
1119
1120 /* pskb_may_pull() may have modified the pointers, get ethhdr again */
1121 ethhdr = eth_hdr(skb);
1122 arphdr = (struct arphdr *)((u8 *)ethhdr + headlen);
1123
1124 /* Check whether the ARP frame carries valid
1125 * IP information
1126 */
1127 if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
1128 return false;
1129 if (arphdr->ar_pro != htons(ETH_P_IP))
1130 return false;
1131 if (arphdr->ar_hln != ETH_ALEN)
1132 return false;
1133 if (arphdr->ar_pln != 4)
1134 return false;
1135
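/* in an Ethernet/IPv4 ARP payload the sender HW address directly follows
 * the ARP header; the target HW address follows the sender HW address
 * (ETH_ALEN bytes) and the sender IP address (4 bytes)
 */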
1136 hw_src = (u8 *)arphdr + sizeof(struct arphdr);
1137 hw_dst = hw_src + ETH_ALEN + 4;
1138 bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
1139 bla_dst_own = &bat_priv->bla.claim_dest;
1140
1141 /* check if it is a claim frame in general */
1142 if (memcmp(bla_dst->magic, bla_dst_own->magic,
1143 sizeof(bla_dst->magic)) != 0)
1144 return false;
1145
1146 /* check if there is a claim frame encapsulated deeper in (QinQ) and
1147 * drop that, as this is not supported by BLA but should also not be
1148 * sent via the mesh.
1149 */
1150 if (vlan_depth > 1)
1151 return true;
1152
1153 /* Let the loopdetect frames pass onto the mesh in any case. */
1154 if (bla_dst->type == BATADV_CLAIM_TYPE_LOOPDETECT)
1155 return false;
1156
1157 /* check if it is a claim frame. */
1158 ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
1159 ethhdr);
1160 if (ret == 1)
1161 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1162 "%s(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
1163 __func__, ethhdr->h_source, batadv_print_vid(vid),
1164 hw_src, hw_dst);
1165
1166 if (ret < 2)
1167 return !!ret;
1168
1169 /* become a backbone gw ourselves on this vlan if not happened yet */
1170 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1171
1172 /* check for the different types of claim frames ... */
1173 switch (bla_dst->type) {
1174 case BATADV_CLAIM_TYPE_CLAIM:
1175 if (batadv_handle_claim(bat_priv, primary_if, hw_src,
1176 ethhdr->h_source, vid))
1177 return true;
1178 break;
1179 case BATADV_CLAIM_TYPE_UNCLAIM:
1180 if (batadv_handle_unclaim(bat_priv, primary_if,
1181 ethhdr->h_source, hw_src, vid))
1182 return true;
1183 break;
1184
1185 case BATADV_CLAIM_TYPE_ANNOUNCE:
1186 if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
1187 vid))
1188 return true;
1189 break;
1190 case BATADV_CLAIM_TYPE_REQUEST:
1191 if (batadv_handle_request(bat_priv, primary_if, hw_src, ethhdr,
1192 vid))
1193 return true;
1194 break;
1195 }
1196
1197 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1198 "%s(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
1199 __func__, ethhdr->h_source, batadv_print_vid(vid), hw_src,
1200 hw_dst);
1201 return true;
1202 }
1203
1204 /**
1205 * batadv_bla_purge_backbone_gw() - Remove backbone gateways after a timeout or
1206 * immediately
1207 * @bat_priv: the bat priv with all the soft interface information
1208 * @now: whether the whole hash shall be wiped now
1209 *
1210 * Check when we last heard from other nodes, and remove them in case of
1211 * a time out, or clean all backbone gws if now is set.
1212 */
1213 static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
1214 {
1215 struct batadv_bla_backbone_gw *backbone_gw;
1216 struct hlist_node *node_tmp;
1217 struct hlist_head *head;
1218 struct batadv_hashtable *hash;
1219 spinlock_t *list_lock; /* protects write access to the hash lists */
1220 int i;
1221
1222 hash = bat_priv->bla.backbone_hash;
1223 if (!hash)
1224 return;
1225
1226 for (i = 0; i < hash->size; i++) {
1227 head = &hash->table[i];
1228 list_lock = &hash->list_locks[i];
1229
1230 spin_lock_bh(list_lock);
1231 hlist_for_each_entry_safe(backbone_gw, node_tmp,
1232 head, hash_entry) {
1233 if (now)
1234 goto purge_now;
1235 if (!batadv_has_timed_out(backbone_gw->lasttime,
1236 BATADV_BLA_BACKBONE_TIMEOUT))
1237 continue;
1238
1239 batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
1240 "%s(): backbone gw %pM timed out\n",
1241 __func__, backbone_gw->orig);
1242
1243 purge_now:
1244 /* don't wait for the pending request anymore */
1245 if (atomic_read(&backbone_gw->request_sent))
1246 atomic_dec(&bat_priv->bla.num_requests);
1247
1248 batadv_bla_del_backbone_claims(backbone_gw);
1249
1250 hlist_del_rcu(&backbone_gw->hash_entry);
1251 batadv_backbone_gw_put(backbone_gw);
1252 }
1253 spin_unlock_bh(list_lock);
1254 }
1255 }
1256
1257 /**
1258 * batadv_bla_purge_claims() - Remove claims after a timeout or immediately
1259 * @bat_priv: the bat priv with all the soft interface information
1260 * @primary_if: the selected primary interface, may be NULL if now is set
1261 * @now: whether the whole hash shall be wiped now
1262 *
1263 * Check when we last heard from our own claims, and remove them in case of
1264 * a time out, or clean all claims if now is set
1265 */
1266 static void batadv_bla_purge_claims(struct batadv_priv *bat_priv,
1267 struct batadv_hard_iface *primary_if,
1268 int now)
1269 {
1270 struct batadv_bla_backbone_gw *backbone_gw;
1271 struct batadv_bla_claim *claim;
1272 struct hlist_head *head;
1273 struct batadv_hashtable *hash;
1274 int i;
1275
1276 hash = bat_priv->bla.claim_hash;
1277 if (!hash)
1278 return;
1279
1280 for (i = 0; i < hash->size; i++) {
1281 head = &hash->table[i];
1282
1283 rcu_read_lock();
1284 hlist_for_each_entry_rcu(claim, head, hash_entry) {
1285 backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1286 if (now)
1287 goto purge_now;
1288
1289 if (!batadv_compare_eth(backbone_gw->orig,
1290 primary_if->net_dev->dev_addr))
1291 goto skip;
1292
1293 if (!batadv_has_timed_out(claim->lasttime,
1294 BATADV_BLA_CLAIM_TIMEOUT))
1295 goto skip;
1296
1297 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1298 "%s(): timed out.\n", __func__);
1299
1300 purge_now:
1301 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1302 "%s(): %pM, vid %d\n", __func__,
1303 claim->addr, claim->vid);
1304
1305 batadv_handle_unclaim(bat_priv, primary_if,
1306 backbone_gw->orig,
1307 claim->addr, claim->vid);
1308 skip:
1309 batadv_backbone_gw_put(backbone_gw);
1310 }
1311 rcu_read_unlock();
1312 }
1313 }
1314
1315 /**
1316 * batadv_bla_update_orig_address() - Update the backbone gateways when the own
1317 * originator address changes
1318 * @bat_priv: the bat priv with all the soft interface information
1319 * @primary_if: the new selected primary_if
1320 * @oldif: the old primary interface, may be NULL
1321 */
1322 void batadv_bla_update_orig_address(struct batadv_priv *bat_priv,
1323 struct batadv_hard_iface *primary_if,
1324 struct batadv_hard_iface *oldif)
1325 {
1326 struct batadv_bla_backbone_gw *backbone_gw;
1327 struct hlist_head *head;
1328 struct batadv_hashtable *hash;
1329 __be16 group;
1330 int i;
1331
1332 /* reset bridge loop avoidance group id */
1333 group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
1334 bat_priv->bla.claim_dest.group = group;
1335
1336 /* purge everything when bridge loop avoidance is turned off */
1337 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1338 oldif = NULL;
1339
1340 if (!oldif) {
1341 batadv_bla_purge_claims(bat_priv, NULL, 1);
1342 batadv_bla_purge_backbone_gw(bat_priv, 1);
1343 return;
1344 }
1345
1346 hash = bat_priv->bla.backbone_hash;
1347 if (!hash)
1348 return;
1349
1350 for (i = 0; i < hash->size; i++) {
1351 head = &hash->table[i];
1352
1353 rcu_read_lock();
1354 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1355 /* own orig still holds the old value. */
1356 if (!batadv_compare_eth(backbone_gw->orig,
1357 oldif->net_dev->dev_addr))
1358 continue;
1359
1360 ether_addr_copy(backbone_gw->orig,
1361 primary_if->net_dev->dev_addr);
1362 /* send an announce frame so others will ask for our
1363 * claims and update their tables.
1364 */
1365 batadv_bla_send_announce(bat_priv, backbone_gw);
1366 }
1367 rcu_read_unlock();
1368 }
1369 }
1370
1371 /**
1372 * batadv_bla_send_loopdetect() - send a loopdetect frame
1373 * @bat_priv: the bat priv with all the soft interface information
1374 * @backbone_gw: the backbone gateway for which a loop should be detected
1375 *
1376 * To detect loops that the bridge loop avoidance can't handle, send a loop
1377 * detection packet on the backbone. Unlike other BLA frames, this frame will
1378 * be allowed on the mesh by other nodes. If it is received on the mesh, this
1379 * indicates that there is a loop.
1380 */
1381 static void
1382 batadv_bla_send_loopdetect(struct batadv_priv *bat_priv,
1383 struct batadv_bla_backbone_gw *backbone_gw)
1384 {
1385 batadv_dbg(BATADV_DBG_BLA, bat_priv, "Send loopdetect frame for vid %d\n",
1386 backbone_gw->vid);
1387 batadv_bla_send_claim(bat_priv, bat_priv->bla.loopdetect_addr,
1388 backbone_gw->vid, BATADV_CLAIM_TYPE_LOOPDETECT);
1389 }
1390
1391 /**
1392 * batadv_bla_status_update() - purge bla interfaces if necessary
1393 * @net_dev: the soft interface net device
1394 */
1395 void batadv_bla_status_update(struct net_device *net_dev)
1396 {
1397 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1398 struct batadv_hard_iface *primary_if;
1399
1400 primary_if = batadv_primary_if_get_selected(bat_priv);
1401 if (!primary_if)
1402 return;
1403
1404 /* this function already purges everything when bla is disabled,
1405 * so just call that one.
1406 */
1407 batadv_bla_update_orig_address(bat_priv, primary_if, primary_if);
1408 batadv_hardif_put(primary_if);
1409 }
1410
1411 /**
1412 * batadv_bla_periodic_work() - performs periodic bla work
1413 * @work: kernel work struct
1414 *
1415 * periodic work to do:
1416 * * purge structures when they are too old
1417 * * send announcements
1418 */
1419 static void batadv_bla_periodic_work(struct work_struct *work)
1420 {
1421 struct delayed_work *delayed_work;
1422 struct batadv_priv *bat_priv;
1423 struct batadv_priv_bla *priv_bla;
1424 struct hlist_head *head;
1425 struct batadv_bla_backbone_gw *backbone_gw;
1426 struct batadv_hashtable *hash;
1427 struct batadv_hard_iface *primary_if;
1428 bool send_loopdetect = false;
1429 int i;
1430
1431 delayed_work = to_delayed_work(work);
1432 priv_bla = container_of(delayed_work, struct batadv_priv_bla, work);
1433 bat_priv = container_of(priv_bla, struct batadv_priv, bla);
1434 primary_if = batadv_primary_if_get_selected(bat_priv);
1435 if (!primary_if)
1436 goto out;
1437
1438 batadv_bla_purge_claims(bat_priv, primary_if, 0);
1439 batadv_bla_purge_backbone_gw(bat_priv, 0);
1440
1441 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1442 goto out;
1443
1444 if (atomic_dec_and_test(&bat_priv->bla.loopdetect_next)) {
1445 /* set a new random mac address for the next bridge loop
1446 * detection frames. Set the locally administered bit to avoid
1447 * collisions with users' mac addresses.
1448 */
1449 eth_random_addr(bat_priv->bla.loopdetect_addr);
1450 bat_priv->bla.loopdetect_addr[0] = 0xba;
1451 bat_priv->bla.loopdetect_addr[1] = 0xbe;
1452 bat_priv->bla.loopdetect_lasttime = jiffies;
1453 atomic_set(&bat_priv->bla.loopdetect_next,
1454 BATADV_BLA_LOOPDETECT_PERIODS);
1455
1456 /* mark for sending loop detect on all VLANs */
1457 send_loopdetect = true;
1458 }
1459
1460 hash = bat_priv->bla.backbone_hash;
1461 if (!hash)
1462 goto out;
1463
1464 for (i = 0; i < hash->size; i++) {
1465 head = &hash->table[i];
1466
1467 rcu_read_lock();
1468 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1469 if (!batadv_compare_eth(backbone_gw->orig,
1470 primary_if->net_dev->dev_addr))
1471 continue;
1472
1473 backbone_gw->lasttime = jiffies;
1474
1475 batadv_bla_send_announce(bat_priv, backbone_gw);
1476 if (send_loopdetect)
1477 batadv_bla_send_loopdetect(bat_priv,
1478 backbone_gw);
1479
1480 /* request_sent is only set after creation to avoid
1481 * problems when we are not yet known as backbone gw
1482 * in the backbone.
1483 *
1484 * We can reset this now after we waited some periods
1485 * to give bridge forward delays and bla group forming
1486 * some grace time.
1487 */
1488
1489 if (atomic_read(&backbone_gw->request_sent) == 0)
1490 continue;
1491
1492 if (!atomic_dec_and_test(&backbone_gw->wait_periods))
1493 continue;
1494
1495 atomic_dec(&backbone_gw->bat_priv->bla.num_requests);
1496 atomic_set(&backbone_gw->request_sent, 0);
1497 }
1498 rcu_read_unlock();
1499 }
1500 out:
1501 if (primary_if)
1502 batadv_hardif_put(primary_if);
1503
1504 queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
1505 msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1506 }
1507
1508 /* The claim hash and the backbone hash receive the same key because they
1509 * are initialized by hash_new with the same key. Reinitialize them with
1510 * two different keys to allow nested locking without generating lockdep
1511 * warnings.
1512 */
1513 static struct lock_class_key batadv_claim_hash_lock_class_key;
1514 static struct lock_class_key batadv_backbone_hash_lock_class_key;
1515
1516 /**
1517 * batadv_bla_init() - initialize all bla structures
1518 * @bat_priv: the bat priv with all the soft interface information
1519 *
1520 * Return: 0 on success, < 0 on error.
1521 */
1522 int batadv_bla_init(struct batadv_priv *bat_priv)
1523 {
1524 int i;
1525 u8 claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00};
1526 struct batadv_hard_iface *primary_if;
1527 u16 crc;
1528 unsigned long entrytime;
1529
1530 spin_lock_init(&bat_priv->bla.bcast_duplist_lock);
1531
1532 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hash registering\n");
1533
1534 /* setting claim destination address */
1535 memcpy(&bat_priv->bla.claim_dest.magic, claim_dest, 3);
1536 bat_priv->bla.claim_dest.type = 0;
1537 primary_if = batadv_primary_if_get_selected(bat_priv);
1538 if (primary_if) {
1539 crc = crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN);
1540 bat_priv->bla.claim_dest.group = htons(crc);
1541 batadv_hardif_put(primary_if);
1542 } else {
1543 bat_priv->bla.claim_dest.group = 0; /* will be set later */
1544 }
1545
1546 /* initialize the duplicate list */
1547 entrytime = jiffies - msecs_to_jiffies(BATADV_DUPLIST_TIMEOUT);
1548 for (i = 0; i < BATADV_DUPLIST_SIZE; i++)
1549 bat_priv->bla.bcast_duplist[i].entrytime = entrytime;
1550 bat_priv->bla.bcast_duplist_curr = 0;
1551
1552 atomic_set(&bat_priv->bla.loopdetect_next,
1553 BATADV_BLA_LOOPDETECT_PERIODS);
1554
1555 if (bat_priv->bla.claim_hash)
1556 return 0;
1557
1558 bat_priv->bla.claim_hash = batadv_hash_new(128);
1559 bat_priv->bla.backbone_hash = batadv_hash_new(32);
1560
1561 if (!bat_priv->bla.claim_hash || !bat_priv->bla.backbone_hash)
1562 return -ENOMEM;
1563
1564 batadv_hash_set_lock_class(bat_priv->bla.claim_hash,
1565 &batadv_claim_hash_lock_class_key);
1566 batadv_hash_set_lock_class(bat_priv->bla.backbone_hash,
1567 &batadv_backbone_hash_lock_class_key);
1568
1569 batadv_dbg(BATADV_DBG_BLA, bat_priv, "bla hashes initialized\n");
1570
1571 INIT_DELAYED_WORK(&bat_priv->bla.work, batadv_bla_periodic_work);
1572
1573 queue_delayed_work(batadv_event_workqueue, &bat_priv->bla.work,
1574 msecs_to_jiffies(BATADV_BLA_PERIOD_LENGTH));
1575 return 0;
1576 }
1577
1578 /**
1579 * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup.
1580 * @bat_priv: the bat priv with all the soft interface information
1581 * @skb: contains the bcast_packet to be checked
1582 *
1583 * check if it is on our broadcast list. Another gateway might
1584 * have sent the same packet because it is connected to the same backbone,
1585 * so we have to remove this duplicate.
1586 *
1587 * This is performed by checking the CRC, which will tell us
1588 * with a good chance that it is the same packet. If it is furthermore
1589 * sent by another host, drop it. We allow equal packets from
1590 * the same host however as this might be intended.
1591 *
1592 * Return: true if a packet is in the duplicate list, false otherwise.
1593 */
1594 bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv,
1595 struct sk_buff *skb)
1596 {
1597 int i, curr;
1598 __be32 crc;
1599 struct batadv_bcast_packet *bcast_packet;
1600 struct batadv_bcast_duplist_entry *entry;
1601 bool ret = false;
1602
1603 bcast_packet = (struct batadv_bcast_packet *)skb->data;
1604
1605 /* calculate the crc ... */
1606 crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1));
1607
1608 spin_lock_bh(&bat_priv->bla.bcast_duplist_lock);
1609
1610 for (i = 0; i < BATADV_DUPLIST_SIZE; i++) {
1611 curr = (bat_priv->bla.bcast_duplist_curr + i);
1612 curr %= BATADV_DUPLIST_SIZE;
1613 entry = &bat_priv->bla.bcast_duplist[curr];
1614
1615 /* we can stop searching if the entry is too old;
1616 * later entries will be even older
1617 */
1618 if (batadv_has_timed_out(entry->entrytime,
1619 BATADV_DUPLIST_TIMEOUT))
1620 break;
1621
1622 if (entry->crc != crc)
1623 continue;
1624
1625 if (batadv_compare_eth(entry->orig, bcast_packet->orig))
1626 continue;
1627
1628 /* this entry seems to match: same crc, not too old,
1629 * and from another gw. therefore return true to forbid it.
1630 */
1631 ret = true;
1632 goto out;
1633 }
1634 /* not found, add a new entry (overwrite the oldest entry)
1635 * and allow it; it's the first occurrence.
1636 */
1637 curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1);
1638 curr %= BATADV_DUPLIST_SIZE;
1639 entry = &bat_priv->bla.bcast_duplist[curr];
1640 entry->crc = crc;
1641 entry->entrytime = jiffies;
1642 ether_addr_copy(entry->orig, bcast_packet->orig);
1643 bat_priv->bla.bcast_duplist_curr = curr;
1644
1645 out:
1646 spin_unlock_bh(&bat_priv->bla.bcast_duplist_lock);
1647
1648 return ret;
1649 }
1650
1651 /**
1652 * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for
1653 * the VLAN identified by vid.
1654 * @bat_priv: the bat priv with all the soft interface information
1655 * @orig: originator mac address
1656 * @vid: VLAN identifier
1657 *
1658 * Return: true if orig is a backbone for this vid, false otherwise.
1659 */
1660 bool batadv_bla_is_backbone_gw_orig(struct batadv_priv *bat_priv, u8 *orig,
1661 unsigned short vid)
1662 {
1663 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
1664 struct hlist_head *head;
1665 struct batadv_bla_backbone_gw *backbone_gw;
1666 int i;
1667
1668 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1669 return false;
1670
1671 if (!hash)
1672 return false;
1673
1674 for (i = 0; i < hash->size; i++) {
1675 head = &hash->table[i];
1676
1677 rcu_read_lock();
1678 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
1679 if (batadv_compare_eth(backbone_gw->orig, orig) &&
1680 backbone_gw->vid == vid) {
1681 rcu_read_unlock();
1682 return true;
1683 }
1684 }
1685 rcu_read_unlock();
1686 }
1687
1688 return false;
1689 }
1690
1691 /**
1692 * batadv_bla_is_backbone_gw() - check if originator is a backbone gw for a VLAN
1693 * @skb: the frame to be checked
1694 * @orig_node: the orig_node of the frame
1695 * @hdr_size: maximum length of the frame
1696 *
1697 * Return: true if the orig_node is also a gateway on the soft interface,
1698 * otherwise it returns false.
1699 */
1700 bool batadv_bla_is_backbone_gw(struct sk_buff *skb,
1701 struct batadv_orig_node *orig_node, int hdr_size)
1702 {
1703 struct batadv_bla_backbone_gw *backbone_gw;
1704 unsigned short vid;
1705
1706 if (!atomic_read(&orig_node->bat_priv->bridge_loop_avoidance))
1707 return false;
1708
1709 /* first, find out the vid. */
1710 if (!pskb_may_pull(skb, hdr_size + ETH_HLEN))
1711 return false;
1712
1713 vid = batadv_get_vid(skb, hdr_size);
1714
1715 /* see if this originator is a backbone gw for this VLAN */
1716 backbone_gw = batadv_backbone_hash_find(orig_node->bat_priv,
1717 orig_node->orig, vid);
1718 if (!backbone_gw)
1719 return false;
1720
1721 batadv_backbone_gw_put(backbone_gw);
1722 return true;
1723 }
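/* Illustrative sketch (not upstream code): a receive handler could use
 * the check above to avoid handing a frame back to the LAN when its
 * originator is a backbone gateway on the same VLAN - that gateway is
 * already bridged into this LAN, so delivering the frame again would
 * duplicate it. bla_example_should_deliver() is a hypothetical helper.
 */
#if 0
static bool bla_example_should_deliver(struct sk_buff *skb,
				       struct batadv_orig_node *orig_node,
				       int hdr_size)
{
	/* frames from a backbone gateway of our own LAN would loop */
	if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size))
		return false;

	return true;
}
#endif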
1724
1725 /**
1726 * batadv_bla_free() - free all bla structures
1727 * @bat_priv: the bat priv with all the soft interface information
1728 *
1729 * Called upon soft-interface free or module unload.
1730 */
1731 void batadv_bla_free(struct batadv_priv *bat_priv)
1732 {
1733 struct batadv_hard_iface *primary_if;
1734
1735 cancel_delayed_work_sync(&bat_priv->bla.work);
1736 primary_if = batadv_primary_if_get_selected(bat_priv);
1737
1738 if (bat_priv->bla.claim_hash) {
1739 batadv_bla_purge_claims(bat_priv, primary_if, 1);
1740 batadv_hash_destroy(bat_priv->bla.claim_hash);
1741 bat_priv->bla.claim_hash = NULL;
1742 }
1743 if (bat_priv->bla.backbone_hash) {
1744 batadv_bla_purge_backbone_gw(bat_priv, 1);
1745 batadv_hash_destroy(bat_priv->bla.backbone_hash);
1746 bat_priv->bla.backbone_hash = NULL;
1747 }
1748 if (primary_if)
1749 batadv_hardif_put(primary_if);
1750 }
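/* Illustrative sketch (not upstream code): the teardown order above is
 * the general "stop the worker, then free what it touches" pattern.
 * struct example_ctx and example_free() are hypothetical; only the
 * workqueue API calls are real.
 */
#if 0
struct example_ctx {
	struct delayed_work work;
	void *table;
};

static void example_free(struct example_ctx *ctx)
{
	/* wait for a possibly running worker before freeing its data */
	cancel_delayed_work_sync(&ctx->work);

	kfree(ctx->table);
	ctx->table = NULL;
}
#endif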
1751
1752 /**
1753 * batadv_bla_loopdetect_check() - check and handle a detected loop
1754 * @bat_priv: the bat priv with all the soft interface information
1755 * @skb: the packet to check
1756 * @primary_if: interface on which the request came in
1757 * @vid: the VLAN ID of the frame
1758 *
1759 * Checks if this packet is a loop detect frame which has been sent by us;
1760 * if that is the case, throw a uevent and log the event.
1761 *
1762 * Return: true if it is a loop detect frame which is to be dropped, false
1763 * otherwise.
1764 */
1765 static bool
1766 batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb,
1767 struct batadv_hard_iface *primary_if,
1768 unsigned short vid)
1769 {
1770 struct batadv_bla_backbone_gw *backbone_gw;
1771 struct ethhdr *ethhdr;
1772 bool ret;
1773
1774 ethhdr = eth_hdr(skb);
1775
1776 /* Only check for the MAC address and skip more checks here for
1777 * performance reasons - this function is on the hotpath, after all.
1778 */
1779 if (!batadv_compare_eth(ethhdr->h_source,
1780 bat_priv->bla.loopdetect_addr))
1781 return false;
1782
1783 /* If the packet came too late, don't forward it on the mesh
1784 * but don't consider that as loop. It might be a coincidence.
1785 */
1786 if (batadv_has_timed_out(bat_priv->bla.loopdetect_lasttime,
1787 BATADV_BLA_LOOPDETECT_TIMEOUT))
1788 return true;
1789
1790 backbone_gw = batadv_bla_get_backbone_gw(bat_priv,
1791 primary_if->net_dev->dev_addr,
1792 vid, true);
1793 if (unlikely(!backbone_gw))
1794 return true;
1795
1796 ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
1797
1798 /* backbone_gw is unreferenced in the report work function
1799 * if the queue_work() call was successful
1800 */
1801 if (!ret)
1802 batadv_backbone_gw_put(backbone_gw);
1803
1804 return true;
1805 }
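/* Illustrative sketch (not upstream code): the queue_work() handling above
 * follows a common reference hand-off pattern - the work callback owns the
 * reference only when queueing succeeded. struct example_obj,
 * example_obj_put() and example_schedule_report() are hypothetical;
 * batadv_event_workqueue and queue_work() are real.
 */
#if 0
static void example_schedule_report(struct example_obj *obj)
{
	bool queued;

	/* hand the reference we hold over to the work item */
	queued = queue_work(batadv_event_workqueue, &obj->report_work);

	/* queue_work() returns false if the work was already pending;
	 * in that case nobody took our reference, so drop it here
	 */
	if (!queued)
		example_obj_put(obj);
}
#endif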
1806
1807 /**
1808 * batadv_bla_rx() - check packets coming from the mesh.
1809 * @bat_priv: the bat priv with all the soft interface information
1810 * @skb: the frame to be checked
1811 * @vid: the VLAN ID of the frame
1812 * @is_bcast: true if the packet came in via a broadcast packet type
1813 *
1814 * batadv_bla_rx checks if:
1815 * * we have to race for a claim
1816 * * the frame is allowed on the LAN
1817 *
1818 * In these cases, the skb is further handled by this function.
1819 *
1820 * Return: true if handled, otherwise it returns false and the caller shall
1821 * further process the skb.
1822 */
1823 bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1824 unsigned short vid, bool is_bcast)
1825 {
1826 struct batadv_bla_backbone_gw *backbone_gw;
1827 struct ethhdr *ethhdr;
1828 struct batadv_bla_claim search_claim, *claim = NULL;
1829 struct batadv_hard_iface *primary_if;
1830 bool own_claim;
1831 bool ret;
1832
1833 ethhdr = eth_hdr(skb);
1834
1835 primary_if = batadv_primary_if_get_selected(bat_priv);
1836 if (!primary_if)
1837 goto handled;
1838
1839 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1840 goto allow;
1841
1842 if (batadv_bla_loopdetect_check(bat_priv, skb, primary_if, vid))
1843 goto handled;
1844
1845 if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1846 /* don't allow broadcasts while requests are in flight */
1847 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
1848 goto handled;
1849
1850 ether_addr_copy(search_claim.addr, ethhdr->h_source);
1851 search_claim.vid = vid;
1852 claim = batadv_claim_hash_find(bat_priv, &search_claim);
1853
1854 if (!claim) {
1855 /* possible optimization: race for a claim */
1856 /* No claim exists yet, claim it for us!
1857 */
1858
1859 batadv_dbg(BATADV_DBG_BLA, bat_priv,
1860 "%s(): Unclaimed MAC %pM found. Claim it. Local: %s\n",
1861 __func__, ethhdr->h_source,
1862 batadv_is_my_client(bat_priv,
1863 ethhdr->h_source, vid) ?
1864 "yes" : "no");
1865 batadv_handle_claim(bat_priv, primary_if,
1866 primary_if->net_dev->dev_addr,
1867 ethhdr->h_source, vid);
1868 goto allow;
1869 }
1870
1871 /* if it is our own claim ... */
1872 backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1873 own_claim = batadv_compare_eth(backbone_gw->orig,
1874 primary_if->net_dev->dev_addr);
1875 batadv_backbone_gw_put(backbone_gw);
1876
1877 if (own_claim) {
1878 /* ... allow it in any case */
1879 claim->lasttime = jiffies;
1880 goto allow;
1881 }
1882
1883 /* if it is a broadcast ... */
1884 if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
1885 /* ... drop it. the responsible gateway is in charge.
1886 *
1887 * We need to check is_bcast because with the gateway
1888 * feature, broadcasts (like DHCP requests) may be sent
1889 * using a unicast packet type.
1890 */
1891 goto handled;
1892 } else {
1893 /* seems the client considers us as its best gateway.
1894 * send a claim and update the claim table
1895 * immediately.
1896 */
1897 batadv_handle_claim(bat_priv, primary_if,
1898 primary_if->net_dev->dev_addr,
1899 ethhdr->h_source, vid);
1900 goto allow;
1901 }
1902 allow:
1903 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
1904 ret = false;
1905 goto out;
1906
1907 handled:
1908 kfree_skb(skb);
1909 ret = true;
1910
1911 out:
1912 if (primary_if)
1913 batadv_hardif_put(primary_if);
1914 if (claim)
1915 batadv_claim_put(claim);
1916 return ret;
1917 }
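/* Illustrative sketch (not upstream code): how the soft-interface receive
 * path might gate frames through batadv_bla_rx(). When the function
 * returns true the skb has already been consumed (freed), so the caller
 * must not touch it again. bla_example_interface_rx() is hypothetical and
 * netif_rx() merely stands in for the real local delivery step.
 */
#if 0
static void bla_example_interface_rx(struct batadv_priv *bat_priv,
				     struct sk_buff *skb,
				     unsigned short vid, bool is_bcast)
{
	/* handled == true: claimed for another gateway or loop detected,
	 * skb was freed inside batadv_bla_rx()
	 */
	if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
		return;

	/* handled == false: deliver the frame towards the local LAN */
	netif_rx(skb);
}
#endif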
1918
1919 /**
1920 * batadv_bla_tx() - check packets going into the mesh
1921 * @bat_priv: the bat priv with all the soft interface information
1922 * @skb: the frame to be checked
1923 * @vid: the VLAN ID of the frame
1924 *
1925 * batadv_bla_tx checks if:
1926 * * a claim was received which has to be processed
1927 * * the frame is allowed on the mesh
1928 *
1929 * In these cases, the skb is further handled by this function.
1930 *
1931 * This call might reallocate skb data.
1932 *
1933 * Return: true if handled, otherwise it returns false and the caller shall
1934 * further process the skb.
1935 */
1936 bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
1937 unsigned short vid)
1938 {
1939 struct ethhdr *ethhdr;
1940 struct batadv_bla_claim search_claim, *claim = NULL;
1941 struct batadv_bla_backbone_gw *backbone_gw;
1942 struct batadv_hard_iface *primary_if;
1943 bool client_roamed;
1944 bool ret = false;
1945
1946 primary_if = batadv_primary_if_get_selected(bat_priv);
1947 if (!primary_if)
1948 goto out;
1949
1950 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
1951 goto allow;
1952
1953 if (batadv_bla_process_claim(bat_priv, primary_if, skb))
1954 goto handled;
1955
1956 ethhdr = eth_hdr(skb);
1957
1958 if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
1959 /* don't allow broadcasts while requests are in flight */
1960 if (is_multicast_ether_addr(ethhdr->h_dest))
1961 goto handled;
1962
1963 ether_addr_copy(search_claim.addr, ethhdr->h_source);
1964 search_claim.vid = vid;
1965
1966 claim = batadv_claim_hash_find(bat_priv, &search_claim);
1967
1968 /* if no claim exists, allow it. */
1969 if (!claim)
1970 goto allow;
1971
1972 /* check if we are responsible. */
1973 backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
1974 client_roamed = batadv_compare_eth(backbone_gw->orig,
1975 primary_if->net_dev->dev_addr);
1976 batadv_backbone_gw_put(backbone_gw);
1977
1978 if (client_roamed) {
1979 /* if yes, the client has roamed and we have
1980 * to unclaim it.
1981 */
1982 if (batadv_has_timed_out(claim->lasttime, 100)) {
1983 /* only unclaim if the last claim entry is
1984 * older than 100 ms to make sure we really
1985 * have a roaming client here.
1986 */
1987 batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Roaming client %pM detected. Unclaim it.\n",
1988 __func__, ethhdr->h_source);
1989 batadv_handle_unclaim(bat_priv, primary_if,
1990 primary_if->net_dev->dev_addr,
1991 ethhdr->h_source, vid);
1992 goto allow;
1993 } else {
1994 batadv_dbg(BATADV_DBG_BLA, bat_priv, "%s(): Race for claim %pM detected. Drop packet.\n",
1995 __func__, ethhdr->h_source);
1996 goto handled;
1997 }
1998 }
1999
2000 /* check if it is a multicast/broadcast frame */
2001 if (is_multicast_ether_addr(ethhdr->h_dest)) {
2002 /* drop it. the responsible gateway has forwarded it into
2003 * the backbone network.
2004 */
2005 goto handled;
2006 } else {
2007 /* we must allow it. at least if we are
2008 * responsible for the DESTINATION.
2009 */
2010 goto allow;
2011 }
2012 allow:
2013 batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
2014 ret = false;
2015 goto out;
2016 handled:
2017 ret = true;
2018 out:
2019 if (primary_if)
2020 batadv_hardif_put(primary_if);
2021 if (claim)
2022 batadv_claim_put(claim);
2023 return ret;
2024 }
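/* Illustrative sketch (not upstream code): how a transmit path might gate
 * outgoing frames through batadv_bla_tx(). Unlike batadv_bla_rx(), a
 * "handled" result here does not free the skb, so the caller drops it.
 * Because the call may reallocate skb data, the Ethernet header is only
 * read afterwards. bla_example_interface_tx() is hypothetical.
 */
#if 0
static netdev_tx_t bla_example_interface_tx(struct batadv_priv *bat_priv,
					    struct sk_buff *skb,
					    unsigned short vid)
{
	struct ethhdr *ethhdr;

	/* claim frames and frames another gateway is responsible for */
	if (batadv_bla_tx(bat_priv, skb, vid)) {
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* re-read the header only after the call, the data may have moved */
	ethhdr = eth_hdr(skb);

	/* ... continue with unicast/broadcast handling of ethhdr ... */
	(void)ethhdr;

	return NETDEV_TX_OK;
}
#endif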
2025
2026 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
2027 /**
2028 * batadv_bla_claim_table_seq_print_text() - print the claim table in a seq file
2029 * @seq: seq file to print on
2030 * @offset: not used
2031 *
2032 * Return: always 0
2033 */
2034 int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset)
2035 {
2036 struct net_device *net_dev = (struct net_device *)seq->private;
2037 struct batadv_priv *bat_priv = netdev_priv(net_dev);
2038 struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
2039 struct batadv_bla_backbone_gw *backbone_gw;
2040 struct batadv_bla_claim *claim;
2041 struct batadv_hard_iface *primary_if;
2042 struct hlist_head *head;
2043 u16 backbone_crc;
2044 u32 i;
2045 bool is_own;
2046 u8 *primary_addr;
2047
2048 primary_if = batadv_seq_print_text_primary_if_get(seq);
2049 if (!primary_if)
2050 goto out;
2051
2052 primary_addr = primary_if->net_dev->dev_addr;
2053 seq_printf(seq,
2054 "Claims announced for the mesh %s (orig %pM, group id %#.4x)\n",
2055 net_dev->name, primary_addr,
2056 ntohs(bat_priv->bla.claim_dest.group));
2057 seq_puts(seq,
2058 " Client VID Originator [o] (CRC )\n");
2059 for (i = 0; i < hash->size; i++) {
2060 head = &hash->table[i];
2061
2062 rcu_read_lock();
2063 hlist_for_each_entry_rcu(claim, head, hash_entry) {
2064 backbone_gw = batadv_bla_claim_get_backbone_gw(claim);
2065
2066 is_own = batadv_compare_eth(backbone_gw->orig,
2067 primary_addr);
2068
2069 spin_lock_bh(&backbone_gw->crc_lock);
2070 backbone_crc = backbone_gw->crc;
2071 spin_unlock_bh(&backbone_gw->crc_lock);
2072 seq_printf(seq, " * %pM on %5d by %pM [%c] (%#.4x)\n",
2073 claim->addr, batadv_print_vid(claim->vid),
2074 backbone_gw->orig,
2075 (is_own ? 'x' : ' '),
2076 backbone_crc);
2077
2078 batadv_backbone_gw_put(backbone_gw);
2079 }
2080 rcu_read_unlock();
2081 }
2082 out:
2083 if (primary_if)
2084 batadv_hardif_put(primary_if);
2085 return 0;
2086 }
2087 #endif
2088
2089 /**
2090 * batadv_bla_claim_dump_entry() - dump one entry of the claim table
2091 * to a netlink socket
2092 * @msg: buffer for the message
2093 * @portid: netlink port
2094 * @cb: Control block containing additional options
2095 * @primary_if: primary interface
2096 * @claim: entry to dump
2097 *
2098 * Return: 0 or error code.
2099 */
2100 static int
2101 batadv_bla_claim_dump_entry(struct sk_buff *msg, u32 portid,
2102 struct netlink_callback *cb,
2103 struct batadv_hard_iface *primary_if,
2104 struct batadv_bla_claim *claim)
2105 {
2106 u8 *primary_addr = primary_if->net_dev->dev_addr;
2107 u16 backbone_crc;
2108 bool is_own;
2109 void *hdr;
2110 int ret = -EINVAL;
2111
2112 hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
2113 &batadv_netlink_family, NLM_F_MULTI,
2114 BATADV_CMD_GET_BLA_CLAIM);
2115 if (!hdr) {
2116 ret = -ENOBUFS;
2117 goto out;
2118 }
2119
2120 genl_dump_check_consistent(cb, hdr);
2121
2122 is_own = batadv_compare_eth(claim->backbone_gw->orig,
2123 primary_addr);
2124
2125 spin_lock_bh(&claim->backbone_gw->crc_lock);
2126 backbone_crc = claim->backbone_gw->crc;
2127 spin_unlock_bh(&claim->backbone_gw->crc_lock);
2128
2129 if (is_own)
2130 if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
2131 genlmsg_cancel(msg, hdr);
2132 goto out;
2133 }
2134
2135 if (nla_put(msg, BATADV_ATTR_BLA_ADDRESS, ETH_ALEN, claim->addr) ||
2136 nla_put_u16(msg, BATADV_ATTR_BLA_VID, claim->vid) ||
2137 nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
2138 claim->backbone_gw->orig) ||
2139 nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
2140 backbone_crc)) {
2141 genlmsg_cancel(msg, hdr);
2142 goto out;
2143 }
2144
2145 genlmsg_end(msg, hdr);
2146 ret = 0;
2147
2148 out:
2149 return ret;
2150 }
2151
2152 /**
2153 * batadv_bla_claim_dump_bucket() - dump one bucket of the claim table
2154 * to a netlink socket
2155 * @msg: buffer for the message
2156 * @portid: netlink port
2157 * @cb: Control block containing additional options
2158 * @primary_if: primary interface
2159 * @hash: hash to dump
2160 * @bucket: bucket index to dump
2161 * @idx_skip: How many entries to skip
2162 *
2163 * Return: always 0.
2164 */
2165 static int
2166 batadv_bla_claim_dump_bucket(struct sk_buff *msg, u32 portid,
2167 struct netlink_callback *cb,
2168 struct batadv_hard_iface *primary_if,
2169 struct batadv_hashtable *hash, unsigned int bucket,
2170 int *idx_skip)
2171 {
2172 struct batadv_bla_claim *claim;
2173 int idx = 0;
2174 int ret = 0;
2175
2176 spin_lock_bh(&hash->list_locks[bucket]);
2177 cb->seq = atomic_read(&hash->generation) << 1 | 1;
2178
2179 hlist_for_each_entry(claim, &hash->table[bucket], hash_entry) {
2180 if (idx++ < *idx_skip)
2181 continue;
2182
2183 ret = batadv_bla_claim_dump_entry(msg, portid, cb,
2184 primary_if, claim);
2185 if (ret) {
2186 *idx_skip = idx - 1;
2187 goto unlock;
2188 }
2189 }
2190
2191 *idx_skip = 0;
2192 unlock:
2193 spin_unlock_bh(&hash->list_locks[bucket]);
2194 return ret;
2195 }
2196
2197 /**
2198 * batadv_bla_claim_dump() - dump claim table to a netlink socket
2199 * @msg: buffer for the message
2200 * @cb: callback structure containing arguments
2201 *
2202 * Return: message length.
2203 */
2204 int batadv_bla_claim_dump(struct sk_buff *msg, struct netlink_callback *cb)
2205 {
2206 struct batadv_hard_iface *primary_if = NULL;
2207 int portid = NETLINK_CB(cb->skb).portid;
2208 struct net *net = sock_net(cb->skb->sk);
2209 struct net_device *soft_iface;
2210 struct batadv_hashtable *hash;
2211 struct batadv_priv *bat_priv;
2212 int bucket = cb->args[0];
2213 int idx = cb->args[1];
2214 int ifindex;
2215 int ret = 0;
2216
2217 ifindex = batadv_netlink_get_ifindex(cb->nlh,
2218 BATADV_ATTR_MESH_IFINDEX);
2219 if (!ifindex)
2220 return -EINVAL;
2221
2222 soft_iface = dev_get_by_index(net, ifindex);
2223 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
2224 ret = -ENODEV;
2225 goto out;
2226 }
2227
2228 bat_priv = netdev_priv(soft_iface);
2229 hash = bat_priv->bla.claim_hash;
2230
2231 primary_if = batadv_primary_if_get_selected(bat_priv);
2232 if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
2233 ret = -ENOENT;
2234 goto out;
2235 }
2236
2237 while (bucket < hash->size) {
2238 if (batadv_bla_claim_dump_bucket(msg, portid, cb, primary_if,
2239 hash, bucket, &idx))
2240 break;
2241 bucket++;
2242 }
2243
2244 cb->args[0] = bucket;
2245 cb->args[1] = idx;
2246
2247 ret = msg->len;
2248
2249 out:
2250 if (primary_if)
2251 batadv_hardif_put(primary_if);
2252
2253 if (soft_iface)
2254 dev_put(soft_iface);
2255
2256 return ret;
2257 }
2258
2259 #ifdef CONFIG_BATMAN_ADV_DEBUGFS
2260 /**
2261 * batadv_bla_backbone_table_seq_print_text() - print the backbone table in a
2262 * seq file
2263 * @seq: seq file to print on
2264 * @offset: not used
2265 *
2266 * Return: always 0
2267 */
2268 int batadv_bla_backbone_table_seq_print_text(struct seq_file *seq, void *offset)
2269 {
2270 struct net_device *net_dev = (struct net_device *)seq->private;
2271 struct batadv_priv *bat_priv = netdev_priv(net_dev);
2272 struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
2273 struct batadv_bla_backbone_gw *backbone_gw;
2274 struct batadv_hard_iface *primary_if;
2275 struct hlist_head *head;
2276 int secs, msecs;
2277 u16 backbone_crc;
2278 u32 i;
2279 bool is_own;
2280 u8 *primary_addr;
2281
2282 primary_if = batadv_seq_print_text_primary_if_get(seq);
2283 if (!primary_if)
2284 goto out;
2285
2286 primary_addr = primary_if->net_dev->dev_addr;
2287 seq_printf(seq,
2288 "Backbones announced for the mesh %s (orig %pM, group id %#.4x)\n",
2289 net_dev->name, primary_addr,
2290 ntohs(bat_priv->bla.claim_dest.group));
2291 seq_puts(seq, " Originator VID last seen (CRC )\n");
2292 for (i = 0; i < hash->size; i++) {
2293 head = &hash->table[i];
2294
2295 rcu_read_lock();
2296 hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
2297 msecs = jiffies_to_msecs(jiffies -
2298 backbone_gw->lasttime);
2299 secs = msecs / 1000;
2300 msecs = msecs % 1000;
2301
2302 is_own = batadv_compare_eth(backbone_gw->orig,
2303 primary_addr);
2304 if (is_own)
2305 continue;
2306
2307 spin_lock_bh(&backbone_gw->crc_lock);
2308 backbone_crc = backbone_gw->crc;
2309 spin_unlock_bh(&backbone_gw->crc_lock);
2310
2311 seq_printf(seq, " * %pM on %5d %4i.%03is (%#.4x)\n",
2312 backbone_gw->orig,
2313 batadv_print_vid(backbone_gw->vid), secs,
2314 msecs, backbone_crc);
2315 }
2316 rcu_read_unlock();
2317 }
2318 out:
2319 if (primary_if)
2320 batadv_hardif_put(primary_if);
2321 return 0;
2322 }
2323 #endif
2324
2325 /**
2326 * batadv_bla_backbone_dump_entry() - dump one entry of the backbone table to a
2327 * netlink socket
2328 * @msg: buffer for the message
2329 * @portid: netlink port
2330 * @cb: Control block containing additional options
2331 * @primary_if: primary interface
2332 * @backbone_gw: entry to dump
2333 *
2334 * Return: 0 or error code.
2335 */
2336 static int
2337 batadv_bla_backbone_dump_entry(struct sk_buff *msg, u32 portid,
2338 struct netlink_callback *cb,
2339 struct batadv_hard_iface *primary_if,
2340 struct batadv_bla_backbone_gw *backbone_gw)
2341 {
2342 u8 *primary_addr = primary_if->net_dev->dev_addr;
2343 u16 backbone_crc;
2344 bool is_own;
2345 int msecs;
2346 void *hdr;
2347 int ret = -EINVAL;
2348
2349 hdr = genlmsg_put(msg, portid, cb->nlh->nlmsg_seq,
2350 &batadv_netlink_family, NLM_F_MULTI,
2351 BATADV_CMD_GET_BLA_BACKBONE);
2352 if (!hdr) {
2353 ret = -ENOBUFS;
2354 goto out;
2355 }
2356
2357 genl_dump_check_consistent(cb, hdr);
2358
2359 is_own = batadv_compare_eth(backbone_gw->orig, primary_addr);
2360
2361 spin_lock_bh(&backbone_gw->crc_lock);
2362 backbone_crc = backbone_gw->crc;
2363 spin_unlock_bh(&backbone_gw->crc_lock);
2364
2365 msecs = jiffies_to_msecs(jiffies - backbone_gw->lasttime);
2366
2367 if (is_own)
2368 if (nla_put_flag(msg, BATADV_ATTR_BLA_OWN)) {
2369 genlmsg_cancel(msg, hdr);
2370 goto out;
2371 }
2372
2373 if (nla_put(msg, BATADV_ATTR_BLA_BACKBONE, ETH_ALEN,
2374 backbone_gw->orig) ||
2375 nla_put_u16(msg, BATADV_ATTR_BLA_VID, backbone_gw->vid) ||
2376 nla_put_u16(msg, BATADV_ATTR_BLA_CRC,
2377 backbone_crc) ||
2378 nla_put_u32(msg, BATADV_ATTR_LAST_SEEN_MSECS, msecs)) {
2379 genlmsg_cancel(msg, hdr);
2380 goto out;
2381 }
2382
2383 genlmsg_end(msg, hdr);
2384 ret = 0;
2385
2386 out:
2387 return ret;
2388 }
2389
2390 /**
2391 * batadv_bla_backbone_dump_bucket() - dump one bucket of the backbone table to
2392 * a netlink socket
2393 * @msg: buffer for the message
2394 * @portid: netlink port
2395 * @cb: Control block containing additional options
2396 * @primary_if: primary interface
2397 * @hash: hash to dump
2398 * @bucket: bucket index to dump
2399 * @idx_skip: How many entries to skip
2400 *
2401 * Return: always 0.
2402 */
2403 static int
2404 batadv_bla_backbone_dump_bucket(struct sk_buff *msg, u32 portid,
2405 struct netlink_callback *cb,
2406 struct batadv_hard_iface *primary_if,
2407 struct batadv_hashtable *hash,
2408 unsigned int bucket, int *idx_skip)
2409 {
2410 struct batadv_bla_backbone_gw *backbone_gw;
2411 int idx = 0;
2412 int ret = 0;
2413
2414 spin_lock_bh(&hash->list_locks[bucket]);
2415 cb->seq = atomic_read(&hash->generation) << 1 | 1;
2416
2417 hlist_for_each_entry(backbone_gw, &hash->table[bucket], hash_entry) {
2418 if (idx++ < *idx_skip)
2419 continue;
2420
2421 ret = batadv_bla_backbone_dump_entry(msg, portid, cb,
2422 primary_if, backbone_gw);
2423 if (ret) {
2424 *idx_skip = idx - 1;
2425 goto unlock;
2426 }
2427 }
2428
2429 *idx_skip = 0;
2430 unlock:
2431 spin_unlock_bh(&hash->list_locks[bucket]);
2432 return ret;
2433 }
2434
2435 /**
2436 * batadv_bla_backbone_dump() - dump backbone table to a netlink socket
2437 * @msg: buffer for the message
2438 * @cb: callback structure containing arguments
2439 *
2440 * Return: message length.
2441 */
2442 int batadv_bla_backbone_dump(struct sk_buff *msg, struct netlink_callback *cb)
2443 {
2444 struct batadv_hard_iface *primary_if = NULL;
2445 int portid = NETLINK_CB(cb->skb).portid;
2446 struct net *net = sock_net(cb->skb->sk);
2447 struct net_device *soft_iface;
2448 struct batadv_hashtable *hash;
2449 struct batadv_priv *bat_priv;
2450 int bucket = cb->args[0];
2451 int idx = cb->args[1];
2452 int ifindex;
2453 int ret = 0;
2454
2455 ifindex = batadv_netlink_get_ifindex(cb->nlh,
2456 BATADV_ATTR_MESH_IFINDEX);
2457 if (!ifindex)
2458 return -EINVAL;
2459
2460 soft_iface = dev_get_by_index(net, ifindex);
2461 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
2462 ret = -ENODEV;
2463 goto out;
2464 }
2465
2466 bat_priv = netdev_priv(soft_iface);
2467 hash = bat_priv->bla.backbone_hash;
2468
2469 primary_if = batadv_primary_if_get_selected(bat_priv);
2470 if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
2471 ret = -ENOENT;
2472 goto out;
2473 }
2474
2475 while (bucket < hash->size) {
2476 if (batadv_bla_backbone_dump_bucket(msg, portid, cb, primary_if,
2477 hash, bucket, &idx))
2478 break;
2479 bucket++;
2480 }
2481
2482 cb->args[0] = bucket;
2483 cb->args[1] = idx;
2484
2485 ret = msg->len;
2486
2487 out:
2488 if (primary_if)
2489 batadv_hardif_put(primary_if);
2490
2491 if (soft_iface)
2492 dev_put(soft_iface);
2493
2494 return ret;
2495 }
2496
2497 #ifdef CONFIG_BATMAN_ADV_DAT
2498 /**
2499 * batadv_bla_check_claim() - check if address is claimed
2500 *
2501 * @bat_priv: the bat priv with all the soft interface information
2502 * @addr: mac address of which the claim status is checked
2503 * @vid: the VLAN ID
2504 *
2505 * Check if the given address is claimed by the local device itself.
2506 *
2507 * Return: true if bla is disabled or the mac is claimed by the device,
2508 * false if the address is already claimed by another backbone gateway
2509 */
2510 bool batadv_bla_check_claim(struct batadv_priv *bat_priv,
2511 u8 *addr, unsigned short vid)
2512 {
2513 struct batadv_bla_claim search_claim;
2514 struct batadv_bla_claim *claim = NULL;
2515 struct batadv_hard_iface *primary_if = NULL;
2516 bool ret = true;
2517
2518 if (!atomic_read(&bat_priv->bridge_loop_avoidance))
2519 return ret;
2520
2521 primary_if = batadv_primary_if_get_selected(bat_priv);
2522 if (!primary_if)
2523 return ret;
2524
2525 /* First look if the mac address is claimed */
2526 ether_addr_copy(search_claim.addr, addr);
2527 search_claim.vid = vid;
2528
2529 claim = batadv_claim_hash_find(bat_priv, &search_claim);
2530
2531 /* If there is a claim and we are not owner of the claim,
2532 * return false.
2533 */
2534 if (claim) {
2535 if (!batadv_compare_eth(claim->backbone_gw->orig,
2536 primary_if->net_dev->dev_addr))
2537 ret = false;
2538 batadv_claim_put(claim);
2539 }
2540
2541 batadv_hardif_put(primary_if);
2542 return ret;
2543 }
2544 #endif
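/* Illustrative sketch (not upstream code): a DAT-like consumer could use
 * batadv_bla_check_claim() to avoid answering ARP requests on behalf of a
 * client that another backbone gateway has claimed. It assumes
 * CONFIG_BATMAN_ADV_DAT is enabled; dat_example_may_reply() is a
 * hypothetical helper name.
 */
#if 0
static bool dat_example_may_reply(struct batadv_priv *bat_priv,
				  u8 *client_addr, unsigned short vid)
{
	/* another backbone gateway owns this client: let it reply instead */
	if (!batadv_bla_check_claim(bat_priv, client_addr, vid))
		return false;

	return true;
}
#endif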
2545