/* Copyright (C) 2009-2015 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "originator.h"
#include "main.h"

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "multicast.h"
#include "network-coding.h"
#include "routing.h"
#include "translation-table.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/* returns 1 if they are the same originator */
int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

	return batadv_compare_eth(data1, data2);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns the vlan object identified by vid and belonging to orig_node or
 * NULL if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		vlan = tmp;

		break;
	}
	rcu_read_unlock();

	return vlan;
}

/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Returns NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;

	atomic_set(&vlan->refcount, 2);
	vlan->vid = vid;

	hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}

/**
 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
{
	if (atomic_dec_and_test(&orig_vlan->refcount))
		kfree_rcu(orig_vlan, rcu);
}

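/* Illustrative sketch, not part of the original file: the vlan helpers above
 * follow the usual batman-adv refcount pattern. Both
 * batadv_orig_node_vlan_get() and batadv_orig_node_vlan_new() return the
 * object with its refcount already increased, so a caller is expected to do
 * something like:
 *
 *	vlan = batadv_orig_node_vlan_new(orig_node, vid);
 *	if (!vlan)
 *		return -ENOMEM;
 *	... use vlan ...
 *	batadv_orig_node_vlan_free_ref(vlan);
 */
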
int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}

/**
 * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
 *  free after rcu grace period
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
static void
batadv_neigh_ifinfo_release(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);

	kfree_rcu(neigh_ifinfo, rcu);
}

/**
 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
 *  the neigh_ifinfo
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	if (atomic_dec_and_test(&neigh_ifinfo->refcount))
		batadv_neigh_ifinfo_release(neigh_ifinfo);
}

/**
 * batadv_neigh_node_release - release neigh_node from lists and queue for
 *  free after rcu grace period
 * @neigh_node: the neighbor to release
 */
static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh_node->ifinfo_list, list) {
		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
	}

	batadv_hardif_free_ref(neigh_node->if_incoming);

	kfree_rcu(neigh_node, rcu);
}

/**
 * batadv_neigh_node_free_ref - decrement the neighbor's refcounter and
 *  possibly release it
 * @neigh_node: the neighbor to release
 */
void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
{
	if (atomic_dec_and_test(&neigh_node->refcount))
		batadv_neigh_node_release(neigh_node);
}

/**
 * batadv_orig_router_get - get the router towards an originator for an iface
 * @orig_node: the orig node for the router
 * @if_outgoing: the interface where the payload packet has been received or
 *  the OGM should be sent to
 *
 * Returns the neighbor which should be the router for this orig_node/iface.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
		       const struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
		if (orig_ifinfo->if_outgoing != if_outgoing)
			continue;

		router = rcu_dereference(orig_ifinfo->router);
		break;
	}

	if (router && !atomic_inc_not_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}

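/* Illustrative sketch, not part of the original file: the router returned by
 * batadv_orig_router_get() carries an extra reference which the caller has to
 * drop again via batadv_neigh_node_free_ref(), e.g.:
 *
 *	router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
 *	if (router) {
 *		... use router ...
 *		batadv_neigh_node_free_ref(router);
 *	}
 */
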
/**
 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns the requested orig_ifinfo or NULL if not found.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
				 list) {
		if (tmp->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp->refcount))
			continue;

		orig_ifinfo = tmp;
		break;
	}
	rcu_read_unlock();

	return orig_ifinfo;
}

/**
 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the orig_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
	unsigned long reset_time;

	spin_lock_bh(&orig_node->neigh_list_lock);

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
	if (orig_ifinfo)
		goto out;

	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
	if (!orig_ifinfo)
		goto out;

	if (if_outgoing != BATADV_IF_DEFAULT &&
	    !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(orig_ifinfo);
		orig_ifinfo = NULL;
		goto out;
	}

	reset_time = jiffies - 1;
	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_ifinfo->batman_seqno_reset = reset_time;
	orig_ifinfo->if_outgoing = if_outgoing;
	INIT_HLIST_NODE(&orig_ifinfo->list);
	atomic_set(&orig_ifinfo->refcount, 2);
	hlist_add_head_rcu(&orig_ifinfo->list,
			   &orig_node->ifinfo_list);
out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return orig_ifinfo;
}

/**
 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Returns the requested neigh_ifinfo or NULL if not found.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
				   *tmp_neigh_ifinfo;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
				 list) {
		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
			continue;

		neigh_ifinfo = tmp_neigh_ifinfo;
		break;
	}
	rcu_read_unlock();

	return neigh_ifinfo;
}

/**
 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Returns NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the list
 * if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	spin_lock_bh(&neigh->ifinfo_lock);

	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
	if (neigh_ifinfo)
		goto out;

	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
	if (!neigh_ifinfo)
		goto out;

	if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
		kfree(neigh_ifinfo);
		neigh_ifinfo = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_ifinfo->list);
	atomic_set(&neigh_ifinfo->refcount, 2);
	neigh_ifinfo->if_outgoing = if_outgoing;

	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

out:
	spin_unlock_bh(&neigh->ifinfo_lock);

	return neigh_ifinfo;
}

/**
 * batadv_neigh_node_get - retrieve a neighbour from the list
 * @orig_node: originator which the neighbour belongs to
 * @hard_iface: the interface where this neighbour is connected to
 * @addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this originator list
 * which is connected through the provided hard interface.
 * Returns NULL if the neighbour is not found.
 */
static struct batadv_neigh_node *
batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
		      const struct batadv_hard_iface *hard_iface,
		      const u8 *addr)
{
	struct batadv_neigh_node *tmp_neigh_node, *res = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
		if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
			continue;

		if (tmp_neigh_node->if_incoming != hard_iface)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		res = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	return res;
}

/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @orig_node: originator object representing the neighbour
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 * Returns the new object or NULL on failure.
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_orig_node *orig_node,
		      struct batadv_hard_iface *hard_iface,
		      const u8 *neigh_addr)
{
	struct batadv_neigh_node *neigh_node;

	spin_lock_bh(&orig_node->neigh_list_lock);

	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
	if (neigh_node)
		goto out;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

	if (!atomic_inc_not_zero(&hard_iface->refcount)) {
		kfree(neigh_node);
		neigh_node = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
	spin_lock_init(&neigh_node->ifinfo_lock);

	ether_addr_copy(neigh_node->addr, neigh_addr);
	neigh_node->if_incoming = hard_iface;
	neigh_node->orig_node = orig_node;
	neigh_node->last_seen = jiffies;

	/* extra reference for return */
	atomic_set(&neigh_node->refcount, 2);

	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);

	batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);

out:
	spin_unlock_bh(&orig_node->neigh_list_lock);

	return neigh_node;
}

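/* Illustrative sketch, not part of the original file: batadv_neigh_node_new()
 * either finds an existing neighbor or creates a new one; in both cases the
 * reference handed back belongs to the caller and must eventually be dropped:
 *
 *	neigh_node = batadv_neigh_node_new(orig_node, hard_iface, neigh_addr);
 *	if (!neigh_node)
 *		goto out;
 *	... update metrics on neigh_node ...
 *	batadv_neigh_node_free_ref(neigh_node);
 */
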
/**
 * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
 *  free after rcu grace period
 * @orig_ifinfo: the orig_ifinfo object to release
 */
static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
{
	struct batadv_neigh_node *router;

	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_free_ref(orig_ifinfo->if_outgoing);

	/* this is the last reference to this object */
	router = rcu_dereference_protected(orig_ifinfo->router, true);
	if (router)
		batadv_neigh_node_free_ref(router);

	kfree_rcu(orig_ifinfo, rcu);
}

/**
 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
 *  the orig_ifinfo
 * @orig_ifinfo: the orig_ifinfo object to release
 */
void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
{
	if (atomic_dec_and_test(&orig_ifinfo->refcount))
		batadv_orig_ifinfo_release(orig_ifinfo);
}

/**
 * batadv_orig_node_free_rcu - free the orig_node
 * @rcu: rcu pointer of the orig_node
 */
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	batadv_mcast_purge_orig(orig_node);

	batadv_frag_purge_orig(orig_node, NULL);

	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}

/**
 * batadv_orig_node_release - release orig_node from lists and queue for
 *  free after rcu grace period
 * @orig_node: the orig node to free
 */
static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_orig_node_vlan *vlan;
	struct batadv_orig_ifinfo *last_candidate;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_free_ref(neigh_node);
	}

	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
	}

	last_candidate = orig_node->last_bonding_candidate;
	orig_node->last_bonding_candidate = NULL;
	spin_unlock_bh(&orig_node->neigh_list_lock);

	if (last_candidate)
		batadv_orig_ifinfo_free_ref(last_candidate);

	spin_lock_bh(&orig_node->vlan_list_lock);
	hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) {
		hlist_del_rcu(&vlan->list);
		batadv_orig_node_vlan_free_ref(vlan);
	}
	spin_unlock_bh(&orig_node->vlan_list_lock);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

	call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
 *  release it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
{
	if (atomic_dec_and_test(&orig_node->refcount))
		batadv_orig_node_release(orig_node);
}

void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_free_ref(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialises all the generic fields.
 * The new object is not added to the originator list.
 * Returns the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const u8 *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_HLIST_HEAD(&orig_node->vlan_list);
	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	atomic_set(&orig_node->refcount, 2);

	orig_node->bat_priv = bat_priv;
	ether_addr_copy(orig_node->orig, addr);
	batadv_dat_init_orig_node_addr(orig_node);
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	orig_node->last_seen = jiffies;
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;

#ifdef CONFIG_BATMAN_ADV_MCAST
	orig_node->mcast_flags = BATADV_NO_FLAGS;
	INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
	spin_lock_init(&orig_node->mcast_handler_lock);
#endif

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_free_ref(vlan);

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;
free_orig_node:
	kfree(orig_node);
	return NULL;
}

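/* Illustrative sketch, not part of the original file: as the kernel-doc above
 * notes, batadv_orig_node_new() does not insert the new node into the
 * originator hash; a caller typically does that itself and releases the
 * returned reference once it no longer needs the node:
 *
 *	orig_node = batadv_orig_node_new(bat_priv, addr);
 *	if (!orig_node)
 *		return NULL;
 *	... insert into bat_priv->orig_hash (routing algorithm specific) ...
 *	... use orig_node, then: ...
 *	batadv_orig_node_free_ref(orig_node);
 */
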
/**
 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
 * @bat_priv: the bat priv with all the soft interface information
 * @neigh: the neigh node which is to be checked
 */
static void
batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
			  struct batadv_neigh_node *neigh)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;

	spin_lock_bh(&neigh->ifinfo_lock);

	/* for all ifinfo objects for this neighbor */
	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh->ifinfo_list, list) {
		if_outgoing = neigh_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
			   neigh->addr, if_outgoing->net_dev->name);

		hlist_del_rcu(&neigh_ifinfo->list);
		batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
	}

	spin_unlock_bh(&neigh->ifinfo_lock);
}

/**
 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any ifinfo entry was purged, false otherwise.
 */
static bool
batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;
	bool ifinfo_purged = false;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all ifinfo objects for this originator */
	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		if_outgoing = orig_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "router/ifinfo purge: originator %pM, iface: %s\n",
			   orig_node->orig, if_outgoing->net_dev->name);

		ifinfo_purged = true;

		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_free_ref(orig_ifinfo);
		if (orig_node->last_bonding_candidate == orig_ifinfo) {
			orig_node->last_bonding_candidate = NULL;
			batadv_orig_ifinfo_free_ref(orig_ifinfo);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	return ifinfo_purged;
}

/**
 * batadv_purge_orig_neighbors - purges neighbors from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Returns true if any neighbor was purged, false otherwise
 */
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_neigh_node_free_ref(neigh_node);
		} else {
			/* only necessary if not the whole neighbor is to be
			 * deleted, but some interface has been removed.
			 */
			batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

/**
 * batadv_find_best_neighbor - finds the best neighbor after purging
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 * @if_outgoing: the interface for which the metric should be compared
 *
 * Returns the current best neighbor, with refcount increased.
 */
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
			  struct batadv_orig_node *orig_node,
			  struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_node *best = NULL, *neigh;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

	rcu_read_lock();
	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
						best, if_outgoing) <= 0))
			continue;

		if (!atomic_inc_not_zero(&neigh->refcount))
			continue;

		if (best)
			batadv_neigh_node_free_ref(best);

		best = neigh;
	}
	rcu_read_unlock();

	return best;
}

/**
 * batadv_purge_orig_node - purges obsolete information from an orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * This function checks if the orig_node or substructures of it have become
 * obsolete, and purges this information if that's the case.
 *
 * Returns true if the orig_node is to be removed, false otherwise.
 */
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;
	struct batadv_hard_iface *hard_iface;
	bool changed_ifinfo, changed_neigh;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	}
	changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
	changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);

	if (!changed_ifinfo && !changed_neigh)
		return false;

	/* first for NULL ... */
	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
						    BATADV_IF_DEFAULT);
	batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
			    best_neigh_node);
	if (best_neigh_node)
		batadv_neigh_node_free_ref(best_neigh_node);

	/* ... then for all other interfaces. */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		best_neigh_node = batadv_find_best_neighbor(bat_priv,
							    orig_node,
							    hard_iface);
		batadv_update_route(bat_priv, orig_node, hard_iface,
				    best_neigh_node);
		if (best_neigh_node)
			batadv_neigh_node_free_ref(best_neigh_node);
	}
	rcu_read_unlock();

	return false;
}

static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_tt_global_del_orig(orig_node->bat_priv,
							  orig_node, -1,
							  "originator timed out");
				batadv_orig_node_free_ref(orig_node);
				continue;
			}

			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_gw_election(bat_priv);
}

static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_free_ref(primary_if);

	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
					       BATADV_IF_DEFAULT);

	return 0;
}

/**
 * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
 *  outgoing interface
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Returns 0
 */
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_hard_iface *hard_iface;
	struct batadv_priv *bat_priv;

	hard_iface = batadv_hardif_get_by_netdev(net_dev);

	if (!hard_iface || !hard_iface->soft_iface) {
		seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
		goto out;
	}

	bat_priv = netdev_priv(hard_iface->soft_iface);
	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		goto out;
	}

	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
		seq_puts(seq, "Interface not active\n");
		goto out;
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
		   hard_iface->net_dev->dev_addr,
		   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);

out:
	if (hard_iface)
		batadv_hardif_free_ref(hard_iface);
	return 0;
}

int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    unsigned int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_add_if)
				ret = bao->bat_orig_add_if(orig_node,
							   max_if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    unsigned int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,
							   max_if_num,
							   hard_iface->if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}