/* Copyright (C) 2007-2015 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/byteorder/generic.h>
#include <linux/crc32c.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/dsfield.h>
#include <net/rtnetlink.h>

#include "bat_algo.h"
#include "bridge_loop_avoidance.h"
#include "debugfs.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
#include "icmp_socket.h"
#include "multicast.h"
#include "network-coding.h"
#include "originator.h"
#include "packet.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"
#include "translation-table.h"

/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
static int (*batadv_rx_handler[256])(struct sk_buff *,
                                     struct batadv_hard_iface *);
char batadv_routing_algo[20] = "BATMAN_IV";
static struct hlist_head batadv_algo_list;

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);

static int __init batadv_init(void)
{
        INIT_LIST_HEAD(&batadv_hardif_list);
        INIT_HLIST_HEAD(&batadv_algo_list);

        batadv_recv_handler_init();

        batadv_iv_init();
        batadv_nc_init();

        batadv_event_workqueue = create_singlethread_workqueue("bat_events");

        if (!batadv_event_workqueue)
                return -ENOMEM;

        batadv_socket_init();
        batadv_debugfs_init();

        register_netdevice_notifier(&batadv_hard_if_notifier);
        rtnl_link_register(&batadv_link_ops);

        pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
                BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

        return 0;
}

static void __exit batadv_exit(void)
{
        batadv_debugfs_destroy();
        rtnl_link_unregister(&batadv_link_ops);
        unregister_netdevice_notifier(&batadv_hard_if_notifier);
        batadv_hardif_remove_interfaces();

        flush_workqueue(batadv_event_workqueue);
        destroy_workqueue(batadv_event_workqueue);
        batadv_event_workqueue = NULL;

        rcu_barrier();
}

int batadv_mesh_init(struct net_device *soft_iface)
{
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        int ret;

        spin_lock_init(&bat_priv->forw_bat_list_lock);
        spin_lock_init(&bat_priv->forw_bcast_list_lock);
        spin_lock_init(&bat_priv->tt.changes_list_lock);
        spin_lock_init(&bat_priv->tt.req_list_lock);
        spin_lock_init(&bat_priv->tt.roam_list_lock);
        spin_lock_init(&bat_priv->tt.last_changeset_lock);
        spin_lock_init(&bat_priv->tt.commit_lock);
        spin_lock_init(&bat_priv->gw.list_lock);
#ifdef CONFIG_BATMAN_ADV_MCAST
        spin_lock_init(&bat_priv->mcast.want_lists_lock);
#endif
        spin_lock_init(&bat_priv->tvlv.container_list_lock);
        spin_lock_init(&bat_priv->tvlv.handler_list_lock);
        spin_lock_init(&bat_priv->softif_vlan_list_lock);

        INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
        INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
        INIT_HLIST_HEAD(&bat_priv->gw.list);
#ifdef CONFIG_BATMAN_ADV_MCAST
        INIT_HLIST_HEAD(&bat_priv->mcast.want_all_unsnoopables_list);
        INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv4_list);
        INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
#endif
        INIT_LIST_HEAD(&bat_priv->tt.changes_list);
        INIT_HLIST_HEAD(&bat_priv->tt.req_list);
        INIT_LIST_HEAD(&bat_priv->tt.roam_list);
#ifdef CONFIG_BATMAN_ADV_MCAST
        INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
#endif
        INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
        INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
        INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);

        ret = batadv_originator_init(bat_priv);
        if (ret < 0) {
                atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
                goto err_orig;
        }

        ret = batadv_tt_init(bat_priv);
        if (ret < 0) {
                atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
                goto err_tt;
        }

        ret = batadv_bla_init(bat_priv);
        if (ret < 0) {
                atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
                goto err_bla;
        }

        ret = batadv_dat_init(bat_priv);
        if (ret < 0) {
                atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
                goto err_dat;
        }

        ret = batadv_nc_mesh_init(bat_priv);
        if (ret < 0) {
                atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
                goto err_nc;
        }

        batadv_gw_init(bat_priv);
        batadv_mcast_init(bat_priv);

        atomic_set(&bat_priv->gw.reselect, 0);
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

        return 0;

err_nc:
        batadv_dat_free(bat_priv);
err_dat:
        batadv_bla_free(bat_priv);
err_bla:
        batadv_tt_free(bat_priv);
err_tt:
        batadv_originator_free(bat_priv);
err_orig:
        batadv_purge_outstanding_packets(bat_priv, NULL);
        atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);

        return ret;
}

void batadv_mesh_free(struct net_device *soft_iface)
{
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);

        atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

        batadv_purge_outstanding_packets(bat_priv, NULL);

        batadv_gw_node_free(bat_priv);
        batadv_nc_mesh_free(bat_priv);
        batadv_dat_free(bat_priv);
        batadv_bla_free(bat_priv);

        batadv_mcast_free(bat_priv);

        /* Free the TT and the originator tables only after having terminated
         * all the other components that may still be using these structures.
         */
        batadv_tt_free(bat_priv);

        /* Since the originator table clean up routine is accessing the TT
         * tables as well, it has to be invoked after the TT tables have been
         * freed and marked as empty. This ensures that no cleanup RCU callbacks
         * accessing the TT data are scheduled for later execution.
         */
        batadv_originator_free(bat_priv);

        batadv_gw_free(bat_priv);

        free_percpu(bat_priv->bat_counters);
        bat_priv->bat_counters = NULL;

        atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}

/**
 * batadv_is_my_mac - check if the given mac address belongs to any of the real
 * interfaces in the current mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address to check
 *
 * Returns 'true' if the mac address was found, false otherwise.
 */
bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
{
        const struct batadv_hard_iface *hard_iface;
        bool is_my_mac = false;

        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
                if (hard_iface->if_status != BATADV_IF_ACTIVE)
                        continue;

                if (hard_iface->soft_iface != bat_priv->soft_iface)
                        continue;

                if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
                        is_my_mac = true;
                        break;
                }
        }
        rcu_read_unlock();
        return is_my_mac;
}

/**
 * batadv_seq_print_text_primary_if_get - called from debugfs table printing
 * function that requires the primary interface
 * @seq: debugfs table seq_file struct
 *
 * Returns primary interface if found or NULL otherwise.
 */
struct batadv_hard_iface *
batadv_seq_print_text_primary_if_get(struct seq_file *seq)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_priv *bat_priv = netdev_priv(net_dev);
        struct batadv_hard_iface *primary_if;

        primary_if = batadv_primary_if_get_selected(bat_priv);

        if (!primary_if) {
                seq_printf(seq,
                           "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
                           net_dev->name);
                goto out;
        }

        if (primary_if->if_status == BATADV_IF_ACTIVE)
                goto out;

        seq_printf(seq,
                   "BATMAN mesh %s disabled - primary interface not active\n",
                   net_dev->name);
        batadv_hardif_free_ref(primary_if);
        primary_if = NULL;

out:
        return primary_if;
}

/**
 * batadv_max_header_len - calculate maximum encapsulation overhead for a
 * payload packet
 *
 * Returns the maximum encapsulation overhead in bytes.
 */
int batadv_max_header_len(void)
{
        int header_len = 0;

        header_len = max_t(int, header_len,
                           sizeof(struct batadv_unicast_packet));
        header_len = max_t(int, header_len,
                           sizeof(struct batadv_unicast_4addr_packet));
        header_len = max_t(int, header_len,
                           sizeof(struct batadv_bcast_packet));

#ifdef CONFIG_BATMAN_ADV_NC
        header_len = max_t(int, header_len,
                           sizeof(struct batadv_coded_packet));
#endif

        return header_len + ETH_HLEN;
}

/**
 * batadv_skb_set_priority - sets skb priority according to packet content
 * @skb: the packet to be sent
 * @offset: offset to the packet content
 *
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by cfg80211 or other drivers.
 */
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
        struct iphdr ip_hdr_tmp, *ip_hdr;
        struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
        struct ethhdr ethhdr_tmp, *ethhdr;
        struct vlan_ethhdr *vhdr, vhdr_tmp;
        u32 prio;

        /* already set, do nothing */
        if (skb->priority >= 256 && skb->priority <= 263)
                return;

        ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
        if (!ethhdr)
                return;

        switch (ethhdr->h_proto) {
        case htons(ETH_P_8021Q):
                vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
                                          sizeof(*vhdr), &vhdr_tmp);
                if (!vhdr)
                        return;
                prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
                prio = prio >> VLAN_PRIO_SHIFT;
                break;
        case htons(ETH_P_IP):
                ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
                                            sizeof(*ip_hdr), &ip_hdr_tmp);
                if (!ip_hdr)
                        return;
                prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
                break;
        case htons(ETH_P_IPV6):
                ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
                                             sizeof(*ip6_hdr), &ip6_hdr_tmp);
                if (!ip6_hdr)
                        return;
                prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
                break;
        default:
                return;
        }

        skb->priority = prio + 256;
}
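
/* Worked example for batadv_skb_set_priority() (illustrative only): an IPv4
 * packet carrying DSCP EF has a dsfield of 0xb8, so the 802.1d class is
 * (0xb8 & 0xfc) >> 5 = 5 and skb->priority is set to 256 + 5 = 261.
 */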

static int batadv_recv_unhandled_packet(struct sk_buff *skb,
                                        struct batadv_hard_iface *recv_if)
{
        return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
                           struct packet_type *ptype,
                           struct net_device *orig_dev)
{
        struct batadv_priv *bat_priv;
        struct batadv_ogm_packet *batadv_ogm_packet;
        struct batadv_hard_iface *hard_iface;
        u8 idx;
        int ret;

        hard_iface = container_of(ptype, struct batadv_hard_iface,
                                  batman_adv_ptype);
        skb = skb_share_check(skb, GFP_ATOMIC);

        /* skb was released by skb_share_check() */
        if (!skb)
                goto err_out;

        /* packet should hold at least type and version */
        if (unlikely(!pskb_may_pull(skb, 2)))
                goto err_free;

        /* expect a valid ethernet header here. */
        if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
                goto err_free;

        if (!hard_iface->soft_iface)
                goto err_free;

        bat_priv = netdev_priv(hard_iface->soft_iface);

        if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
                goto err_free;

        /* discard frames on not active interfaces */
        if (hard_iface->if_status != BATADV_IF_ACTIVE)
                goto err_free;

        batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

        if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
                batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
                           "Drop packet: incompatible batman version (%i)\n",
                           batadv_ogm_packet->version);
                goto err_free;
        }

        /* reset control block to avoid left overs from previous users */
        memset(skb->cb, 0, sizeof(struct batadv_skb_cb));

        /* all receive handlers return whether they received or reused
         * the supplied skb. if not, we have to free the skb.
         */
        idx = batadv_ogm_packet->packet_type;
        ret = (*batadv_rx_handler[idx])(skb, hard_iface);

        if (ret == NET_RX_DROP)
                kfree_skb(skb);

        /* return NET_RX_SUCCESS in any case as we
         * most probably dropped the packet for
         * routing-logical reasons.
         */
        return NET_RX_SUCCESS;

err_free:
        kfree_skb(skb);
err_out:
        return NET_RX_DROP;
}

static void batadv_recv_handler_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
                batadv_rx_handler[i] = batadv_recv_unhandled_packet;

        for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
                batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;

        /* compile time checks for sizes */
        BUILD_BUG_ON(sizeof(struct batadv_bla_claim_dst) != 6);
        BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) != 24);
        BUILD_BUG_ON(sizeof(struct batadv_icmp_header) != 20);
        BUILD_BUG_ON(sizeof(struct batadv_icmp_packet) != 20);
        BUILD_BUG_ON(sizeof(struct batadv_icmp_packet_rr) != 116);
        BUILD_BUG_ON(sizeof(struct batadv_unicast_packet) != 10);
        BUILD_BUG_ON(sizeof(struct batadv_unicast_4addr_packet) != 18);
        BUILD_BUG_ON(sizeof(struct batadv_frag_packet) != 20);
        BUILD_BUG_ON(sizeof(struct batadv_bcast_packet) != 14);
        BUILD_BUG_ON(sizeof(struct batadv_coded_packet) != 46);
        BUILD_BUG_ON(sizeof(struct batadv_unicast_tvlv_packet) != 20);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_hdr) != 4);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_gateway_data) != 8);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
        BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);

        /* broadcast packet */
        batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;

        /* unicast packets ... */
        /* unicast with 4 addresses packet */
        batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
        /* unicast packet */
        batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
        /* unicast tvlv packet */
        batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
        /* batman icmp packet */
        batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
        /* Fragmented packets */
        batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}

int
batadv_recv_handler_register(u8 packet_type,
                             int (*recv_handler)(struct sk_buff *,
                                                 struct batadv_hard_iface *))
{
        int (*curr)(struct sk_buff *,
                    struct batadv_hard_iface *);
        curr = batadv_rx_handler[packet_type];

        if ((curr != batadv_recv_unhandled_packet) &&
            (curr != batadv_recv_unhandled_unicast_packet))
                return -EBUSY;

        batadv_rx_handler[packet_type] = recv_handler;
        return 0;
}
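
/* Illustrative usage sketch for batadv_recv_handler_register(); the handler
 * name and packet type below are hypothetical, not part of this module:
 *
 *	static int my_recv_handler(struct sk_buff *skb,
 *				   struct batadv_hard_iface *recv_if)
 *	{
 *		return NET_RX_DROP;	(skb not consumed; caller frees it)
 *	}
 *
 *	ret = batadv_recv_handler_register(BATADV_MY_PACKET_TYPE,
 *					   my_recv_handler);
 *
 * The call returns -EBUSY if the packet type already has a handler.
 */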

void batadv_recv_handler_unregister(u8 packet_type)
{
        batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
}

static struct batadv_algo_ops *batadv_algo_get(char *name)
{
        struct batadv_algo_ops *bat_algo_ops = NULL, *bat_algo_ops_tmp;

        hlist_for_each_entry(bat_algo_ops_tmp, &batadv_algo_list, list) {
                if (strcmp(bat_algo_ops_tmp->name, name) != 0)
                        continue;

                bat_algo_ops = bat_algo_ops_tmp;
                break;
        }

        return bat_algo_ops;
}

int batadv_algo_register(struct batadv_algo_ops *bat_algo_ops)
{
        struct batadv_algo_ops *bat_algo_ops_tmp;

        bat_algo_ops_tmp = batadv_algo_get(bat_algo_ops->name);
        if (bat_algo_ops_tmp) {
                pr_info("Trying to register already registered routing algorithm: %s\n",
                        bat_algo_ops->name);
                return -EEXIST;
        }

        /* all algorithms must implement all ops (for now) */
        if (!bat_algo_ops->bat_iface_enable ||
            !bat_algo_ops->bat_iface_disable ||
            !bat_algo_ops->bat_iface_update_mac ||
            !bat_algo_ops->bat_primary_iface_set ||
            !bat_algo_ops->bat_ogm_schedule ||
            !bat_algo_ops->bat_ogm_emit ||
            !bat_algo_ops->bat_neigh_cmp ||
            !bat_algo_ops->bat_neigh_is_equiv_or_better) {
                pr_info("Routing algo '%s' does not implement required ops\n",
                        bat_algo_ops->name);
                return -EINVAL;
        }

        INIT_HLIST_NODE(&bat_algo_ops->list);
        hlist_add_head(&bat_algo_ops->list, &batadv_algo_list);

        return 0;
}
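
/* Illustrative registration sketch for batadv_algo_register(); the algorithm
 * name and callbacks are hypothetical. All ops checked above must be set or
 * the call fails with -EINVAL:
 *
 *	static struct batadv_algo_ops batadv_my_algo_ops = {
 *		.name = "MY_ALGO",
 *		.bat_iface_enable = my_iface_enable,
 *		.bat_iface_disable = my_iface_disable,
 *		.bat_iface_update_mac = my_iface_update_mac,
 *		.bat_primary_iface_set = my_primary_iface_set,
 *		.bat_ogm_schedule = my_ogm_schedule,
 *		.bat_ogm_emit = my_ogm_emit,
 *		.bat_neigh_cmp = my_neigh_cmp,
 *		.bat_neigh_is_equiv_or_better = my_neigh_is_equiv_or_better,
 *	};
 *
 *	ret = batadv_algo_register(&batadv_my_algo_ops);
 */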

int batadv_algo_select(struct batadv_priv *bat_priv, char *name)
{
        struct batadv_algo_ops *bat_algo_ops;

        bat_algo_ops = batadv_algo_get(name);
        if (!bat_algo_ops)
                return -EINVAL;

        bat_priv->bat_algo_ops = bat_algo_ops;

        return 0;
}

int batadv_algo_seq_print_text(struct seq_file *seq, void *offset)
{
        struct batadv_algo_ops *bat_algo_ops;

        seq_puts(seq, "Available routing algorithms:\n");

        hlist_for_each_entry(bat_algo_ops, &batadv_algo_list, list) {
                seq_printf(seq, " * %s\n", bat_algo_ops->name);
        }

        return 0;
}

/**
 * batadv_skb_crc32 - calculate CRC32 of the packet, skipping the leading
 * header bytes
 * @skb: skb pointing to fragmented socket buffers
 * @payload_ptr: Pointer to position inside the head buffer of the skb
 * marking the start of the data to be CRC'ed
 *
 * payload_ptr must always point to an address in the skb head buffer and not
 * to a fragment.
 */
__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
{
        u32 crc = 0;
        unsigned int from;
        unsigned int to = skb->len;
        struct skb_seq_state st;
        const u8 *data;
        unsigned int len;
        unsigned int consumed = 0;

        from = (unsigned int)(payload_ptr - skb->data);

        skb_prepare_seq_read(skb, from, to, &st);
        while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                crc = crc32c(crc, data, len);
                consumed += len;
        }

        return htonl(crc);
}
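
/* Illustrative usage sketch for batadv_skb_crc32(): checksum everything that
 * follows a (hypothetical) header of hdr_len bytes at the start of the skb
 * head buffer:
 *
 *	__be32 crc = batadv_skb_crc32(skb, skb->data + hdr_len);
 */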

/**
 * batadv_tvlv_handler_free_ref - decrement the tvlv handler refcounter and
 * possibly free it
 * @tvlv_handler: the tvlv handler to free
 */
static void
batadv_tvlv_handler_free_ref(struct batadv_tvlv_handler *tvlv_handler)
{
        if (atomic_dec_and_test(&tvlv_handler->refcount))
                kfree_rcu(tvlv_handler, rcu);
}

/**
 * batadv_tvlv_handler_get - retrieve tvlv handler from the tvlv handler list
 * based on the provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to look for
 * @version: tvlv handler version to look for
 *
 * Returns tvlv handler if found or NULL otherwise.
 */
static struct batadv_tvlv_handler
*batadv_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 version)
{
        struct batadv_tvlv_handler *tvlv_handler_tmp, *tvlv_handler = NULL;

        rcu_read_lock();
        hlist_for_each_entry_rcu(tvlv_handler_tmp,
                                 &bat_priv->tvlv.handler_list, list) {
                if (tvlv_handler_tmp->type != type)
                        continue;

                if (tvlv_handler_tmp->version != version)
                        continue;

                if (!atomic_inc_not_zero(&tvlv_handler_tmp->refcount))
                        continue;

                tvlv_handler = tvlv_handler_tmp;
                break;
        }
        rcu_read_unlock();

        return tvlv_handler;
}

/**
 * batadv_tvlv_container_free_ref - decrement the tvlv container refcounter and
 * possibly free it
 * @tvlv: the tvlv container to free
 */
static void batadv_tvlv_container_free_ref(struct batadv_tvlv_container *tvlv)
{
        if (atomic_dec_and_test(&tvlv->refcount))
                kfree(tvlv);
}

/**
 * batadv_tvlv_container_get - retrieve tvlv container from the tvlv container
 * list based on the provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to look for
 * @version: tvlv container version to look for
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 *
 * Returns tvlv container if found or NULL otherwise.
 */
static struct batadv_tvlv_container
*batadv_tvlv_container_get(struct batadv_priv *bat_priv, u8 type, u8 version)
{
        struct batadv_tvlv_container *tvlv_tmp, *tvlv = NULL;

        hlist_for_each_entry(tvlv_tmp, &bat_priv->tvlv.container_list, list) {
                if (tvlv_tmp->tvlv_hdr.type != type)
                        continue;

                if (tvlv_tmp->tvlv_hdr.version != version)
                        continue;

                if (!atomic_inc_not_zero(&tvlv_tmp->refcount))
                        continue;

                tvlv = tvlv_tmp;
                break;
        }

        return tvlv;
}

/**
 * batadv_tvlv_container_list_size - calculate the size of the tvlv container
 * list entries
 * @bat_priv: the bat priv with all the soft interface information
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 *
 * Returns size of all currently registered tvlv containers in bytes.
 */
static u16 batadv_tvlv_container_list_size(struct batadv_priv *bat_priv)
{
        struct batadv_tvlv_container *tvlv;
        u16 tvlv_len = 0;

        hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
                tvlv_len += sizeof(struct batadv_tvlv_hdr);
                tvlv_len += ntohs(tvlv->tvlv_hdr.len);
        }

        return tvlv_len;
}

/**
 * batadv_tvlv_container_remove - remove tvlv container from the tvlv container
 * list
 * @bat_priv: the bat priv with all the soft interface information
 * @tvlv: the to be removed tvlv container
 *
 * Has to be called with the appropriate locks being acquired
 * (tvlv.container_list_lock).
 */
static void batadv_tvlv_container_remove(struct batadv_priv *bat_priv,
                                         struct batadv_tvlv_container *tvlv)
{
        lockdep_assert_held(&bat_priv->tvlv.container_list_lock);

        if (!tvlv)
                return;

        hlist_del(&tvlv->list);

        /* first call to decrement the counter, second call to free */
        batadv_tvlv_container_free_ref(tvlv);
        batadv_tvlv_container_free_ref(tvlv);
}

/**
 * batadv_tvlv_container_unregister - unregister tvlv container based on the
 * provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type to unregister
 * @version: tvlv container version to unregister
 */
void batadv_tvlv_container_unregister(struct batadv_priv *bat_priv,
                                      u8 type, u8 version)
{
        struct batadv_tvlv_container *tvlv;

        spin_lock_bh(&bat_priv->tvlv.container_list_lock);
        tvlv = batadv_tvlv_container_get(bat_priv, type, version);
        batadv_tvlv_container_remove(bat_priv, tvlv);
        spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}

/**
 * batadv_tvlv_container_register - register tvlv type, version and content
 * to be propagated with each (primary interface) OGM
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv container type
 * @version: tvlv container version
 * @tvlv_value: tvlv container content
 * @tvlv_value_len: tvlv container content length
 *
 * If a container of the same type and version was already registered, the new
 * content replaces the old one.
 */
void batadv_tvlv_container_register(struct batadv_priv *bat_priv,
                                    u8 type, u8 version,
                                    void *tvlv_value, u16 tvlv_value_len)
{
        struct batadv_tvlv_container *tvlv_old, *tvlv_new;

        if (!tvlv_value)
                tvlv_value_len = 0;

        tvlv_new = kzalloc(sizeof(*tvlv_new) + tvlv_value_len, GFP_ATOMIC);
        if (!tvlv_new)
                return;

        tvlv_new->tvlv_hdr.version = version;
        tvlv_new->tvlv_hdr.type = type;
        tvlv_new->tvlv_hdr.len = htons(tvlv_value_len);

        memcpy(tvlv_new + 1, tvlv_value, ntohs(tvlv_new->tvlv_hdr.len));
        INIT_HLIST_NODE(&tvlv_new->list);
        atomic_set(&tvlv_new->refcount, 1);

        spin_lock_bh(&bat_priv->tvlv.container_list_lock);
        tvlv_old = batadv_tvlv_container_get(bat_priv, type, version);
        batadv_tvlv_container_remove(bat_priv, tvlv_old);
        hlist_add_head(&tvlv_new->list, &bat_priv->tvlv.container_list);
        spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
}
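
/* Illustrative usage sketch for batadv_tvlv_container_register(); the type
 * constant and payload are hypothetical. The one-byte value is announced with
 * every OGM and replaces any previously registered container of the same
 * type/version:
 *
 *	u8 my_flags = 0x01;
 *
 *	batadv_tvlv_container_register(bat_priv, MY_TVLV_TYPE, 1,
 *				       &my_flags, sizeof(my_flags));
 */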

/**
 * batadv_tvlv_realloc_packet_buff - reallocate packet buffer to accommodate
 * requested packet size
 * @packet_buff: packet buffer
 * @packet_buff_len: packet buffer size
 * @min_packet_len: requested packet minimum size
 * @additional_packet_len: requested additional packet size on top of minimum
 * size
 *
 * Returns true if the packet buffer could be changed to the requested size,
 * false otherwise.
 */
static bool batadv_tvlv_realloc_packet_buff(unsigned char **packet_buff,
                                            int *packet_buff_len,
                                            int min_packet_len,
                                            int additional_packet_len)
{
        unsigned char *new_buff;

        new_buff = kmalloc(min_packet_len + additional_packet_len, GFP_ATOMIC);

        /* keep old buffer if kmalloc should fail */
        if (!new_buff)
                return false;

        memcpy(new_buff, *packet_buff, min_packet_len);
        kfree(*packet_buff);
        *packet_buff = new_buff;
        *packet_buff_len = min_packet_len + additional_packet_len;

        return true;
}

/**
 * batadv_tvlv_container_ogm_append - append tvlv container content to given
 * OGM packet buffer
 * @bat_priv: the bat priv with all the soft interface information
 * @packet_buff: ogm packet buffer
 * @packet_buff_len: ogm packet buffer size including ogm header and tvlv
 * content
 * @packet_min_len: ogm header size to be preserved for the OGM itself
 *
 * The ogm packet might be enlarged or shrunk depending on the current size
 * and the size of the to-be-appended tvlv containers.
 *
 * Returns size of all appended tvlv containers in bytes.
 */
u16 batadv_tvlv_container_ogm_append(struct batadv_priv *bat_priv,
                                     unsigned char **packet_buff,
                                     int *packet_buff_len, int packet_min_len)
{
        struct batadv_tvlv_container *tvlv;
        struct batadv_tvlv_hdr *tvlv_hdr;
        u16 tvlv_value_len;
        void *tvlv_value;
        bool ret;

        spin_lock_bh(&bat_priv->tvlv.container_list_lock);
        tvlv_value_len = batadv_tvlv_container_list_size(bat_priv);

        ret = batadv_tvlv_realloc_packet_buff(packet_buff, packet_buff_len,
                                              packet_min_len, tvlv_value_len);

        if (!ret)
                goto end;

        if (!tvlv_value_len)
                goto end;

        tvlv_value = (*packet_buff) + packet_min_len;

        hlist_for_each_entry(tvlv, &bat_priv->tvlv.container_list, list) {
                tvlv_hdr = tvlv_value;
                tvlv_hdr->type = tvlv->tvlv_hdr.type;
                tvlv_hdr->version = tvlv->tvlv_hdr.version;
                tvlv_hdr->len = tvlv->tvlv_hdr.len;
                tvlv_value = tvlv_hdr + 1;
                memcpy(tvlv_value, tvlv + 1, ntohs(tvlv->tvlv_hdr.len));
                tvlv_value = (u8 *)tvlv_value + ntohs(tvlv->tvlv_hdr.len);
        }

end:
        spin_unlock_bh(&bat_priv->tvlv.container_list_lock);
        return tvlv_value_len;
}

/**
 * batadv_tvlv_call_handler - parse the given tvlv buffer to call the
 * appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @tvlv_handler: tvlv callback function handling the tvlv content
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Returns NET_RX_SUCCESS if the handler was not found, or the return value of
 * the handler callback otherwise.
 */
static int batadv_tvlv_call_handler(struct batadv_priv *bat_priv,
                                    struct batadv_tvlv_handler *tvlv_handler,
                                    bool ogm_source,
                                    struct batadv_orig_node *orig_node,
                                    u8 *src, u8 *dst,
                                    void *tvlv_value, u16 tvlv_value_len)
{
        if (!tvlv_handler)
                return NET_RX_SUCCESS;

        if (ogm_source) {
                if (!tvlv_handler->ogm_handler)
                        return NET_RX_SUCCESS;

                if (!orig_node)
                        return NET_RX_SUCCESS;

                tvlv_handler->ogm_handler(bat_priv, orig_node,
                                          BATADV_NO_FLAGS,
                                          tvlv_value, tvlv_value_len);
                tvlv_handler->flags |= BATADV_TVLV_HANDLER_OGM_CALLED;
        } else {
                if (!src)
                        return NET_RX_SUCCESS;

                if (!dst)
                        return NET_RX_SUCCESS;

                if (!tvlv_handler->unicast_handler)
                        return NET_RX_SUCCESS;

                return tvlv_handler->unicast_handler(bat_priv, src,
                                                     dst, tvlv_value,
                                                     tvlv_value_len);
        }

        return NET_RX_SUCCESS;
}

/**
 * batadv_tvlv_containers_process - parse the given tvlv buffer to call the
 * appropriate handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @ogm_source: flag indicating whether the tvlv is an ogm or a unicast packet
 * @orig_node: orig node emitting the ogm packet
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 *
 * Returns NET_RX_SUCCESS when processing an OGM, or the combined return value
 * of all called handler callbacks otherwise.
 */
int batadv_tvlv_containers_process(struct batadv_priv *bat_priv,
                                   bool ogm_source,
                                   struct batadv_orig_node *orig_node,
                                   u8 *src, u8 *dst,
                                   void *tvlv_value, u16 tvlv_value_len)
{
        struct batadv_tvlv_handler *tvlv_handler;
        struct batadv_tvlv_hdr *tvlv_hdr;
        u16 tvlv_value_cont_len;
        u8 cifnotfound = BATADV_TVLV_HANDLER_OGM_CIFNOTFND;
        int ret = NET_RX_SUCCESS;

        while (tvlv_value_len >= sizeof(*tvlv_hdr)) {
                tvlv_hdr = tvlv_value;
                tvlv_value_cont_len = ntohs(tvlv_hdr->len);
                tvlv_value = tvlv_hdr + 1;
                tvlv_value_len -= sizeof(*tvlv_hdr);

                if (tvlv_value_cont_len > tvlv_value_len)
                        break;

                tvlv_handler = batadv_tvlv_handler_get(bat_priv,
                                                       tvlv_hdr->type,
                                                       tvlv_hdr->version);

                ret |= batadv_tvlv_call_handler(bat_priv, tvlv_handler,
                                                ogm_source, orig_node,
                                                src, dst, tvlv_value,
                                                tvlv_value_cont_len);
                if (tvlv_handler)
                        batadv_tvlv_handler_free_ref(tvlv_handler);
                tvlv_value = (u8 *)tvlv_value + tvlv_value_cont_len;
                tvlv_value_len -= tvlv_value_cont_len;
        }

        if (!ogm_source)
                return ret;

        rcu_read_lock();
        hlist_for_each_entry_rcu(tvlv_handler,
                                 &bat_priv->tvlv.handler_list, list) {
                if ((tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CIFNOTFND) &&
                    !(tvlv_handler->flags & BATADV_TVLV_HANDLER_OGM_CALLED))
                        tvlv_handler->ogm_handler(bat_priv, orig_node,
                                                  cifnotfound, NULL, 0);

                tvlv_handler->flags &= ~BATADV_TVLV_HANDLER_OGM_CALLED;
        }
        rcu_read_unlock();

        return NET_RX_SUCCESS;
}

/**
 * batadv_tvlv_ogm_receive - process an incoming ogm and call the appropriate
 * handlers
 * @bat_priv: the bat priv with all the soft interface information
 * @batadv_ogm_packet: ogm packet containing the tvlv containers
 * @orig_node: orig node emitting the ogm packet
 */
void batadv_tvlv_ogm_receive(struct batadv_priv *bat_priv,
                             struct batadv_ogm_packet *batadv_ogm_packet,
                             struct batadv_orig_node *orig_node)
{
        void *tvlv_value;
        u16 tvlv_value_len;

        if (!batadv_ogm_packet)
                return;

        tvlv_value_len = ntohs(batadv_ogm_packet->tvlv_len);
        if (!tvlv_value_len)
                return;

        tvlv_value = batadv_ogm_packet + 1;

        batadv_tvlv_containers_process(bat_priv, true, orig_node, NULL, NULL,
                                       tvlv_value, tvlv_value_len);
}

/**
 * batadv_tvlv_handler_register - register tvlv handler based on the provided
 * type and version (both need to match) for ogm tvlv payload and/or unicast
 * payload
 * @bat_priv: the bat priv with all the soft interface information
 * @optr: ogm tvlv handler callback function. This function receives the orig
 * node, flags and the tvlv content as argument to process.
 * @uptr: unicast tvlv handler callback function. This function receives the
 * source & destination of the unicast packet as well as the tvlv content
 * to process.
 * @type: tvlv handler type to be registered
 * @version: tvlv handler version to be registered
 * @flags: flags to enable or disable TVLV API behavior
 */
void batadv_tvlv_handler_register(struct batadv_priv *bat_priv,
                                  void (*optr)(struct batadv_priv *bat_priv,
                                               struct batadv_orig_node *orig,
                                               u8 flags,
                                               void *tvlv_value,
                                               u16 tvlv_value_len),
                                  int (*uptr)(struct batadv_priv *bat_priv,
                                              u8 *src, u8 *dst,
                                              void *tvlv_value,
                                              u16 tvlv_value_len),
                                  u8 type, u8 version, u8 flags)
{
        struct batadv_tvlv_handler *tvlv_handler;

        spin_lock_bh(&bat_priv->tvlv.handler_list_lock);

        tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
        if (tvlv_handler) {
                spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
                batadv_tvlv_handler_free_ref(tvlv_handler);
                return;
        }

        tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
        if (!tvlv_handler) {
                spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
                return;
        }

        tvlv_handler->ogm_handler = optr;
        tvlv_handler->unicast_handler = uptr;
        tvlv_handler->type = type;
        tvlv_handler->version = version;
        tvlv_handler->flags = flags;
        atomic_set(&tvlv_handler->refcount, 1);
        INIT_HLIST_NODE(&tvlv_handler->list);

        hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
        spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
}
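
/* Illustrative registration sketch for batadv_tvlv_handler_register(); the
 * callbacks and type constant are hypothetical:
 *
 *	static void my_ogm_tvlv_handler(struct batadv_priv *bat_priv,
 *					struct batadv_orig_node *orig,
 *					u8 flags, void *tvlv_value,
 *					u16 tvlv_value_len)
 *	{
 *	}
 *
 *	static int my_unicast_tvlv_handler(struct batadv_priv *bat_priv,
 *					   u8 *src, u8 *dst,
 *					   void *tvlv_value,
 *					   u16 tvlv_value_len)
 *	{
 *		return NET_RX_SUCCESS;
 *	}
 *
 *	batadv_tvlv_handler_register(bat_priv, my_ogm_tvlv_handler,
 *				     my_unicast_tvlv_handler, MY_TVLV_TYPE, 1,
 *				     BATADV_TVLV_HANDLER_OGM_CIFNOTFND);
 */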

/**
 * batadv_tvlv_handler_unregister - unregister tvlv handler based on the
 * provided type and version (both need to match)
 * @bat_priv: the bat priv with all the soft interface information
 * @type: tvlv handler type to be unregistered
 * @version: tvlv handler version to be unregistered
 */
void batadv_tvlv_handler_unregister(struct batadv_priv *bat_priv,
                                    u8 type, u8 version)
{
        struct batadv_tvlv_handler *tvlv_handler;

        tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
        if (!tvlv_handler)
                return;

        batadv_tvlv_handler_free_ref(tvlv_handler);
        spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
        hlist_del_rcu(&tvlv_handler->list);
        spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
        batadv_tvlv_handler_free_ref(tvlv_handler);
}

/**
 * batadv_tvlv_unicast_send - send a unicast packet with tvlv payload to the
 * specified host
 * @bat_priv: the bat priv with all the soft interface information
 * @src: source mac address of the unicast packet
 * @dst: destination mac address of the unicast packet
 * @type: tvlv type
 * @version: tvlv version
 * @tvlv_value: tvlv content
 * @tvlv_value_len: tvlv content length
 */
void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, u8 *src,
                              u8 *dst, u8 type, u8 version,
                              void *tvlv_value, u16 tvlv_value_len)
{
        struct batadv_unicast_tvlv_packet *unicast_tvlv_packet;
        struct batadv_tvlv_hdr *tvlv_hdr;
        struct batadv_orig_node *orig_node;
        struct sk_buff *skb = NULL;
        unsigned char *tvlv_buff;
        unsigned int tvlv_len;
        ssize_t hdr_len = sizeof(*unicast_tvlv_packet);
        bool ret = false;

        orig_node = batadv_orig_hash_find(bat_priv, dst);
        if (!orig_node)
                goto out;

        tvlv_len = sizeof(*tvlv_hdr) + tvlv_value_len;

        skb = netdev_alloc_skb_ip_align(NULL, ETH_HLEN + hdr_len + tvlv_len);
        if (!skb)
                goto out;

        skb->priority = TC_PRIO_CONTROL;
        skb_reserve(skb, ETH_HLEN);
        tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
        unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
        unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
        unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
        unicast_tvlv_packet->ttl = BATADV_TTL;
        unicast_tvlv_packet->reserved = 0;
        unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
        unicast_tvlv_packet->align = 0;
        ether_addr_copy(unicast_tvlv_packet->src, src);
        ether_addr_copy(unicast_tvlv_packet->dst, dst);

        tvlv_buff = (unsigned char *)(unicast_tvlv_packet + 1);
        tvlv_hdr = (struct batadv_tvlv_hdr *)tvlv_buff;
        tvlv_hdr->version = version;
        tvlv_hdr->type = type;
        tvlv_hdr->len = htons(tvlv_value_len);
        tvlv_buff += sizeof(*tvlv_hdr);
        memcpy(tvlv_buff, tvlv_value, tvlv_value_len);

        if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
                ret = true;

out:
        if (skb && !ret)
                kfree_skb(skb);
        if (orig_node)
                batadv_orig_node_free_ref(orig_node);
}
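
/* Illustrative usage sketch for batadv_tvlv_unicast_send(); the type constant
 * and payload are hypothetical, and the addresses are assumed to come from the
 * primary interface and a known originator:
 *
 *	u8 my_data = 0x01;
 *
 *	batadv_tvlv_unicast_send(bat_priv, primary_if->net_dev->dev_addr,
 *				 orig_node->orig, MY_TVLV_TYPE, 1,
 *				 &my_data, sizeof(my_data));
 */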

/**
 * batadv_get_vid - extract the VLAN identifier from skb if any
 * @skb: the buffer containing the packet
 * @header_len: length of the batman header preceding the ethernet header
 *
 * If the packet embedded in the skb is vlan tagged this function returns the
 * VID with the BATADV_VLAN_HAS_TAG flag. Otherwise BATADV_NO_FLAGS is
 * returned.
 */
unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
{
        struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
        struct vlan_ethhdr *vhdr;
        unsigned short vid;

        if (ethhdr->h_proto != htons(ETH_P_8021Q))
                return BATADV_NO_FLAGS;

        if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
                return BATADV_NO_FLAGS;

        vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
        vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
        vid |= BATADV_VLAN_HAS_TAG;

        return vid;
}

/**
 * batadv_vlan_ap_isola_get - return the AP isolation status for the given vlan
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the VLAN identifier for which the AP isolation attribute is to be
 * looked up
 *
 * Returns true if AP isolation is on for the VLAN identified by vid, false
 * otherwise
 */
bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
{
        bool ap_isolation_enabled = false;
        struct batadv_softif_vlan *vlan;

        /* if the AP isolation is requested on a VLAN, then check for its
         * setting in the proper VLAN private data structure
         */
        vlan = batadv_softif_vlan_get(bat_priv, vid);
        if (vlan) {
                ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
                batadv_softif_vlan_free_ref(vlan);
        }

        return ap_isolation_enabled;
}

static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
{
        struct batadv_algo_ops *bat_algo_ops;
        char *algo_name = (char *)val;
        size_t name_len = strlen(algo_name);

        if (name_len > 0 && algo_name[name_len - 1] == '\n')
                algo_name[name_len - 1] = '\0';

        bat_algo_ops = batadv_algo_get(algo_name);
        if (!bat_algo_ops) {
                pr_err("Routing algorithm '%s' is not supported\n", algo_name);
                return -EINVAL;
        }

        return param_set_copystring(algo_name, kp);
}

static const struct kernel_param_ops batadv_param_ops_ra = {
        .set = batadv_param_set_ra,
        .get = param_get_string,
};

static struct kparam_string batadv_param_string_ra = {
        .maxlen = sizeof(batadv_routing_algo),
        .string = batadv_routing_algo,
};

module_param_cb(routing_algo, &batadv_param_ops_ra, &batadv_param_string_ra,
                0644);
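
/* The routing algorithm can be selected at module load time, e.g. (module
 * name assumed to be batman_adv):
 *
 *	modprobe batman_adv routing_algo=BATMAN_IV
 *
 * or later via /sys/module/batman_adv/parameters/routing_algo, which goes
 * through the batadv_param_set_ra() validation above.
 */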
module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_SUPPORTED_DEVICE(BATADV_DRIVER_DEVICE);
MODULE_VERSION(BATADV_SOURCE_VERSION);