// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);

static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
	/* Use last four bytes of hw addr as hash index */
	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
}

static const struct rhashtable_params mesh_rht_params = {
	.nelem_hint = 2,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
	.hashfn = mesh_table_hash,
};

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

static void mesh_path_rht_free(void *ptr, void *tblptr)
{
	struct mesh_path *mpath = ptr;
	struct mesh_table *tbl = tblptr;

	mesh_path_free_rcu(tbl, mpath);
}

static void mesh_table_init(struct mesh_table *tbl)
{
	INIT_HLIST_HEAD(&tbl->known_gates);
	INIT_HLIST_HEAD(&tbl->walk_head);
	atomic_set(&tbl->entries, 0);
	spin_lock_init(&tbl->gates_lock);
	spin_lock_init(&tbl->walk_lock);

	/* rhashtable_init() may fail only in case of wrong
	 * mesh_rht_params
	 */
	WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
}

static void mesh_table_free(struct mesh_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead,
				    mesh_path_rht_free, tbl);
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
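
/*
 * Illustrative sketch (not part of this file): because of the locking rule
 * above, a typical caller holds mpath->state_lock around the assignment,
 * roughly as the HWMP code does when a path is resolved:
 *
 *	spin_lock_bh(&mpath->state_lock);
 *	mesh_path_assign_nexthop(mpath, sta);
 *	mpath->flags |= MESH_PATH_ACTIVE;
 *	spin_unlock_bh(&mpath->state_lock);
 *	mesh_path_tx_pending(mpath);
 */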

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
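
/*
 * Frame layout sketch (an informative addition, not original to this file,
 * following 802.11s addressing): skb_push() above grows the mesh header
 * from its 6-byte fixed part to one carrying the two extended addresses:
 *
 *	before:	| 802.11 hdr | mesh hdr (6 bytes, no AE) | payload |
 *	after:	| 802.11 hdr | mesh hdr + eaddr1 + eaddr2 | payload |
 *
 * eaddr1/eaddr2 preserve the original destination/source (addr3/addr4),
 * while addr1/addr2/addr3 are rewritten so the frame is routed via the
 * gate's next hop.
 */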

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
		    MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}


static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);

	if (mpath && mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
}
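
/*
 * Illustrative sketch (not part of this file): the returned pointer is only
 * valid inside an RCU read-side section, so a caller dereferences it before
 * unlocking:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		sta = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */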

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
}

static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
	int i = 0;
	struct mesh_path *mpath;

	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (i++ == idx)
			break;
	}

	if (!mpath)
		return NULL;

	if (mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
}
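
/*
 * Illustrative sketch (not part of this file): the by-index lookups serve
 * table dumps, which walk indices until the lookup returns NULL, taking a
 * fresh RCU section per entry:
 *
 *	for (i = 0; ; i++) {
 *		rcu_read_lock();
 *		mpath = mesh_path_lookup_by_idx(sdata, i);
 *		if (!mpath) {
 *			rcu_read_unlock();
 *			break;
 *		}
 *		... copy out mpath->dst, mpath->sn, mpath->metric ...
 *		rcu_read_unlock();
 *	}
 */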

/**
 * mesh_path_add_gate - add the given mpath as a mesh gate to our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	int err;

	rcu_read_lock();
	tbl = &mpath->sdata->u.mesh.mesh_paths;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->is_gate) {
		err = -EEXIST;
		spin_unlock_bh(&mpath->state_lock);
		goto err_rcu;
	}
	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;

	spin_lock(&tbl->gates_lock);
	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
	spin_unlock(&tbl->gates_lock);

	spin_unlock_bh(&mpath->state_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	lockdep_assert_held(&mpath->state_lock);
	if (!mpath->is_gate)
		return;

	mpath->is_gate = false;
	spin_lock_bh(&tbl->gates_lock);
	hlist_del_rcu(&mpath->gate_list);
	mpath->sdata->u.mesh.num_gates--;
	spin_unlock_bh(&tbl->gates_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Deleted gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

static
struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
				const u8 *dst, gfp_t gfp_flags)
{
	struct mesh_path *new_mpath;

	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
	if (!new_mpath)
		return NULL;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	timer_setup(&new_mpath->timer, mesh_path_timer, 0);

	return new_mpath;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: the new or already existing mesh path on success, or an
 * ERR_PTR() on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return ERR_PTR(-ENOMEM);

	tbl = &sdata->u.mesh.mesh_paths;
	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
						  &new_mpath->rhash,
						  mesh_rht_params);
	if (!mpath)
		hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (mpath) {
		kfree(new_mpath);

		if (IS_ERR(mpath))
			return mpath;

		new_mpath = mpath;
	}

	sdata->u.mesh.mesh_paths_generation++;
	return new_mpath;
}
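
/*
 * Illustrative sketch (not part of this file): since mesh_path_add()
 * reports failure through ERR_PTR() rather than NULL, callers check the
 * result with IS_ERR()/PTR_ERR():
 *
 *	mpath = mesh_path_add(sdata, dst);
 *	if (IS_ERR(mpath))
 *		return PTR_ERR(mpath);
 *	... queue the frame on mpath->frame_queue and start discovery ...
 */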

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct mesh_table *tbl;
	struct mesh_path *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);

	if (!new_mpath)
		return -ENOMEM;

	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	tbl = &sdata->u.mesh.mpp_paths;

	spin_lock_bh(&tbl->walk_lock);
	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
					    &new_mpath->rhash,
					    mesh_rht_params);
	if (!ret)
		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (ret)
		kfree(new_mpath);

	sdata->u.mesh.mpp_paths_generation++;
	return ret;
}


/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_free_rcu(struct mesh_table *tbl,
			       struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
	mesh_gate_del(tbl, mpath);
	spin_unlock_bh(&mpath->state_lock);
	del_timer_sync(&mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
	mesh_path_flush_pending(mpath);
	kfree_rcu(mpath, rcu);
}

static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	hlist_del_rcu(&mpath->walk_list);
	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
	mesh_path_free_rcu(tbl, mpath);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta)
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (ether_addr_equal(mpath->mpp, proxy))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void table_flush_by_iface(struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal (MPP) paths.
 *
 * @sdata: interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	table_flush_by_iface(&sdata->u.mesh.mesh_paths);
	table_flush_by_iface(&sdata->u.mesh.mpp_paths);
}

/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
static int table_path_del(struct mesh_table *tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_path *mpath;

	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
	if (!mpath) {
		spin_unlock_bh(&tbl->walk_lock);
		return -ENXIO;
	}

	__mesh_path_del(tbl, mpath);
	spin_unlock_bh(&tbl->walk_lock);
	return 0;
}


/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
	sdata->u.mesh.mesh_paths_generation++;
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mesh_path *gate;
	bool copy = false;

	tbl = &sdata->u.mesh.mesh_paths;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		if (gate->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
			mesh_path_move_to_queue(gate, from_mpath, copy);
			from_mpath = gate;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding to %pM (flags %#x)\n",
				  gate->dst, gate->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
		mesh_path_tx_pending(gate);
	}
	rcu_read_unlock();

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
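
/*
 * Illustrative sketch (not part of this file): when path discovery to a
 * destination ultimately fails, a caller can fall back to the known gates
 * and only discard the queued frames if no gate could take them, using the
 * -EHOSTUNREACH return value above:
 *
 *	if (mesh_path_send_to_gates(mpath))
 *		mesh_path_flush_pending(mpath);
 */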

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	ieee80211_free_txskb(&sdata->local->hw, skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so the caller must
 * not already hold it
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
	/* init it at a low value - 0 start is tricky */
	ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
	mesh_path_tx_pending(mpath);
}

void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
	mesh_table_init(&sdata->u.mesh.mesh_paths);
	mesh_table_init(&sdata->u.mesh.mpp_paths);
}

static
void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
			  struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
}

void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
	mesh_table_free(&sdata->u.mesh.mesh_paths);
	mesh_table_free(&sdata->u.mesh.mpp_paths);
}