1 /******************************************************************************
2  *
3  * Copyright(c) 2007 - 2017 Realtek Corporation.
4  *
5  * This program is free software; you can redistribute it and/or modify it
6  * under the terms of version 2 of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful, but WITHOUT
10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12  * more details.
13  *
14  *****************************************************************************/
15 #define _RTW_MESH_PATHTBL_C_
16 
17 #ifdef CONFIG_RTW_MESH
18 #include <drv_types.h>
19 #include <linux/jhash.h>
20 
21 #ifdef PLATFORM_LINUX
22 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
23 static void rtw_mpath_free_rcu(struct rtw_mesh_path *mpath)
24 {
25 	kfree_rcu(mpath, rcu);
26 	rtw_mstat_update(MSTAT_TYPE_PHY, MSTAT_FREE, sizeof(struct rtw_mesh_path));
27 }
28 #else
29 static void rtw_mpath_free_rcu_callback(rtw_rcu_head *head)
30 {
31 	struct rtw_mesh_path *mpath;
32 
33 	mpath = container_of(head, struct rtw_mesh_path, rcu);
34 	rtw_mfree(mpath, sizeof(struct rtw_mesh_path));
35 }
36 
37 static void rtw_mpath_free_rcu(struct rtw_mesh_path *mpath)
38 {
39 	call_rcu(&mpath->rcu, rtw_mpath_free_rcu_callback);
40 }
41 #endif
42 #endif /* PLATFORM_LINUX */
43 
44 static void rtw_mesh_path_free_rcu(struct rtw_mesh_table *tbl, struct rtw_mesh_path *mpath);
45 
46 static u32 rtw_mesh_table_hash(const void *addr, u32 len, u32 seed)
47 {
48 	/* Use last four bytes of hw addr as hash index */
49 	return jhash_1word(*(u32 *)(addr+2), seed);
50 }
51 
52 static const rtw_rhashtable_params rtw_mesh_rht_params = {
53 	.nelem_hint = 2,
54 	.automatic_shrinking = true,
55 	.key_len = ETH_ALEN,
56 	.key_offset = offsetof(struct rtw_mesh_path, dst),
57 	.head_offset = offsetof(struct rtw_mesh_path, rhash),
58 	.hashfn = rtw_mesh_table_hash,
59 };
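
/*
 * Both the mesh path table and the MPP table are initialised with these
 * rhashtable parameters (see rtw_mesh_pathtbl_init() below): entries are
 * keyed by the 6-byte destination address, chained through the rhash
 * member, and hashed by folding the last four bytes of that address
 * through jhash_1word() in rtw_mesh_table_hash() above.
 */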
60 
61 static inline bool rtw_mpath_expired(struct rtw_mesh_path *mpath)
62 {
63 	return (mpath->flags & RTW_MESH_PATH_ACTIVE) &&
64 	       rtw_time_after(rtw_get_current_time(), mpath->exp_time) &&
65 	       !(mpath->flags & RTW_MESH_PATH_FIXED);
66 }
67 
68 static void rtw_mesh_path_rht_free(void *ptr, void *tblptr)
69 {
70 	struct rtw_mesh_path *mpath = ptr;
71 	struct rtw_mesh_table *tbl = tblptr;
72 
73 	rtw_mesh_path_free_rcu(tbl, mpath);
74 }
75 
76 static struct rtw_mesh_table *rtw_mesh_table_alloc(void)
77 {
78 	struct rtw_mesh_table *newtbl;
79 
80 	newtbl = rtw_malloc(sizeof(struct rtw_mesh_table));
81 	if (!newtbl)
82 		return NULL;
83 
84 	rtw_hlist_head_init(&newtbl->known_gates);
85 	ATOMIC_SET(&newtbl->entries,  0);
86 	_rtw_spinlock_init(&newtbl->gates_lock);
87 
88 	return newtbl;
89 }
90 
91 static void rtw_mesh_table_free(struct rtw_mesh_table *tbl)
92 {
93 	rtw_rhashtable_free_and_destroy(&tbl->rhead,
94 				    rtw_mesh_path_rht_free, tbl);
95 	rtw_mfree(tbl, sizeof(struct rtw_mesh_table));
96 }
97 
98 /**
99  *
100  * rtw_mesh_path_assign_nexthop - update mesh path next hop
101  *
102  * @mpath: mesh path to update
103  * @sta: next hop to assign
104  *
105  * Locking: mpath->state_lock must be held when calling this function
106  */
107 void rtw_mesh_path_assign_nexthop(struct rtw_mesh_path *mpath, struct sta_info *sta)
108 {
109 	struct xmit_frame *xframe;
110 	_list *list, *head;
111 
112 	rtw_rcu_assign_pointer(mpath->next_hop, sta);
113 
114 	enter_critical_bh(&mpath->frame_queue.lock);
115 	head = &mpath->frame_queue.queue;
116 	list = get_next(head);
117 	while (rtw_end_of_queue_search(head, list) == _FALSE) {
118 		xframe = LIST_CONTAINOR(list, struct xmit_frame, list);
119 		list = get_next(list);
120 		_rtw_memcpy(xframe->attrib.ra, sta->cmn.mac_addr, ETH_ALEN);
121 	}
122 
123 	exit_critical_bh(&mpath->frame_queue.lock);
124 }
125 
126 static void rtw_prepare_for_gate(struct xmit_frame *xframe, char *dst_addr,
127 			     struct rtw_mesh_path *gate_mpath)
128 {
129 	struct pkt_attrib *attrib = &xframe->attrib;
130 	char *next_hop;
131 
132 	if (attrib->mesh_frame_mode == MESH_UCAST_DATA)
133 		attrib->mesh_frame_mode = MESH_UCAST_PX_DATA;
134 
135 	/* update next hop */
136 	rtw_rcu_read_lock();
137 	next_hop = rtw_rcu_dereference(gate_mpath->next_hop)->cmn.mac_addr;
138 	_rtw_memcpy(attrib->ra, next_hop, ETH_ALEN);
139 	rtw_rcu_read_unlock();
140 	_rtw_memcpy(attrib->mda, dst_addr, ETH_ALEN);
141 }
142 
143 /**
144  *
145  * rtw_mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
146  *
147  * This function is used to transfer or copy frames from an unresolved mpath to
148  * a gate mpath.  The function also adds the Address Extension field and
149  * updates the next hop.
150  *
151  * If a frame already has an Address Extension field, only the next hop and
152  * destination addresses are updated.
153  *
154  * The gate mpath must be an active mpath with a valid mpath->next_hop.
155  *
156  * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
157  * @from_mpath: The failed mpath
158  * @copy: When true, copy all the frames to the new mpath queue.  When false,
159  * move them.
160  */
161 static void rtw_mesh_path_move_to_queue(struct rtw_mesh_path *gate_mpath,
162 				    struct rtw_mesh_path *from_mpath,
163 				    bool copy)
164 {
165 	struct xmit_frame *fskb;
166 	_list *list, *head;
167 	_list failq;
168 	u32 failq_len;
169 	_irqL flags;
170 
171 	if (rtw_warn_on(gate_mpath == from_mpath))
172 		return;
173 	if (rtw_warn_on(!gate_mpath->next_hop))
174 		return;
175 
176 	_rtw_init_listhead(&failq);
177 
178 	_enter_critical_bh(&from_mpath->frame_queue.lock, &flags);
179 	rtw_list_splice_init(&from_mpath->frame_queue.queue, &failq);
180 	failq_len = from_mpath->frame_queue_len;
181 	from_mpath->frame_queue_len = 0;
182 	_exit_critical_bh(&from_mpath->frame_queue.lock, &flags);
183 
184 	head = &failq;
185 	list = get_next(head);
186 	while (rtw_end_of_queue_search(head, list) == _FALSE) {
187 		if (gate_mpath->frame_queue_len >= RTW_MESH_FRAME_QUEUE_LEN) {
188 			RTW_MPATH_DBG(FUNC_ADPT_FMT" mpath queue for gate %pM is full!\n"
189 				, FUNC_ADPT_ARG(gate_mpath->adapter), gate_mpath->dst);
190 			break;
191 		}
192 
193 		fskb = LIST_CONTAINOR(list, struct xmit_frame, list);
194 		list = get_next(list);
195 
196 		rtw_list_delete(&fskb->list);
197 		failq_len--;
198 		rtw_prepare_for_gate(fskb, gate_mpath->dst, gate_mpath);
199 		_enter_critical_bh(&gate_mpath->frame_queue.lock, &flags);
200 		rtw_list_insert_tail(&fskb->list, get_list_head(&gate_mpath->frame_queue));
201 		gate_mpath->frame_queue_len++;
202 		_exit_critical_bh(&gate_mpath->frame_queue.lock, &flags);
203 
204 		#if 0 /* TODO: copy */
205 		skb = rtw_skb_copy(fskb);
206 		if (rtw_warn_on(!skb))
207 			break;
208 
209 		rtw_prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
210 		skb_queue_tail(&gate_mpath->frame_queue, skb);
211 
212 		if (copy)
213 			continue;
214 
215 		__skb_unlink(fskb, &failq);
216 		rtw_skb_free(fskb);
217 		#endif
218 	}
219 
220 	RTW_MPATH_DBG(FUNC_ADPT_FMT" mpath queue for gate %pM has %d frames\n"
221 		, FUNC_ADPT_ARG(gate_mpath->adapter), gate_mpath->dst, gate_mpath->frame_queue_len);
222 
223 	if (!copy)
224 		return;
225 
226 	_enter_critical_bh(&from_mpath->frame_queue.lock, &flags);
227 	rtw_list_splice(&failq, &from_mpath->frame_queue.queue);
228 	from_mpath->frame_queue_len += failq_len;
229 	_exit_critical_bh(&from_mpath->frame_queue.lock, &flags);
230 }
231 
232 
233 static struct rtw_mesh_path *rtw_mpath_lookup(struct rtw_mesh_table *tbl, const u8 *dst)
234 {
235 	struct rtw_mesh_path *mpath;
236 
237 	if (!tbl)
238 		return NULL;
239 
240 	mpath = rtw_rhashtable_lookup_fast(&tbl->rhead, dst, rtw_mesh_rht_params);
241 
242 	if (mpath && rtw_mpath_expired(mpath)) {
243 		enter_critical_bh(&mpath->state_lock);
244 		mpath->flags &= ~RTW_MESH_PATH_ACTIVE;
245 		exit_critical_bh(&mpath->state_lock);
246 	}
247 	return mpath;
248 }
249 
250 /**
251  * rtw_mesh_path_lookup - look up a path in the mesh path table
252  * @sdata: local subif
253  * @dst: hardware address (ETH_ALEN length) of destination
254  *
255  * Returns: pointer to the mesh path structure, or NULL if not found
256  *
257  * Locking: must be called within a read rcu section.
258  */
259 struct rtw_mesh_path *
260 rtw_mesh_path_lookup(_adapter *adapter, const u8 *dst)
261 {
262 	return rtw_mpath_lookup(adapter->mesh_info.mesh_paths, dst);
263 }
264 
265 struct rtw_mesh_path *
266 rtw_mpp_path_lookup(_adapter *adapter, const u8 *dst)
267 {
268 	return rtw_mpath_lookup(adapter->mesh_info.mpp_paths, dst);
269 }
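
/*
 * Usage sketch for the two lookup helpers above (hypothetical caller, not
 * taken from this driver): the returned mpath is RCU-protected, so it must
 * only be dereferenced inside an RCU read-side section, e.g.
 *
 *	rtw_rcu_read_lock();
 *	mpath = rtw_mesh_path_lookup(adapter, dst);
 *	if (mpath && (mpath->flags & RTW_MESH_PATH_ACTIVE))
 *		next_hop = rtw_rcu_dereference(mpath->next_hop);
 *	rtw_rcu_read_unlock();
 */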
270 
271 static struct rtw_mesh_path *
272 __rtw_mesh_path_lookup_by_idx(struct rtw_mesh_table *tbl, int idx)
273 {
274 	int i = 0, ret;
275 	struct rtw_mesh_path *mpath = NULL;
276 	rtw_rhashtable_iter iter;
277 
278 	if (!tbl)
279 		return NULL;
280 
281 	ret = rtw_rhashtable_walk_enter(&tbl->rhead, &iter);
282 	if (ret)
283 		return NULL;
284 
285 	ret = rtw_rhashtable_walk_start(&iter);
286 	if (ret && ret != -EAGAIN)
287 		goto err;
288 
289 	while ((mpath = rtw_rhashtable_walk_next(&iter))) {
290 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
291 			continue;
292 		if (IS_ERR(mpath))
293 			break;
294 		if (i++ == idx)
295 			break;
296 	}
297 err:
298 	rtw_rhashtable_walk_stop(&iter);
299 	rtw_rhashtable_walk_exit(&iter);
300 
301 	if (IS_ERR(mpath) || !mpath)
302 		return NULL;
303 
304 	if (rtw_mpath_expired(mpath)) {
305 		enter_critical_bh(&mpath->state_lock);
306 		mpath->flags &= ~RTW_MESH_PATH_ACTIVE;
307 		exit_critical_bh(&mpath->state_lock);
308 	}
309 	return mpath;
310 }
311 
312 /**
313  * rtw_mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
314  * @idx: index
315  * @adapter: local adapter
316  *
317  * Returns: pointer to the mesh path structure, or NULL if not found.
318  *
319  * Locking: must be called within a read rcu section.
320  */
321 struct rtw_mesh_path *
322 rtw_mesh_path_lookup_by_idx(_adapter *adapter, int idx)
323 {
324 	return __rtw_mesh_path_lookup_by_idx(adapter->mesh_info.mesh_paths, idx);
325 }
326 
327 void dump_mpath(void *sel, _adapter *adapter)
328 {
329 	struct rtw_mesh_path *mpath;
330 	int idx = 0;
331 	char dst[ETH_ALEN];
332 	char next_hop[ETH_ALEN];
333 	u32 sn, metric, qlen;
334 	u32 exp_ms = 0, dto_ms;
335 	u8 drty;
336 	enum rtw_mesh_path_flags flags;
337 
338 	RTW_PRINT_SEL(sel, "%-17s %-17s %-10s %-10s %-4s %-6s %-6s %-4s flags\n"
339 		, "dst", "next_hop", "sn", "metric", "qlen", "exp_ms", "dto_ms", "drty"
340 	);
341 
342 	do {
343 		rtw_rcu_read_lock();
344 
345 		mpath = rtw_mesh_path_lookup_by_idx(adapter, idx);
346 		if (mpath) {
347 			_rtw_memcpy(dst, mpath->dst, ETH_ALEN);
348 			_rtw_memcpy(next_hop, mpath->next_hop->cmn.mac_addr, ETH_ALEN);
349 			sn = mpath->sn;
350 			metric = mpath->metric;
351 			qlen = mpath->frame_queue_len;
352 			if (rtw_time_after(mpath->exp_time, rtw_get_current_time()))
353 				exp_ms = rtw_get_remaining_time_ms(mpath->exp_time);
354 			dto_ms = rtw_systime_to_ms(mpath->discovery_timeout);
355 			drty = mpath->discovery_retries;
356 			flags = mpath->flags;
357 		}
358 
359 		rtw_rcu_read_unlock();
360 
361 		if (mpath) {
362 			RTW_PRINT_SEL(sel, MAC_FMT" "MAC_FMT" %10u %10u %4u %6u %6u %4u%s%s%s%s%s%s%s%s%s%s\n"
363 				, MAC_ARG(dst), MAC_ARG(next_hop), sn, metric, qlen
364 				, exp_ms < 999999 ? exp_ms : 999999
365 				, dto_ms < 999999 ? dto_ms : 999999
366 				, drty
367 				, (flags & RTW_MESH_PATH_ACTIVE) ? " ACT" : ""
368 				, (flags & RTW_MESH_PATH_RESOLVING) ? " RSVING" : ""
369 				, (flags & RTW_MESH_PATH_SN_VALID) ? " SN_VALID" : ""
370 				, (flags & RTW_MESH_PATH_FIXED) ?  " FIXED" : ""
371 				, (flags & RTW_MESH_PATH_RESOLVED) ? " RSVED" : ""
372 				, (flags & RTW_MESH_PATH_REQ_QUEUED) ? " REQ_IN_Q" : ""
373 				, (flags & RTW_MESH_PATH_DELETED) ? " DELETED" : ""
374 				, (flags & RTW_MESH_PATH_ROOT_ADD_CHK) ? " R_ADD_CHK" : ""
375 				, (flags & RTW_MESH_PATH_PEER_AKA) ? " PEER_AKA" : ""
376 				, (flags & RTW_MESH_PATH_BCAST_PREQ) ? " BC_PREQ" : ""
377 			);
378 		}
379 
380 		idx++;
381 	} while (mpath);
382 }
383 
384 /**
385  * rtw_mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
386  * @idx: index
387  * @adapter: local adapter
388  *
389  * Returns: pointer to the proxy path structure, or NULL if not found.
390  *
391  * Locking: must be called within a read rcu section.
392  */
393 struct rtw_mesh_path *
394 rtw_mpp_path_lookup_by_idx(_adapter *adapter, int idx)
395 {
396 	return __rtw_mesh_path_lookup_by_idx(adapter->mesh_info.mpp_paths, idx);
397 }
398 
399 /**
400  * rtw_mesh_path_add_gate - mark the given mpath as a gate and add it to the known gates list
401  * @mpath: gate path to add to table
402  */
403 int rtw_mesh_path_add_gate(struct rtw_mesh_path *mpath)
404 {
405 	struct rtw_mesh_cfg *mcfg;
406 	struct rtw_mesh_info *minfo;
407 	struct rtw_mesh_table *tbl;
408 	int err, ori_num_gates;
409 
410 	rtw_rcu_read_lock();
411 	tbl = mpath->adapter->mesh_info.mesh_paths;
412 	if (!tbl) {
413 		err = -ENOENT;
414 		goto err_rcu;
415 	}
416 
417 	enter_critical_bh(&mpath->state_lock);
418 	mcfg = &mpath->adapter->mesh_cfg;
419 	mpath->gate_timeout = rtw_get_current_time() +
420 			      rtw_ms_to_systime(mcfg->path_gate_timeout_factor *
421 					        mpath->gate_ann_int);
422 	if (mpath->is_gate) {
423 		err = -EEXIST;
424 		exit_critical_bh(&mpath->state_lock);
425 		goto err_rcu;
426 	}
427 
428 	minfo = &mpath->adapter->mesh_info;
429 	mpath->is_gate = true;
430 	_rtw_spinlock(&tbl->gates_lock);
431 	ori_num_gates = minfo->num_gates;
432 	minfo->num_gates++;
433 	rtw_hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
434 
435 	if (ori_num_gates == 0
436 		|| rtw_macaddr_is_larger(mpath->dst, minfo->max_addr_gate->dst)
437 	) {
438 		minfo->max_addr_gate = mpath;
439 		minfo->max_addr_gate_is_larger_than_self =
440 			rtw_macaddr_is_larger(mpath->dst, adapter_mac_addr(mpath->adapter));
441 	}
442 
443 	_rtw_spinunlock(&tbl->gates_lock);
444 
445 	exit_critical_bh(&mpath->state_lock);
446 
447 	if (ori_num_gates == 0) {
448 		update_beacon(mpath->adapter, WLAN_EID_MESH_CONFIG, NULL, _TRUE, 0);
449 		#if CONFIG_RTW_MESH_CTO_MGATE_CARRIER
450 		if (!rtw_mesh_cto_mgate_required(mpath->adapter))
451 			rtw_netif_carrier_on(mpath->adapter->pnetdev);
452 		#endif
453 	}
454 
455 	RTW_MPATH_DBG(
456 		  FUNC_ADPT_FMT" Mesh path: Recorded new gate: %pM. %d known gates\n",
457 		  FUNC_ADPT_ARG(mpath->adapter),
458 		  mpath->dst, mpath->adapter->mesh_info.num_gates);
459 	err = 0;
460 err_rcu:
461 	rtw_rcu_read_unlock();
462 	return err;
463 }
464 
465 /**
466  * rtw_mesh_gate_del - remove a mesh gate from the list of known gates
467  * @tbl: table which holds our list of known gates
468  * @mpath: gate mpath
469  */
470 void rtw_mesh_gate_del(struct rtw_mesh_table *tbl, struct rtw_mesh_path *mpath)
471 {
472 	struct rtw_mesh_cfg *mcfg;
473 	struct rtw_mesh_info *minfo;
474 	int ori_num_gates;
475 
476 	rtw_lockdep_assert_held(&mpath->state_lock);
477 	if (!mpath->is_gate)
478 		return;
479 
480 	mcfg = &mpath->adapter->mesh_cfg;
481 	minfo = &mpath->adapter->mesh_info;
482 
483 	mpath->is_gate = false;
484 	enter_critical_bh(&tbl->gates_lock);
485 	rtw_hlist_del_rcu(&mpath->gate_list);
486 	ori_num_gates = minfo->num_gates;
487 	minfo->num_gates--;
488 
489 	if (ori_num_gates == 1) {
490 		minfo->max_addr_gate = NULL;
491 		minfo->max_addr_gate_is_larger_than_self = 0;
492 	} else if (minfo->max_addr_gate == mpath) {
493 		struct rtw_mesh_path *gate, *max_addr_gate = NULL;
494 		rtw_hlist_node *node;
495 
496 		rtw_hlist_for_each_entry_rcu(gate, node, &tbl->known_gates, gate_list) {
497 			if (!max_addr_gate || rtw_macaddr_is_larger(gate->dst, max_addr_gate->dst))
498 				max_addr_gate = gate;
499 		}
500 		minfo->max_addr_gate = max_addr_gate;
501 		minfo->max_addr_gate_is_larger_than_self =
502 			rtw_macaddr_is_larger(max_addr_gate->dst, adapter_mac_addr(mpath->adapter));
503 	}
504 
505 	exit_critical_bh(&tbl->gates_lock);
506 
507 	if (ori_num_gates == 1) {
508 		update_beacon(mpath->adapter, WLAN_EID_MESH_CONFIG, NULL, _TRUE, 0);
509 		#if CONFIG_RTW_MESH_CTO_MGATE_CARRIER
510 		if (rtw_mesh_cto_mgate_required(mpath->adapter))
511 			rtw_netif_carrier_off(mpath->adapter->pnetdev);
512 		#endif
513 	}
514 
515 	RTW_MPATH_DBG(
516 		  FUNC_ADPT_FMT" Mesh path: Deleted gate: %pM. %d known gates\n",
517 		  FUNC_ADPT_ARG(mpath->adapter),
518 		  mpath->dst, mpath->adapter->mesh_info.num_gates);
519 }
520 
521 /**
522  * rtw_mesh_gate_search - search a mesh gate from the list of known gates
523  * @tbl: table which holds our list of known gates
524  * @addr: address of gate
525  */
526 bool rtw_mesh_gate_search(struct rtw_mesh_table *tbl, const u8 *addr)
527 {
528 	struct rtw_mesh_path *gate;
529 	rtw_hlist_node *node;
530 	bool exist = 0;
531 
532 	rtw_rcu_read_lock();
533 	rtw_hlist_for_each_entry_rcu(gate, node, &tbl->known_gates, gate_list) {
534 		if (_rtw_memcmp(gate->dst, addr, ETH_ALEN) == _TRUE) {
535 			exist = 1;
536 			break;
537 		}
538 	}
539 
540 	rtw_rcu_read_unlock();
541 
542 	return exist;
543 }
544 
545 /**
546  * rtw_mesh_gate_num - number of gates known to this interface
547  * @adapter: local adapter
548  */
549 int rtw_mesh_gate_num(_adapter *adapter)
550 {
551 	return adapter->mesh_info.num_gates;
552 }
553 
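/*
 * Among all known gates, the one with the numerically largest MAC address
 * is tracked in minfo->max_addr_gate (maintained by rtw_mesh_path_add_gate()
 * and rtw_mesh_gate_del() above); an interface acts as the primary gate only
 * while it announces gates and no known gate address is larger than its own.
 */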
554 bool rtw_mesh_is_primary_gate(_adapter *adapter)
555 {
556 	struct rtw_mesh_cfg *mcfg = &adapter->mesh_cfg;
557 	struct rtw_mesh_info *minfo = &adapter->mesh_info;
558 
559 	return mcfg->dot11MeshGateAnnouncementProtocol
560 		&& !minfo->max_addr_gate_is_larger_than_self;
561 }
562 
563 void dump_known_gates(void *sel, _adapter *adapter)
564 {
565 	struct rtw_mesh_info *minfo = &adapter->mesh_info;
566 	struct rtw_mesh_table *tbl;
567 	struct rtw_mesh_path *gate;
568 	rtw_hlist_node *node;
569 
570 	if (!rtw_mesh_gate_num(adapter))
571 		goto exit;
572 
573 	rtw_rcu_read_lock();
574 
575 	tbl = minfo->mesh_paths;
576 	if (!tbl)
577 		goto unlock;
578 
579 	RTW_PRINT_SEL(sel, "num:%d\n", rtw_mesh_gate_num(adapter));
580 
581 	rtw_hlist_for_each_entry_rcu(gate, node, &tbl->known_gates, gate_list) {
582 		RTW_PRINT_SEL(sel, "%c"MAC_FMT"\n"
583 			, gate == minfo->max_addr_gate ? '*' : ' '
584 			, MAC_ARG(gate->dst));
585 	}
586 
587 unlock:
588 	rtw_rcu_read_unlock();
589 exit:
590 	return;
591 }
592 
593 static
594 struct rtw_mesh_path *rtw_mesh_path_new(_adapter *adapter,
595 				const u8 *dst)
596 {
597 	struct rtw_mesh_path *new_mpath;
598 
599 	new_mpath = rtw_zmalloc(sizeof(struct rtw_mesh_path));
600 	if (!new_mpath)
601 		return NULL;
602 
603 	_rtw_memcpy(new_mpath->dst, dst, ETH_ALEN);
604 	_rtw_memset(new_mpath->rann_snd_addr, 0xFF, ETH_ALEN);
605 	new_mpath->is_root = false;
606 	new_mpath->adapter = adapter;
607 	new_mpath->flags = 0;
608 	new_mpath->gate_asked = false;
609 	_rtw_init_queue(&new_mpath->frame_queue);
610 	new_mpath->frame_queue_len = 0;
611 	new_mpath->exp_time = rtw_get_current_time();
612 	_rtw_spinlock_init(&new_mpath->state_lock);
613 	rtw_init_timer(&new_mpath->timer, adapter, rtw_mesh_path_timer, new_mpath);
614 
615 	return new_mpath;
616 }
617 
618 /**
619  * rtw_mesh_path_add - allocate and add a new path to the mesh path table
620  * @dst: destination address of the path (ETH_ALEN length)
621  * @adapter: local adapter
622  *
623  * Returns: the mesh path (new or existing) on success, or an ERR_PTR() on failure
624  *
625  * State: the initial state of the new path is set to 0
626  */
627 struct rtw_mesh_path *rtw_mesh_path_add(_adapter *adapter,
628 				const u8 *dst)
629 {
630 	struct rtw_mesh_table *tbl = adapter->mesh_info.mesh_paths;
631 	struct rtw_mesh_path *mpath, *new_mpath;
632 	int ret;
633 
634 	if (!tbl)
635 		return ERR_PTR(-ENOTSUPP);
636 
637 	if (_rtw_memcmp(dst, adapter_mac_addr(adapter), ETH_ALEN) == _TRUE)
638 		/* never add ourselves as neighbours */
639 		return ERR_PTR(-ENOTSUPP);
640 
641 	if (is_multicast_mac_addr(dst))
642 		return ERR_PTR(-ENOTSUPP);
643 
644 	if (ATOMIC_INC_UNLESS(&adapter->mesh_info.mpaths, RTW_MESH_MAX_MPATHS) == 0)
645 		return ERR_PTR(-ENOSPC);
646 
647 	new_mpath = rtw_mesh_path_new(adapter, dst);
648 	if (!new_mpath)
649 		return ERR_PTR(-ENOMEM);
650 
651 	do {
652 		ret = rtw_rhashtable_lookup_insert_fast(&tbl->rhead,
653 						    &new_mpath->rhash,
654 						    rtw_mesh_rht_params);
655 
656 		if (ret == -EEXIST)
657 			mpath = rtw_rhashtable_lookup_fast(&tbl->rhead,
658 						       dst,
659 						       rtw_mesh_rht_params);
660 
661 	} while (unlikely(ret == -EEXIST && !mpath));
662 
663 	if (ret && ret != -EEXIST)
664 		return ERR_PTR(ret);
665 
666 	/* At this point either new_mpath was added, or we found a
667 	 * matching entry already in the table; in the latter case
668 	 * free the unnecessary new entry.
669 	 */
670 	if (ret == -EEXIST) {
671 		rtw_mfree(new_mpath, sizeof(struct rtw_mesh_path));
672 		new_mpath = mpath;
673 	}
674 	adapter->mesh_info.mesh_paths_generation++;
675 	return new_mpath;
676 }
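
/*
 * Usage sketch for rtw_mesh_path_add() (hypothetical caller): the return
 * value is either a valid mpath or an ERR_PTR()-encoded errno, never NULL,
 * so it should be checked with IS_ERR():
 *
 *	mpath = rtw_mesh_path_lookup(adapter, dst);
 *	if (!mpath) {
 *		mpath = rtw_mesh_path_add(adapter, dst);
 *		if (IS_ERR(mpath))
 *			return PTR_ERR(mpath);
 *	}
 */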
677 
678 int rtw_mpp_path_add(_adapter *adapter,
679 		 const u8 *dst, const u8 *mpp)
680 {
681 	struct rtw_mesh_table *tbl = adapter->mesh_info.mpp_paths;
682 	struct rtw_mesh_path *new_mpath;
683 	int ret;
684 
685 	if (!tbl)
686 		return -ENOTSUPP;
687 
688 	if (_rtw_memcmp(dst, adapter_mac_addr(adapter), ETH_ALEN) == _TRUE)
689 		/* never add ourselves as neighbours */
690 		return -ENOTSUPP;
691 
692 	if (is_multicast_mac_addr(dst))
693 		return -ENOTSUPP;
694 
695 	new_mpath = rtw_mesh_path_new(adapter, dst);
696 
697 	if (!new_mpath)
698 		return -ENOMEM;
699 
700 	_rtw_memcpy(new_mpath->mpp, mpp, ETH_ALEN);
701 	ret = rtw_rhashtable_lookup_insert_fast(&tbl->rhead,
702 					    &new_mpath->rhash,
703 					    rtw_mesh_rht_params);
704 
705 	adapter->mesh_info.mpp_paths_generation++;
706 	return ret;
707 }
708 
709 void dump_mpp(void *sel, _adapter *adapter)
710 {
711 	struct rtw_mesh_path *mpath;
712 	int idx = 0;
713 	char dst[ETH_ALEN];
714 	char mpp[ETH_ALEN];
715 
716 	RTW_PRINT_SEL(sel, "%-17s %-17s\n", "dst", "mpp");
717 
718 	do {
719 		rtw_rcu_read_lock();
720 
721 		mpath = rtw_mpp_path_lookup_by_idx(adapter, idx);
722 		if (mpath) {
723 			_rtw_memcpy(dst, mpath->dst, ETH_ALEN);
724 			_rtw_memcpy(mpp, mpath->mpp, ETH_ALEN);
725 		}
726 
727 		rtw_rcu_read_unlock();
728 
729 		if (mpath) {
730 			RTW_PRINT_SEL(sel, MAC_FMT" "MAC_FMT"\n"
731 				, MAC_ARG(dst), MAC_ARG(mpp));
732 		}
733 
734 		idx++;
735 	} while (mpath);
736 }
737 
738 /**
739  * rtw_mesh_plink_broken - deactivates paths and sends perr when a link breaks
740  *
741  * @sta: broken peer link
742  *
743  * This function must be called from the rate control algorithm if enough
744  * delivery errors suggest that a peer link is no longer usable.
745  */
746 void rtw_mesh_plink_broken(struct sta_info *sta)
747 {
748 	_adapter *adapter = sta->padapter;
749 	struct rtw_mesh_table *tbl = adapter->mesh_info.mesh_paths;
750 	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
751 	struct rtw_mesh_path *mpath;
752 	rtw_rhashtable_iter iter;
753 	int ret;
754 
755 	if (!tbl)
756 		return;
757 
758 	ret = rtw_rhashtable_walk_enter(&tbl->rhead, &iter);
759 	if (ret)
760 		return;
761 
762 	ret = rtw_rhashtable_walk_start(&iter);
763 	if (ret && ret != -EAGAIN)
764 		goto out;
765 
766 	while ((mpath = rtw_rhashtable_walk_next(&iter))) {
767 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
768 			continue;
769 		if (IS_ERR(mpath))
770 			break;
771 		if (rtw_rcu_access_pointer(mpath->next_hop) == sta &&
772 		    mpath->flags & RTW_MESH_PATH_ACTIVE &&
773 		    !(mpath->flags & RTW_MESH_PATH_FIXED)) {
774 			enter_critical_bh(&mpath->state_lock);
775 			mpath->flags &= ~RTW_MESH_PATH_ACTIVE;
776 			++mpath->sn;
777 			exit_critical_bh(&mpath->state_lock);
778 			rtw_mesh_path_error_tx(adapter,
779 				adapter->mesh_cfg.element_ttl,
780 				mpath->dst, mpath->sn,
781 				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
782 		}
783 	}
784 out:
785 	rtw_rhashtable_walk_stop(&iter);
786 	rtw_rhashtable_walk_exit(&iter);
787 }
788 
789 static void rtw_mesh_path_free_rcu(struct rtw_mesh_table *tbl,
790 			       struct rtw_mesh_path *mpath)
791 {
792 	_adapter *adapter = mpath->adapter;
793 
794 	enter_critical_bh(&mpath->state_lock);
795 	mpath->flags |= RTW_MESH_PATH_RESOLVING | RTW_MESH_PATH_DELETED;
796 	rtw_mesh_gate_del(tbl, mpath);
797 	exit_critical_bh(&mpath->state_lock);
798 	_cancel_timer_ex(&mpath->timer);
799 	ATOMIC_DEC(&adapter->mesh_info.mpaths);
800 	ATOMIC_DEC(&tbl->entries);
801 	_rtw_spinlock_free(&mpath->state_lock);
802 
803 	rtw_mesh_path_flush_pending(mpath);
804 
805 	rtw_mpath_free_rcu(mpath);
806 }
807 
808 static void __rtw_mesh_path_del(struct rtw_mesh_table *tbl, struct rtw_mesh_path *mpath)
809 {
810 	rtw_rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, rtw_mesh_rht_params);
811 	rtw_mesh_path_free_rcu(tbl, mpath);
812 }
813 
814 /**
815  * rtw_mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
816  *
817  * @sta: mesh peer to match
818  *
819  * RCU notes: this function is called when a mesh plink transitions from
820  * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
821  * allows path creation. This will happen before the sta can be freed (because
822  * sta_info_destroy() calls this) so any reader in a rcu read block will be
823  * protected against the plink disappearing.
824  */
825 void rtw_mesh_path_flush_by_nexthop(struct sta_info *sta)
826 {
827 	_adapter *adapter = sta->padapter;
828 	struct rtw_mesh_table *tbl = adapter->mesh_info.mesh_paths;
829 	struct rtw_mesh_path *mpath;
830 	rtw_rhashtable_iter iter;
831 	int ret;
832 
833 	if (!tbl)
834 		return;
835 
836 	ret = rtw_rhashtable_walk_enter(&tbl->rhead, &iter);
837 	if (ret)
838 		return;
839 
840 	ret = rtw_rhashtable_walk_start(&iter);
841 	if (ret && ret != -EAGAIN)
842 		goto out;
843 
844 	while ((mpath = rtw_rhashtable_walk_next(&iter))) {
845 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
846 			continue;
847 		if (IS_ERR(mpath))
848 			break;
849 
850 		if (rtw_rcu_access_pointer(mpath->next_hop) == sta)
851 			__rtw_mesh_path_del(tbl, mpath);
852 	}
853 out:
854 	rtw_rhashtable_walk_stop(&iter);
855 	rtw_rhashtable_walk_exit(&iter);
856 }
857 
858 static void rtw_mpp_flush_by_proxy(_adapter *adapter,
859 			       const u8 *proxy)
860 {
861 	struct rtw_mesh_table *tbl = adapter->mesh_info.mpp_paths;
862 	struct rtw_mesh_path *mpath;
863 	rtw_rhashtable_iter iter;
864 	int ret;
865 
866 	if (!tbl)
867 		return;
868 
869 	ret = rtw_rhashtable_walk_enter(&tbl->rhead, &iter);
870 	if (ret)
871 		return;
872 
873 	ret = rtw_rhashtable_walk_start(&iter);
874 	if (ret && ret != -EAGAIN)
875 		goto out;
876 
877 	while ((mpath = rtw_rhashtable_walk_next(&iter))) {
878 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
879 			continue;
880 		if (IS_ERR(mpath))
881 			break;
882 
883 		if (_rtw_memcmp(mpath->mpp, proxy, ETH_ALEN) == _TRUE)
884 			__rtw_mesh_path_del(tbl, mpath);
885 	}
886 out:
887 	rtw_rhashtable_walk_stop(&iter);
888 	rtw_rhashtable_walk_exit(&iter);
889 }
890 
891 static void rtw_table_flush_by_iface(struct rtw_mesh_table *tbl)
892 {
893 	struct rtw_mesh_path *mpath;
894 	rtw_rhashtable_iter iter;
895 	int ret;
896 
897 	if (!tbl)
898 		return;
899 
900 	ret = rtw_rhashtable_walk_enter(&tbl->rhead, &iter);
901 	if (ret)
902 		return;
903 
904 	ret = rtw_rhashtable_walk_start(&iter);
905 	if (ret && ret != -EAGAIN)
906 		goto out;
907 
908 	while ((mpath = rtw_rhashtable_walk_next(&iter))) {
909 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
910 			continue;
911 		if (IS_ERR(mpath))
912 			break;
913 		__rtw_mesh_path_del(tbl, mpath);
914 	}
915 out:
916 	rtw_rhashtable_walk_stop(&iter);
917 	rtw_rhashtable_walk_exit(&iter);
918 }
919 
920 /**
921  * rtw_mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
922  *
923  * This function deletes both mesh paths as well as mesh portal paths.
924  *
925  * @adapter: adapter whose paths will be deleted
926  *
927  */
928 void rtw_mesh_path_flush_by_iface(_adapter *adapter)
929 {
930 	rtw_table_flush_by_iface(adapter->mesh_info.mesh_paths);
931 	rtw_table_flush_by_iface(adapter->mesh_info.mpp_paths);
932 }
933 
934 /**
935  * rtw_table_path_del - delete a path from the mesh or mpp table
936  *
937  * @tbl: mesh or mpp path table
939  * @addr: dst address (ETH_ALEN length)
940  *
941  * Returns: 0 if successful
942  */
943 static int rtw_table_path_del(struct rtw_mesh_table *tbl,
944 			  const u8 *addr)
945 {
946 	struct rtw_mesh_path *mpath;
947 
948 	if (!tbl)
949 		return -ENXIO;
950 
951 	rtw_rcu_read_lock();
952 	mpath = rtw_rhashtable_lookup_fast(&tbl->rhead, addr, rtw_mesh_rht_params);
953 	if (!mpath) {
954 		rtw_rcu_read_unlock();
955 		return -ENXIO;
956 	}
957 
958 	__rtw_mesh_path_del(tbl, mpath);
959 	rtw_rcu_read_unlock();
960 	return 0;
961 }
962 
963 
964 /**
965  * rtw_mesh_path_del - delete a mesh path from the table
966  *
967  * @addr: dst address (ETH_ALEN length)
968  * @adapter: local adapter
969  *
970  * Returns: 0 if successful
971  */
972 int rtw_mesh_path_del(_adapter *adapter, const u8 *addr)
973 {
974 	int err;
975 
976 	/* flush relevant mpp entries first */
977 	rtw_mpp_flush_by_proxy(adapter, addr);
978 
979 	err = rtw_table_path_del(adapter->mesh_info.mesh_paths, addr);
980 	adapter->mesh_info.mesh_paths_generation++;
981 	return err;
982 }
983 
984 /**
985  * rtw_mesh_path_tx_pending - sends pending frames in a mesh path queue
986  *
987  * @mpath: mesh path to activate
988  *
989  * Locking: the state_lock of the mpath structure must NOT be held when calling
990  * this function.
991  */
992 void rtw_mesh_path_tx_pending(struct rtw_mesh_path *mpath)
993 {
994 	if (mpath->flags & RTW_MESH_PATH_ACTIVE) {
995 		struct rtw_mesh_info *minfo = &mpath->adapter->mesh_info;
996 		_list q;
997 		u32 q_len = 0;
998 
999 		_rtw_init_listhead(&q);
1000 
1001 		/* move to local queue */
1002 		enter_critical_bh(&mpath->frame_queue.lock);
1003 		if (mpath->frame_queue_len) {
1004 			rtw_list_splice_init(&mpath->frame_queue.queue, &q);
1005 			q_len = mpath->frame_queue_len;
1006 			mpath->frame_queue_len = 0;
1007 		}
1008 		exit_critical_bh(&mpath->frame_queue.lock);
1009 
1010 		if (q_len) {
1011 			/* move to mpath_tx_queue */
1012 			enter_critical_bh(&minfo->mpath_tx_queue.lock);
1013 			rtw_list_splice_tail(&q, &minfo->mpath_tx_queue.queue);
1014 			minfo->mpath_tx_queue_len += q_len;
1015 			exit_critical_bh(&minfo->mpath_tx_queue.lock);
1016 
1017 			/* schedule mpath_tx_tasklet */
1018 			tasklet_hi_schedule(&minfo->mpath_tx_tasklet);
1019 		}
1020 	}
1021 }
1022 
1023 /**
1024  * rtw_mesh_path_send_to_gates - sends pending frames to all known mesh gates
1025  *
1026  * @mpath: mesh path whose queue will be emptied
1027  *
1028  * If there is only one gate, the frames are transferred from the failed mpath
1029  * queue to that gate's queue.  If there is more than one gate, the frames
1030  * are copied from each gate to the next.  After frames are copied, the
1031  * mpath queues are emptied onto the transmission queue.
1032  */
1033 int rtw_mesh_path_send_to_gates(struct rtw_mesh_path *mpath)
1034 {
1035 	_adapter *adapter = mpath->adapter;
1036 	struct rtw_mesh_table *tbl;
1037 	struct rtw_mesh_path *from_mpath = mpath;
1038 	struct rtw_mesh_path *gate;
1039 	bool copy = false;
1040 	rtw_hlist_node *node;
1041 
1042 	tbl = adapter->mesh_info.mesh_paths;
1043 	if (!tbl)
1044 		return 0;
1045 
1046 	rtw_rcu_read_lock();
1047 	rtw_hlist_for_each_entry_rcu(gate, node, &tbl->known_gates, gate_list) {
1048 		if (gate->flags & RTW_MESH_PATH_ACTIVE) {
1049 			RTW_MPATH_DBG(FUNC_ADPT_FMT" Forwarding to %pM\n",
1050 				FUNC_ADPT_ARG(adapter), gate->dst);
1051 			rtw_mesh_path_move_to_queue(gate, from_mpath, copy);
1052 			from_mpath = gate;
1053 			copy = true;
1054 		} else {
1055 			RTW_MPATH_DBG(
1056 				  FUNC_ADPT_FMT" Not forwarding to %pM (flags %#x)\n",
1057 				  FUNC_ADPT_ARG(adapter), gate->dst, gate->flags);
1058 		}
1059 	}
1060 
1061 	rtw_hlist_for_each_entry_rcu(gate, node, &tbl->known_gates, gate_list) {
1062 		RTW_MPATH_DBG(FUNC_ADPT_FMT" Sending to %pM\n",
1063 			FUNC_ADPT_ARG(adapter), gate->dst);
1064 		rtw_mesh_path_tx_pending(gate);
1065 	}
1066 	rtw_rcu_read_unlock();
1067 
1068 	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
1069 }
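
/*
 * Note on the return value: -EHOSTUNREACH means no known gate was in the
 * RTW_MESH_PATH_ACTIVE state, so no frames were moved off @mpath's queue.
 */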
1070 
1071 /**
1072  * rtw_mesh_path_discard_frame - discard a frame whose path could not be resolved
1073  *
1074  * @skb: frame to discard
1075  * @adapter: adapter the frame was to be sent through
1076  *
1077  * Locking: the function must be called within a rcu_read_lock region
1078  */
1079 void rtw_mesh_path_discard_frame(_adapter *adapter,
1080 			     struct xmit_frame *xframe)
1081 {
1082 	rtw_free_xmitframe(&adapter->xmitpriv, xframe);
1083 	adapter->mesh_info.mshstats.dropped_frames_no_route++;
1084 }
1085 
1086 /**
1087  * rtw_mesh_path_flush_pending - free the pending queue of a mesh path
1088  *
1089  * @mpath: mesh path whose queue has to be freed
1090  *
1091  * Locking: the function must be called within a rcu_read_lock region
1092  */
1093 void rtw_mesh_path_flush_pending(struct rtw_mesh_path *mpath)
1094 {
1095 	struct xmit_frame *xframe;
1096 	_list *list, *head;
1097 	_list tmp;
1098 
1099 	_rtw_init_listhead(&tmp);
1100 
1101 	enter_critical_bh(&mpath->frame_queue.lock);
1102 	rtw_list_splice_init(&mpath->frame_queue.queue, &tmp);
1103 	mpath->frame_queue_len = 0;
1104 	exit_critical_bh(&mpath->frame_queue.lock);
1105 
1106 	head = &tmp;
1107 	list = get_next(head);
1108 	while (rtw_end_of_queue_search(head, list) == _FALSE) {
1109 		xframe = LIST_CONTAINOR(list, struct xmit_frame, list);
1110 		list = get_next(list);
1111 		rtw_list_delete(&xframe->list);
1112 		rtw_mesh_path_discard_frame(mpath->adapter, xframe);
1113 	}
1114 }
1115 
1116 /**
1117  * rtw_mesh_path_fix_nexthop - force a specific next hop for a mesh path
1118  *
1119  * @mpath: the mesh path to modify
1120  * @next_hop: the next hop to force
1121  *
1122  * Locking: this function must be called holding mpath->state_lock
1123  */
1124 void rtw_mesh_path_fix_nexthop(struct rtw_mesh_path *mpath, struct sta_info *next_hop)
1125 {
1126 	enter_critical_bh(&mpath->state_lock);
1127 	rtw_mesh_path_assign_nexthop(mpath, next_hop);
1128 	mpath->sn = 0xffff;
1129 	mpath->metric = 0;
1130 	mpath->hop_count = 0;
1131 	mpath->exp_time = 0;
1132 	mpath->flags = RTW_MESH_PATH_FIXED | RTW_MESH_PATH_SN_VALID;
1133 	rtw_mesh_path_activate(mpath);
1134 	exit_critical_bh(&mpath->state_lock);
1135 	rtw_ewma_err_rate_init(&next_hop->metrics.err_rate);
1136 	/* init it at a low value - 0 start is tricky */
1137 	rtw_ewma_err_rate_add(&next_hop->metrics.err_rate, 1);
1138 	rtw_mesh_path_tx_pending(mpath);
1139 }
1140 
1141 int rtw_mesh_pathtbl_init(_adapter *adapter)
1142 {
1143 	struct rtw_mesh_table *tbl_path, *tbl_mpp;
1144 	int ret;
1145 
1146 	tbl_path = rtw_mesh_table_alloc();
1147 	if (!tbl_path)
1148 		return -ENOMEM;
1149 
1150 	tbl_mpp = rtw_mesh_table_alloc();
1151 	if (!tbl_mpp) {
1152 		ret = -ENOMEM;
1153 		goto free_path;
1154 	}
1155 
1156 	rtw_rhashtable_init(&tbl_path->rhead, &rtw_mesh_rht_params);
1157 	rtw_rhashtable_init(&tbl_mpp->rhead, &rtw_mesh_rht_params);
1158 
1159 	adapter->mesh_info.mesh_paths = tbl_path;
1160 	adapter->mesh_info.mpp_paths = tbl_mpp;
1161 
1162 	return 0;
1163 
1164 free_path:
1165 	rtw_mesh_table_free(tbl_path);
1166 	return ret;
1167 }
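
/*
 * Lifetime sketch (an assumption based on the helpers in this file, not a
 * statement of the wider driver's call flow): rtw_mesh_pathtbl_init() pairs
 * with rtw_mesh_pathtbl_unregister() below, which frees both tables via
 * rtw_mesh_table_free(); that in turn walks the rhashtable and releases
 * every remaining mpath through rtw_mesh_path_rht_free() ->
 * rtw_mesh_path_free_rcu().
 */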
1168 
1169 static
1170 void rtw_mesh_path_tbl_expire(_adapter *adapter,
1171 			  struct rtw_mesh_table *tbl)
1172 {
1173 	struct rtw_mesh_path *mpath;
1174 	rtw_rhashtable_iter iter;
1175 	int ret;
1176 
1177 	if (!tbl)
1178 		return;
1179 
1180 	ret = rtw_rhashtable_walk_enter(&tbl->rhead, &iter);
1181 	if (ret)
1182 		return;
1183 
1184 	ret = rtw_rhashtable_walk_start(&iter);
1185 	if (ret && ret != -EAGAIN)
1186 		goto out;
1187 
1188 	while ((mpath = rtw_rhashtable_walk_next(&iter))) {
1189 		if (IS_ERR(mpath) && PTR_ERR(mpath) == -EAGAIN)
1190 			continue;
1191 		if (IS_ERR(mpath))
1192 			break;
1193 		if ((!(mpath->flags & RTW_MESH_PATH_RESOLVING)) &&
1194 		    (!(mpath->flags & RTW_MESH_PATH_FIXED)) &&
1195 		     rtw_time_after(rtw_get_current_time(), mpath->exp_time + RTW_MESH_PATH_EXPIRE))
1196 			__rtw_mesh_path_del(tbl, mpath);
1197 
1198 		if (mpath->is_gate &&  /* no need to handle the non-gate case */
1199 		    rtw_time_after(rtw_get_current_time(), mpath->gate_timeout)) {
1200 			RTW_MPATH_DBG(FUNC_ADPT_FMT" mpath [%pM] gate timeout expired: gate_timeout systime %lu, current systime %lu\n",
1201 				      FUNC_ADPT_ARG(adapter), mpath->dst,
1202 				      mpath->gate_timeout, rtw_get_current_time());
1203 			enter_critical_bh(&mpath->state_lock);
1204 			if (mpath->gate_asked) { /* asked gate before */
1205 				rtw_mesh_gate_del(tbl, mpath);
1206 				exit_critical_bh(&mpath->state_lock);
1207 			} else {
1208 				mpath->gate_asked = true;
1209 				mpath->gate_timeout = rtw_get_current_time() + rtw_ms_to_systime(mpath->gate_ann_int);
1210 				exit_critical_bh(&mpath->state_lock);
1211 				rtw_mesh_queue_preq(mpath, RTW_PREQ_Q_F_START | RTW_PREQ_Q_F_REFRESH);
1212 				RTW_MPATH_DBG(FUNC_ADPT_FMT" mpath [%pM] querying mesh gate existence (is_root=%d)\n",
1213 				      FUNC_ADPT_ARG(adapter), mpath->dst, mpath->is_root);
1214 			}
1215 		}
1216 	}
1217 
1218 out:
1219 	rtw_rhashtable_walk_stop(&iter);
1220 	rtw_rhashtable_walk_exit(&iter);
1221 }
1222 
1223 void rtw_mesh_path_expire(_adapter *adapter)
1224 {
1225 	rtw_mesh_path_tbl_expire(adapter, adapter->mesh_info.mesh_paths);
1226 	rtw_mesh_path_tbl_expire(adapter, adapter->mesh_info.mpp_paths);
1227 }
1228 
1229 void rtw_mesh_pathtbl_unregister(_adapter *adapter)
1230 {
1231 	if (adapter->mesh_info.mesh_paths) {
1232 		rtw_mesh_table_free(adapter->mesh_info.mesh_paths);
1233 		adapter->mesh_info.mesh_paths = NULL;
1234 	}
1235 
1236 	if (adapter->mesh_info.mpp_paths) {
1237 		rtw_mesh_table_free(adapter->mesh_info.mpp_paths);
1238 		adapter->mesh_info.mpp_paths = NULL;
1239 	}
1240 }
1241 #endif /* CONFIG_RTW_MESH */
1242 
1243