/*
 * Data Transmission thread implementation for XRadio drivers
 *
 * Copyright (c) 2013
 * Xradio Technology Co., Ltd. <www.xradiotech.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <net/mac80211.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>

#include "xradio.h"
#include "bh.h"
#include "hwio.h"
#include "wsm.h"
#include "sbus.h"

#ifdef SUPPORT_FW_DBG_INF
#include "fw_dbg_inf.h"
#endif

/* TODO: Verify these numbers with WSM specification. */
#define DOWNLOAD_BLOCK_SIZE_WR	(0x1000 - 4)
/* an SPI message cannot be bigger than (2^12 - 1) words;
 * "*2" converts words to bytes */
#define MAX_SZ_RD_WR_BUFFERS	(DOWNLOAD_BLOCK_SIZE_WR*2)
#define PIGGYBACK_CTRL_REG	(2)
#define EFFECTIVE_BUF_SIZE	(MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG)

#define DEV_WAKEUP_MAX_TIME  (HZ<<1)   /* = HZ*2 = 2s */
#define DEV_WAKEUP_WAIT_TIME (HZ/50)   /* = 20ms */
#define BH_TX_BURST_NONTXOP  (16)

#if (SDIO_BLOCK_SIZE > 500)
#define SKB_CACHE_LEN   (SDIO_BLOCK_SIZE)
#elif (SDIO_BLOCK_SIZE > 250)
#define SKB_CACHE_LEN   (SDIO_BLOCK_SIZE<<1)
#else
#define SKB_CACHE_LEN    xr_sdio_blksize_align(500)
#endif
#define SKB_RESV_MAX    (1900)

int tx_burst_limit = BH_TX_BURST_NONTXOP;

/* Suspend state privates */
enum xradio_bh_pm_state {
	XRADIO_BH_RESUMED = 0,
	XRADIO_BH_SUSPEND,
	XRADIO_BH_SUSPENDED,
	XRADIO_BH_RESUME,
};
typedef int (*xradio_wsm_handler) (struct xradio_common *hw_priv, u8 *data,
				   size_t size);

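/*
 * Elapsed time since *oldtime, in microseconds, measured against the
 * current wall-clock time.
 */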
static inline u32 bh_time_interval(struct timespec64 *oldtime)
{
	u32 time_int;
	struct timespec64 newtime;
	xr_do_gettimeofday(&newtime);
	time_int = (newtime.tv_sec - oldtime->tv_sec) * 1000000 + \
			   (long)(newtime.tv_nsec - oldtime->tv_nsec) / 1000;
	return time_int;
}

#ifdef MCAST_FWDING
int wsm_release_buffer_to_fw(struct xradio_vif *priv, int count);
#endif
static int xradio_bh(void *arg);
static void xradio_put_skb(struct xradio_common *hw_priv, struct sk_buff *skb);
static struct sk_buff *xradio_get_skb(struct xradio_common *hw_priv, size_t len, u8 *flags);
static inline int xradio_put_resv_skb(struct xradio_common *hw_priv,
									  struct sk_buff *skb, u8 flags);

#ifdef BH_PROC_THREAD
static int xradio_proc(void *arg);
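/*
 * Allocate the first item pool, seed the free list with ITEM_RESERVED
 * items, and start the proc thread that drains the bh_rx/bh_tx lists.
 */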
int bh_proc_init(struct xradio_common *hw_priv)
{
	int ret = 0;
	int i;
	struct bh_items *pool = NULL;
	bh_printk(XRADIO_DBG_MSG, "%s\n", __func__);

	memset(&hw_priv->proc, 0, sizeof(struct bh_proc));
	/* init locks and wait_queue */
	spin_lock_init(&hw_priv->proc.lock);
	init_waitqueue_head(&hw_priv->proc.proc_wq);

	/* init pool and txrx queues */
	atomic_set(&hw_priv->proc.proc_tx, 0);
	atomic_set(&hw_priv->proc.rx_queued, 0);
	atomic_set(&hw_priv->proc.tx_queued, 0);
	INIT_LIST_HEAD(&hw_priv->proc.bh_tx);
	INIT_LIST_HEAD(&hw_priv->proc.bh_rx);
	INIT_LIST_HEAD(&hw_priv->proc.bh_free);
	pool = xr_kzalloc(PROC_POOL_SIZE, false);
	if (!pool)
		return -ENOMEM;
	else
		hw_priv->proc.bh_pool[0] = pool;
	for (i = 0; i < ITEM_RESERVED; ++i)
		list_add_tail(&pool[i].head, &hw_priv->proc.bh_free);

	/* init proc thread. */
	hw_priv->proc.proc_state = 0;
	hw_priv->proc.proc_thread =
		kthread_create(&xradio_proc, hw_priv, XRADIO_PROC_THREAD);
	if (IS_ERR(hw_priv->proc.proc_thread)) {
		ret = PTR_ERR(hw_priv->proc.proc_thread);
		hw_priv->proc.proc_thread = NULL;
	} else {
#ifdef HAS_PUT_TASK_STRUCT
		get_task_struct(hw_priv->proc.proc_thread);
#endif
		wake_up_process(hw_priv->proc.proc_thread);
	}
	return ret;
}

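/*
 * Return queued tx items of one interface (or all, for XRWL_ALL_IFS)
 * to the free list; the underlying frames stay owned by the tx queues,
 * so no skb is freed here.
 */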
int bh_proc_flush_txqueue(struct xradio_common *hw_priv, int if_id)
{
	struct bh_items *item = NULL, *tmp = NULL;
	spin_lock(&hw_priv->proc.lock);
	/* flush proc tx queue, no need to dev_kfree_skb */
	list_for_each_entry_safe(item, tmp, &hw_priv->proc.bh_tx, head) {
		if (item) {
			if (XRWL_ALL_IFS == if_id || item->if_id == if_id) {
				item->data = NULL;
				list_move_tail(&item->head, &hw_priv->proc.bh_free);
				atomic_sub(1, &hw_priv->proc.tx_queued);
			}
		} else {
			bh_printk(XRADIO_DBG_ERROR,
					"%s tx item is NULL!\n", __func__);
		}
	}
	if (XRWL_ALL_IFS == if_id) {
		INIT_LIST_HEAD(&hw_priv->proc.bh_tx);
		atomic_set(&hw_priv->proc.tx_queued, 0);
		atomic_set(&hw_priv->proc.proc_tx, 0);
	}
	spin_unlock(&hw_priv->proc.lock);
	return 0;
}
int bh_proc_flush_queue(struct xradio_common *hw_priv)
{
	struct bh_items *item = NULL;
	spin_lock(&hw_priv->proc.lock);
	/* flush proc rx queue */
	while (!list_empty(&hw_priv->proc.bh_rx)) {
		item = list_first_entry(&hw_priv->proc.bh_rx,
							struct bh_items, head);
		if (item) {
			if (item->data) {
				dev_kfree_skb((struct sk_buff *)item->data);
				item->data = NULL;
			} else {
				bh_printk(XRADIO_DBG_ERROR,
					"%s item->data is NULL!\n", __func__);
			}
			list_move_tail(&item->head, &hw_priv->proc.bh_free);
		} else {
			bh_printk(XRADIO_DBG_ERROR,
					"%s rx item is NULL!\n", __func__);
		}
	}
	INIT_LIST_HEAD(&hw_priv->proc.bh_rx);
	atomic_set(&hw_priv->proc.rx_queued, 0);
	spin_unlock(&hw_priv->proc.lock);

	/* flush proc tx queue, no need to dev_kfree_skb */
	bh_proc_flush_txqueue(hw_priv, XRWL_ALL_IFS);
	return 0;
}

void bh_proc_deinit(struct xradio_common *hw_priv)
{
	struct task_struct *thread = hw_priv->proc.proc_thread;
	int i = 0;
	bh_printk(XRADIO_DBG_MSG, "%s\n", __func__);

	/* deinit proc thread */
	if (thread) {
		hw_priv->proc.proc_thread = NULL;
		kthread_stop(thread);
	#ifdef HAS_PUT_TASK_STRUCT
		put_task_struct(thread);
	#endif
	} else {
		bh_printk(XRADIO_DBG_WARN,
			"%s thread is NULL!\n", __func__);
	}

	/* clear tx/rx queue */
	bh_proc_flush_queue(hw_priv);

	/* clear free queue */
	INIT_LIST_HEAD(&hw_priv->proc.bh_free);

	/* free proc pool */
	for (i = 0; i < PROC_POOL_NUM; i++) {
		if (hw_priv->proc.bh_pool[i]) {
			kfree(hw_priv->proc.bh_pool[i]);
			hw_priv->proc.bh_pool[i] = NULL;
		} else if (i == 0) {
			bh_printk(XRADIO_DBG_WARN,
				"%s bh_pool[0] is NULL!\n", __func__);
		}
	}

	return;
}

int bh_proc_reinit(struct xradio_common *hw_priv)
{
	bh_proc_deinit(hw_priv);
	return bh_proc_init(hw_priv);
}

static struct bh_items *xradio_get_free_item(struct xradio_common *hw_priv)
{
	struct bh_items *item = NULL;
	if (likely(!list_empty(&hw_priv->proc.bh_free))) {
		item = list_first_entry(&hw_priv->proc.bh_free,
			struct bh_items, head);
	} else {
		int i = 0;
		struct bh_items *pool = NULL;
		for (i = 0; i < PROC_POOL_NUM; i++) {
			if (!hw_priv->proc.bh_pool[i]) {
				pool = xr_kzalloc(PROC_POOL_SIZE, false);
				hw_priv->proc.bh_pool[i] = pool;
				break;
			}
		}
		if (pool) {
			bh_printk(XRADIO_DBG_WARN, "%s alloc pool%d!\n",
				__func__, i);
			for (i = 0; i < ITEM_RESERVED; ++i)
				list_add_tail(&pool[i].head, &hw_priv->proc.bh_free);
			item = list_first_entry(&hw_priv->proc.bh_free,
				struct bh_items, head);
		} else {
			bh_printk(XRADIO_DBG_ERROR, "%s Failed alloc pool%d!\n",
				__func__, i);
		}
	}
	return item;
}

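/*
 * Kick the proc thread when new tx work is queued; without BH_PROC_TX
 * the tx path bypasses the proc thread and the BH thread is woken directly.
 */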
void xradio_proc_wakeup(struct xradio_common *hw_priv)
{
	bh_printk(XRADIO_DBG_MSG, "%s\n", __func__);
#if BH_PROC_TX
	if (atomic_add_return(1, &hw_priv->proc.proc_tx) == 1) {
		bh_printk(XRADIO_DBG_NIY, "%s\n", __func__);
		wake_up(&hw_priv->proc.proc_wq);
	}
#else
	xradio_bh_wakeup(hw_priv);
#endif
}

#if PERF_INFO_TEST
struct timespec64 proc_start_time;
#endif

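/*
 * Dynamic priority adjustment (DPA): a ladder of scheduling policies for
 * the proc thread, from index PROC_HIGH_IDX (highest) to PROC_LOW_IDX
 * (lowest); thread_dpa_up()/thread_dpa_down() move one step at a time.
 */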
#if BH_PROC_DPA
#define PROC_HIGH_IDX  0
#define PROC_LOW_IDX  4
const struct thread_dpa g_dpa[] = {
	{SCHED_FIFO,  25},
	{SCHED_FIFO,  50},
	{SCHED_FIFO,  75},
	{SCHED_FIFO,  99},
	{SCHED_NORMAL, 0}
};
int thread_dpa_up(struct task_struct *p, s8 *prio_index)
{
	int ret = 0;
	s8  idx = 0;
	if (unlikely(!p || !prio_index)) {
		bh_printk(XRADIO_DBG_ERROR,
			"%s, task_struct=%p, prio_index=%p\n",
			__func__, p, prio_index);
		return -EINVAL;
	}
	idx = (*prio_index) - 1;
	if (idx > PROC_HIGH_IDX) {
		struct sched_param param = {
			.sched_priority = g_dpa[idx].priority
		};
		bh_printk(XRADIO_DBG_NIY, "%s=%d\n", __func__, idx);
		ret = sched_setscheduler(p, g_dpa[idx].policy, &param);
		if (!ret)
			*prio_index = idx;
		else
			bh_printk(XRADIO_DBG_ERROR,
				"%s, sched_setscheduler failed, idx=%d\n",
				__func__, idx);
		return ret;
	} else {
		bh_printk(XRADIO_DBG_NIY, "%s, prio_index=%d\n",
			__func__, idx + 1);
		return 0;
	}
}
int thread_dpa_down(struct task_struct *p, u8 *prio_index)
{
	int ret = 0;
	s8  idx = 0;
	if (unlikely(!p || !prio_index)) {
		bh_printk(XRADIO_DBG_ERROR,
			"%s, task_struct=%p, prio_index=%p\n",
			__func__, p, prio_index);
		return -EINVAL;
	}
	idx = (*prio_index) + 1;
	if (idx < PROC_LOW_IDX) {
		struct sched_param param = {
			.sched_priority = g_dpa[idx].priority
		};
		bh_printk(XRADIO_DBG_NIY, "%s=%d\n", __func__, idx);
		ret = sched_setscheduler(p, g_dpa[idx].policy, &param);
		if (!ret)
			*prio_index = idx;
		else
			bh_printk(XRADIO_DBG_ERROR,
				"%s, sched_setscheduler failed, idx=%d\n",
				__func__, idx);
		return ret;
	} else {
		bh_printk(XRADIO_DBG_NIY, "%s, prio_index=%d\n",
			__func__, idx - 1);
		return 0;
	}
}
static inline int proc_set_priority(struct xradio_common *hw_priv, u8 idx)
{
	struct sched_param param = {
		.sched_priority = g_dpa[idx].priority
	};
	hw_priv->proc.proc_prio = idx;
	return sched_setscheduler(hw_priv->proc.proc_thread,
			g_dpa[idx].policy, &param);
}
int dpa_proc_tx;
int dpa_proc_rx;
u32 proc_dpa_cnt;
u32 proc_up_cnt;
u32 proc_down_cnt;
static inline int proc_dpa_update(struct xradio_common *hw_priv)
{
	int tx_ret = 0;
	int rx_ret = 0;
	int dpa_old = 0;
	int i = 0;

	if (!hw_priv->proc.proc_thread)
		return -ENOENT;
	++proc_dpa_cnt;
	/* update by rx. */
	dpa_old = dpa_proc_rx;
	dpa_proc_rx = atomic_read(&hw_priv->proc.rx_queued);
	if (dpa_proc_rx >= (ITEM_RESERVED>>2) ||
		dpa_proc_rx >= (dpa_old + 10)) {
		rx_ret = 1;
	} else if ((dpa_proc_rx + 20) < dpa_old ||
		dpa_proc_rx < (ITEM_RESERVED>>5)) {
		rx_ret = -1;
	}

	/* update by tx. */
	dpa_old = dpa_proc_tx;
	for (dpa_proc_tx = 0, i = 0; i < 4; ++i) {
		dpa_proc_tx += hw_priv->tx_queue[i].num_queued -
			hw_priv->tx_queue[i].num_pending;
	}
	if (dpa_proc_tx > (dpa_old + 10) ||
		dpa_proc_tx > XRWL_HOST_VIF0_11N_THROTTLE) {
		tx_ret = 1;
	} else if ((dpa_proc_tx + 10) < dpa_old ||
		dpa_proc_tx < (XRWL_HOST_VIF0_11N_THROTTLE>>2)) {
		tx_ret = -1;
	}

	if (rx_ret > 0 || tx_ret > 0) {
		++proc_up_cnt;
		if (++hw_priv->proc.prio_cnt > 10) {
			hw_priv->proc.prio_cnt = 0;
			return thread_dpa_up(hw_priv->proc.proc_thread,
				&hw_priv->proc.proc_prio);
		}
	} else if (rx_ret < 0 && tx_ret < 0) {
		++proc_down_cnt;
		if (--hw_priv->proc.prio_cnt < -10) {
			hw_priv->proc.prio_cnt = 0;
			return thread_dpa_down(hw_priv->proc.proc_thread,
				&hw_priv->proc.proc_prio);
		}
	}
	return 0;
}
#endif

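/*
 * Proc thread main loop: waits until rx items are queued or tx work is
 * signalled, dispatches received frames to wsm_handle_rx(), pulls frames
 * from the WSM tx queues onto bh_tx for the BH thread, and periodically
 * re-tunes its own priority when BH_PROC_DPA is enabled.
 */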
static int xradio_proc(void *arg)
{
	struct xradio_common *hw_priv = arg;
#if !BH_PROC_DPA
	struct sched_param param = {
		.sched_priority = 99
	};
#endif
	int ret = 0;
	int term = 0;
	int tx = 0;
	int rx = 0;
#if BH_PROC_DPA
	int dpa_num = 0;
#endif
	bh_printk(XRADIO_DBG_MSG, "%s\n", __func__);

#if BH_PROC_DPA
	ret = proc_set_priority(hw_priv, 3);
#else
	ret = sched_setscheduler(hw_priv->proc.proc_thread,
			SCHED_FIFO, &param);
#endif
	if (ret)
		bh_printk(XRADIO_DBG_WARN, "%s sched_setscheduler failed(%d)\n",
			__func__, ret);

	for (;;) {
		PERF_INFO_GETTIME(&proc_start_time);
		ret = wait_event_interruptible(hw_priv->proc.proc_wq, ({
			term = kthread_should_stop();
#if BH_PROC_RX
			rx = atomic_read(&hw_priv->proc.rx_queued);
#else
			rx = 0;
#endif
#if BH_PROC_TX
			tx = atomic_xchg(&hw_priv->proc.proc_tx, 0);
#else
			tx = 0;
#endif
			(term || ((rx || tx) &&
			!hw_priv->bh_error && !hw_priv->proc.proc_state &&
			XRADIO_BH_RESUMED == atomic_read(&hw_priv->bh_suspend))); }));

		/* 0--proc is going to be shut down */
		if (term) {
			bh_printk(XRADIO_DBG_NIY, "%s exit!\n", __func__);
			break;
		} else if (ret < 0) {
			bh_printk(XRADIO_DBG_ERROR, "%s wait_event err=%d!\n",
				__func__, ret);
			continue;  /* continue to wait for exit */
		}
		PERF_INFO_STAMP_UPDATE(&proc_start_time, &proc_wait, 0);

		while (rx || tx) {
			bh_printk(XRADIO_DBG_NIY, "%s rx=%d, tx=%d\n",
				__func__, rx, tx);
#if BH_PROC_RX
			/* 1--handle rx */
			if (rx) {
				size_t rx_len = 0;
				spin_lock(&hw_priv->proc.lock);
				if (likely(!list_empty(&hw_priv->proc.bh_rx))) {
					struct bh_items *rx_item = NULL;
					struct sk_buff *rx_skb   = NULL;
					u8 flags = 0;
					rx_item = list_first_entry(&hw_priv->proc.bh_rx,
						struct bh_items, head);
					if (rx_item) {
						rx_skb = (struct sk_buff *)rx_item->data;
						flags  = rx_item->flags;
						rx_item->data = NULL;
						rx_len = rx_item->datalen;
						list_move_tail(&rx_item->head,
							&hw_priv->proc.bh_free);
					}
					rx = atomic_sub_return(1, &hw_priv->proc.rx_queued);
					spin_unlock(&hw_priv->proc.lock);
					if (rx_skb) {
						/* use the flags saved under the lock; the item
						 * may be reused once it is back on bh_free. */
						ret = wsm_handle_rx(hw_priv, flags, &rx_skb);
						/* Reclaim the SKB buffer */
						if (rx_skb) {
							if (xradio_put_resv_skb(hw_priv, rx_skb, flags))
								xradio_put_skb(hw_priv, rx_skb);
							rx_skb = NULL;
						}
						if (ret) {
							bh_printk(XRADIO_DBG_ERROR,
								"wsm_handle_rx err=%d!\n", ret);
							break;
						}
					} else {
						bh_printk(XRADIO_DBG_ERROR,
							"%s rx_item data is NULL\n", __func__);
					}
					hw_priv->proc.rxed_num++;
				} else {
					rx = 0;
					hw_priv->proc.proc_state = 1;  /* need to restart proc */
					bh_printk(XRADIO_DBG_WARN,
						"rx_queued=%d, but proc.bh_rx is empty!\n",
						atomic_read(&hw_priv->proc.rx_queued));
					spin_unlock(&hw_priv->proc.lock);
				}
				PERF_INFO_STAMP_UPDATE(&proc_start_time, &proc_rx, rx_len);
			}
#endif

#if BH_PROC_TX
			/* 2--handle tx */
			if (tx) {
				u8 *data = NULL;
				size_t tx_len = 0;
				int burst = 0;
				int vif_selected = 0;
				ret = wsm_get_tx(hw_priv, &data, &tx_len,
					&burst, &vif_selected);
				if (ret < 0) {
					bh_printk(XRADIO_DBG_ERROR,
								"wsm_get_tx err=%d!\n", ret);
					tx = 0;
					break;
				} else if (ret) {
					struct bh_items *item  = NULL;
					spin_lock(&hw_priv->proc.lock);
					item = xradio_get_free_item(hw_priv);
					if (likely(item)) {
						SYS_BUG(item->data);
						item->data = data;
						item->datalen = tx_len;
						if (unlikely(item->datalen != tx_len)) {
							bh_printk(XRADIO_DBG_ERROR,
								"%s datalen=%u, tx_len=%zu.\n",
								__func__, item->datalen, tx_len);
						}
						item->if_id = vif_selected;
						item->flags = 0;
						list_move_tail(&item->head, &hw_priv->proc.bh_tx);
						spin_unlock(&hw_priv->proc.lock);
						if (atomic_add_return(1, &hw_priv->proc.tx_queued) == 1 &&
							hw_priv->bh_thread) {
							xradio_bh_wakeup(hw_priv);
						}
						hw_priv->proc.txed_num++;
						bh_printk(XRADIO_DBG_NIY,
							"%s Tx if=%d, datalen=%zu, queued=%d\n",
							__func__, vif_selected, tx_len,
							atomic_read(&hw_priv->proc.tx_queued));
					} else {
						bh_printk(XRADIO_DBG_ERROR,
							"%s pool is empty\n", __func__);
						spin_unlock(&hw_priv->proc.lock);
						hw_priv->proc.proc_state = 1; /* need to restart proc */
						break;
					}
				} else {
					tx = 0;
					bh_printk(XRADIO_DBG_NIY, "wsm_get_tx no data!\n");
				}
				PERF_INFO_STAMP_UPDATE(&proc_start_time, &proc_tx, tx_len);
			}
#endif

#if BH_PROC_DPA
			if (++dpa_num > 20) {
				proc_dpa_update(hw_priv);
				dpa_num = 0;
			}
#endif
		}  /* while */

		if (hw_priv->proc.proc_state) {
			/* proc error occurs, to restart driver. */
			hw_priv->bh_error = 1;
		}

#if 0
		/* for debug */
		if (!atomic_read(&hw_priv->proc.proc_tx)) {
			int num = 0;
			int pending = 0;
			int i = 0;
			for (i = 0; i < 4; ++i) {
				pending += hw_priv->tx_queue[i].num_pending;
				num += hw_priv->tx_queue[i].num_queued -
					hw_priv->tx_queue[i].num_pending;
			}
			if (num && !atomic_read(&hw_priv->proc.proc_tx)) {
				bh_printk(XRADIO_DBG_NIY,
					"%s rx=%d, tx=%d, num=%d, pending=%d, "
					" rx_queued=%d, bufuse=%d\n",
					__func__, rx, tx, num, pending,
					atomic_read(&hw_priv->proc.rx_queued),
					hw_priv->hw_bufs_used);
			}
		}
#endif

	} /* for (;;) */
	return 0;
}

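/*
 * Fetch the next frame for the BH thread to transmit: a pending WSM
 * command always takes precedence over data queued on bh_tx.
 */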
#if BH_PROC_TX
static inline int xradio_bh_get(struct xradio_common *hw_priv, u8 **data,
			size_t *tx_len, int *burst, int *vif_selected)
{
	int ret = 0;
	bh_printk(XRADIO_DBG_TRC, "%s\n", __func__);
	/* check cmd first */
	spin_lock(&hw_priv->wsm_cmd.lock);
	if (hw_priv->wsm_cmd.ptr) {
		*data = hw_priv->wsm_cmd.ptr;
		*tx_len = hw_priv->wsm_cmd.len;
		*burst = atomic_read(&hw_priv->proc.tx_queued) + 1;
		*vif_selected = -1;
		spin_unlock(&hw_priv->wsm_cmd.lock);
		return 1;
	}
	spin_unlock(&hw_priv->wsm_cmd.lock);

	/* check tx data */
	spin_lock(&hw_priv->proc.lock);
	if (!list_empty(&hw_priv->proc.bh_tx) &&
		!atomic_read(&hw_priv->tx_lock) &&
		hw_priv->hw_bufs_used < hw_priv->wsm_caps.numInpChBufs) {
		struct bh_items *item = list_first_entry(
			&hw_priv->proc.bh_tx, struct bh_items, head);
		if (item && item->data) {
			struct xradio_queue_item *queue_item =
				(struct xradio_queue_item *)item->data;
			queue_item->xmit_timestamp = jiffies;
			queue_item->xmit_to_fw = 1;
			*data = queue_item->skb->data;
			*tx_len = item->datalen;
			*vif_selected = item->if_id;
			*burst = atomic_sub_return(1, &hw_priv->proc.tx_queued) + 1;
			item->data = NULL;
			list_move_tail(&item->head, &hw_priv->proc.bh_free);
			ret = 1;
			bh_printk(XRADIO_DBG_NIY, "%s tx_len=%zu, burst=%d!\n",
				__func__, *tx_len, *burst);
		} else {
			bh_printk(XRADIO_DBG_ERROR, "%s item=%p, data=%p!\n",
				__func__, item, item ? item->data : NULL);
			ret = -ENOENT;
		}
	}
	spin_unlock(&hw_priv->proc.lock);
	return ret;
}
#endif

#if PERF_INFO_TEST
struct timespec64 bh_put_time;
#endif

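/*
 * Hand a received frame over to the proc thread. Command confirmations
 * (ITEM_F_CMDCFM) are queued at the head of bh_rx so blocked WSM requests
 * complete as soon as possible; ordinary frames go to the tail.
 */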
static inline int xradio_bh_put(struct xradio_common *hw_priv,
		struct sk_buff **skb_p, u8 flags)
{
	struct bh_items *item = NULL;
	bh_printk(XRADIO_DBG_TRC, "%s\n", __func__);

	PERF_INFO_GETTIME(&bh_put_time);
	spin_lock(&hw_priv->proc.lock);
	if (unlikely(!hw_priv->proc.proc_thread)) {
		spin_unlock(&hw_priv->proc.lock);
		bh_printk(XRADIO_DBG_WARN,
			"%s proc_thread is stopped!\n", __func__);
		dev_kfree_skb(*skb_p);
		*skb_p = NULL;
		return 0;
	}
	item = xradio_get_free_item(hw_priv);
	if (likely(item)) {
		SYS_BUG(item->data);
		item->data = (u8 *)(*skb_p);
		item->datalen = (*skb_p)->len;
		if (unlikely(item->datalen != (*skb_p)->len)) {
			bh_printk(XRADIO_DBG_ERROR,
				"%s datalen=%u, skblen=%u.\n",
				__func__, item->datalen, (*skb_p)->len);
		}
		item->flags = flags;
		if ((flags & ITEM_F_CMDCFM))
			list_move(&item->head, &hw_priv->proc.bh_rx);
		else
			list_move_tail(&item->head, &hw_priv->proc.bh_rx);
		spin_unlock(&hw_priv->proc.lock);
		PERF_INFO_STAMP_UPDATE(&bh_put_time, &get_item, 0);
		if (atomic_add_return(1, &hw_priv->proc.rx_queued) == 1) {
			wake_up(&hw_priv->proc.proc_wq);
		}
		*skb_p = NULL;
		PERF_INFO_STAMP(&bh_put_time, &wake_proc, 0);
	} else {
		bh_printk(XRADIO_DBG_ERROR,
			"%s pool is empty!\n", __func__);
		goto err;
	}
	return 0;

err:
	spin_unlock(&hw_priv->proc.lock);
	return -ENOENT;
}
#endif /* #ifdef BH_PROC_THREAD */

int xradio_register_bh(struct xradio_common *hw_priv)
{
	int err = 0;
	bh_printk(XRADIO_DBG_TRC, "%s\n", __func__);

	SYS_BUG(hw_priv->bh_thread);
	atomic_set(&hw_priv->bh_rx, 0);
	atomic_set(&hw_priv->bh_tx, 0);
	atomic_set(&hw_priv->bh_term, 0);
	atomic_set(&hw_priv->bh_suspend, XRADIO_BH_RESUMED);
	hw_priv->buf_id_tx = 0;
	hw_priv->buf_id_rx = 0;
#ifdef BH_USE_SEMAPHORE
	sema_init(&hw_priv->bh_sem, 0);
	atomic_set(&hw_priv->bh_wk, 0);
#else
	init_waitqueue_head(&hw_priv->bh_wq);
#endif
	init_waitqueue_head(&hw_priv->bh_evt_wq);

	hw_priv->bh_thread = kthread_create(&xradio_bh, hw_priv, XRADIO_BH_THREAD);
	if (IS_ERR(hw_priv->bh_thread)) {
		err = PTR_ERR(hw_priv->bh_thread);
		hw_priv->bh_thread = NULL;
	} else {
#ifdef HAS_PUT_TASK_STRUCT
		get_task_struct(hw_priv->bh_thread);
#endif
		wake_up_process(hw_priv->bh_thread);
	}

	return err;
}

void xradio_unregister_bh(struct xradio_common *hw_priv)
{
	struct task_struct *thread = hw_priv->bh_thread;
	bh_printk(XRADIO_DBG_TRC, "%s\n", __func__);

	if (SYS_WARN(!thread))
		return;

	hw_priv->bh_thread = NULL;
	kthread_stop(thread);
#ifdef HAS_PUT_TASK_STRUCT
	put_task_struct(thread);
#endif
	bh_printk(XRADIO_DBG_NIY, "Unregister success.\n");
}

void xradio_irq_handler(void *priv)
{
	struct xradio_common *hw_priv = (struct xradio_common *)priv;
	bh_printk(XRADIO_DBG_TRC, "%s\n", __func__);
	DBG_INT_ADD(irq_count);
	if (/* SYS_WARN */(hw_priv->bh_error))
		return;
#ifdef BH_USE_SEMAPHORE
	atomic_add(1, &hw_priv->bh_rx);
	if (atomic_add_return(1, &hw_priv->bh_wk) == 1) {
		up(&hw_priv->bh_sem);
	}
#else
	if (atomic_add_return(1, &hw_priv->bh_rx) == 1) {
		wake_up(&hw_priv->bh_wq);
	}
#endif
}

void xradio_bh_wakeup(struct xradio_common *hw_priv)
{
	bh_printk(XRADIO_DBG_MSG, "%s\n", __func__);
	if (hw_priv->bh_error) {
		bh_printk(XRADIO_DBG_ERROR, "%s bh_error=%d\n",
			__func__, hw_priv->bh_error);
		return;
	}
#ifdef BH_USE_SEMAPHORE
	atomic_add(1, &hw_priv->bh_tx);
	if (atomic_add_return(1, &hw_priv->bh_wk) == 1) {
		up(&hw_priv->bh_sem);
	}
#else
	if (atomic_add_return(1, &hw_priv->bh_tx) == 1) {
		wake_up(&hw_priv->bh_wq);
	}
#endif
}

int xradio_bh_suspend(struct xradio_common *hw_priv)
{
#ifdef MCAST_FWDING
	int i = 0;
	struct xradio_vif *priv = NULL;
#endif

	bh_printk(XRADIO_DBG_MSG, "%s\n", __func__);

	if (hw_priv->bh_thread == NULL)
		return 0;

	if (hw_priv->bh_error)
		return -EINVAL;

#ifdef MCAST_FWDING
	xradio_for_each_vif(hw_priv, priv, i) {
		if (!priv)
			continue;
		if ((priv->multicast_filter.enable)
		    && (priv->join_status == XRADIO_JOIN_STATUS_AP)) {
			wsm_release_buffer_to_fw(priv,
						 (hw_priv->wsm_caps.
						  numInpChBufs - 1));
			break;
		}
	}
#endif

	atomic_set(&hw_priv->bh_suspend, XRADIO_BH_SUSPEND);
#ifdef BH_USE_SEMAPHORE
	up(&hw_priv->bh_sem);
#else
	wake_up(&hw_priv->bh_wq);
#endif
	return wait_event_timeout(hw_priv->bh_evt_wq, (hw_priv->bh_error ||
		XRADIO_BH_SUSPENDED == atomic_read(&hw_priv->bh_suspend)),
		    1 * HZ) ? 0 : -ETIMEDOUT;
}

int xradio_bh_resume(struct xradio_common *hw_priv)
{
#ifdef MCAST_FWDING
	int ret;
	int i = 0;
	struct xradio_vif *priv = NULL;
#endif

	bh_printk(XRADIO_DBG_MSG, "%s\n", __func__);
	if (hw_priv->bh_error || atomic_read(&hw_priv->bh_term)) {
		return -EINVAL;
	}

	atomic_set(&hw_priv->bh_suspend, XRADIO_BH_RESUME);
#ifdef BH_USE_SEMAPHORE
	up(&hw_priv->bh_sem);
#else
	wake_up(&hw_priv->bh_wq);
#endif

#ifdef MCAST_FWDING
	ret = wait_event_timeout(hw_priv->bh_evt_wq, (hw_priv->bh_error ||
	     XRADIO_BH_RESUMED == atomic_read(&hw_priv->bh_suspend)), 1 * HZ) ?
	     0 : -ETIMEDOUT;

	xradio_for_each_vif(hw_priv, priv, i) {
		if (!priv)
			continue;
		if ((priv->join_status == XRADIO_JOIN_STATUS_AP) &&
			  (priv->multicast_filter.enable)) {
			u8 count = 0;
			SYS_WARN(wsm_request_buffer_request(priv, &count));
			bh_printk(XRADIO_DBG_NIY, "Reclaim Buff %d \n", count);
			break;
		}
	}

	return ret;
#else
	return wait_event_timeout(hw_priv->bh_evt_wq, hw_priv->bh_error ||
		(XRADIO_BH_RESUMED == atomic_read(&hw_priv->bh_suspend)),
		1 * HZ) ? 0 : -ETIMEDOUT;
#endif
}

static inline void wsm_alloc_tx_buffer(struct xradio_common *hw_priv)
{
	++hw_priv->hw_bufs_used;
}

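/*
 * Give back "count" hardware buffers. Returns 1 when the tx path was
 * throttled (all but one buffer in use) and may be restarted, -1 on
 * underflow, 0 otherwise.
 */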
int wsm_release_tx_buffer(struct xradio_common *hw_priv, int count)
{
	int ret = 0;
	int hw_bufs_used = hw_priv->hw_bufs_used;
	bh_printk(XRADIO_DBG_MSG, "%s\n", __func__);

	hw_priv->hw_bufs_used -= count;
	if (SYS_WARN(hw_priv->hw_bufs_used < 0)) {
		/* Tx data path stops when all but one hw buffers are used.
		   So, re-start tx path in case we find hw_bufs_used equals
		   numInputChBufs - 1.
		 */
		bh_printk(XRADIO_DBG_ERROR, "%s, hw_bufs_used=%d, count=%d.\n",
			  __func__, hw_priv->hw_bufs_used, count);
		ret = -1;
	} else if (hw_bufs_used >= (hw_priv->wsm_caps.numInpChBufs - 1))
		ret = 1;
	if (!hw_priv->hw_bufs_used)
		wake_up(&hw_priv->bh_evt_wq);
	return ret;
}

int wsm_release_vif_tx_buffer(struct xradio_common *hw_priv,
							  int if_id, int count)
{
	int ret = 0;
	bh_printk(XRADIO_DBG_MSG, "%s\n", __func__);

	hw_priv->hw_bufs_used_vif[if_id] -= count;
	if (!hw_priv->hw_bufs_used_vif[if_id])
		wake_up(&hw_priv->bh_evt_wq);

	if (hw_priv->hw_bufs_used_vif[if_id] < 0) {
		bh_printk(XRADIO_DBG_WARN,
			"%s, if=%d, used=%d, count=%d.\n", __func__, if_id,
			hw_priv->hw_bufs_used_vif[if_id], count);
		ret = -1;
	}
	return ret;
}

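/*
 * For multicast forwarding in AP mode: pre-post prepared buffer-request
 * messages so the firmware holds host input buffers across suspend.
 */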
#ifdef MCAST_FWDING
int wsm_release_buffer_to_fw(struct xradio_vif *priv, int count)
{
	int i;
	u8 flags;
	struct wsm_hdr *wsm;
	struct xradio_common *hw_priv = priv->hw_priv;
	struct wsm_buf *buf = &hw_priv->wsm_release_buf;
	size_t buf_len = buf->end - buf->begin;

	bh_printk(XRADIO_DBG_MSG, "%s\n", __func__);

	if (priv->join_status != XRADIO_JOIN_STATUS_AP || buf_len == 0) {
		return 0;
	}
	bh_printk(XRADIO_DBG_NIY, "Rel buffer to FW %d, %d\n",
		  count, hw_priv->hw_bufs_used);

	for (i = 0; i < count; i++) {
		if ((hw_priv->hw_bufs_used + 1) < hw_priv->wsm_caps.numInpChBufs) {
			/* Fill Buffer Request Msg */
			flags = i ? 0 : 0x1;
			buf->data[0] = flags;

			/* Add sequence number */
			wsm = (struct wsm_hdr *)buf->begin;
			wsm->id &= __cpu_to_le32(~WSM_TX_SEQ(WSM_TX_SEQ_MAX));
			wsm->id |= cpu_to_le32(WSM_TX_SEQ(hw_priv->wsm_tx_seq));
			bh_printk(XRADIO_DBG_NIY, "REL %d, len=%d, buflen=%zu\n",
				  hw_priv->wsm_tx_seq, wsm->len, buf_len);

			wsm_alloc_tx_buffer(hw_priv);
			if (SYS_WARN(xradio_data_write(hw_priv, buf->begin, buf_len))) {
				break;
			}
			hw_priv->buf_released = 1;
			hw_priv->wsm_tx_seq = (hw_priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX;
		} else
			break;
	}

	if (i == count) {
		return 0;
	}

	/* Should not be here */
	bh_printk(XRADIO_DBG_ERROR, "Error, Less HW buf %d, %d.\n",
		  hw_priv->hw_bufs_used, hw_priv->wsm_caps.numInpChBufs);
	SYS_WARN(1);
	return -1;
}
#endif

/* Reserve a packet for the case where dev_alloc_skb fails in bh. */
int xradio_init_resv_skb(struct xradio_common *hw_priv)
{
	int len = SKB_RESV_MAX + WSM_TX_EXTRA_HEADROOM + \
			   8 + 12;	/* TKIP IV + ICV and MIC */
	len = xr_sdio_blksize_align(len);
	bh_printk(XRADIO_DBG_TRC, "%s\n", __func__);

	spin_lock_init(&hw_priv->cache_lock);
	hw_priv->skb_reserved = xr_alloc_skb(len);
	if (hw_priv->skb_reserved) {
		hw_priv->skb_resv_len = len;
		skb_reserve(hw_priv->skb_reserved,
			WSM_TX_EXTRA_HEADROOM + 8 /* TKIP IV */
			- WSM_RX_EXTRA_HEADROOM);
	} else {
		bh_printk(XRADIO_DBG_WARN, "%s xr_alloc_skb failed(%d)\n",
			__func__, len);
	}
	return 0;
}

void xradio_deinit_resv_skb(struct xradio_common *hw_priv)
{
	bh_printk(XRADIO_DBG_TRC, "%s\n", __func__);
	if (hw_priv->skb_reserved) {
		dev_kfree_skb(hw_priv->skb_reserved);
		hw_priv->skb_reserved = NULL;
		hw_priv->skb_resv_len = 0;
	}
}

int xradio_realloc_resv_skb(struct xradio_common *hw_priv,
							struct sk_buff *skb, u8 flags)
{
	/* spin_lock(&hw_priv->cache_lock); */
	if (!hw_priv->skb_reserved && hw_priv->skb_resv_len) {
		hw_priv->skb_reserved = xr_alloc_skb(hw_priv->skb_resv_len);
		if (!hw_priv->skb_reserved && (flags & ITEM_F_RESERVE)) {
			hw_priv->skb_reserved = skb;
			skb_reserve(hw_priv->skb_reserved,
				WSM_TX_EXTRA_HEADROOM + 8 /* TKIP IV */
				- WSM_RX_EXTRA_HEADROOM);
			/* spin_unlock(&hw_priv->cache_lock); */
			bh_printk(XRADIO_DBG_WARN, "%s xr_alloc_skb failed(%d)\n",
				__func__, hw_priv->skb_resv_len);
			return -1;
		}
	}
	/* spin_unlock(&hw_priv->cache_lock); */
	return 0; /* realloc skb success, deliver to upper layer. */
}

static inline struct sk_buff *xradio_get_resv_skb(struct xradio_common *hw_priv,
												  size_t len)
{
	struct sk_buff *skb = NULL;
	/* spin_lock(&hw_priv->cache_lock); */
	if (hw_priv->skb_reserved && len <= hw_priv->skb_resv_len) {
		skb = hw_priv->skb_reserved;
		hw_priv->skb_reserved = NULL;
	}
	/* spin_unlock(&hw_priv->cache_lock); */
	return skb;
}

static inline int xradio_put_resv_skb(struct xradio_common *hw_priv,
									  struct sk_buff *skb, u8 flags)
{
	/* spin_lock(&hw_priv->cache_lock); */
	if (!hw_priv->skb_reserved && hw_priv->skb_resv_len &&
	    (flags & ITEM_F_RESERVE)) {
		hw_priv->skb_reserved = skb;
		/* spin_unlock(&hw_priv->cache_lock); */
		return 0;
	}
	/* spin_unlock(&hw_priv->cache_lock); */
	return 1; /* skb not put to reserve */
}

static struct sk_buff *xradio_get_skb(struct xradio_common *hw_priv, size_t len, u8 *flags)
{
	struct sk_buff *skb = NULL;
	size_t alloc_len = (len > SKB_CACHE_LEN) ? len : SKB_CACHE_LEN;
	bh_printk(XRADIO_DBG_TRC, "%s\n", __func__);

	/* TKIP IV + TKIP ICV and MIC - Piggyback. */
	alloc_len += WSM_TX_EXTRA_HEADROOM + 8 + 12 - 2;
	if (len > SKB_CACHE_LEN || !hw_priv->skb_cache) {
		skb = xr_alloc_skb_pf(alloc_len);
		/* In AP mode RXed SKB can be looped back as a broadcast.
		 * Here we reserve enough space for headers. */
		if (skb) {
			skb_reserve(skb, WSM_TX_EXTRA_HEADROOM + 8 /* TKIP IV */
					    - WSM_RX_EXTRA_HEADROOM);
		} else {
			skb = xradio_get_resv_skb(hw_priv, alloc_len);
			if (skb) {
				*flags |= ITEM_F_RESERVE;
				bh_printk(XRADIO_DBG_WARN, "%s get skb_reserved(%zu)!\n",
					__func__, alloc_len);
			} else {
				bh_printk(XRADIO_DBG_ERROR, "%s xr_alloc_skb failed(%zu)!\n",
					__func__, alloc_len);
			}
		}
	} else {
		/* cache hit: the cached skb is at least SKB_CACHE_LEN long */
		/* spin_lock(&hw_priv->cache_lock); */
		skb = hw_priv->skb_cache;
		hw_priv->skb_cache = NULL;
		/* spin_unlock(&hw_priv->cache_lock); */
	}
	return skb;
}

static void xradio_put_skb(struct xradio_common *hw_priv, struct sk_buff *skb)
{
	bh_printk(XRADIO_DBG_TRC, "%s\n", __func__);
	/* spin_lock(&hw_priv->cache_lock); */
	if (hw_priv->skb_cache)
		dev_kfree_skb(skb);
	else {
		hw_priv->skb_cache = skb;
	}
	/* spin_unlock(&hw_priv->cache_lock); */
}

static int xradio_bh_read_ctrl_reg(struct xradio_common *hw_priv,
				   u16 *ctrl_reg)
{
	int ret = 0;

	ret = xradio_reg_read_16(hw_priv, HIF_CONTROL_REG_ID, ctrl_reg);
	if (ret) {
		*ctrl_reg = 0;
		ret = 0;
		bh_printk(XRADIO_DBG_NIY, "read ctrl failed, SDIO DCE occupied!\n");
		ret = xradio_reg_read_16(hw_priv, HIF_CONTROL_REG_ID, ctrl_reg);
		if (ret) {
			hw_priv->bh_error = 1;
			bh_printk(XRADIO_DBG_ERROR, "Failed to read control register.\n");
		}
	}
	return ret;
}

static inline int xradio_device_sleep(struct xradio_common *hw_priv)
{
	int ret;
	ret = xradio_reg_write_32(hw_priv, HIF_CONTROL_REG_ID, 0);
	if (ret) {
		hw_priv->bh_error = 1;
		bh_printk(XRADIO_DBG_ERROR, "%s:control reg failed.\n", __func__);
	}

	return ret;
}

struct timespec64 wakeup_time;
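/*
 * Wake the device: set HIF_CTRL_WUP_BIT, then poll the control register
 * until HIF_CTRL_RDY_BIT is set or pending rx data shows up, for at most
 * DEV_WAKEUP_MAX_TIME. Returns 1 when awake, the pending rx length when
 * data arrived before wakeup, and -1 on timeout.
 */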
static int xradio_device_wakeup(struct xradio_common *hw_priv, u16 *ctrl_reg_ptr)
{
	int ret = 0;
	unsigned long time = 0;

	bh_printk(XRADIO_DBG_MSG, "%s\n", __func__);

	PERF_INFO_GETTIME(&wakeup_time);
	/* To force the device to be always-on, the host sets WLAN_UP to 1 */
	ret = xradio_reg_write_16(hw_priv, HIF_CONTROL_REG_ID, HIF_CTRL_WUP_BIT);
	if (SYS_WARN(ret))
		return ret;

	ret = xradio_bh_read_ctrl_reg(hw_priv, ctrl_reg_ptr);
	if (SYS_WARN(ret))
		return ret;

	/* If the device returns WLAN_RDY as 1, the device is active and will
	 * remain active. */
	time = jiffies + DEV_WAKEUP_MAX_TIME;
	while (!(*ctrl_reg_ptr & (HIF_CTRL_RDY_BIT|HIF_CTRL_NEXT_LEN_MASK)) &&
		   time_before(jiffies, time) && !ret) {
#ifdef BH_USE_SEMAPHORE
		msleep(1);
#else
		wait_event_timeout(hw_priv->bh_wq,
				atomic_read(&hw_priv->bh_rx), DEV_WAKEUP_WAIT_TIME);
#endif
		ret = xradio_bh_read_ctrl_reg(hw_priv, ctrl_reg_ptr);
	}

	PERF_INFO_STAMP(&wakeup_time, &dev_wake, 0);

	if (likely(*ctrl_reg_ptr & HIF_CTRL_RDY_BIT)) {
		bh_printk(XRADIO_DBG_NIY, "Device awake, t=%ldms.\n",
			(jiffies+DEV_WAKEUP_MAX_TIME-time)*1000/HZ);
		return 1;
	} else if (*ctrl_reg_ptr & HIF_CTRL_NEXT_LEN_MASK) { /* device has data to rx. */
		bh_printk(XRADIO_DBG_NIY, "To rx data before wakeup, len=%d.\n",
				(*ctrl_reg_ptr & HIF_CTRL_NEXT_LEN_MASK)<<1);
		return (int)(*ctrl_reg_ptr & HIF_CTRL_NEXT_LEN_MASK);
	} else {
		bh_printk(XRADIO_DBG_ERROR, "Device cannot wakeup in %dms.\n",
				DEV_WAKEUP_MAX_TIME*1000/HZ);
		hw_priv->hw_cant_wakeup = true;
		return -1;
	}
}

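/*
 * Forecast an imminent rx: when all but one hw buffer is in use, tx
 * confirmations are expected shortly, so poll the control register for
 * ~6ms instead of sleeping and risking a missed interrupt.
 */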
#ifdef BH_COMINGRX_FORECAST
static bool xradio_comingrx_update(struct xradio_common *hw_priv)
{
	static bool is_full;
	static unsigned long tmo;
	if (hw_priv->hw_bufs_used >= (hw_priv->wsm_caps.numInpChBufs-1)) {
		if (is_full == false) {
			tmo = jiffies + (HZ/166);	/* 1/166s = 6ms */
		}
		is_full = true;
	} else {
		tmo = jiffies - 1;
		is_full = false;
	}

	if (time_before(jiffies, tmo))
		return true;
	else
		return false;
}
#endif

/* Must be called from BH thread. */
void xradio_enable_powersave(struct xradio_vif *priv, bool enable)
{
	priv->powersave_enabled = enable;
	bh_printk(XRADIO_DBG_NIY, "Powersave is %s.\n",
		  enable ? "enabled" : "disabled");
}

#if PERF_INFO_TEST
struct timespec64 tx_start_time1;
struct timespec64 tx_start_time2;
struct timespec64 rx_start_time1;
struct timespec64 rx_start_time2;
struct timespec64 bh_start_time;
struct timespec64 sdio_reg_time;
extern struct timespec64 last_showtime;
#endif

u32  sdio_reg_cnt1;
u32  sdio_reg_cnt2;
u32  sdio_reg_cnt3;
u32  sdio_reg_cnt4;
u32  sdio_reg_cnt5;
u32  sdio_reg_cnt6;
u32  tx_limit_cnt1;
u32  tx_limit_cnt2;
u32  tx_limit_cnt3;
u32  tx_limit_cnt4;
u32  tx_limit_cnt5;
u32  tx_limit_cnt6;

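/*
 * BH thread main loop. Each iteration: decide whether the device may
 * sleep, wait for an irq/tx/suspend event (or time out and re-check the
 * control register for missed interrupts), then run the rx path (read
 * frames, release tx buffers on confirm) and the tx path (wake device,
 * fetch frames, write them out) until both are drained.
 */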
static int xradio_bh(void *arg)
{
	struct xradio_common *hw_priv = arg;
	struct sched_param param = {
		.sched_priority = 1
	};
	int ret = 0;
	struct sk_buff *skb_rx = NULL;
	size_t read_len = 0;
	int rx = 0, tx = 0, term, suspend;
	struct wsm_hdr *wsm;
	size_t wsm_len;
	int wsm_id;
	u8 wsm_seq;
	int rx_resync = 1;
	u16 ctrl_reg = 0;
	int tx_allowed;
	int pending_tx = 0;
	int tx_burst;
	int tx_bursted = 0;
	int rx_burst = 0;
	long status;
	bool coming_rx = false;
#if 0
	u32 dummy;
#endif
	int reg_read = 1;
	int vif_selected;

	bh_printk(XRADIO_DBG_MSG, "%s\n", __func__);
	ret = sched_setscheduler(hw_priv->bh_thread, SCHED_FIFO, &param);
	if (ret)
		bh_printk(XRADIO_DBG_WARN, "%s sched_setscheduler failed(%d)\n",
			__func__, ret);

	PERF_INFO_GETTIME(&last_showtime);
	for (;;) {
		PERF_INFO_GETTIME(&bh_start_time);
		/* Check if the device can sleep, and set time to wait for interrupt. */
		if (!hw_priv->hw_bufs_used && !pending_tx &&
		    hw_priv->powersave_enabled && !hw_priv->device_can_sleep &&
		    !atomic_read(&hw_priv->recent_scan) &&
		    atomic_read(&hw_priv->bh_rx) == 0 &&
		    atomic_read(&hw_priv->bh_tx) == 0) {
			bh_printk(XRADIO_DBG_MSG, "Device idle, can sleep.\n");
			SYS_WARN(xradio_device_sleep(hw_priv));
			hw_priv->device_can_sleep = true;
			status = (HZ>>3);	/* 1/8s = 125ms */
		} else if (hw_priv->hw_bufs_used >=
			(hw_priv->wsm_caps.numInpChBufs - 1)) {
			/* don't wait too long if some frames need confirming
			 * and we may have missed an interrupt. */
			status = (HZ>>4);	/* 1/16s = 62ms */
		} else {
			status = (HZ>>3);	/* 1/8s = 125ms */
		}

#if 0
		/* Dummy Read for SDIO retry mechanism */
		if (atomic_read(&hw_priv->bh_rx) == 0 &&
		    atomic_read(&hw_priv->bh_tx) == 0) {
			xradio_reg_read(hw_priv, HIF_CONFIG_REG_ID, &dummy, sizeof(dummy));
		}
#endif

#if 0
		/* If a packet has already been txed to the device then read the
		 * control register for a probable interrupt miss before going
		 * further to wait for interrupt; if the read length is non-zero
		 * then it means there is some data to be received */
		if (hw_priv->hw_bufs_used) {
			PERF_INFO_GETTIME(&sdio_reg_time);
			atomic_xchg(&hw_priv->bh_rx, 0);
			xradio_bh_read_ctrl_reg(hw_priv, &ctrl_reg);
			++reg_read;
			++sdio_reg_cnt1;
			PERF_INFO_STAMP(&sdio_reg_time, &sdio_reg, 4);
			if (ctrl_reg & HIF_CTRL_NEXT_LEN_MASK) {
				DBG_INT_ADD(fix_miss_cnt);
				rx = 1;
				goto data_proc;
			} else {
				++sdio_reg_cnt5;
			}
		}
#endif

#ifdef BH_COMINGRX_FORECAST
		coming_rx = xradio_comingrx_update(hw_priv);

		if (coming_rx) {
			PERF_INFO_GETTIME(&sdio_reg_time);
			atomic_xchg(&hw_priv->bh_rx, 0);
			xradio_bh_read_ctrl_reg(hw_priv, &ctrl_reg);
			++reg_read;
			++sdio_reg_cnt1;
			PERF_INFO_STAMP(&sdio_reg_time, &sdio_reg, 4);
			if (ctrl_reg & HIF_CTRL_NEXT_LEN_MASK) {
				DBG_INT_ADD(fix_miss_cnt);
				rx = 1;
				goto data_proc;
			} else {
				++sdio_reg_cnt5;
			}
		}
#endif

		PERF_INFO_GETTIME(&sdio_reg_time);
		/* Wait for Events in HZ/8 */
#ifdef BH_USE_SEMAPHORE
		rx = atomic_xchg(&hw_priv->bh_rx, 0);
		tx = atomic_xchg(&hw_priv->bh_tx, 0);
		suspend = pending_tx ? 0 : atomic_read(&hw_priv->bh_suspend);
		term = kthread_should_stop();
		if (!(rx || tx || coming_rx || term || suspend || hw_priv->bh_error)) {
			atomic_set(&hw_priv->bh_wk, 0);
			status = (long)(down_timeout(&hw_priv->bh_sem, status) != -ETIME);
			rx = atomic_xchg(&hw_priv->bh_rx, 0);
			tx = atomic_xchg(&hw_priv->bh_tx, 0);
			suspend = pending_tx ? 0 : atomic_read(&hw_priv->bh_suspend);
			term = kthread_should_stop();
		}
#else
		status = wait_event_interruptible_timeout(hw_priv->bh_wq, ({
			 rx = atomic_xchg(&hw_priv->bh_rx, 0);
			 tx = atomic_xchg(&hw_priv->bh_tx, 0);
			 term = kthread_should_stop();
			 suspend = pending_tx ? 0 : atomic_read(&hw_priv->bh_suspend);
			 (rx || tx || coming_rx || term || suspend || hw_priv->bh_error); }),
			 status);
#endif
		PERF_INFO_STAMP(&sdio_reg_time, &bh_wait, 0);

		/* 0--bh is going to be shut down */
		if (term) {
			bh_printk(XRADIO_DBG_MSG, "xradio_bh exit!\n");
			break;
		}
		/* 1--A fatal error occurs */
		if (status < 0 || hw_priv->bh_error) {
			bh_printk(XRADIO_DBG_ERROR, "bh_error=%d, status=%ld\n",
				  hw_priv->bh_error, status);
			hw_priv->bh_error = __LINE__;
			break;
		}

		/* 2--Wait for interrupt timed out */
		if (!status) {
			DBG_INT_ADD(bh_idle);
			/* Check for a missed interrupt. */
			PERF_INFO_GETTIME(&sdio_reg_time);
			xradio_bh_read_ctrl_reg(hw_priv, &ctrl_reg);
			PERF_INFO_STAMP(&sdio_reg_time, &sdio_reg, 4);
			++reg_read;
			++sdio_reg_cnt2;
			if (ctrl_reg & HIF_CTRL_NEXT_LEN_MASK) {
				bh_printk(XRADIO_DBG_WARN, "miss interrupt!\n");
				DBG_INT_ADD(int_miss_cnt);
				rx = 1;
				goto data_proc;
			} else {
				++sdio_reg_cnt5;
			}

			/* There are some frames to be confirmed. */
			if (hw_priv->hw_bufs_used) {
				long timeout = 0;
				bool pending = 0;
				bh_printk(XRADIO_DBG_NIY, "Need confirm:%d!\n",
					  hw_priv->hw_bufs_used);
				/* Check if frame transmission is timed out. */
				pending = xradio_query_txpkt_timeout(hw_priv, XRWL_ALL_IFS,
					       hw_priv->pending_frame_id, &timeout);
				/* Some frame confirmations have timed out. */
				if (pending && timeout < 0) {
					bh_printk(XRADIO_DBG_ERROR,
						  "query_txpkt_timeout:%ld!\n", timeout);
					hw_priv->bh_error = __LINE__;
					break;
				}
				rx = 1;	/* Go to check rx again. */
			} else if (!pending_tx) {
				if (hw_priv->powersave_enabled &&
					!hw_priv->device_can_sleep &&
					!atomic_read(&hw_priv->recent_scan)) {
					/* Device is idle, we can go to sleep. */
					bh_printk(XRADIO_DBG_MSG,
						  "Device idle(timeout), can sleep.\n");
					SYS_WARN(xradio_device_sleep(hw_priv));
					hw_priv->device_can_sleep = true;
				}
				PERF_INFO_STAMP(&bh_start_time, &bh_others, 0);
				continue;
			}
		/* 3--Host suspend request. */
		} else if (suspend) {
			bh_printk(XRADIO_DBG_NIY, "Host suspend request.\n");
			/* Check powersave setting again. */
			if (hw_priv->powersave_enabled) {
				bh_printk(XRADIO_DBG_MSG,
					 "Device idle(host suspend), can sleep.\n");
				SYS_WARN(xradio_device_sleep(hw_priv));
				hw_priv->device_can_sleep = true;
			}

			/* bh thread goes to suspend. */
			atomic_set(&hw_priv->bh_suspend, XRADIO_BH_SUSPENDED);
			wake_up(&hw_priv->bh_evt_wq);
#ifdef BH_USE_SEMAPHORE
			do {
				status = down_timeout(&hw_priv->bh_sem, HZ/10);
				term = kthread_should_stop();
			} while (XRADIO_BH_RESUME != atomic_read(&hw_priv->bh_suspend) &&
				     !term && !hw_priv->bh_error);
			if (XRADIO_BH_RESUME != atomic_read(&hw_priv->bh_suspend))
				status = -1;
			else
				status = 0;
#else
			status = wait_event_interruptible(hw_priv->bh_wq, ({
				term = kthread_should_stop();
				(XRADIO_BH_RESUME == atomic_read(&hw_priv->bh_suspend) ||
				term || hw_priv->bh_error); }));
#endif
			if (hw_priv->bh_error) {
				bh_printk(XRADIO_DBG_ERROR, "bh error during bh suspend.\n");
				break;
			} else if (term) {
				bh_printk(XRADIO_DBG_WARN, "bh exit during bh suspend.\n");
				break;
			} else if (status < 0) {
				bh_printk(XRADIO_DBG_ERROR,
					  "Failed to wait for resume: %ld.\n", status);
				hw_priv->bh_error = __LINE__;
				break;
			}
			bh_printk(XRADIO_DBG_NIY, "Host resume.\n");
			atomic_set(&hw_priv->bh_suspend, XRADIO_BH_RESUMED);
			wake_up(&hw_priv->bh_evt_wq);
			atomic_add(1, &hw_priv->bh_rx);
			continue;
		}
		/* query stuck frames in firmware. */
		if (atomic_xchg(&hw_priv->query_cnt, 0)) {
			if (schedule_work(&hw_priv->query_work) <= 0)
				atomic_add(1, &hw_priv->query_cnt);
		}

#if 0
		/* If a packet has already been txed to the device then read the
		 * control register for a probable interrupt miss before going
		 * further to wait for interrupt; if the read length is non-zero
		 * then it means there is some data to be received */
		if ((hw_priv->wsm_caps.numInpChBufs -
			hw_priv->hw_bufs_used) <= 1 && !reg_read) {
			PERF_INFO_GETTIME(&sdio_reg_time);
			atomic_xchg(&hw_priv->bh_rx, 0);
			xradio_bh_read_ctrl_reg(hw_priv, &ctrl_reg);
			++sdio_reg_cnt1;
			PERF_INFO_STAMP(&sdio_reg_time, &sdio_reg, 4);
			if (ctrl_reg & HIF_CTRL_NEXT_LEN_MASK) {
				DBG_INT_ADD(fix_miss_cnt);
				rx = 1;
				goto data_proc;
			} else {
				++sdio_reg_cnt5;
			}
		}
#endif

		/* 4--Rx & Tx process. */
data_proc:
		term = kthread_should_stop();
		if (hw_priv->bh_error || term)
			break;
		/* pre-txrx */
		tx_bursted = 0;

		rx += atomic_xchg(&hw_priv->bh_rx, 0);
		if (rx) {
			size_t alloc_len;
			u8 *data;
			u8 flags;

			/* Check ctrl_reg again. */
			if (!(ctrl_reg & HIF_CTRL_NEXT_LEN_MASK)) {
				PERF_INFO_GETTIME(&sdio_reg_time);
				if (SYS_WARN(xradio_bh_read_ctrl_reg(hw_priv, &ctrl_reg))) {
					hw_priv->bh_error = __LINE__;
					break;
				}
				++reg_read;
				++sdio_reg_cnt3;
				PERF_INFO_STAMP(&sdio_reg_time, &sdio_reg, 4);
			}
			PERF_INFO_STAMP(&bh_start_time, &bh_others, 0);

			/* read_len = ctrl_reg * 2. */
			read_len = (ctrl_reg & HIF_CTRL_NEXT_LEN_MASK)<<1;
			if (!read_len) {
				++sdio_reg_cnt6;
				rx = 0;
				goto tx;
			}

rx:
			reg_read = 0;
			flags = 0;
			PERF_INFO_GETTIME(&rx_start_time1);
			if (SYS_WARN((read_len < sizeof(struct wsm_hdr)) ||
				     (read_len > EFFECTIVE_BUF_SIZE))) {
				bh_printk(XRADIO_DBG_ERROR, "Invalid read len: %zu", read_len);
				hw_priv->bh_error = __LINE__;
				break;
			}
#if BH_PROC_RX
			if (unlikely(atomic_read(&hw_priv->proc.rx_queued) >=
				((ITEM_RESERVED*PROC_POOL_NUM) - XRWL_MAX_QUEUE_SZ - 1))) {
				bh_printk(XRADIO_DBG_WARN,
					"Too many rx packets, proc cannot handle in time!\n");
				msleep(10);
				goto tx; /* too many rx packets to be handled, do tx first */
			}
#endif

			/* Add SIZE of PIGGYBACK reg (CONTROL Reg)
			 * to the NEXT Message length + 2 Bytes for SKB */
			read_len = read_len + 2;
			alloc_len = hw_priv->sbus_ops->align_size(hw_priv->sbus_priv,
				      read_len);
			/* Check if not exceeding XRADIO capabilities */
			if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) {
				bh_printk(XRADIO_DBG_ERROR,
					"Read aligned len: %zu\n", alloc_len);
			} else {
				bh_printk(XRADIO_DBG_MSG,
					"Rx len=%zu, aligned len=%zu\n",
					read_len, alloc_len);
			}

			/* Get skb buffer. */
			skb_rx = xradio_get_skb(hw_priv, alloc_len, &flags);
			if (SYS_WARN(!skb_rx)) {
				bh_printk(XRADIO_DBG_ERROR, "xradio_get_skb failed.\n");
				hw_priv->bh_error = __LINE__;
				break;
			}
			skb_trim(skb_rx, 0);
			skb_put(skb_rx, read_len);
			data = skb_rx->data;
			if (SYS_WARN(!data)) {
				bh_printk(XRADIO_DBG_ERROR, "skb data is NULL.\n");
				hw_priv->bh_error = __LINE__;
				break;
			}
			PERF_INFO_STAMP(&rx_start_time1, &prepare_rx, alloc_len);

			/* Read data from device. */
			PERF_INFO_GETTIME(&rx_start_time2);
			if (SYS_WARN(xradio_data_read(hw_priv, data, alloc_len))) {
				hw_priv->bh_error = __LINE__;
				break;
			}
			DBG_INT_ADD(rx_total_cnt);

			PERF_INFO_STAMP_UPDATE(&rx_start_time2, &sdio_read, alloc_len);

			/* Piggyback */
			ctrl_reg = __le16_to_cpu(((__le16 *)data)[(alloc_len >> 1) - 1]);

			/* check wsm length. */
			wsm = (struct wsm_hdr *)data;
			wsm_len = __le32_to_cpu(wsm->len);
			if (SYS_WARN(wsm_len > read_len)) {
				bh_printk(XRADIO_DBG_ERROR, "wsm_id=0x%04x, wsm_len=%zu.\n",
						(__le32_to_cpu(wsm->id) & 0xFFF), wsm_len);
				hw_priv->bh_error = __LINE__;
				break;
			}

			/* dump rx data. */
#if defined(CONFIG_XRADIO_DEBUG)
			if (unlikely(hw_priv->wsm_enable_wsm_dumps)) {
				u16 msgid, ifid;
				u16 *p = (u16 *) data;
				msgid = (*(p + 1)) & WSM_MSG_ID_MASK;
				ifid = (*(p + 1)) >> 6;
				ifid &= 0xF;
				bh_printk(XRADIO_DBG_ALWY,
					  "[DUMP] msgid 0x%.4X ifid %d len %d\n",
					  msgid, ifid, *p);
				print_hex_dump_bytes("<-- ", DUMP_PREFIX_NONE, data,
				   min(wsm_len, (size_t)hw_priv->wsm_dump_max_size));
			}
#endif /* CONFIG_XRADIO_DEBUG */

			/* extract wsm id and seq. */
			wsm_id = __le32_to_cpu(wsm->id) & 0xFFF;
			wsm_seq = (__le32_to_cpu(wsm->id) >> 13) & 7;
			/* for a multi-rx indication there are two cases. */
			if (ROUND4(wsm_len) < read_len - 2)
				skb_trim(skb_rx, read_len - 2);
			else
				skb_trim(skb_rx, wsm_len);

			/* process exceptions. */
			if (unlikely(wsm_id == 0x0800)) {
				bh_printk(XRADIO_DBG_ERROR, "firmware exception!\n");
				wsm_handle_exception(hw_priv, &data[sizeof(*wsm)],
						     wsm_len - sizeof(*wsm));
				hw_priv->bh_error = __LINE__;
				break;
			} else if (likely(!rx_resync)) {
				if (SYS_WARN(wsm_seq != hw_priv->wsm_rx_seq)) {
					bh_printk(XRADIO_DBG_ERROR, "wsm_seq=%d.\n", wsm_seq);
					hw_priv->bh_error = __LINE__;
					break;
				}
			}
			hw_priv->wsm_rx_seq = (wsm_seq + 1) & 7;
			rx_resync = 0;
#if (DGB_XRADIO_HWT)
			rx_resync = 1;	/* 0 -> 1, HWT test, should not check this. */
#endif
1683 
1684 			/* Process tx frames confirm. */
1685 			if (wsm_id & 0x0400) {
1686 				int rc = 0;
1687 				int if_id = 0;
1688 				u32 *cfm = (u32 *)(wsm + 1);
1689 				wsm_id &= ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX);
1690 				if (wsm_id == 0x041E) {
1691 					int cfm_cnt = *cfm;
1692 					struct wsm_tx_confirm *tx_cfm =
1693 						(struct wsm_tx_confirm *)(cfm + 1);
1694 					bh_printk(XRADIO_DBG_NIY, "multi-cfm %d.\n", cfm_cnt);
1695 
1696 					rc = wsm_release_tx_buffer(hw_priv, cfm_cnt);
1697 					do {
1698 						if_id = xradio_queue_get_if_id(tx_cfm->packetID);
1699 						wsm_release_vif_tx_buffer(hw_priv, if_id, 1);
1700 						tx_cfm = (struct wsm_tx_confirm *)((u8 *)tx_cfm +
1701 							offsetof(struct wsm_tx_confirm, link_id));
1702 					} while (--cfm_cnt);
1703 				} else {
1704 					rc = wsm_release_tx_buffer(hw_priv, 1);
1705 					if (wsm_id == 0x0404) {
1706 						if_id = xradio_queue_get_if_id(*cfm);
1707 						wsm_release_vif_tx_buffer(hw_priv, if_id, 1);
1708 					} else {
1709 #if BH_PROC_RX
1710 						flags |= ITEM_F_CMDCFM;
1711 #endif
1712 					}
1713 					bh_printk(XRADIO_DBG_NIY, "cfm id=0x%04x.\n", wsm_id);
1714 				}
1715 				if (SYS_WARN(rc < 0)) {
1716 					bh_printk(XRADIO_DBG_ERROR, "tx buffer < 0.\n");
1717 					hw_priv->bh_error = __LINE__;
1718 					break;
1719 				} else if (rc > 0) {
1720 					tx = 1;
1721 					xradio_proc_wakeup(hw_priv);
1722 				}
1723 			}

			/* WSM frame processing. */
#if BH_PROC_RX
			if (SYS_WARN(xradio_bh_put(hw_priv, &skb_rx, flags))) {
				bh_printk(XRADIO_DBG_ERROR, "xradio_bh_put failed.\n");
				hw_priv->bh_error = __LINE__;
				break;
			}
#else
			if (SYS_WARN(wsm_handle_rx(hw_priv, flags, &skb_rx))) {
				bh_printk(XRADIO_DBG_ERROR, "wsm_handle_rx failed.\n");
				hw_priv->bh_error = __LINE__;
				break;
			}
			/* Reclaim the SKB buffer */
			if (skb_rx) {
				if (xradio_put_resv_skb(hw_priv, skb_rx, flags))
					xradio_put_skb(hw_priv, skb_rx);
				skb_rx = NULL;
			}
#endif
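			/* With BH_PROC_RX the frame is queued to the separate
			 * proc thread via xradio_bh_put() and the skb is
			 * recycled there; otherwise it is handled inline and
			 * the skb is returned to the reserve/cache pool here. */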
			PERF_INFO_STAMP(&rx_start_time2, &handle_rx, wsm_len);
			PERF_INFO_STAMP(&rx_start_time1, &data_rx, wsm_len);

			/* Check if rx burst */
			read_len = (ctrl_reg & HIF_CTRL_NEXT_LEN_MASK)<<1;
			if (!read_len) {
				rx = 0;
				rx_burst = 0;
				goto tx;
			} else if (rx_burst) {
				xradio_debug_rx_burst(hw_priv);
				--rx_burst;
				goto rx;
			}
		} else {
			PERF_INFO_STAMP(&bh_start_time, &bh_others, 0);
		}

tx:
		SYS_BUG(hw_priv->hw_bufs_used > hw_priv->wsm_caps.numInpChBufs);
		tx += pending_tx + atomic_xchg(&hw_priv->bh_tx, 0);
#if BH_PROC_TX
		tx += atomic_read(&hw_priv->proc.tx_queued);
#endif
		pending_tx = 0;
		tx_burst = hw_priv->wsm_caps.numInpChBufs - hw_priv->hw_bufs_used;
		tx_allowed = tx_burst > 0;
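		/* Flow control: the firmware advertises numInpChBufs input
		 * buffers; hw_bufs_used tracks how many are outstanding, so
		 * tx is allowed only while at least one buffer is free. */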
		if (tx && tx_allowed) {
			int ret;
			u8 *data;
			size_t tx_len;
#if 0
			int  num = 0, i;
#endif

			PERF_INFO_GETTIME(&tx_start_time1);
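			/* Wakeup handshake below: a negative return is fatal,
			 * 1 means the device is awake, a value above 1 also
			 * carries the next rx length in the control-register
			 * bits, and 0 means keep tx pending and wait for the
			 * "awake" interrupt. */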
			/* Wake up the devices */
			if (hw_priv->device_can_sleep) {
				ret = xradio_device_wakeup(hw_priv, &ctrl_reg);
				if (SYS_WARN(ret < 0)) {
					hw_priv->bh_error = __LINE__;
					break;
				} else if (ret == 1) {
					hw_priv->device_can_sleep = false;
				} else if (ret > 1) {
					rx = 1;
					ctrl_reg = (ret & HIF_CTRL_NEXT_LEN_MASK);
					goto data_proc;
				} else {	/* Wait for "awake" interrupt */
					pending_tx = tx;
					continue;
				}
			}
			/* Increase Tx buffer */
			wsm_alloc_tx_buffer(hw_priv);

#if (DGB_XRADIO_HWT)
			/* hardware test. */
			ret = get_hwt_hif_tx(hw_priv, &data, &tx_len,
					     &tx_burst, &vif_selected);
			if (ret <= 0)
#endif /*DGB_XRADIO_HWT*/

#if BH_PROC_TX
				/* Get data to send and send it. */
				ret = xradio_bh_get(hw_priv, &data, &tx_len, &tx_burst,
						 &vif_selected);
#else
				/* Get data to send and send it. */
				ret = wsm_get_tx(hw_priv, &data, &tx_len, &tx_burst,
						 &vif_selected);
#endif
			if (ret <= 0) {
				if (hw_priv->hw_bufs_used >= hw_priv->wsm_caps.numInpChBufs)
					++tx_limit_cnt3;
#if BH_PROC_TX
				if (list_empty(&hw_priv->proc.bh_tx))
					++tx_limit_cnt4;
#endif
				wsm_release_tx_buffer(hw_priv, 1);
				if (SYS_WARN(ret < 0)) {
					bh_printk(XRADIO_DBG_ERROR, "get tx packet=%d.\n", ret);
					hw_priv->bh_error = __LINE__;
					break;
				}
				tx = 0;
				DBG_INT_ADD(tx_limit);
				PERF_INFO_STAMP(&tx_start_time1, &prepare_tx, 0);
			} else {
				wsm = (struct wsm_hdr *)data;
				SYS_BUG(tx_len < sizeof(*wsm));
				if (SYS_BUG(__le32_to_cpu(wsm->len) != tx_len)) {
					bh_printk(XRADIO_DBG_ERROR, "%s wsmlen=%u, tx_len=%zu.\n",
						__func__, __le32_to_cpu(wsm->len), tx_len);
				}

				/* Continue to send next data if there is any. */
				atomic_add(1, &hw_priv->bh_tx);

				if (tx_len <= 8)
					tx_len = 16;
				/* Align tx length and check it. */
				/* HACK!!! Platform limitation.
				 * It is also supported by upper layer:
				 * there is always enough space at the end of the buffer. */
				tx_len = hw_priv->sbus_ops->align_size(hw_priv->sbus_priv,
								       tx_len);
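				/* align_size() pads tx_len up to the bus
				 * alignment (e.g. the SDIO block size); the
				 * HACK note above is why reading past the
				 * logical frame end is safe. */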
				/* Check if not exceeding XRADIO capabilities */
				if (tx_len > EFFECTIVE_BUF_SIZE) {
					bh_printk(XRADIO_DBG_WARN,
						  "Write aligned len: %zu\n", tx_len);
				} else {
					bh_printk(XRADIO_DBG_MSG,
						"Tx len=%u, aligned len=%zu\n",
						__le32_to_cpu(wsm->len), tx_len);
				}

				/* Make sequence number. */
				wsm->id &= __cpu_to_le32(~WSM_TX_SEQ(WSM_TX_SEQ_MAX));
				wsm->id |= cpu_to_le32(WSM_TX_SEQ(hw_priv->wsm_tx_seq));
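				/* The 3-bit tx sequence number lives in the
				 * WSM_TX_SEQ bits of the header id; it advances
				 * by one per transmitted frame and wraps at
				 * WSM_TX_SEQ_MAX after the write below. */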

				if ((wsm->id & WSM_MSG_ID_MASK) != 0x0004)
					hw_priv->wsm_cmd.seq = cpu_to_le32(WSM_TX_SEQ(hw_priv->wsm_tx_seq));

				PERF_INFO_STAMP(&tx_start_time1, &prepare_tx, tx_len);
				PERF_INFO_GETTIME(&tx_start_time2);
				/* Send the data to the device. */
				if (SYS_WARN(xradio_data_write(hw_priv, data, tx_len))) {
					wsm_release_tx_buffer(hw_priv, 1);
					bh_printk(XRADIO_DBG_ERROR, "xradio_data_write failed\n");
					hw_priv->bh_error = __LINE__;
					break;
				}
				DBG_INT_ADD(tx_total_cnt);
				PERF_INFO_STAMP(&tx_start_time2, &sdio_write, tx_len);
#if defined(CONFIG_XRADIO_DEBUG)
				if (unlikely(hw_priv->wsm_enable_wsm_dumps)) {
					u16 msgid, ifid;
					u16 *p = (u16 *) data;
					msgid = (*(p + 1)) & 0x3F;
					ifid = (*(p + 1)) >> 6;
					ifid &= 0xF;
					if (msgid == 0x0006) {
						bh_printk(XRADIO_DBG_ALWY,
							  "[DUMP] >>> msgid 0x%.4X ifid %d " \
							  "len %d MIB 0x%.4X\n",
							  msgid, ifid, *p, *(p + 2));
					} else {
						bh_printk(XRADIO_DBG_ALWY,
							  "[DUMP] >>> msgid 0x%.4X ifid %d " \
							  "len %d\n", msgid, ifid, *p);
					}
					print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE, data,
							     min(__le32_to_cpu(wsm->len),
							     hw_priv->wsm_dump_max_size));
				}
#endif /* CONFIG_XRADIO_DEBUG */

				/* Process after data has been sent. */
				if (vif_selected != -1) {
					hw_priv->hw_bufs_used_vif[vif_selected]++;
				}
				wsm_txed(hw_priv, data);
				hw_priv->wsm_tx_seq = (hw_priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX;

				PERF_INFO_STAMP(&tx_start_time1, &data_tx, wsm->len);

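				/* Burst strategy: while more than one device
				 * buffer is free and the per-pass limit
				 * (tx_burst_limit) has not been hit, loop
				 * straight back to tx; each extra tx also
				 * grants an rx burst credit so piggybacked rx
				 * lengths get drained promptly. */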
				/* Check for burst. */
#if !BH_PROC_TX
				/* If not proc tx, just rely on the burst limit. */
				tx_burst = 2;
#endif
				if (tx_burst > 1 && tx_bursted < tx_burst_limit &&
					(hw_priv->wsm_caps.numInpChBufs -
					hw_priv->hw_bufs_used) > 1) {
					xradio_debug_tx_burst(hw_priv);
					if (rx_burst < tx_burst_limit)
						++rx_burst;
					++tx_bursted;
					goto tx;
				} else {
					if (tx_bursted >= tx_burst_limit)
						++tx_limit_cnt5;
					if (tx_burst <= 1)
						++tx_limit_cnt6;
				}
			}
		} else {
			/* No tx, or tx not allowed: keep it pending. */
			pending_tx = tx;
			if (!tx)
				++tx_limit_cnt1;
			if (!tx_allowed)
				++tx_limit_cnt2;
		}

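		/* End-of-loop checks: first drain any rx signalled by the
		 * piggybacked control register; if tx work remains but the
		 * device buffers look full, re-read the control register
		 * directly to catch a missed interrupt (fix_miss_cnt). */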
		PERF_INFO_GETTIME(&bh_start_time);
		/* Check if there are frames to be received. */
		if (ctrl_reg & HIF_CTRL_NEXT_LEN_MASK) {
			DBG_INT_ADD(next_rx_cnt);
			rx = 1;
			goto data_proc;
		}
		/* If no rx, check tx again. */
		if (tx + atomic_xchg(&hw_priv->bh_tx, 0)) {
			if (hw_priv->hw_bufs_used < (hw_priv->wsm_caps.numInpChBufs - 1)) {
				tx = 1;
				goto data_proc;
			} else { /* If no tx buffers, check the rx register. */
				PERF_INFO_GETTIME(&sdio_reg_time);
				atomic_xchg(&hw_priv->bh_rx, 0);
				xradio_bh_read_ctrl_reg(hw_priv, &ctrl_reg);
				++reg_read;
				++sdio_reg_cnt1;
				PERF_INFO_STAMP(&sdio_reg_time, &sdio_reg, 4);
				if (ctrl_reg & HIF_CTRL_NEXT_LEN_MASK) {
					DBG_INT_ADD(fix_miss_cnt);
					rx = 1;
					goto data_proc;
				} else {
					++sdio_reg_cnt5;
				}
			}
			if (hw_priv->hw_bufs_used < hw_priv->wsm_caps.numInpChBufs) {
				tx = 1;
				goto data_proc;
			}
		}


#if 0
		/* One more rx check if the register has not been read. */
		if (!reg_read && hw_priv->hw_bufs_used >=
			(hw_priv->wsm_caps.numInpChBufs - 1)) {
			atomic_xchg(&hw_priv->bh_rx, 0);
			PERF_INFO_GETTIME(&sdio_reg_time);
			xradio_bh_read_ctrl_reg(hw_priv, &ctrl_reg);
			++reg_read;
			++sdio_reg_cnt4;
			PERF_INFO_STAMP(&sdio_reg_time, &sdio_reg, 4);
			if (ctrl_reg & HIF_CTRL_NEXT_LEN_MASK) {
				DBG_INT_ADD(fix_miss_cnt);
				rx = 1;
				goto data_proc;
			} else {
				++sdio_reg_cnt5;
				rx = 0;
			}
		}
#endif
		DBG_INT_ADD(tx_rx_idle);
		PERF_INFO_STAMP(&bh_start_time, &bh_others, 0);

#if 0
		if (hw_priv->wsm_caps.numInpChBufs - hw_priv->hw_bufs_used > 1 &&
		    atomic_read(&hw_priv->bh_tx) == 0 && pending_tx == 0 &&
			!tx && atomic_read(&hw_priv->tx_lock) == 0) {
			int i = 0;
			for (i = 0; i < 4; ++i) {
				if (hw_priv->tx_queue[i].num_queued - hw_priv->tx_queue[i].num_pending) {
					bh_printk(XRADIO_DBG_NIY, "queued=%d, pending=%d, buf=%d.\n",
					hw_priv->tx_queue[i].num_queued,
					hw_priv->tx_queue[i].num_pending,
					hw_priv->wsm_caps.numInpChBufs - hw_priv->hw_bufs_used);
					tx = 1;
					xradio_proc_wakeup(hw_priv);
					goto data_proc;
				}
			}
		}
#endif
	}			/* for (;;) */

	/* Free the SKB buffer on exit. */
	if (skb_rx) {
		dev_kfree_skb(skb_rx);
		skb_rx = NULL;
	}

	/* If a BH error occurred, handle it. */
	if (!term) {
		bh_printk(XRADIO_DBG_ERROR, "Fatal error, exiting code=%d.\n",
			  hw_priv->bh_error);

#ifdef SUPPORT_FW_DBG_INF
		xradio_fw_dbg_dump_in_direct_mode(hw_priv);
#endif

#ifdef HW_ERROR_WIFI_RESET
		/* Notify the upper layer to restart wifi;
		 * don't do it in a debug version. */
#ifdef CONFIG_XRADIO_ETF
		/* In ETF mode we should restart manually. */
		if (!etf_is_connect() &&
			XRADIO_BH_RESUMED == atomic_read(&hw_priv->bh_suspend)) {
			wsm_upper_restart(hw_priv);
		}
#else
		if (XRADIO_BH_RESUMED == atomic_read(&hw_priv->bh_suspend))
			wsm_upper_restart(hw_priv);
#endif
#endif
		/* TODO: schedule_work(recovery) */
#ifndef HAS_PUT_TASK_STRUCT
		/* The only reason for having this stupid code here is
		 * that __put_task_struct is not exported by the kernel. */
		for (;;) {
#ifdef BH_USE_SEMAPHORE
			status = down_timeout(&hw_priv->bh_sem, HZ/10);
			term = kthread_should_stop();
			status = 0;
#else
			int status = wait_event_interruptible(hw_priv->bh_wq, ({
				     term = kthread_should_stop();
				     (term); }));
#endif
			if (status || term)
				break;
		}
#endif
	}
	atomic_add(1, &hw_priv->bh_term);	/* debug info, shows bh status. */
	return 0;
}