/*
 * Queue implementation for XRadio drivers
 *
 * Copyright (c) 2013
 * Xradio Technology Co., Ltd. <www.xradiotech.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <net/mac80211.h>
#include <linux/sched.h>
#include <linux/list.h>
#include "xradio.h"
#include "queue.h"
#ifdef CONFIG_XRADIO_TESTMODE
#include <linux/time.h>
#endif /*CONFIG_XRADIO_TESTMODE*/

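/*
 * TX queue locking is reference counted: the mac80211 queue is stopped on
 * the first lock and woken again only when the count drops back to zero,
 * so nested lock/unlock pairs are safe.
 */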
static inline void __xradio_queue_lock(struct xradio_queue *queue)
{
	struct xradio_queue_stats *stats = queue->stats;
	if (queue->tx_locked_cnt++ == 0) {
		txrx_printk(XRADIO_DBG_MSG, "[TX] Queue %d is locked.\n",
				queue->queue_id);
		mac80211_stop_queue(stats->hw_priv->hw, queue->queue_id);
		DBG_INT_ADD(queue_lock_cnt);
	}
}

static inline void __xradio_queue_unlock(struct xradio_queue *queue)
{
	struct xradio_queue_stats *stats = queue->stats;
	SYS_BUG(!queue->tx_locked_cnt);
	if (--queue->tx_locked_cnt == 0) {
		txrx_printk(XRADIO_DBG_MSG, "[TX] Queue %d is unlocked.\n",
				queue->queue_id);
		mac80211_wake_queue(stats->hw_priv->hw, queue->queue_id);
	}
}

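/*
 * Packet ID layout (32 bits), as encoded and decoded by the two helpers
 * below:
 *
 *   [31:28] queue generation
 *   [27:24] link ID
 *   [23:20] interface ID
 *   [19:16] queue ID
 *   [15: 8] item generation
 *   [ 7: 0] item index in queue->pool
 *
 * The generation fields let stale packet IDs (from a cleared queue or a
 * reused pool slot) be detected and rejected.
 */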
static inline void xradio_queue_parse_id(u32 packetID, u8 *queue_generation,
						u8 *queue_id,
						u8 *item_generation,
						u8 *item_id,
						u8 *if_id,
						u8 *link_id)
{
	*item_id		= (packetID >>  0) & 0xFF;
	*item_generation	= (packetID >>  8) & 0xFF;
	*queue_id		= (packetID >> 16) & 0xF;
	*if_id			= (packetID >> 20) & 0xF;
	*link_id		= (packetID >> 24) & 0xF;
	*queue_generation	= (packetID >> 28) & 0xF;
}

static inline u32 xradio_queue_make_packet_id(u8 queue_generation,
						u8 queue_id,
						u8 item_generation, u8 item_id,
						u8 if_id, u8 link_id)
{
	/* TODO:COMBO: Add interfaceID to the packetID */
	return ((u32)item_id << 0) |
		((u32)item_generation << 8) |
		((u32)queue_id << 16) |
		((u32)if_id << 20) |
		((u32)link_id << 24) |
		((u32)queue_generation << 28);
}

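/*
 * Garbage collection is two-stage: items to be freed are copied onto
 * stats->gc_list under the stats lock, and the skb destructors run later
 * from a worker (xradio_queue_gc_work). This appears intended to keep the
 * destructor out of the timer/softirq contexts that register items for
 * collection.
 */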
static void xradio_queue_post_gc(struct xradio_queue_stats *stats)
{
	struct xradio_queue_item *item;

	while (!list_empty(&stats->gc_list)) {
		spin_lock_bh(&stats->lock);
		item = list_first_entry(
			&stats->gc_list, struct xradio_queue_item, head);
		list_del(&item->head);
		spin_unlock_bh(&stats->lock);
		stats->skb_dtor(stats->hw_priv, item->skb, &item->txpriv);
		kfree(item);
	}
}

static void xradio_queue_register_post_gc(struct xradio_queue_stats *stats,
				     struct xradio_queue_item *item)
{
	struct xradio_queue_item *gc_item;
	gc_item = xr_kmalloc(sizeof(struct xradio_queue_item), false);
	SYS_BUG(!gc_item);
	memcpy(gc_item, item, sizeof(struct xradio_queue_item));
	spin_lock_bh(&stats->lock);
	list_add_tail(&gc_item->head, &stats->gc_list);
	spin_unlock_bh(&stats->lock);
}

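/*
 * Reap queued items whose TTL has expired: expired items are handed to the
 * post-GC list and their pool slots returned to free_pool. If the queue is
 * still overfull, the GC timer is re-armed for the next item to expire.
 */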
static void __xradio_queue_gc(struct xradio_queue *queue,
			      bool unlock)
{
	struct xradio_queue_stats *stats = queue->stats;
	struct xradio_queue_item *item = NULL;
	/* struct xradio_vif *priv; */
	int if_id;
	bool wakeup_stats = false;

	while (!list_empty(&queue->queue)) {
		struct xradio_txpriv *txpriv;
		item = list_first_entry(
			&queue->queue, struct xradio_queue_item, head);
		if (time_before(jiffies, item->queue_timestamp+queue->ttl))
			break;

		txpriv = &item->txpriv;
		if_id = txpriv->if_id;
		--queue->num_queued;
		--queue->num_queued_vif[if_id];
		--queue->link_map_cache[if_id][txpriv->link_id];
		spin_lock_bh(&stats->lock);
		--stats->num_queued[if_id];
		if (!--stats->link_map_cache[if_id][txpriv->link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
		/*
		priv = xrwl_hwpriv_to_vifpriv(stats->hw_priv, if_id);
		if (priv) {
			xradio_debug_tx_ttl(priv);
			spin_unlock(&priv->vif_lock);
		} */
		xradio_queue_register_post_gc(stats, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}

	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);

	if (queue->overfull) {
		if (queue->num_queued <= ((stats->hw_priv->vif0_throttle +
						stats->hw_priv->vif1_throttle+2)>>1)) {
			queue->overfull = false;
			if (unlock) {
				__xradio_queue_unlock(queue);
			}
		} else if (item) {
			unsigned long tmo = item->queue_timestamp + queue->ttl;
			mod_timer(&queue->gc, tmo);
#ifdef CONFIG_PM
			xradio_pm_stay_awake(&stats->hw_priv->pm_state,
					tmo - jiffies);
#endif
		}
	}
}

static void xradio_queue_gc(struct timer_list *t)
{
	struct xradio_queue *queue = from_timer(queue, t, gc);

	spin_lock_bh(&queue->lock);
	__xradio_queue_gc(queue, true);
	spin_unlock_bh(&queue->lock);
	if (!list_empty(&queue->stats->gc_list))
		queue_work(queue->stats->hw_priv->workqueue, &queue->stats->gc_work);
}

void xradio_skb_post_gc(struct xradio_common *hw_priv,
		     struct sk_buff *skb,
		     const struct xradio_txpriv *txpriv)
{
	struct xradio_queue_item skb_drop;
	skb_drop.skb = skb;
	skb_drop.txpriv = *txpriv;
	xradio_queue_register_post_gc(&hw_priv->tx_queue_stats, &skb_drop);
	queue_work(hw_priv->workqueue, &hw_priv->tx_queue_stats.gc_work);
}

void xradio_queue_gc_work(struct work_struct *work)
{
	struct xradio_queue_stats *stats =
		container_of(work, struct xradio_queue_stats, gc_work);

	xradio_queue_post_gc(stats);
}

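/*
 * stats->link_map_cache[vif][link] counts frames queued per link ID across
 * all queues; waiters on wait_link_id_empty are woken whenever one of
 * these counters drops to zero.
 */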
int xradio_queue_stats_init(struct xradio_queue_stats *stats,
			    size_t map_capacity,
			    xradio_queue_skb_dtor_t skb_dtor,
			    struct xradio_common *hw_priv)
{
	int i;

	memset(stats, 0, sizeof(*stats));
	stats->map_capacity = map_capacity;
	stats->skb_dtor = skb_dtor;
	stats->hw_priv = hw_priv;
	spin_lock_init(&stats->lock);
	init_waitqueue_head(&stats->wait_link_id_empty);
	INIT_WORK(&stats->gc_work, xradio_queue_gc_work);
	INIT_LIST_HEAD(&stats->gc_list);
	for (i = 0; i < XRWL_MAX_VIFS; i++) {
		stats->link_map_cache[i] = (int *)xr_kzalloc(sizeof(int) * map_capacity, false);
		if (!stats->link_map_cache[i]) {
			for (i--; i >= 0; i--)
				kfree(stats->link_map_cache[i]);
			return -ENOMEM;
		}
	}

	return 0;
}

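/*
 * All queue items live in a single preallocated pool. Free slots sit on
 * free_pool, frames waiting for transmission on queue, and frames handed
 * to the firmware (awaiting a TX confirm) on pending; items only ever move
 * between these three lists.
 */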
int xradio_queue_init(struct xradio_queue *queue,
		      struct xradio_queue_stats *stats,
		      u8 queue_id,
		      size_t capacity,
		      unsigned long ttl)
{
	int i;

	memset(queue, 0, sizeof(*queue));
	queue->stats = stats;
	queue->capacity = capacity;
	queue->queue_id = queue_id;
	queue->ttl = ttl;
	INIT_LIST_HEAD(&queue->queue);
	INIT_LIST_HEAD(&queue->pending);
	INIT_LIST_HEAD(&queue->free_pool);
	spin_lock_init(&queue->lock);
	/*init_timer(&queue->gc);
	queue->gc.data = (unsigned long)queue;
	queue->gc.function = xradio_queue_gc;*/
	timer_setup(&queue->gc, xradio_queue_gc, 0);

	queue->pool = xr_kzalloc(sizeof(struct xradio_queue_item) * capacity,
					false);
	if (!queue->pool)
		return -ENOMEM;

	for (i = 0; i < XRWL_MAX_VIFS; i++) {
		queue->link_map_cache[i] =
			(int *)xr_kzalloc(sizeof(int) * stats->map_capacity, false);
		if (!queue->link_map_cache[i]) {
			for (i--; i >= 0; i--)
				kfree(queue->link_map_cache[i]);
			kfree(queue->pool);
			queue->pool = NULL;
			return -ENOMEM;
		}
	}

	for (i = 0; i < capacity; ++i)
		list_add_tail(&queue->pool[i].head, &queue->free_pool);

	return 0;
}

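/*
 * Drop every queued and pending frame for the given interface (or for all
 * interfaces with XRWL_ALL_IFS) and bump the queue generation so that
 * packet IDs still held elsewhere no longer match.
 */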
/* TODO:COMBO: Flush only a particular interface specific parts */
int xradio_queue_clear(struct xradio_queue *queue, int if_id)
{
	int i, cnt, iter;
	struct xradio_queue_stats *stats = queue->stats;
	struct xradio_queue_item *item = NULL, *tmp = NULL;

#if BH_PROC_TX
	bh_proc_flush_txqueue(stats->hw_priv, if_id);
#endif

	cnt = 0;
	spin_lock_bh(&queue->lock);

#ifdef QUEUE_GEN_IF_TABLE
	if (XRWL_ALL_IFS == if_id) {
		for (i = 0; i < XRWL_ALL_IFS; i++) {
			queue->generation[i]++;
			queue->generation[i] &= 0xf;
		}
	} else {
		queue->generation[if_id]++;
		queue->generation[if_id] &= 0xf;
	}
#else
	queue->generation++;
	queue->generation &= 0xf;
#endif
	/*list_splice_tail_init(&queue->queue, &queue->pending);*/
	list_for_each_entry_safe(item, tmp, &queue->queue, head) {
		SYS_WARN(!item->skb);
		if (XRWL_ALL_IFS == if_id || item->txpriv.if_id == if_id) {
			xradio_queue_register_post_gc(stats, item);
			item->skb = NULL;
			list_move_tail(&item->head, &queue->free_pool);
			--queue->num_queued;
		}
	}
	list_for_each_entry_safe(item, tmp, &queue->pending, head) {
		SYS_WARN(!item->skb);
		if (XRWL_ALL_IFS == if_id || item->txpriv.if_id == if_id) {
			xradio_queue_register_post_gc(stats, item);
			item->skb = NULL;
			list_move_tail(&item->head, &queue->free_pool);
			--queue->num_pending;
			--queue->num_queued;
		}
	}
	if (XRWL_ALL_IFS != if_id) {
		queue->num_queued_vif[if_id] = 0;
		queue->num_pending_vif[if_id] = 0;
	} else {
		queue->num_queued = 0;
		queue->num_pending = 0;
		for (iter = 0; iter < XRWL_MAX_VIFS; iter++) {
			queue->num_queued_vif[iter] = 0;
			queue->num_pending_vif[iter] = 0;
		}
	}
	spin_lock_bh(&stats->lock);
	if (XRWL_ALL_IFS != if_id) {
		for (i = 0; i < stats->map_capacity; ++i) {
			stats->num_queued[if_id] -=
				queue->link_map_cache[if_id][i];
			stats->link_map_cache[if_id][i] -=
				queue->link_map_cache[if_id][i];
			queue->link_map_cache[if_id][i] = 0;
		}
	} else {
		for (iter = 0; iter < XRWL_MAX_VIFS; iter++) {
			for (i = 0; i < stats->map_capacity; ++i) {
				stats->num_queued[iter] -=
					queue->link_map_cache[iter][i];
				stats->link_map_cache[iter][i] -=
					queue->link_map_cache[iter][i];
				queue->link_map_cache[iter][i] = 0;
			}
		}
	}
	spin_unlock_bh(&stats->lock);
	if (unlikely(queue->overfull)) {
		if (queue->num_queued <= ((stats->hw_priv->vif0_throttle +
			stats->hw_priv->vif1_throttle+2)>>1)) {
			queue->overfull = false;
			__xradio_queue_unlock(queue);
		}
	}
	spin_unlock_bh(&queue->lock);
	wake_up(&stats->wait_link_id_empty);
	if (!list_empty(&stats->gc_list))
		xradio_queue_post_gc(stats);
	return 0;
}

void xradio_queue_stats_deinit(struct xradio_queue_stats *stats)
{
	int i;

	for (i = 0; i < XRWL_MAX_VIFS; i++) {
		kfree(stats->link_map_cache[i]);
		stats->link_map_cache[i] = NULL;
	}
}

void xradio_queue_deinit(struct xradio_queue *queue)
{
	int i;

	xradio_queue_clear(queue, XRWL_ALL_IFS);
	del_timer_sync(&queue->gc);
	INIT_LIST_HEAD(&queue->free_pool);
	kfree(queue->pool);
	for (i = 0; i < XRWL_MAX_VIFS; i++) {
		kfree(queue->link_map_cache[i]);
		queue->link_map_cache[i] = NULL;
	}
	queue->pool = NULL;
	queue->capacity = 0;
}

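/*
 * Count frames still waiting for transmission on the given links. For the
 * common "all links" case (link_id_map == -1) this is simply queued minus
 * pending for the vif; otherwise the per-link counters are summed.
 */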
size_t xradio_queue_get_num_queued(struct xradio_vif *priv,
				   struct xradio_queue *queue,
				   u32 link_id_map)
{
	size_t ret;
	int i, bit;
	size_t map_capacity = queue->stats->map_capacity;

	if (!link_id_map)
		return 0;

	spin_lock_bh(&queue->lock);
	if (likely(link_id_map == (u32) -1)) {
		ret = queue->num_queued_vif[priv->if_id] -
			queue->num_pending_vif[priv->if_id];
	} else {
		ret = 0;
		for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
			if (link_id_map & bit)
				ret +=
				queue->link_map_cache[priv->if_id][i];
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

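/*
 * Enqueue an skb for transmission: take an item from free_pool, stamp it
 * with a fresh packet ID and timestamps, and update the per-queue and
 * global counters. If the queue crosses the throttle threshold it is
 * marked overfull and locked until the backlog drains.
 */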
int xradio_queue_put(struct xradio_queue *queue, struct sk_buff *skb,
		     struct xradio_txpriv *txpriv)
{
	int ret = 0;
#ifdef CONFIG_XRADIO_TESTMODE
	struct timespec64 tmval;
#endif /*CONFIG_XRADIO_TESTMODE*/
	LIST_HEAD(gc_list);
	struct xradio_queue_stats *stats = queue->stats;
	/* TODO:COMBO: Add interface ID info to queue item */

	if (txpriv->link_id >= queue->stats->map_capacity)
		return -EINVAL;

	spin_lock_bh(&queue->lock);
	if (!SYS_WARN(list_empty(&queue->free_pool))) {
		struct xradio_queue_item *item = list_first_entry(
			&queue->free_pool, struct xradio_queue_item, head);
		SYS_BUG(item->skb);

		list_move_tail(&item->head, &queue->queue);
		item->skb = skb;
		item->txpriv = *txpriv;
		item->generation  = 1; /* start at 1 so the packet ID is never 0 */
		item->pack_stk_wr = 0;
		item->packetID = xradio_queue_make_packet_id(
#ifdef QUEUE_GEN_IF_TABLE
			queue->generation[txpriv->if_id],
#else
			queue->generation,
#endif
			queue->queue_id,
			item->generation, item - queue->pool,
			txpriv->if_id, txpriv->raw_link_id);
		item->queue_timestamp = jiffies;
#ifdef CONFIG_XRADIO_TESTMODE
		xr_do_gettimeofday(&tmval);
		item->qdelay_timestamp = tmval.tv_usec;
#endif /*CONFIG_XRADIO_TESTMODE*/

#ifdef TES_P2P_0002_ROC_RESTART
		if (TES_P2P_0002_state == TES_P2P_0002_STATE_SEND_RESP) {
			TES_P2P_0002_packet_id = item->packetID;
			TES_P2P_0002_state = TES_P2P_0002_STATE_GET_PKTID;
			txrx_printk(XRADIO_DBG_WARN, "[ROC_RESTART_STATE_GET_PKTID]\n");
		}
#endif

		++queue->num_queued;
		++queue->num_queued_vif[txpriv->if_id];
		++queue->link_map_cache[txpriv->if_id][txpriv->link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued[txpriv->if_id];
		++stats->link_map_cache[txpriv->if_id][txpriv->link_id];
		spin_unlock_bh(&stats->lock);

		/*
		 * TX may happen in parallel sometimes.
		 * Leave extra queue slots so we don't overflow.
		 */
		if (!queue->overfull &&
				queue->num_queued >=
		((stats->hw_priv->vif0_throttle + stats->hw_priv->vif1_throttle)
				- (num_present_cpus() - 1))) {
			DBG_INT_ADD(overfull_lock_cnt);
			queue->overfull = true;
			__xradio_queue_lock(queue);
			mod_timer(&queue->gc, jiffies);
			txrx_printk(XRADIO_DBG_NIY, "!lock queue\n");
		}
	} else {
		ret = -ENOENT;
	}
#if 0
	txrx_printk(XRADIO_DBG_ERROR, "queue_put queue %d, %d, %d\n",
		queue->num_queued,
		queue->link_map_cache[txpriv->if_id][txpriv->link_id],
		queue->num_pending);
	txrx_printk(XRADIO_DBG_ERROR, "queue_put stats %d, %d\n", stats->num_queued,
		stats->link_map_cache[txpriv->if_id][txpriv->link_id]);
#endif
	spin_unlock_bh(&queue->lock);
	return ret;
}

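/*
 * Pick the first queued frame that matches the interface and link-ID mask,
 * hand back pointers into it, and move it to the pending list until the
 * firmware confirms it (or the frame is requeued/removed).
 */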
#if BH_PROC_TX
int xradio_queue_get(struct xradio_queue *queue,
			int if_id,
		     u32 link_id_map,
		     struct wsm_tx **tx,
		     struct ieee80211_tx_info **tx_info,
		     struct xradio_txpriv **txpriv, u8 **data)
#else
int xradio_queue_get(struct xradio_queue *queue,
			int if_id,
		     u32 link_id_map,
		     struct wsm_tx **tx,
		     struct ieee80211_tx_info **tx_info,
		     struct xradio_txpriv **txpriv)
#endif
{
	int ret = -ENOENT;
	struct xradio_queue_item *item;
	struct xradio_queue_stats *stats = queue->stats;
	bool wakeup_stats = false;
#ifdef CONFIG_XRADIO_TESTMODE
	struct timespec64 tmval;
#endif /*CONFIG_XRADIO_TESTMODE*/

	spin_lock_bh(&queue->lock);
	list_for_each_entry(item, &queue->queue, head) {
		if ((item->txpriv.if_id == if_id) &&
			(link_id_map & BIT(item->txpriv.link_id))) {
			ret = 0;
			break;
		}
	}

	if (!SYS_WARN(ret)) {
		*tx = (struct wsm_tx *)item->skb->data;
		*tx_info = IEEE80211_SKB_CB(item->skb);
		*txpriv = &item->txpriv;
		(*tx)->packetID = __cpu_to_le32(item->packetID);
		list_move_tail(&item->head, &queue->pending);
		++queue->num_pending;
		++queue->num_pending_vif[item->txpriv.if_id];
		--queue->link_map_cache[item->txpriv.if_id]
				[item->txpriv.link_id];
		item->xmit_timestamp = jiffies;
#if BH_PROC_TX
		item->xmit_to_fw = 0;
		*data = (u8 *)item;
#endif

#ifdef CONFIG_XRADIO_TESTMODE
		xr_do_gettimeofday(&tmval);
		item->mdelay_timestamp = tmval.tv_usec;
#endif /*CONFIG_XRADIO_TESTMODE*/

		spin_lock_bh(&stats->lock);
		--stats->num_queued[item->txpriv.if_id];
		if (!--stats->link_map_cache[item->txpriv.if_id]
					[item->txpriv.link_id])
			wakeup_stats = true;

		spin_unlock_bh(&stats->lock);
#if 0
		txrx_printk(XRADIO_DBG_ERROR, "queue_get queue %d, %d, %d\n",
		queue->num_queued,
		queue->link_map_cache[item->txpriv.if_id][item->txpriv.link_id],
		queue->num_pending);
		txrx_printk(XRADIO_DBG_ERROR, "queue_get stats %d, %d\n",
			    stats->num_queued,
		stats->link_map_cache[item->txpriv.if_id]
		[item->txpriv.link_id]);
#endif
	} else { /* add debug info for the warning */
		struct xradio_queue_item *item_tmp;
		txrx_printk(XRADIO_DBG_WARN,
			"%s, if_id=%d, link_id_map=%08x, queued=%zu, pending=%zu\n",
			__func__, if_id, link_id_map, queue->num_queued_vif[if_id],
			queue->num_pending_vif[if_id]);
		list_for_each_entry(item_tmp, &queue->queue, head) {
			txrx_printk(XRADIO_DBG_WARN, "%s, item_if_id=%d, item_link_id=%d\n",
				__func__, item_tmp->txpriv.if_id, item_tmp->txpriv.link_id);
		}
	}
	spin_unlock_bh(&queue->lock);
	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);

	return ret;
}

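/*
 * Return a pending frame to the TX queue (e.g. after a failed attempt).
 * The item generation is bumped and a new packet ID is issued, so the old
 * ID can no longer address this slot.
 */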
#ifdef CONFIG_XRADIO_TESTMODE
int xradio_queue_requeue(struct xradio_common *hw_priv,
	struct xradio_queue *queue, u32 packetID, bool check)
#else
int xradio_queue_requeue(struct xradio_queue *queue, u32 packetID, bool check)
#endif
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id, if_id, link_id;
	struct xradio_queue_item *item;
	struct xradio_queue_stats *stats = queue->stats;

	xradio_queue_parse_id(packetID, &queue_generation, &queue_id,
				&item_generation, &item_id, &if_id, &link_id);

	item = &queue->pool[item_id];
#ifdef P2P_MULTIVIF
	if (check && item->txpriv.if_id == XRWL_GENERIC_IF_ID) {
#else
	if (check && item->txpriv.offchannel_if_id == XRWL_GENERIC_IF_ID) {
#endif
		txrx_printk(XRADIO_DBG_MSG, "Requeued frame dropped for "
						"generic interface id.\n");
#ifdef CONFIG_XRADIO_TESTMODE
		xradio_queue_remove(hw_priv, queue, packetID);
#else
		xradio_queue_remove(queue, packetID);
#endif
		return 0;
	}

#ifndef P2P_MULTIVIF
	if (!check)
		item->txpriv.offchannel_if_id = XRWL_GENERIC_IF_ID;
#endif

	/*if_id = item->txpriv.if_id;*/

	spin_lock_bh(&queue->lock);
	SYS_BUG(queue_id != queue->queue_id);
#ifdef QUEUE_GEN_IF_TABLE
	if (unlikely(queue_generation != queue->generation[if_id])) {
#else
	if (unlikely(queue_generation != queue->generation)) {
#endif
		ret = -ENOENT;
	} else if (unlikely(item_id >= (unsigned) queue->capacity)) {
		SYS_WARN(1);
		ret = -EINVAL;
	} else if (unlikely(item->generation != item_generation)) {
		SYS_WARN(1);
		ret = -ENOENT;
	} else {
		--queue->num_pending;
		--queue->num_pending_vif[if_id];
		++queue->link_map_cache[if_id][item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued[item->txpriv.if_id];
		++stats->link_map_cache[if_id][item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		item->generation = ++item_generation;
		item->packetID = xradio_queue_make_packet_id(
			queue_generation, queue_id, item_generation, item_id,
			if_id, link_id);
		list_move(&item->head, &queue->queue);
#if 0
		txrx_printk(XRADIO_DBG_ERROR, "queue_requeue queue %d, %d, %d\n",
		queue->num_queued,
		queue->link_map_cache[if_id][item->txpriv.link_id],
		queue->num_pending);
		txrx_printk(XRADIO_DBG_ERROR, "queue_requeue stats %d, %d\n",
		stats->num_queued,
		stats->link_map_cache[if_id][item->txpriv.link_id]);
#endif
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

int xradio_queue_requeue_all(struct xradio_queue *queue)
{
	struct xradio_queue_stats *stats = queue->stats;
	spin_lock_bh(&queue->lock);
	while (!list_empty(&queue->pending)) {
		struct xradio_queue_item *item = list_entry(
			queue->pending.prev, struct xradio_queue_item, head);

		--queue->num_pending;
		--queue->num_pending_vif[item->txpriv.if_id];
		++queue->link_map_cache[item->txpriv.if_id]
				[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued[item->txpriv.if_id];
		++stats->link_map_cache[item->txpriv.if_id]
				[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		++item->generation;
		item->packetID = xradio_queue_make_packet_id(
#ifdef QUEUE_GEN_IF_TABLE
			queue->generation[item->txpriv.if_id],
#else
			queue->generation,
#endif
			queue->queue_id,
			item->generation, item - queue->pool,
			item->txpriv.if_id, item->txpriv.raw_link_id);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);

	return 0;
}
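
/*
 * Complete a pending frame after the firmware's TX confirm: validate the
 * packet ID against the current generations, release the pool slot, and
 * run the skb destructor outside the queue lock.
 */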
#ifdef CONFIG_XRADIO_TESTMODE
int xradio_queue_remove(struct xradio_common *hw_priv,
				struct xradio_queue *queue, u32 packetID)
#else
int xradio_queue_remove(struct xradio_queue *queue, u32 packetID)
#endif /*CONFIG_XRADIO_TESTMODE*/
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id, if_id, link_id;
	struct xradio_queue_item *item;
	struct xradio_queue_stats *stats = queue->stats;
	struct sk_buff *gc_skb = NULL;
	struct xradio_txpriv gc_txpriv;

	xradio_queue_parse_id(packetID, &queue_generation, &queue_id,
				&item_generation, &item_id, &if_id, &link_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	SYS_BUG(queue_id != queue->queue_id);
	/*TODO:COMBO:Add check for interface ID also */
#ifdef QUEUE_GEN_IF_TABLE
	if (unlikely(queue_generation != queue->generation[if_id])) {
#else
	if (unlikely(queue_generation != queue->generation)) {
#endif
		ret = -ENOENT;
	} else if (unlikely(item_id >= (unsigned) queue->capacity)) {
		SYS_WARN(1);
		ret = -EINVAL;
	} else if (unlikely(item->generation != item_generation)) {
		SYS_WARN(1);
		ret = -ENOENT;
	} else {
		gc_txpriv = item->txpriv;
		gc_skb = item->skb;
		item->skb = NULL;
		--queue->num_pending;
		--queue->num_pending_vif[if_id];
		--queue->num_queued;
		--queue->num_queued_vif[if_id];
		++queue->num_sent;
		++item->generation;
#ifdef CONFIG_XRADIO_TESTMODE
		spin_lock_bh(&hw_priv->tsm_lock);
		if (hw_priv->start_stop_tsm.start) {
			if (queue_id == hw_priv->tsm_info.ac) {
				struct timespec64 tmval;
				unsigned long queue_delay;
				unsigned long media_delay;
				xr_do_gettimeofday(&tmval);

				if (tmval.tv_usec > item->qdelay_timestamp)
					queue_delay = tmval.tv_usec -
						item->qdelay_timestamp;
				else
					queue_delay = tmval.tv_usec +
					1000000 - item->qdelay_timestamp;

				if (tmval.tv_usec > item->mdelay_timestamp)
					media_delay = tmval.tv_usec -
						item->mdelay_timestamp;
				else
					media_delay = tmval.tv_usec +
					1000000 - item->mdelay_timestamp;
				hw_priv->tsm_info.sum_media_delay +=
							media_delay;
				hw_priv->tsm_info.sum_pkt_q_delay += queue_delay;
				if (queue_delay <= 10000)
					hw_priv->tsm_stats.bin0++;
				else if (queue_delay <= 20000)
					hw_priv->tsm_stats.bin1++;
				else if (queue_delay <= 40000)
					hw_priv->tsm_stats.bin2++;
				else
					hw_priv->tsm_stats.bin3++;
			}
		}
		spin_unlock_bh(&hw_priv->tsm_lock);
#endif /*CONFIG_XRADIO_TESTMODE*/
		/* Use list_move rather than list_move_tail so the freed
		 * item, which is still cache-hot, is reused first.
		 */
		list_move(&item->head, &queue->free_pool);

		if (unlikely(queue->overfull) &&
		    (queue->num_queued <= ((stats->hw_priv->vif0_throttle +
		     stats->hw_priv->vif1_throttle + 2)>>1))) {
			queue->overfull = false;
			__xradio_queue_unlock(queue);
		}
	}
	spin_unlock_bh(&queue->lock);

#if 0
	txrx_printk(XRADIO_DBG_ERROR, "queue_drop queue %d, %d, %d\n",
		queue->num_queued, queue->link_map_cache[if_id][0],
		queue->num_pending);
	txrx_printk(XRADIO_DBG_ERROR, "queue_drop stats %d, %d\n",
		    stats->num_queued, stats->link_map_cache[if_id][0]);
#endif
	if (gc_skb)
		stats->skb_dtor(stats->hw_priv, gc_skb, &gc_txpriv);

	return ret;
}

int xradio_queue_get_skb(struct xradio_queue *queue, u32 packetID,
			 struct sk_buff **skb,
			 const struct xradio_txpriv **txpriv)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id, if_id, link_id;
	struct xradio_queue_item *item;

	xradio_queue_parse_id(packetID, &queue_generation, &queue_id,
				&item_generation, &item_id, &if_id, &link_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	SYS_BUG(queue_id != queue->queue_id);
	/* TODO:COMBO: Add check for interface ID here */
#ifdef QUEUE_GEN_IF_TABLE
	if (unlikely(queue_generation != queue->generation[if_id])) {
#else
	if (unlikely(queue_generation != queue->generation)) {
#endif
		txrx_printk(XRADIO_DBG_WARN, "%s, queue generation match failed!\n",
				__func__);
		ret = -ENOENT;
	} else if (unlikely(item_id >= (unsigned) queue->capacity)) {
		txrx_printk(XRADIO_DBG_WARN, "%s, invalid item_id!\n", __func__);
		ret = -EINVAL;
	} else if (unlikely(item->generation != item_generation)) {
		txrx_printk(XRADIO_DBG_WARN, "%s, item generation match failed!\n",
				__func__);
		ret = -ENOENT;
	} else {
		*skb = item->skb;
		*txpriv = &item->txpriv;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

void xradio_queue_lock(struct xradio_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__xradio_queue_lock(queue);
	spin_unlock_bh(&queue->lock);
}

void xradio_queue_unlock(struct xradio_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__xradio_queue_unlock(queue);
	spin_unlock_bh(&queue->lock);
}

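/*
 * Report whether any frame is pending and, via *timestamp, the oldest
 * xmit time found (excluding pending_frameID). The caller is expected to
 * seed *timestamp, e.g. with jiffies.
 */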
bool xradio_queue_get_xmit_timestamp(struct xradio_queue *queue,
				     unsigned long *timestamp, int if_id,
				     u32 pending_frameID, u32 *Old_frame_ID)
{
	struct xradio_queue_item *item;
	bool ret;

	spin_lock_bh(&queue->lock);
	ret = !list_empty(&queue->pending);
	if (ret) {
		list_for_each_entry(item, &queue->pending, head) {
			if (((if_id == XRWL_GENERIC_IF_ID) ||
				(if_id == XRWL_ALL_IFS) ||
					(item->txpriv.if_id == if_id)) &&
					(item->packetID != pending_frameID)) {
				if (time_before(item->xmit_timestamp,
							*timestamp)) {
					*timestamp = item->xmit_timestamp;
					*Old_frame_ID = item->packetID;
				}
			}
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

bool xradio_queue_stats_is_empty(struct xradio_queue_stats *stats,
				 u32 link_id_map, int if_id)
{
	bool empty = true;

	spin_lock_bh(&stats->lock);
	if (link_id_map == (u32)-1)
		empty = stats->num_queued[if_id] == 0;
	else {
		int i, if_id;
		/* Note: this declaration shadows the if_id argument; with a
		 * partial link_id_map every interface is scanned.
		 */
		for (if_id = 0; if_id < XRWL_MAX_VIFS; if_id++) {
			for (i = 0; i < stats->map_capacity; ++i) {
				if (link_id_map & BIT(i)) {
					if (stats->link_map_cache[if_id][i]) {
						empty = false;
						break;
					}
				}
			}
		}
	}
	spin_unlock_bh(&stats->lock);

	return empty;
}

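/*
 * Scan all AC queues for the oldest in-flight frame (excluding
 * pending_pkt_id) and compute how long until its TX confirm is considered
 * timed out. On a timeout the frame is recorded in hw_priv->query_packetID
 * so the firmware can be queried about it.
 */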
bool xradio_query_txpkt_timeout(struct xradio_common *hw_priv, int if_id,
				u32 pending_pkt_id, long *timeout)
{
	int i;
	bool pending = false;
	unsigned long timestamp = jiffies;
	struct xradio_queue      *queue = NULL;
	struct xradio_queue_item *item  = NULL;
	struct xradio_queue      *old_queue = NULL;
	struct xradio_queue_item *old_item  = NULL;
	u8 pack_stk_wr = 0;

	/* Find the oldest in-flight frame. */
	for (i = 0; i < AC_QUEUE_NUM; ++i) {
		queue = &hw_priv->tx_queue[i];
		spin_lock_bh(&queue->lock);
		if (!list_empty(&queue->pending)) {
			list_for_each_entry(item, &queue->pending, head) {
#if BH_PROC_TX
				if (!item->xmit_to_fw)
					continue;
#endif
				if (((if_id == XRWL_GENERIC_IF_ID) ||
					 (if_id == XRWL_ALL_IFS) ||
					 (item->txpriv.if_id == if_id)) &&
					 (item->packetID != pending_pkt_id)) {
					if (time_before(item->xmit_timestamp, timestamp)) {
						timestamp   = item->xmit_timestamp;
						pack_stk_wr = item->pack_stk_wr;
						old_queue   = queue;
						old_item    = item;
					}
					pending = true;
				}
			}
		}
		spin_unlock_bh(&queue->lock);
	}
	if (!pending)
		return false;

	/* Check whether that frame's TX confirm has timed out; the
	 * (WSM_CMD_LAST_CHANCE_TIMEOUT>>1) grace periods below are a
	 * workaround for stuck frames.
	 */
	*timeout = timestamp + WSM_CMD_LAST_CHANCE_TIMEOUT - jiffies;
	if (hw_priv->BT_active) {
		*timeout += msecs_to_jiffies((BT_MAX_BLOCK_TIME>>1));
	}
	if (unlikely(*timeout < 0) && !pack_stk_wr) {
		struct ieee80211_hdr *frame = NULL;
		const struct xradio_txpriv *txpriv = NULL;
		u16 fctl = 0x0;
		u32 len  = 0x0;
		u8 if_id = 0, link_id = 0, tid = 0;

		/* Record the timed-out frame for a firmware query. */
		spin_lock_bh(&old_queue->lock);
		if (likely(old_item->skb && !hw_priv->query_packetID)) {
			hw_priv->query_packetID = old_item->packetID;
			old_item->pack_stk_wr = 1;
			atomic_add(1, &hw_priv->query_cnt);

			/* Collect info on the stuck frame for debugging. */
			txpriv = &old_item->txpriv;
			frame  = (struct ieee80211_hdr *)(&old_item->skb->data[txpriv->offset]);
			fctl   = frame->frame_control;
			len    = old_item->skb->len;
			if_id  = txpriv->if_id;
			link_id = txpriv->link_id;
			tid = txpriv->tid;
		}
		spin_unlock_bh(&old_queue->lock);
		/* Dump info on the stuck frame. */
		if (frame) {
			txrx_printk(XRADIO_DBG_ERROR, "TX confirm timeout(%ds).\n",
				    WSM_CMD_LAST_CHANCE_TIMEOUT/HZ);
			txrx_printk(XRADIO_DBG_ALWY, "if=%d, linkid=%d, tid=%d, " \
				    "old_packetID=0x%08x, fctl=0x%04x, len=%d, wr=%d\n",
				    if_id, link_id, tid,
				    hw_priv->query_packetID, fctl, len,
				    pack_stk_wr);
		}
		/* Give the query packet half the normal timeout. */
		*timeout = (WSM_CMD_LAST_CHANCE_TIMEOUT>>1);
	} else if (unlikely(pack_stk_wr)) {
		*timeout = *timeout + (WSM_CMD_LAST_CHANCE_TIMEOUT>>1);
		if (*timeout < 0)
			txrx_printk(XRADIO_DBG_ERROR, "%s, wr and timeout, packetID=0x%08x\n",
					__func__, old_item->packetID);
		else
			txrx_printk(XRADIO_DBG_MSG, "%s, wr and timeout=%ld\n",
					__func__, *timeout);
	}
	return pending;
}