// SPDX-License-Identifier: GPL-2.0-only
/*
 * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 */

#include <net/mac80211.h>
#include <linux/sched.h>
#include "queue.h"
#include "cw1200.h"
#include "debug.h"

/* private */ struct cw1200_queue_item
{
	struct list_head	head;
	struct sk_buff		*skb;
	u32			packet_id;
	unsigned long		queue_timestamp;
	unsigned long		xmit_timestamp;
	struct cw1200_txpriv	txpriv;
	u8			generation;
};

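/* Refcounted queue lock: stop the corresponding mac80211 TX queue the first
 * time the lock count becomes non-zero; __cw1200_queue_unlock() wakes it
 * again once the count drops back to zero.  Both expect queue->lock held.
 */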
static inline void __cw1200_queue_lock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	if (queue->tx_locked_cnt++ == 0) {
		pr_debug("[TX] Queue %d is locked.\n",
			 queue->queue_id);
		ieee80211_stop_queue(stats->priv->hw, queue->queue_id);
	}
}

static inline void __cw1200_queue_unlock(struct cw1200_queue *queue)
{
	struct cw1200_queue_stats *stats = queue->stats;
	BUG_ON(!queue->tx_locked_cnt);
	if (--queue->tx_locked_cnt == 0) {
		pr_debug("[TX] Queue %d is unlocked.\n",
			 queue->queue_id);
		ieee80211_wake_queue(stats->priv->hw, queue->queue_id);
	}
}

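/* A packet_id packs everything needed to find an item again into 32 bits:
 * bits  7:0  - index of the item in the queue's pool
 * bits 15:8  - item generation (bumped each time the item is reused)
 * bits 23:16 - queue id
 * bits 31:24 - queue generation (bumped by cw1200_queue_clear())
 */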
static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation,
					 u8 *queue_id, u8 *item_generation,
					 u8 *item_id)
{
	*item_id		= (packet_id >>  0) & 0xFF;
	*item_generation	= (packet_id >>  8) & 0xFF;
	*queue_id		= (packet_id >> 16) & 0xFF;
	*queue_generation	= (packet_id >> 24) & 0xFF;
}

static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id,
					    u8 item_generation, u8 item_id)
{
	return ((u32)item_id << 0) |
		((u32)item_generation << 8) |
		((u32)queue_id << 16) |
		((u32)queue_generation << 24);
}

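/* Run the deferred skb destructors for items collected on gc_list.
 * Called after queue->lock has been dropped, so skb_dtor() never runs
 * under the queue spinlock.
 */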
static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats,
				 struct list_head *gc_list)
{
	struct cw1200_queue_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, gc_list, head) {
		list_del(&item->head);
		stats->skb_dtor(stats->priv, item->skb, &item->txpriv);
		kfree(item);
	}
}

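/* Queue a copy of @item on @gc_list for deferred destruction.  A copy is
 * needed because the original item goes straight back to the free pool
 * while queue->lock is still held.
 */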
static void cw1200_queue_register_post_gc(struct list_head *gc_list,
					  struct cw1200_queue_item *item)
{
	struct cw1200_queue_item *gc_item;
	gc_item = kmemdup(item, sizeof(struct cw1200_queue_item),
			GFP_ATOMIC);
	BUG_ON(!gc_item);
	list_add_tail(&gc_item->head, gc_list);
}

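/* Expire queued items whose TTL has elapsed: drop them from the per-link
 * accounting, hand their skbs to @head for deferred destruction and return
 * the items to the free pool.  If the queue is still overfull, re-arm the
 * gc timer for the next expiry.  Called with queue->lock held.
 */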
static void __cw1200_queue_gc(struct cw1200_queue *queue,
			      struct list_head *head,
			      bool unlock)
{
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item = NULL, *tmp;
	bool wakeup_stats = false;

	list_for_each_entry_safe(item, tmp, &queue->queue, head) {
		if (jiffies - item->queue_timestamp < queue->ttl)
			break;
		--queue->num_queued;
		--queue->link_map_cache[item->txpriv.link_id];
		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
		cw1200_debug_tx_ttl(stats->priv);
		cw1200_queue_register_post_gc(head, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}

	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);

	if (queue->overfull) {
		if (queue->num_queued <= (queue->capacity >> 1)) {
			queue->overfull = false;
			if (unlock)
				__cw1200_queue_unlock(queue);
		} else if (item) {
			unsigned long tmo = item->queue_timestamp + queue->ttl;
			mod_timer(&queue->gc, tmo);
			cw1200_pm_stay_awake(&stats->priv->pm_state,
					     tmo - jiffies);
		}
	}
}

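/* gc timer callback: collect expired items under the lock, then destroy
 * their skbs once the lock has been dropped.
 */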
static void cw1200_queue_gc(struct timer_list *t)
{
	LIST_HEAD(list);
	struct cw1200_queue *queue =
		from_timer(queue, t, gc);

	spin_lock_bh(&queue->lock);
	__cw1200_queue_gc(queue, &list, true);
	spin_unlock_bh(&queue->lock);
	cw1200_queue_post_gc(queue->stats, &list);
}

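/* Initialize the stats object shared by all TX queues and allocate its
 * per-link counter array.
 */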
int cw1200_queue_stats_init(struct cw1200_queue_stats *stats,
			    size_t map_capacity,
			    cw1200_queue_skb_dtor_t skb_dtor,
			    struct cw1200_common *priv)
{
	memset(stats, 0, sizeof(*stats));
	stats->map_capacity = map_capacity;
	stats->skb_dtor = skb_dtor;
	stats->priv = priv;
	spin_lock_init(&stats->lock);
	init_waitqueue_head(&stats->wait_link_id_empty);

	stats->link_map_cache = kcalloc(map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!stats->link_map_cache)
		return -ENOMEM;

	return 0;
}

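/* Initialize one TX queue: preallocate a pool of @capacity items, put them
 * all on the free pool and set up the TTL gc timer.
 */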
int cw1200_queue_init(struct cw1200_queue *queue,
		      struct cw1200_queue_stats *stats,
		      u8 queue_id,
		      size_t capacity,
		      unsigned long ttl)
{
	size_t i;

	memset(queue, 0, sizeof(*queue));
	queue->stats = stats;
	queue->capacity = capacity;
	queue->queue_id = queue_id;
	queue->ttl = ttl;
	INIT_LIST_HEAD(&queue->queue);
	INIT_LIST_HEAD(&queue->pending);
	INIT_LIST_HEAD(&queue->free_pool);
	spin_lock_init(&queue->lock);
	timer_setup(&queue->gc, cw1200_queue_gc, 0);

	queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item),
			      GFP_KERNEL);
	if (!queue->pool)
		return -ENOMEM;

	queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int),
					GFP_KERNEL);
	if (!queue->link_map_cache) {
		kfree(queue->pool);
		queue->pool = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < capacity; ++i)
		list_add_tail(&queue->pool[i].head, &queue->free_pool);

	return 0;
}

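/* Drop every queued and pending frame, reset the per-link accounting and
 * bump the queue generation so stale packet_ids are rejected.  The skbs
 * themselves are destroyed after the lock is released.
 */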
int cw1200_queue_clear(struct cw1200_queue *queue)
{
	int i;
	LIST_HEAD(gc_list);
	struct cw1200_queue_stats *stats = queue->stats;
	struct cw1200_queue_item *item, *tmp;

	spin_lock_bh(&queue->lock);
	queue->generation++;
	list_splice_tail_init(&queue->queue, &queue->pending);
	list_for_each_entry_safe(item, tmp, &queue->pending, head) {
		WARN_ON(!item->skb);
		cw1200_queue_register_post_gc(&gc_list, item);
		item->skb = NULL;
		list_move_tail(&item->head, &queue->free_pool);
	}
	queue->num_queued = 0;
	queue->num_pending = 0;

	spin_lock_bh(&stats->lock);
	for (i = 0; i < stats->map_capacity; ++i) {
		stats->num_queued -= queue->link_map_cache[i];
		stats->link_map_cache[i] -= queue->link_map_cache[i];
		queue->link_map_cache[i] = 0;
	}
	spin_unlock_bh(&stats->lock);
	if (queue->overfull) {
		queue->overfull = false;
		__cw1200_queue_unlock(queue);
	}
	spin_unlock_bh(&queue->lock);
	wake_up(&stats->wait_link_id_empty);
	cw1200_queue_post_gc(stats, &gc_list);
	return 0;
}

void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats)
{
	kfree(stats->link_map_cache);
	stats->link_map_cache = NULL;
}

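/* Tear down a queue: flush all frames, stop the gc timer and free the item
 * pool and per-link counters.
 */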
void cw1200_queue_deinit(struct cw1200_queue *queue)
{
	cw1200_queue_clear(queue);
	del_timer_sync(&queue->gc);
	INIT_LIST_HEAD(&queue->free_pool);
	kfree(queue->pool);
	kfree(queue->link_map_cache);
	queue->pool = NULL;
	queue->link_map_cache = NULL;
	queue->capacity = 0;
}

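/* Return the number of frames queued, but not yet fetched for transmission,
 * for the links selected by @link_id_map; a map of all ones means any link.
 */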
size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue,
				   u32 link_id_map)
{
	size_t ret;
	int i, bit;
	size_t map_capacity = queue->stats->map_capacity;

	if (!link_id_map)
		return 0;

	spin_lock_bh(&queue->lock);
	if (link_id_map == (u32)-1) {
		ret = queue->num_queued - queue->num_pending;
	} else {
		ret = 0;
		for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) {
			if (link_id_map & bit)
				ret += queue->link_map_cache[i];
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

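/* Append @skb to the queue: take an item from the free pool, stamp it with
 * a fresh packet_id and update the per-link counters.  If the queue gets
 * close to capacity, mark it overfull, stop the mac80211 queue and kick the
 * gc timer.
 */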
int cw1200_queue_put(struct cw1200_queue *queue,
		     struct sk_buff *skb,
		     struct cw1200_txpriv *txpriv)
{
	int ret = 0;
	struct cw1200_queue_stats *stats = queue->stats;

	if (txpriv->link_id >= queue->stats->map_capacity)
		return -EINVAL;

	spin_lock_bh(&queue->lock);
	if (!WARN_ON(list_empty(&queue->free_pool))) {
		struct cw1200_queue_item *item = list_first_entry(
			&queue->free_pool, struct cw1200_queue_item, head);
		BUG_ON(item->skb);

		list_move_tail(&item->head, &queue->queue);
		item->skb = skb;
		item->txpriv = *txpriv;
		item->generation = 0;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		item->queue_timestamp = jiffies;

		++queue->num_queued;
		++queue->link_map_cache[txpriv->link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[txpriv->link_id];
		spin_unlock_bh(&stats->lock);

		/* TX may happen in parallel sometimes.
		 * Leave extra queue slots so we don't overflow.
		 */
		if (queue->overfull == false &&
		    queue->num_queued >=
		    (queue->capacity - (num_present_cpus() - 1))) {
			queue->overfull = true;
			__cw1200_queue_lock(queue);
			mod_timer(&queue->gc, jiffies);
		}
	} else {
		ret = -ENOENT;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

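/* Fetch the first queued frame that belongs to one of the links in
 * @link_id_map, move it to the pending list and return pointers to its WSM
 * TX descriptor, mac80211 tx_info and driver txpriv.
 */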
int cw1200_queue_get(struct cw1200_queue *queue,
		     u32 link_id_map,
		     struct wsm_tx **tx,
		     struct ieee80211_tx_info **tx_info,
		     const struct cw1200_txpriv **txpriv)
{
	int ret = -ENOENT;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	bool wakeup_stats = false;

	spin_lock_bh(&queue->lock);
	list_for_each_entry(item, &queue->queue, head) {
		if (link_id_map & BIT(item->txpriv.link_id)) {
			ret = 0;
			break;
		}
	}

	if (!WARN_ON(ret)) {
		*tx = (struct wsm_tx *)item->skb->data;
		*tx_info = IEEE80211_SKB_CB(item->skb);
		*txpriv = &item->txpriv;
		(*tx)->packet_id = item->packet_id;
		list_move_tail(&item->head, &queue->pending);
		++queue->num_pending;
		--queue->link_map_cache[item->txpriv.link_id];
		item->xmit_timestamp = jiffies;

		spin_lock_bh(&stats->lock);
		--stats->num_queued;
		if (!--stats->link_map_cache[item->txpriv.link_id])
			wakeup_stats = true;
		spin_unlock_bh(&stats->lock);
	}
	spin_unlock_bh(&queue->lock);
	if (wakeup_stats)
		wake_up(&stats->wait_link_id_empty);
	return ret;
}

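/* Move the pending frame identified by @packet_id back onto the TX queue,
 * bumping the item generation so the old packet_id can no longer match.
 */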
int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		item->generation = ++item_generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue_generation,
							    queue_id,
							    item_generation,
							    item_id);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

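/* Move every pending frame back onto the TX queue, walking the pending list
 * in reverse so the original ordering is preserved.
 */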
int cw1200_queue_requeue_all(struct cw1200_queue *queue)
{
	struct cw1200_queue_item *item, *tmp;
	struct cw1200_queue_stats *stats = queue->stats;
	spin_lock_bh(&queue->lock);

	list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) {
		--queue->num_pending;
		++queue->link_map_cache[item->txpriv.link_id];

		spin_lock_bh(&stats->lock);
		++stats->num_queued;
		++stats->link_map_cache[item->txpriv.link_id];
		spin_unlock_bh(&stats->lock);

		++item->generation;
		item->packet_id = cw1200_queue_mk_packet_id(queue->generation,
							    queue->queue_id,
							    item->generation,
							    item - queue->pool);
		list_move(&item->head, &queue->queue);
	}
	spin_unlock_bh(&queue->lock);

	return 0;
}

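/* Release the pending frame identified by @packet_id (typically once the
 * firmware has confirmed it): return its item to the free pool, update the
 * counters and destroy the skb outside the lock.
 */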
int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	struct cw1200_queue_stats *stats = queue->stats;
	struct sk_buff *gc_skb = NULL;
	struct cw1200_txpriv gc_txpriv;

	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		gc_txpriv = item->txpriv;
		gc_skb = item->skb;
		item->skb = NULL;
		--queue->num_pending;
		--queue->num_queued;
		++queue->num_sent;
		++item->generation;
		/* Use list_move rather than list_move_tail: the just-freed
		 * item is still cache-hot and will be reused first.
		 */
		list_move(&item->head, &queue->free_pool);

		if (queue->overfull &&
		    (queue->num_queued <= (queue->capacity >> 1))) {
			queue->overfull = false;
			__cw1200_queue_unlock(queue);
		}
	}
	spin_unlock_bh(&queue->lock);

	if (gc_skb)
		stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv);

	return ret;
}

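/* Look up a frame by @packet_id and return its skb and txpriv without
 * changing the queue state.
 */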
int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id,
			 struct sk_buff **skb,
			 const struct cw1200_txpriv **txpriv)
{
	int ret = 0;
	u8 queue_generation, queue_id, item_generation, item_id;
	struct cw1200_queue_item *item;
	cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id,
			      &item_generation, &item_id);

	item = &queue->pool[item_id];

	spin_lock_bh(&queue->lock);
	BUG_ON(queue_id != queue->queue_id);
	if (queue_generation != queue->generation) {
		ret = -ENOENT;
	} else if (item_id >= (unsigned) queue->capacity) {
		WARN_ON(1);
		ret = -EINVAL;
	} else if (item->generation != item_generation) {
		WARN_ON(1);
		ret = -ENOENT;
	} else {
		*skb = item->skb;
		*txpriv = &item->txpriv;
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

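/* External entry points for the refcounted TX-queue lock; unlike the __
 * variants they acquire queue->lock themselves.
 */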
void cw1200_queue_lock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_lock(queue);
	spin_unlock_bh(&queue->lock);
}

void cw1200_queue_unlock(struct cw1200_queue *queue)
{
	spin_lock_bh(&queue->lock);
	__cw1200_queue_unlock(queue);
	spin_unlock_bh(&queue->lock);
}

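/* Report the oldest xmit timestamp among pending frames, skipping the frame
 * identified by @pending_frame_id.  *timestamp is only moved to an earlier
 * value, so callers can aggregate a minimum across several queues.
 * Returns true if the pending list is not empty.
 */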
bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue,
				     unsigned long *timestamp,
				     u32 pending_frame_id)
{
	struct cw1200_queue_item *item;
	bool ret;

	spin_lock_bh(&queue->lock);
	ret = !list_empty(&queue->pending);
	if (ret) {
		list_for_each_entry(item, &queue->pending, head) {
			if (item->packet_id != pending_frame_id)
				if (time_before(item->xmit_timestamp,
						*timestamp))
					*timestamp = item->xmit_timestamp;
		}
	}
	spin_unlock_bh(&queue->lock);
	return ret;
}

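/* Return true if no frames are queued for the links in @link_id_map;
 * a map of all ones checks the global counter instead.
 */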
bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats,
				 u32 link_id_map)
{
	bool empty = true;

	spin_lock_bh(&stats->lock);
	if (link_id_map == (u32)-1) {
		empty = stats->num_queued == 0;
	} else {
		int i;
		for (i = 0; i < stats->map_capacity; ++i) {
			if (link_id_map & BIT(i)) {
				if (stats->link_map_cache[i]) {
					empty = false;
					break;
				}
			}
		}
	}
	spin_unlock_bh(&stats->lock);

	return empty;
}