/*
	Copyright (C) 2004 - 2008 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2x00lib
	Abstract: rt2x00 queue specific routines.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>

#include "rt2x00.h"
#include "rt2x00lib.h"

struct sk_buff *rt2x00queue_alloc_rxskb(struct rt2x00_dev *rt2x00dev,
					struct queue_entry *entry)
{
	struct sk_buff *skb;
	struct skb_frame_desc *skbdesc;
	unsigned int frame_size;
	unsigned int head_size = 0;
	unsigned int tail_size = 0;

	/*
	 * The frame size includes the descriptor size, because the
	 * hardware receives the frame directly into the skbuffer.
	 */
	frame_size = entry->queue->data_size + entry->queue->desc_size;

	/*
	 * The payload should be aligned to a 4-byte boundary; this
	 * means we need at least 3 bytes for moving the frame
	 * into the correct offset.
	 */
	head_size = 4;

	/*
	 * For IV/EIV/ICV assembly we must make sure there are
	 * at least 8 bytes available in the headroom for the IV/EIV
	 * and 8 bytes of tailroom for the ICV data.
	 */
	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		head_size += 8;
		tail_size += 8;
	}

	/*
	 * Allocate skbuffer.
	 */
	skb = dev_alloc_skb(frame_size + head_size + tail_size);
	if (!skb)
		return NULL;

	/*
	 * Make sure we have a frame with the requested bytes
	 * available in the head and tail.
	 */
	skb_reserve(skb, head_size);
	skb_put(skb, frame_size);

	/*
	 * Populate skbdesc.
	 */
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;

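	/*
	 * Devices that require DMA map the buffer right away, so the
	 * hardware can DMA the received frame straight into this skb.
	 */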
	if (test_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags)) {
		skbdesc->skb_dma = dma_map_single(rt2x00dev->dev,
						  skb->data,
						  skb->len,
						  DMA_FROM_DEVICE);
		skbdesc->flags |= SKBDESC_DMA_MAPPED_RX;
	}

	return skb;
}

void rt2x00queue_map_txskb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	/*
	 * If the device has requested headroom, we should make sure it
	 * is also mapped for DMA so it can be used for transferring
	 * additional descriptor information to the hardware.
	 */
	skb_push(skb, rt2x00dev->hw->extra_tx_headroom);

	skbdesc->skb_dma =
	    dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_TO_DEVICE);

	/*
	 * Restore the data pointer to its original location.
	 */
	skb_pull(skb, rt2x00dev->hw->extra_tx_headroom);

	skbdesc->flags |= SKBDESC_DMA_MAPPED_TX;
}
EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb);

void rt2x00queue_unmap_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) {
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma, skb->len,
				 DMA_FROM_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX;
	}

	if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) {
		/*
		 * Add the headroom to the skb length; it has been removed
		 * by the driver, but it was included in the DMA mapping.
		 */
		dma_unmap_single(rt2x00dev->dev, skbdesc->skb_dma,
				 skb->len + rt2x00dev->hw->extra_tx_headroom,
				 DMA_TO_DEVICE);
		skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX;
	}
}

void rt2x00queue_free_skb(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb)
{
	if (!skb)
		return;

	rt2x00queue_unmap_skb(rt2x00dev, skb);
	dev_kfree_skb_any(skb);
}

static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
					     struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)entry->skb->data;
	struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0];
	struct ieee80211_rate *rate =
	    ieee80211_get_tx_rate(rt2x00dev->hw, tx_info);
	const struct rt2x00_rate *hwrate;
	unsigned int data_length;
	unsigned int duration;
	unsigned int residual;
	unsigned long irqflags;

	memset(txdesc, 0, sizeof(*txdesc));

	/*
	 * Initialize information from queue
	 */
	txdesc->queue = entry->queue->qid;
	txdesc->cw_min = entry->queue->cw_min;
	txdesc->cw_max = entry->queue->cw_max;
	txdesc->aifs = entry->queue->aifs;

	/* Data length + CRC */
	data_length = entry->skb->len + 4;

	/*
	 * Check whether this frame is to be acked.
	 */
	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK))
		__set_bit(ENTRY_TXD_ACK, &txdesc->flags);

	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags) &&
	    !entry->skb->do_not_encrypt) {
		/* Apply crypto specific descriptor information */
		rt2x00crypto_create_tx_descriptor(entry, txdesc);

		/*
		 * Extend frame length to include all encryption overhead
		 * that will be added by the hardware.
		 */
		data_length += rt2x00crypto_tx_overhead(tx_info);
	}

	/*
	 * Check if this is a RTS/CTS frame
	 */
	if (ieee80211_is_rts(hdr->frame_control) ||
	    ieee80211_is_cts(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		if (ieee80211_is_rts(hdr->frame_control))
			__set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags);
		else
			__set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags);
		if (tx_info->control.rts_cts_rate_idx >= 0)
			rate =
			    ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info);
	}

	/*
	 * Determine retry information.
	 */
	txdesc->retry_limit = tx_info->control.rates[0].count - 1;
	if (txdesc->retry_limit >= rt2x00dev->long_retry)
		__set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags);

	/*
	 * Check if more fragments are pending
	 */
	if (ieee80211_has_morefrags(hdr->frame_control)) {
		__set_bit(ENTRY_TXD_BURST, &txdesc->flags);
		__set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags);
	}

	/*
	 * Beacons and probe responses require the tsf timestamp
	 * to be inserted into the frame.
	 */
	if (ieee80211_is_beacon(hdr->frame_control) ||
	    ieee80211_is_probe_resp(hdr->frame_control))
		__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);

	/*
	 * Determine with what IFS priority this frame should be sent.
	 * Set ifs to IFS_SIFS when this is not the first fragment,
	 * or when this fragment came after RTS/CTS.
	 */
	if (test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) {
		txdesc->ifs = IFS_SIFS;
	} else if (tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) {
		__set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags);
		txdesc->ifs = IFS_BACKOFF;
	} else {
		txdesc->ifs = IFS_SIFS;
	}

	/*
	 * Hardware should insert sequence counter.
	 * FIXME: We insert a software sequence counter first for
	 * hardware that doesn't support hardware sequence counting.
	 *
	 * This is wrong because beacons are not getting sequence
	 * numbers assigned properly.
	 *
	 * A secondary problem exists for drivers that cannot toggle
	 * sequence counting per-frame, since those will override the
	 * sequence counter given by mac80211.
	 */
	if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		if (likely(tx_info->control.vif)) {
			struct rt2x00_intf *intf;

			intf = vif_to_intf(tx_info->control.vif);

			spin_lock_irqsave(&intf->seqlock, irqflags);

			if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
				intf->seqno += 0x10;
			hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
			hdr->seq_ctrl |= cpu_to_le16(intf->seqno);

			spin_unlock_irqrestore(&intf->seqlock, irqflags);

			__set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
		}
	}

	/*
	 * PLCP setup
	 * Length calculation depends on OFDM/CCK rate.
	 */
	hwrate = rt2x00_get_rate(rate->hw_value);
	txdesc->signal = hwrate->plcp;
	txdesc->service = 0x04;

	if (hwrate->flags & DEV_RATE_OFDM) {
		__set_bit(ENTRY_TXD_OFDM_RATE, &txdesc->flags);

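		/*
		 * For OFDM rates the PLCP length field holds the frame
		 * length in bytes, split over the length_high and
		 * length_low fields.
		 */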
		txdesc->length_high = (data_length >> 6) & 0x3f;
		txdesc->length_low = data_length & 0x3f;
	} else {
		/*
		 * Convert length to microseconds.
		 */
		residual = GET_DURATION_RES(data_length, hwrate->bitrate);
		duration = GET_DURATION(data_length, hwrate->bitrate);

		if (residual != 0) {
			duration++;

			/*
			 * Check if we need to set the Length Extension
			 */
			if (hwrate->bitrate == 110 && residual <= 30)
				txdesc->service |= 0x80;
		}

		txdesc->length_high = (duration >> 8) & 0xff;
		txdesc->length_low = duration & 0xff;

		/*
		 * When short preamble is enabled we should set the
		 * preamble bit for the signal.
		 */
		if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
			txdesc->signal |= 0x08;
	}
}

static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
					    struct txentry_desc *txdesc)
{
	struct data_queue *queue = entry->queue;
	struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;

	rt2x00dev->ops->lib->write_tx_desc(rt2x00dev, entry->skb, txdesc);

	/*
	 * All processing on the frame has been completed, this means
	 * it is now ready to be dumped to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TX, entry->skb);

	/*
	 * Check if we need to kick the queue, there are however a few rules
	 *	1) Don't kick the beacon queue
	 *	2) Don't kick unless this is the last frame in a burst.
	 *	   When the burst flag is set, this frame is always followed
	 *	   by another frame which is in some way related to it.
	 *	   This is true for fragments, RTS or CTS-to-self frames.
	 *	3) Rule 2 can be broken when the available entries
	 *	   in the queue are less than a certain threshold.
	 */
	if (entry->queue->qid == QID_BEACON)
		return;

	if (rt2x00queue_threshold(queue) ||
	    !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
		rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, queue->qid);
}

int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info;
	struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
	struct txentry_desc txdesc;
	struct skb_frame_desc *skbdesc;
	unsigned int iv_len = 0;
	u8 rate_idx, rate_flags;

	if (unlikely(rt2x00queue_full(queue)))
		return -ENOBUFS;

	if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) {
		ERROR(queue->rt2x00dev,
		      "Arrived at non-free entry in the non-full queue %d.\n"
		      "Please file bug report to %s.\n",
		      queue->qid, DRV_PROJECT);
		return -EINVAL;
	}

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	entry->skb = skb;
	rt2x00queue_create_tx_descriptor(entry, &txdesc);

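	/*
	 * When mac80211 provided a hardware crypto key, remember the
	 * IV length so the IV/EIV can be copied or stripped below.
	 */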
	if (IEEE80211_SKB_CB(skb)->control.hw_key != NULL)
		iv_len = IEEE80211_SKB_CB(skb)->control.hw_key->iv_len;

	/*
	 * All information is retrieved from the skb->cb array;
	 * now we should claim ownership of the driver part of that
	 * array, preserving the bitrate index and flags.
	 */
	tx_info = IEEE80211_SKB_CB(skb);
	rate_idx = tx_info->control.rates[0].idx;
	rate_flags = tx_info->control.rates[0].flags;
	skbdesc = get_skb_frame_desc(skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->entry = entry;
	skbdesc->tx_rate_idx = rate_idx;
	skbdesc->tx_rate_flags = rate_flags;

	/*
	 * When hardware encryption is supported, and this frame
	 * is to be encrypted, we should strip the IV/EIV data from
	 * the frame so we can provide it to the driver separately.
	 */
	if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) &&
	    !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) {
		if (test_bit(CONFIG_CRYPTO_COPY_IV, &queue->rt2x00dev->flags))
			rt2x00crypto_tx_copy_iv(skb, iv_len);
		else
			rt2x00crypto_tx_remove_iv(skb, iv_len);
	}

	/*
	 * It is possible that the queue was corrupted and this
	 * call fails. Since we always return NETDEV_TX_OK to mac80211,
	 * this frame will simply be dropped.
	 */
	if (unlikely(queue->rt2x00dev->ops->lib->write_tx_data(entry))) {
		clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
		entry->skb = NULL;
		return -EIO;
	}

	if (test_bit(DRIVER_REQUIRE_DMA, &queue->rt2x00dev->flags))
		rt2x00queue_map_txskb(queue->rt2x00dev, skb);

	set_bit(ENTRY_DATA_PENDING, &entry->flags);

	rt2x00queue_index_inc(queue, Q_INDEX);
	rt2x00queue_write_tx_descriptor(entry, &txdesc);

	return 0;
}

int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
			      struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);
	struct skb_frame_desc *skbdesc;
	struct txentry_desc txdesc;
	__le32 desc[16];

	if (unlikely(!intf->beacon))
		return -ENOBUFS;

	intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif);
	if (!intf->beacon->skb)
		return -ENOMEM;

	/*
	 * Copy all TX descriptor information into txdesc,
	 * after that we are free to use the skb->cb array
	 * for our information.
	 */
	rt2x00queue_create_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * For the descriptor we use a local array from where the
	 * driver can move it to the correct location required for
	 * the hardware.
	 */
	memset(desc, 0, sizeof(desc));

	/*
	 * Fill in skb descriptor
	 */
	skbdesc = get_skb_frame_desc(intf->beacon->skb);
	memset(skbdesc, 0, sizeof(*skbdesc));
	skbdesc->desc = desc;
	skbdesc->desc_len = intf->beacon->queue->desc_size;
	skbdesc->entry = intf->beacon;

	/*
	 * Write TX descriptor into reserved room in front of the beacon.
	 */
	rt2x00queue_write_tx_descriptor(intf->beacon, &txdesc);

	/*
	 * Send beacon to hardware.
	 * Also enable beacon generation, which might have been disabled
	 * by the driver during the config_beacon() callback function.
	 */
	rt2x00dev->ops->lib->write_beacon(intf->beacon);
	rt2x00dev->ops->lib->kick_tx_queue(rt2x00dev, QID_BEACON);

	return 0;
}

struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
					 const enum data_queue_qid queue)
{
	int atim = test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

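	/*
	 * TX queues are indexed directly; the beacon queue is bcn[0]
	 * and the optional ATIM queue is bcn[1].
	 */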
	if (queue < rt2x00dev->ops->tx_queues && rt2x00dev->tx)
		return &rt2x00dev->tx[queue];

	if (!rt2x00dev->bcn)
		return NULL;

	if (queue == QID_BEACON)
		return &rt2x00dev->bcn[0];
	else if (queue == QID_ATIM && atim)
		return &rt2x00dev->bcn[1];

	return NULL;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_queue);

struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
					  enum queue_index index)
{
	struct queue_entry *entry;
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Entry requested from invalid index type (%d)\n", index);
		return NULL;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	entry = &queue->entries[queue->index[index]];

	spin_unlock_irqrestore(&queue->lock, irqflags);

	return entry;
}
EXPORT_SYMBOL_GPL(rt2x00queue_get_entry);

void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
{
	unsigned long irqflags;

	if (unlikely(index >= Q_INDEX_MAX)) {
		ERROR(queue->rt2x00dev,
		      "Index change on invalid index type (%d)\n", index);
		return;
	}

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->index[index]++;
	if (queue->index[index] >= queue->limit)
		queue->index[index] = 0;

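	/*
	 * Advancing Q_INDEX means a new frame was added to the queue,
	 * advancing Q_INDEX_DONE means the device finished a frame,
	 * so the queue length shrinks and the total count grows.
	 */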
	if (index == Q_INDEX) {
		queue->length++;
	} else if (index == Q_INDEX_DONE) {
		queue->length--;
		queue->count++;
	}

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

static void rt2x00queue_reset(struct data_queue *queue)
{
	unsigned long irqflags;

	spin_lock_irqsave(&queue->lock, irqflags);

	queue->count = 0;
	queue->length = 0;
	memset(queue->index, 0, sizeof(queue->index));

	spin_unlock_irqrestore(&queue->lock, irqflags);
}

void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	unsigned int i;

	queue_for_each(rt2x00dev, queue) {
		rt2x00queue_reset(queue);

		for (i = 0; i < queue->limit; i++) {
			queue->entries[i].flags = 0;

			rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
		}
	}
}

static int rt2x00queue_alloc_entries(struct data_queue *queue,
				     const struct data_queue_desc *qdesc)
{
	struct queue_entry *entries;
	unsigned int entry_size;
	unsigned int i;

	rt2x00queue_reset(queue);

	queue->limit = qdesc->entry_num;
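	/* Use roughly 10% of the queue size as the kick threshold. */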
	queue->threshold = DIV_ROUND_UP(qdesc->entry_num, 10);
	queue->data_size = qdesc->data_size;
	queue->desc_size = qdesc->desc_size;

	/*
	 * Allocate all queue entries.
	 */
	entry_size = sizeof(*entries) + qdesc->priv_size;
	entries = kzalloc(queue->limit * entry_size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

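/*
 * The single allocation holds queue->limit entry structures followed by
 * queue->limit private data blocks; this macro computes the address of
 * the private data belonging to entry __index.
 */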
#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
	( ((char *)(__base)) + ((__limit) * (__esize)) + \
	    ((__index) * (__psize)) )

	for (i = 0; i < queue->limit; i++) {
		entries[i].flags = 0;
		entries[i].queue = queue;
		entries[i].skb = NULL;
		entries[i].entry_idx = i;
		entries[i].priv_data =
		    QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit,
					    sizeof(*entries), qdesc->priv_size);
	}

#undef QUEUE_ENTRY_PRIV_OFFSET

	queue->entries = entries;

	return 0;
}

static void rt2x00queue_free_skbs(struct rt2x00_dev *rt2x00dev,
				  struct data_queue *queue)
{
	unsigned int i;

	if (!queue->entries)
		return;

	for (i = 0; i < queue->limit; i++) {
		if (queue->entries[i].skb)
			rt2x00queue_free_skb(rt2x00dev, queue->entries[i].skb);
	}
}

static int rt2x00queue_alloc_rxskbs(struct rt2x00_dev *rt2x00dev,
				    struct data_queue *queue)
{
	unsigned int i;
	struct sk_buff *skb;

	for (i = 0; i < queue->limit; i++) {
		skb = rt2x00queue_alloc_rxskb(rt2x00dev, &queue->entries[i]);
		if (!skb)
			return -ENOMEM;
		queue->entries[i].skb = skb;
	}

	return 0;
}

int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	int status;

	status = rt2x00queue_alloc_entries(rt2x00dev->rx, rt2x00dev->ops->rx);
	if (status)
		goto exit;

	tx_queue_for_each(rt2x00dev, queue) {
		status = rt2x00queue_alloc_entries(queue, rt2x00dev->ops->tx);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_entries(rt2x00dev->bcn, rt2x00dev->ops->bcn);
	if (status)
		goto exit;

	if (test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags)) {
		status = rt2x00queue_alloc_entries(&rt2x00dev->bcn[1],
						   rt2x00dev->ops->atim);
		if (status)
			goto exit;
	}

	status = rt2x00queue_alloc_rxskbs(rt2x00dev, rt2x00dev->rx);
	if (status)
		goto exit;

	return 0;

exit:
	ERROR(rt2x00dev, "Queue entries allocation failed.\n");

	rt2x00queue_uninitialize(rt2x00dev);

	return status;
}

void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	rt2x00queue_free_skbs(rt2x00dev, rt2x00dev->rx);

	queue_for_each(rt2x00dev, queue) {
		kfree(queue->entries);
		queue->entries = NULL;
	}
}

static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
			     struct data_queue *queue, enum data_queue_qid qid)
{
	spin_lock_init(&queue->lock);

	queue->rt2x00dev = rt2x00dev;
	queue->qid = qid;
	queue->txop = 0;
	queue->aifs = 2;
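	/* cw_min/cw_max are stored as exponents: 2^5 = 32, 2^10 = 1024. */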
	queue->cw_min = 5;
	queue->cw_max = 10;
}

int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	enum data_queue_qid qid;
	unsigned int req_atim =
	    !!test_bit(DRIVER_REQUIRE_ATIM_QUEUE, &rt2x00dev->flags);

	/*
	 * We need the following queues:
	 * RX: 1
	 * TX: ops->tx_queues
	 * Beacon: 1
	 * Atim: 1 (if required)
	 */
	rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim;

	queue = kzalloc(rt2x00dev->data_queues * sizeof(*queue), GFP_KERNEL);
	if (!queue) {
		ERROR(rt2x00dev, "Queue allocation failed.\n");
		return -ENOMEM;
	}

	/*
	 * Initialize pointers
	 */
	rt2x00dev->rx = queue;
	rt2x00dev->tx = &queue[1];
	rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues];

	/*
	 * Initialize queue parameters.
	 * RX: qid = QID_RX
	 * TX: qid = QID_AC_BE + index
	 * TX: cw_min: 2^5 = 32.
	 * TX: cw_max: 2^10 = 1024.
	 * BCN: qid = QID_BEACON
	 * ATIM: qid = QID_ATIM
	 */
	rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);

	qid = QID_AC_BE;
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_init(rt2x00dev, queue, qid++);

	rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[0], QID_BEACON);
	if (req_atim)
		rt2x00queue_init(rt2x00dev, &rt2x00dev->bcn[1], QID_ATIM);

	return 0;
}

void rt2x00queue_free(struct rt2x00_dev *rt2x00dev)
{
	kfree(rt2x00dev->rx);
	rt2x00dev->rx = NULL;
	rt2x00dev->tx = NULL;
	rt2x00dev->bcn = NULL;
}