/*

  Broadcom B43legacy wireless driver

  PIO Transmission

  Copyright (c) 2005 Michael Buesch <mb@bu3sch.de>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43legacy.h"
#include "pio.h"
#include "main.h"
#include "xmit.h"

#include <linux/delay.h>


static void tx_start(struct b43legacy_pioqueue *queue)
{
	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
			    B43legacy_PIO_TXCTL_INIT);
}

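/* Push a single data octet into the TX FIFO. Cores that need the PIO
 * workarounds expect the data before the WRITELO control bit is set;
 * newer cores expect the control bit first. */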
static void tx_octet(struct b43legacy_pioqueue *queue,
		     u8 octet)
{
	if (queue->need_workarounds) {
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
				    B43legacy_PIO_TXCTL_WRITELO);
	} else {
		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
				    B43legacy_PIO_TXCTL_WRITELO);
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
	}
}

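/* Fetch the next 16-bit little-endian word from the concatenation of
 * the TX header (txhdr_size bytes) and the packet data. *pos indexes
 * that combined byte stream and is advanced by two. */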
static u16 tx_get_next_word(const u8 *txhdr,
			    const u8 *packet,
			    size_t txhdr_size,
			    unsigned int *pos)
{
	const u8 *source;
	unsigned int i = *pos;
	u16 ret;

	if (i < txhdr_size)
		source = txhdr;
	else {
		source = packet;
		i -= txhdr_size;
	}
	ret = le16_to_cpu(*((__le16 *)(source + i)));
	*pos += 2;

	return ret;
}

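/* Write the frame (firmware TX header followed by the 802.11 packet)
 * into the TX FIFO one 16-bit word at a time. On workaround cores the
 * first word goes out before WRITELO|WRITEHI is set. A trailing odd
 * octet is handled separately via tx_octet(). */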
static void tx_data(struct b43legacy_pioqueue *queue,
		    u8 *txhdr,
		    const u8 *packet,
		    unsigned int octets)
{
	u16 data;
	unsigned int i = 0;

	if (queue->need_workarounds) {
		data = tx_get_next_word(txhdr, packet,
					sizeof(struct b43legacy_txhdr_fw3), &i);
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data);
	}
	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
			    B43legacy_PIO_TXCTL_WRITELO |
			    B43legacy_PIO_TXCTL_WRITEHI);
	while (i < octets - 1) {
		data = tx_get_next_word(txhdr, packet,
					sizeof(struct b43legacy_txhdr_fw3), &i);
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data);
	}
	if (octets % 2)
		tx_octet(queue, packet[octets -
			 sizeof(struct b43legacy_txhdr_fw3) - 1]);
}

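/* Finish the FIFO transfer: on workaround cores write the last payload
 * octet, then set the COMPLETE bit so the device transmits the frame. */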
static void tx_complete(struct b43legacy_pioqueue *queue,
			struct sk_buff *skb)
{
	if (queue->need_workarounds) {
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA,
				    skb->data[skb->len - 1]);
		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
				    B43legacy_PIO_TXCTL_WRITELO |
				    B43legacy_PIO_TXCTL_COMPLETE);
	} else
		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
				    B43legacy_PIO_TXCTL_COMPLETE);
}

static u16 generate_cookie(struct b43legacy_pioqueue *queue,
			   struct b43legacy_pio_txpacket *packet)
{
	u16 cookie = 0x0000;
	int packetindex;

	/* We use the upper 4 bits for the PIO
	 * controller ID and the lower 12 bits
	 * for the packet index (in the cache).
	 */
	switch (queue->mmio_base) {
	case B43legacy_MMIO_PIO1_BASE:
		break;
	case B43legacy_MMIO_PIO2_BASE:
		cookie = 0x1000;
		break;
	case B43legacy_MMIO_PIO3_BASE:
		cookie = 0x2000;
		break;
	case B43legacy_MMIO_PIO4_BASE:
		cookie = 0x3000;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	packetindex = pio_txpacket_getindex(packet);
	B43legacy_WARN_ON(!(((u16)packetindex & 0xF000) == 0x0000));
	cookie |= (u16)packetindex;

	return cookie;
}

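/* Reverse of generate_cookie(): map a TX status cookie back to the
 * originating PIO queue and the packet slot in its cache. */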
static
struct b43legacy_pioqueue *parse_cookie(struct b43legacy_wldev *dev,
					u16 cookie,
					struct b43legacy_pio_txpacket **packet)
{
	struct b43legacy_pio *pio = &dev->pio;
	struct b43legacy_pioqueue *queue = NULL;
	int packetindex;

	switch (cookie & 0xF000) {
	case 0x0000:
		queue = pio->queue0;
		break;
	case 0x1000:
		queue = pio->queue1;
		break;
	case 0x2000:
		queue = pio->queue2;
		break;
	case 0x3000:
		queue = pio->queue3;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	packetindex = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(packetindex >= 0 && packetindex
			  < B43legacy_PIO_MAXTXPACKETS));
	*packet = &(queue->tx_packets_cache[packetindex]);

	return queue;
}

union txhdr_union {
	struct b43legacy_txhdr_fw3 txhdr_fw3;
};

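/* Build the firmware TX header for skb and feed the header plus the
 * payload through the PIO FIFO in one go. */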
static int pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
				  struct sk_buff *skb,
				  struct b43legacy_pio_txpacket *packet,
				  size_t txhdr_size)
{
	union txhdr_union txhdr_data;
	u8 *txhdr = NULL;
	unsigned int octets;
	int err;

	txhdr = (u8 *)(&txhdr_data.txhdr_fw3);

	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
	err = b43legacy_generate_txhdr(queue->dev,
				 txhdr, skb->data, skb->len,
				 IEEE80211_SKB_CB(skb),
				 generate_cookie(queue, packet));
	if (err)
		return err;

	tx_start(queue);
	octets = skb->len + txhdr_size;
	if (queue->need_workarounds)
		octets--;
	tx_data(queue, txhdr, (u8 *)skb->data, octets);
	tx_complete(queue, skb);

	return 0;
}

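/* Free the attached skb (if any) and return the packet descriptor to
 * the queue's free list. */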
static void free_txpacket(struct b43legacy_pio_txpacket *packet,
			  int irq_context)
{
	struct b43legacy_pioqueue *queue = packet->queue;

	if (packet->skb) {
		if (irq_context)
			dev_kfree_skb_irq(packet->skb);
		else
			dev_kfree_skb(packet->skb);
	}
	list_move(&packet->list, &queue->txfree);
	queue->nr_txfree++;
}

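/* Try to hand one queued packet to the device. Returns -EBUSY if the
 * device TX queue has no room, so the TX tasklet can retry later. */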
static int pio_tx_packet(struct b43legacy_pio_txpacket *packet)
{
	struct b43legacy_pioqueue *queue = packet->queue;
	struct sk_buff *skb = packet->skb;
	u16 octets;
	int err;

	octets = (u16)skb->len + sizeof(struct b43legacy_txhdr_fw3);
	if (queue->tx_devq_size < octets) {
		b43legacywarn(queue->dev->wl, "PIO queue too small. "
			"Dropping packet.\n");
		/* Drop it silently (return success) */
		free_txpacket(packet, 1);
		return 0;
	}
	B43legacy_WARN_ON(queue->tx_devq_packets >
			  B43legacy_PIO_MAXTXDEVQPACKETS);
	B43legacy_WARN_ON(queue->tx_devq_used > queue->tx_devq_size);
	/* Check if there is sufficient free space on the device
	 * TX queue. If not, return and let the TX tasklet
	 * retry later.
	 */
	if (queue->tx_devq_packets == B43legacy_PIO_MAXTXDEVQPACKETS)
		return -EBUSY;
	if (queue->tx_devq_used + octets > queue->tx_devq_size)
		return -EBUSY;
	/* Now poke the device. */
	err = pio_tx_write_fragment(queue, skb, packet,
			      sizeof(struct b43legacy_txhdr_fw3));
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		free_txpacket(packet, 1);
		return 0;
	}

	/* Account for the packet size.
	 * (We must not overflow the device TX queue)
	 */
	queue->tx_devq_packets++;
	queue->tx_devq_used += octets;

	/* Transmission started, everything ok, move the
	 * packet to the txrunning list.
	 */
	list_move_tail(&packet->list, &queue->txrunning);

	return 0;
}

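/* TX tasklet: drain the software txqueue into the device for as long as
 * the device queue has room and the queue is neither frozen nor
 * suspended. */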
static void tx_tasklet(unsigned long d)
{
	struct b43legacy_pioqueue *queue = (struct b43legacy_pioqueue *)d;
	struct b43legacy_wldev *dev = queue->dev;
	unsigned long flags;
	struct b43legacy_pio_txpacket *packet, *tmp_packet;
	int err;
	u16 txctl;

	spin_lock_irqsave(&dev->wl->irq_lock, flags);
	if (queue->tx_frozen)
		goto out_unlock;
	txctl = b43legacy_pio_read(queue, B43legacy_PIO_TXCTL);
	if (txctl & B43legacy_PIO_TXCTL_SUSPEND)
		goto out_unlock;

	list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) {
		/* Try to transmit the packet. This can fail, if
		 * the device queue is full. In case of failure, the
		 * packet is left in the txqueue.
		 * If transmission succeeds, the packet is moved to txrunning.
		 * If it is impossible to transmit the packet, it
		 * is dropped.
		 */
		err = pio_tx_packet(packet);
		if (err)
			break;
	}
out_unlock:
	spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
}

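/* Initialize the packet cache: link every descriptor to its queue and
 * put it on the free list. */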
static void setup_txqueues(struct b43legacy_pioqueue *queue)
{
	struct b43legacy_pio_txpacket *packet;
	int i;

	queue->nr_txfree = B43legacy_PIO_MAXTXPACKETS;
	for (i = 0; i < B43legacy_PIO_MAXTXPACKETS; i++) {
		packet = &(queue->tx_packets_cache[i]);

		packet->queue = queue;
		INIT_LIST_HEAD(&packet->list);

		list_add(&packet->list, &queue->txfree);
	}
}

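/* Allocate and set up one PIO queue at the given MMIO base: clear the
 * MACCTL big-endian bit, read and adjust the device TX buffer size and
 * initialize the packet lists. Returns NULL on failure. */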
static
struct b43legacy_pioqueue *b43legacy_setup_pioqueue(struct b43legacy_wldev *dev,
						    u16 pio_mmio_base)
{
	struct b43legacy_pioqueue *queue;
	u32 value;
	u16 qsize;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		goto out;

	queue->dev = dev;
	queue->mmio_base = pio_mmio_base;
	queue->need_workarounds = (dev->dev->id.revision < 3);

	INIT_LIST_HEAD(&queue->txfree);
	INIT_LIST_HEAD(&queue->txqueue);
	INIT_LIST_HEAD(&queue->txrunning);
	tasklet_init(&queue->txtask, tx_tasklet,
		     (unsigned long)queue);

	value = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
	value &= ~B43legacy_MACCTL_BE;
	b43legacy_write32(dev, B43legacy_MMIO_MACCTL, value);

	qsize = b43legacy_read16(dev, queue->mmio_base
				 + B43legacy_PIO_TXQBUFSIZE);
	if (qsize == 0) {
		b43legacyerr(dev->wl, "This card does not support PIO "
		       "operation mode. Please use DMA mode "
		       "(module parameter pio=0).\n");
		goto err_freequeue;
	}
	if (qsize <= B43legacy_PIO_TXQADJUST) {
		b43legacyerr(dev->wl, "PIO tx device-queue too small (%u)\n",
		       qsize);
		goto err_freequeue;
	}
	qsize -= B43legacy_PIO_TXQADJUST;
	queue->tx_devq_size = qsize;

	setup_txqueues(queue);

out:
	return queue;

err_freequeue:
	kfree(queue);
	queue = NULL;
	goto out;
}

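/* Drop every packet that is queued or already handed to the device. */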
static void cancel_transfers(struct b43legacy_pioqueue *queue)
{
	struct b43legacy_pio_txpacket *packet, *tmp_packet;

	tasklet_disable(&queue->txtask);

	list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list)
		free_txpacket(packet, 0);
	list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list)
		free_txpacket(packet, 0);
}

static void b43legacy_destroy_pioqueue(struct b43legacy_pioqueue *queue)
{
	if (!queue)
		return;

	cancel_transfers(queue);
	kfree(queue);
}

void b43legacy_pio_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_pio *pio;

	if (!b43legacy_using_pio(dev))
		return;
	pio = &dev->pio;

	b43legacy_destroy_pioqueue(pio->queue3);
	pio->queue3 = NULL;
	b43legacy_destroy_pioqueue(pio->queue2);
	pio->queue2 = NULL;
	b43legacy_destroy_pioqueue(pio->queue1);
	pio->queue1 = NULL;
	b43legacy_destroy_pioqueue(pio->queue0);
	pio->queue0 = NULL;
}

int b43legacy_pio_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_pio *pio = &dev->pio;
	struct b43legacy_pioqueue *queue;
	int err = -ENOMEM;

	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO1_BASE);
	if (!queue)
		goto out;
	pio->queue0 = queue;

	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO2_BASE);
	if (!queue)
		goto err_destroy0;
	pio->queue1 = queue;

	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO3_BASE);
	if (!queue)
		goto err_destroy1;
	pio->queue2 = queue;

	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO4_BASE);
	if (!queue)
		goto err_destroy2;
	pio->queue3 = queue;

	if (dev->dev->id.revision < 3)
		dev->irq_savedstate |= B43legacy_IRQ_PIO_WORKAROUND;

	b43legacydbg(dev->wl, "PIO initialized\n");
	err = 0;
out:
	return err;

err_destroy2:
	b43legacy_destroy_pioqueue(pio->queue2);
	pio->queue2 = NULL;
err_destroy1:
	b43legacy_destroy_pioqueue(pio->queue1);
	pio->queue1 = NULL;
err_destroy0:
	b43legacy_destroy_pioqueue(pio->queue0);
	pio->queue0 = NULL;
	goto out;
}

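/* TX entry point: take a descriptor from the free list, attach the skb,
 * move it to the software txqueue and let the tasklet do the device I/O. */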
int b43legacy_pio_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_pioqueue *queue = dev->pio.queue1;
	struct b43legacy_pio_txpacket *packet;

	B43legacy_WARN_ON(queue->tx_suspended);
	B43legacy_WARN_ON(list_empty(&queue->txfree));

	packet = list_entry(queue->txfree.next, struct b43legacy_pio_txpacket,
			    list);
	packet->skb = skb;

	list_move_tail(&packet->list, &queue->txqueue);
	queue->nr_txfree--;
	queue->nr_tx_packets++;
	B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS);

	tasklet_schedule(&queue->txtask);

	return 0;
}

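/* Handle a TX status report: give the device-queue space back, report
 * ACK and retry counts to mac80211 and recycle the packet slot. */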
void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
				   const struct b43legacy_txstatus *status)
{
	struct b43legacy_pioqueue *queue;
	struct b43legacy_pio_txpacket *packet;
	struct ieee80211_tx_info *info;
	int retry_limit;

	queue = parse_cookie(dev, status->cookie, &packet);
	B43legacy_WARN_ON(!queue);

	if (!packet->skb)
		return;

	queue->tx_devq_packets--;
	queue->tx_devq_used -= (packet->skb->len +
				sizeof(struct b43legacy_txhdr_fw3));

	info = IEEE80211_SKB_CB(packet->skb);

	/* Preserve the configured retry limit before clearing the status.
	 * The xmit function has overwritten the rc's value with the actual
	 * retry limit done by the hardware. */
	retry_limit = info->status.rates[0].count;
	ieee80211_tx_info_clear_status(info);

	if (status->acked)
		info->flags |= IEEE80211_TX_STAT_ACK;

	if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
		/*
		 * If the short retries (RTS, not data frame) have exceeded
		 * the limit, the hw will not have tried the selected rate,
		 * but will have used the fallback rate instead.
		 * Don't let the rate control count attempts for the selected
		 * rate in this case, otherwise the statistics will be off.
		 */
		info->status.rates[0].count = 0;
		info->status.rates[1].count = status->frame_count;
	} else {
		if (status->frame_count > retry_limit) {
			info->status.rates[0].count = retry_limit;
			info->status.rates[1].count = status->frame_count -
					retry_limit;

		} else {
			info->status.rates[0].count = status->frame_count;
			info->status.rates[1].idx = -1;
		}
	}
	ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb);
	packet->skb = NULL;

	free_txpacket(packet, 1);
	/* If there are packets on the txqueue, poke the tasklet
	 * to transmit them.
	 */
	if (!list_empty(&queue->txqueue))
		tasklet_schedule(&queue->txtask);
}

void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
				struct ieee80211_tx_queue_stats *stats)
{
	struct b43legacy_pio *pio = &dev->pio;
	struct b43legacy_pioqueue *queue;

	queue = pio->queue1;
	stats[0].len = B43legacy_PIO_MAXTXPACKETS - queue->nr_txfree;
	stats[0].limit = B43legacy_PIO_MAXTXPACKETS;
	stats[0].count = queue->nr_tx_packets;
}

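/* Report an RX error, re-arm the RX engine and, if requested, flush the
 * RX FIFO with dummy reads. */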
static void pio_rx_error(struct b43legacy_pioqueue *queue,
			 int clear_buffers,
			 const char *error)
{
	int i;

	b43legacyerr(queue->dev->wl, "PIO RX error: %s\n", error);
	b43legacy_pio_write(queue, B43legacy_PIO_RXCTL,
			    B43legacy_PIO_RXCTL_READY);
	if (clear_buffers) {
		B43legacy_WARN_ON(queue->mmio_base != B43legacy_MMIO_PIO1_BASE);
		for (i = 0; i < 15; i++) {
			/* Dummy read. */
			b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
		}
	}
}

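/* Pull one received frame out of the RX FIFO, word by word. On the PIO4
 * queue the "frame" is a hardware TX status report and is handed to
 * b43legacy_handle_hwtxstatus() instead. */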
void b43legacy_pio_rx(struct b43legacy_pioqueue *queue)
{
	__le16 preamble[21] = { 0 };
	struct b43legacy_rxhdr_fw3 *rxhdr;
	u16 tmp;
	u16 len;
	u16 macstat;
	int i;
	int preamble_readwords;
	struct sk_buff *skb;

	tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL);
	if (!(tmp & B43legacy_PIO_RXCTL_DATAAVAILABLE))
		return;
	b43legacy_pio_write(queue, B43legacy_PIO_RXCTL,
			    B43legacy_PIO_RXCTL_DATAAVAILABLE);

	for (i = 0; i < 10; i++) {
		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL);
		if (tmp & B43legacy_PIO_RXCTL_READY)
			goto data_ready;
		udelay(10);
	}
	b43legacydbg(queue->dev->wl, "PIO RX timed out\n");
	return;
data_ready:

	len = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
	if (unlikely(len > 0x700)) {
		pio_rx_error(queue, 0, "len > 0x700");
		return;
	}
	if (unlikely(len == 0 && queue->mmio_base !=
		     B43legacy_MMIO_PIO4_BASE)) {
		pio_rx_error(queue, 0, "len == 0");
		return;
	}
	preamble[0] = cpu_to_le16(len);
	if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE)
		preamble_readwords = 14 / sizeof(u16);
	else
		preamble_readwords = 18 / sizeof(u16);
	for (i = 0; i < preamble_readwords; i++) {
		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
		preamble[i + 1] = cpu_to_le16(tmp);
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)preamble;
	macstat = le16_to_cpu(rxhdr->mac_status);
	if (macstat & B43legacy_RX_MAC_FCSERR) {
		pio_rx_error(queue,
			     (queue->mmio_base == B43legacy_MMIO_PIO1_BASE),
			     "Frame FCS error");
		return;
	}
	if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw;

		hw = (struct b43legacy_hwtxstatus *)(preamble + 1);
		b43legacy_handle_hwtxstatus(queue->dev, hw);

		return;
	}

	skb = dev_alloc_skb(len);
	if (unlikely(!skb)) {
		pio_rx_error(queue, 1, "OOM");
		return;
	}
	skb_put(skb, len);
	for (i = 0; i < len - 1; i += 2) {
		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
		*((__le16 *)(skb->data + i)) = cpu_to_le16(tmp);
	}
	if (len % 2) {
		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
		skb->data[len - 1] = (tmp & 0x00FF);
	}
	b43legacy_rx(queue->dev, skb, rxhdr);
}

void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue)
{
	b43legacy_power_saving_ctl_bits(queue->dev, -1, 1);
	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
			    b43legacy_pio_read(queue, B43legacy_PIO_TXCTL)
			    | B43legacy_PIO_TXCTL_SUSPEND);
}

void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue)
{
	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
			    b43legacy_pio_read(queue, B43legacy_PIO_TXCTL)
			    & ~B43legacy_PIO_TXCTL_SUSPEND);
	b43legacy_power_saving_ctl_bits(queue->dev, -1, -1);
	tasklet_schedule(&queue->txtask);
}

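/* Mark all TX queues frozen so the TX tasklet stops feeding packets to
 * the device. */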
void b43legacy_pio_freeze_txqueues(struct b43legacy_wldev *dev)
{
	struct b43legacy_pio *pio;

	B43legacy_WARN_ON(!b43legacy_using_pio(dev));
	pio = &dev->pio;
	pio->queue0->tx_frozen = 1;
	pio->queue1->tx_frozen = 1;
	pio->queue2->tx_frozen = 1;
	pio->queue3->tx_frozen = 1;
}

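/* Unfreeze all TX queues and kick the tasklet for any queue that still
 * has packets pending. */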
void b43legacy_pio_thaw_txqueues(struct b43legacy_wldev *dev)
{
	struct b43legacy_pio *pio;

	B43legacy_WARN_ON(!b43legacy_using_pio(dev));
	pio = &dev->pio;
	pio->queue0->tx_frozen = 0;
	pio->queue1->tx_frozen = 0;
	pio->queue2->tx_frozen = 0;
	pio->queue3->tx_frozen = 0;
	if (!list_empty(&pio->queue0->txqueue))
		tasklet_schedule(&pio->queue0->txtask);
	if (!list_empty(&pio->queue1->txqueue))
		tasklet_schedule(&pio->queue1->txtask);
	if (!list_empty(&pio->queue2->txqueue))
		tasklet_schedule(&pio->queue2->txtask);
	if (!list_empty(&pio->queue3->txqueue))
		tasklet_schedule(&pio->queue3->txtask);
}