/*

  Broadcom B43 wireless driver

  PIO data transfer

  Copyright (c) 2005-2008 Michael Buesch <m@bues.ch>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/
#include "b43.h"
#include "pio.h"
#include "dma.h"
#include "main.h"
#include "xmit.h"

#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>

static u16 generate_cookie(struct b43_pio_txqueue *q,
			   struct b43_pio_txpacket *pack)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * PIO controller ID and store the packet index number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)q->index + 1) << 12);
	cookie |= pack->index;

	return cookie;
}

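/* Inverse of generate_cookie(): map a TX status cookie back to its
 * queue and packet slot. For example, queue index 2 (AC_VI) and packet
 * slot 5 yield cookie 0x3005, which the switch below resolves again. */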
static
struct b43_pio_txqueue *parse_cookie(struct b43_wldev *dev,
				     u16 cookie,
				     struct b43_pio_txpacket **pack)
{
	struct b43_pio *pio = &dev->pio;
	struct b43_pio_txqueue *q = NULL;
	unsigned int pack_index;

	switch (cookie & 0xF000) {
	case 0x1000:
		q = pio->tx_queue_AC_BK;
		break;
	case 0x2000:
		q = pio->tx_queue_AC_BE;
		break;
	case 0x3000:
		q = pio->tx_queue_AC_VI;
		break;
	case 0x4000:
		q = pio->tx_queue_AC_VO;
		break;
	case 0x5000:
		q = pio->tx_queue_mcast;
		break;
	}
	if (B43_WARN_ON(!q))
		return NULL;
	pack_index = (cookie & 0x0FFF);
	if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets)))
		return NULL;
	*pack = &q->packets[pack_index];

	return q;
}

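/* Look up the MMIO base address of the PIO engine with the given index.
 * Core revisions >= 11 use a different (and shorter) set of base
 * registers than older cores. */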
static u16 index_to_pioqueue_base(struct b43_wldev *dev,
				  unsigned int index)
{
	static const u16 bases[] = {
		B43_MMIO_PIO_BASE0,
		B43_MMIO_PIO_BASE1,
		B43_MMIO_PIO_BASE2,
		B43_MMIO_PIO_BASE3,
		B43_MMIO_PIO_BASE4,
		B43_MMIO_PIO_BASE5,
		B43_MMIO_PIO_BASE6,
		B43_MMIO_PIO_BASE7,
	};
	static const u16 bases_rev11[] = {
		B43_MMIO_PIO11_BASE0,
		B43_MMIO_PIO11_BASE1,
		B43_MMIO_PIO11_BASE2,
		B43_MMIO_PIO11_BASE3,
		B43_MMIO_PIO11_BASE4,
		B43_MMIO_PIO11_BASE5,
	};

	if (dev->dev->core_rev >= 11) {
		B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11));
		return bases_rev11[index];
	}
	B43_WARN_ON(index >= ARRAY_SIZE(bases));
	return bases[index];
}

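/* Offsets of the TX and RX register sets within a PIO engine's MMIO
 * block; core revisions >= 11 lay these out differently. */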
static u16 pio_txqueue_offset(struct b43_wldev *dev)
{
	if (dev->dev->core_rev >= 11)
		return 0x18;
	return 0;
}

static u16 pio_rxqueue_offset(struct b43_wldev *dev)
{
	if (dev->dev->core_rev >= 11)
		return 0x38;
	return 8;
}

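/* Allocate and initialize one TX queue: compute its MMIO base,
 * determine its FIFO buffer size (hard-coded for rev >= 8, read from
 * the hardware otherwise) and build the list of free packet slots. */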
static struct b43_pio_txqueue *b43_setup_pioqueue_tx(struct b43_wldev *dev,
						     unsigned int index)
{
	struct b43_pio_txqueue *q;
	struct b43_pio_txpacket *p;
	unsigned int i;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;
	q->dev = dev;
	q->rev = dev->dev->core_rev;
	q->mmio_base = index_to_pioqueue_base(dev, index) +
		       pio_txqueue_offset(dev);
	q->index = index;

	q->free_packet_slots = B43_PIO_MAX_NR_TXPACKETS;
	if (q->rev >= 8) {
		q->buffer_size = 1920; //FIXME this constant is wrong.
	} else {
		q->buffer_size = b43_piotx_read16(q, B43_PIO_TXQBUFSIZE);
		q->buffer_size -= 80;
	}

	INIT_LIST_HEAD(&q->packets_list);
	for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
		p = &(q->packets[i]);
		INIT_LIST_HEAD(&p->list);
		p->index = i;
		p->queue = q;
		list_add(&p->list, &q->packets_list);
	}

	return q;
}

static struct b43_pio_rxqueue *b43_setup_pioqueue_rx(struct b43_wldev *dev,
						     unsigned int index)
{
	struct b43_pio_rxqueue *q;

	q = kzalloc(sizeof(*q), GFP_KERNEL);
	if (!q)
		return NULL;
	q->dev = dev;
	q->rev = dev->dev->core_rev;
	q->mmio_base = index_to_pioqueue_base(dev, index) +
		       pio_rxqueue_offset(dev);

	/* Enable Direct FIFO RX (PIO) on the engine. */
	b43_dma_direct_fifo_rx(dev, index, 1);

	return q;
}

static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
{
	struct b43_pio_txpacket *pack;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
		pack = &(q->packets[i]);
		if (pack->skb) {
			ieee80211_free_txskb(q->dev->wl->hw, pack->skb);
			pack->skb = NULL;
		}
	}
}

static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q,
				    const char *name)
{
	if (!q)
		return;
	b43_pio_cancel_tx_packets(q);
	kfree(q);
}

static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q,
				    const char *name)
{
	if (!q)
		return;
	kfree(q);
}

#define destroy_queue_tx(pio, queue) do {				\
	b43_destroy_pioqueue_tx((pio)->queue, __stringify(queue));	\
	(pio)->queue = NULL;						\
  } while (0)

#define destroy_queue_rx(pio, queue) do {				\
	b43_destroy_pioqueue_rx((pio)->queue, __stringify(queue));	\
	(pio)->queue = NULL;						\
  } while (0)

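/* Free all PIO queues. Teardown runs in the reverse order of
 * b43_pio_init(): the RX queue first, then the TX queues from
 * mcast back down to AC_BK. */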
void b43_pio_free(struct b43_wldev *dev)
{
	struct b43_pio *pio;

	if (!b43_using_pio_transfers(dev))
		return;
	pio = &dev->pio;

	destroy_queue_rx(pio, rx_queue);
	destroy_queue_tx(pio, tx_queue_mcast);
	destroy_queue_tx(pio, tx_queue_AC_VO);
	destroy_queue_tx(pio, tx_queue_AC_VI);
	destroy_queue_tx(pio, tx_queue_AC_BE);
	destroy_queue_tx(pio, tx_queue_AC_BK);
}

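/* Set up PIO transfers: clear the B43_MACCTL_BE bit in the MAC control
 * register, zero the B43_SHM_SH_RXPADOFF shared-memory word, then
 * allocate the four AC TX queues, the multicast TX queue and the
 * RX queue. */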
int b43_pio_init(struct b43_wldev *dev)
{
	struct b43_pio *pio = &dev->pio;
	int err = -ENOMEM;

	b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
		    & ~B43_MACCTL_BE);
	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RXPADOFF, 0);

	pio->tx_queue_AC_BK = b43_setup_pioqueue_tx(dev, 0);
	if (!pio->tx_queue_AC_BK)
		goto out;

	pio->tx_queue_AC_BE = b43_setup_pioqueue_tx(dev, 1);
	if (!pio->tx_queue_AC_BE)
		goto err_destroy_bk;

	pio->tx_queue_AC_VI = b43_setup_pioqueue_tx(dev, 2);
	if (!pio->tx_queue_AC_VI)
		goto err_destroy_be;

	pio->tx_queue_AC_VO = b43_setup_pioqueue_tx(dev, 3);
	if (!pio->tx_queue_AC_VO)
		goto err_destroy_vi;

	pio->tx_queue_mcast = b43_setup_pioqueue_tx(dev, 4);
	if (!pio->tx_queue_mcast)
		goto err_destroy_vo;

	pio->rx_queue = b43_setup_pioqueue_rx(dev, 0);
	if (!pio->rx_queue)
		goto err_destroy_mcast;

	b43dbg(dev->wl, "PIO initialized\n");
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_queue_tx(pio, tx_queue_mcast);
err_destroy_vo:
	destroy_queue_tx(pio, tx_queue_AC_VO);
err_destroy_vi:
	destroy_queue_tx(pio, tx_queue_AC_VI);
err_destroy_be:
	destroy_queue_tx(pio, tx_queue_AC_BE);
err_destroy_bk:
	destroy_queue_tx(pio, tx_queue_AC_BK);
	return err;
}

/* Static mapping of mac80211's queues (priorities) to b43 PIO queues. */
static struct b43_pio_txqueue *select_queue_by_priority(struct b43_wldev *dev,
							u8 queue_prio)
{
	struct b43_pio_txqueue *q;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			q = dev->pio.tx_queue_AC_VO;
			break;
		case 1:
			q = dev->pio.tx_queue_AC_VI;
			break;
		case 2:
			q = dev->pio.tx_queue_AC_BE;
			break;
		case 3:
			q = dev->pio.tx_queue_AC_BK;
			break;
		}
	} else
		q = dev->pio.tx_queue_AC_BE;

	return q;
}

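/* Push a data block into a 2-byte (pre core rev 8) TX FIFO. Data is
 * written 16 bits at a time; an odd trailing byte is sent through the
 * tailspace buffer with the WRITEHI control bit cleared. */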
static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
				u16 ctl,
				const void *_data,
				unsigned int data_len)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	const u8 *data = _data;

	ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI;
	b43_piotx_write16(q, B43_PIO_TXCTL, ctl);

	b43_block_write(dev, data, (data_len & ~1),
			q->mmio_base + B43_PIO_TXDATA,
			sizeof(u16));
	if (data_len & 1) {
		u8 *tail = wl->pio_tailspace;
		BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);

		/* Write the last byte. */
		ctl &= ~B43_PIO_TXCTL_WRITEHI;
		b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
		tail[0] = data[data_len - 1];
		tail[1] = 0;
		b43_block_write(dev, tail, 2,
				q->mmio_base + B43_PIO_TXDATA,
				sizeof(u16));
	}

	return ctl;
}

static void pio_tx_frame_2byte_queue(struct b43_pio_txpacket *pack,
				     const u8 *hdr, unsigned int hdrlen)
{
	struct b43_pio_txqueue *q = pack->queue;
	const char *frame = pack->skb->data;
	unsigned int frame_len = pack->skb->len;
	u16 ctl;

	ctl = b43_piotx_read16(q, B43_PIO_TXCTL);
	ctl |= B43_PIO_TXCTL_FREADY;
	ctl &= ~B43_PIO_TXCTL_EOF;

	/* Transfer the header data. */
	ctl = tx_write_2byte_queue(q, ctl, hdr, hdrlen);
	/* Transfer the frame data. */
	ctl = tx_write_2byte_queue(q, ctl, frame, frame_len);

	ctl |= B43_PIO_TXCTL_EOF;
	b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
}

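/* Push a data block into a 4-byte (core rev >= 8) TX FIFO. Data is
 * written 32 bits at a time; the 1 to 3 trailing bytes of an unaligned
 * block are sent through the tailspace buffer, with the per-byte
 * enable bits in the control word masked accordingly. */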
static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
				u32 ctl,
				const void *_data,
				unsigned int data_len)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	const u8 *data = _data;

	ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 |
	       B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31;
	b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);

	b43_block_write(dev, data, (data_len & ~3),
			q->mmio_base + B43_PIO8_TXDATA,
			sizeof(u32));
	if (data_len & 3) {
		u8 *tail = wl->pio_tailspace;
		BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);

		memset(tail, 0, 4);
		/* Write the last few bytes. */
		ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
			 B43_PIO8_TXCTL_24_31);
		switch (data_len & 3) {
		case 3:
			ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
			tail[0] = data[data_len - 3];
			tail[1] = data[data_len - 2];
			tail[2] = data[data_len - 1];
			break;
		case 2:
			ctl |= B43_PIO8_TXCTL_8_15;
			tail[0] = data[data_len - 2];
			tail[1] = data[data_len - 1];
			break;
		case 1:
			tail[0] = data[data_len - 1];
			break;
		}
		b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
		b43_block_write(dev, tail, 4,
				q->mmio_base + B43_PIO8_TXDATA,
				sizeof(u32));
	}

	return ctl;
}

static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack,
				     const u8 *hdr, unsigned int hdrlen)
{
	struct b43_pio_txqueue *q = pack->queue;
	const char *frame = pack->skb->data;
	unsigned int frame_len = pack->skb->len;
	u32 ctl;

	ctl = b43_piotx_read32(q, B43_PIO8_TXCTL);
	ctl |= B43_PIO8_TXCTL_FREADY;
	ctl &= ~B43_PIO8_TXCTL_EOF;

	/* Transfer the header data. */
	ctl = tx_write_4byte_queue(q, ctl, hdr, hdrlen);
	/* Transfer the frame data. */
	ctl = tx_write_4byte_queue(q, ctl, frame, frame_len);

	ctl |= B43_PIO8_TXCTL_EOF;
	b43_piotx_write32(q, B43_PIO_TXCTL, ctl);
}

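/* Transmit one frame on a TX queue: take the first free packet slot,
 * build the device TX header into the scratchspace buffer, push header
 * and frame into the FIFO and account for the consumed buffer space. */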
static int pio_tx_frame(struct b43_pio_txqueue *q,
			struct sk_buff *skb)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	struct b43_pio_txpacket *pack;
	u16 cookie;
	int err;
	unsigned int hdrlen;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_txhdr *txhdr = (struct b43_txhdr *)wl->pio_scratchspace;

	B43_WARN_ON(list_empty(&q->packets_list));
	pack = list_entry(q->packets_list.next,
			  struct b43_pio_txpacket, list);

	cookie = generate_cookie(q, pack);
	hdrlen = b43_txhdr_size(dev);
	BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(struct b43_txhdr));
	B43_WARN_ON(sizeof(wl->pio_scratchspace) < hdrlen);
	err = b43_generate_txhdr(dev, (u8 *)txhdr, skb,
				 info, cookie);
	if (err)
		return err;

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}

	pack->skb = skb;
	if (q->rev >= 8)
		pio_tx_frame_4byte_queue(pack, (const u8 *)txhdr, hdrlen);
	else
		pio_tx_frame_2byte_queue(pack, (const u8 *)txhdr, hdrlen);

	/* Remove it from the list of available packet slots.
	 * It will be put back when we receive the status report. */
	list_del(&pack->list);

	/* Update the queue statistics. */
	q->buffer_used += roundup(skb->len + hdrlen, 4);
	q->free_packet_slots -= 1;

	return 0;
}

int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_pio_txqueue *q;
	struct ieee80211_hdr *hdr;
	unsigned int hdrlen, total_len;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast queue will be sent after the DTIM. */
		q = dev->pio.tx_queue_mcast;
		/* Set the frame More-Data bit. Ucode will clear it
		 * for us on the last frame. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
	}

	hdrlen = b43_txhdr_size(dev);
	total_len = roundup(skb->len + hdrlen, 4);

	if (unlikely(total_len > q->buffer_size)) {
		err = -ENOBUFS;
		b43dbg(dev->wl, "PIO: TX packet longer than queue.\n");
		goto out;
	}
	if (unlikely(q->free_packet_slots == 0)) {
		err = -ENOBUFS;
		b43warn(dev->wl, "PIO: TX packet overflow.\n");
		goto out;
	}
	B43_WARN_ON(q->buffer_used > q->buffer_size);

	if (total_len > (q->buffer_size - q->buffer_used)) {
		/* Not enough memory on the queue. */
		err = -EBUSY;
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		q->stopped = true;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The mac80211-queue to b43-queue
	 * mapping is static, so we don't need to store it per frame. */
	q->queue_prio = skb_get_queue_mapping(skb);

	err = pio_tx_frame(q, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		ieee80211_free_txskb(dev->wl->hw, skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "PIO transmission failure\n");
		goto out;
	}

	B43_WARN_ON(q->buffer_used > q->buffer_size);
	if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
	    (q->free_packet_slots == 0)) {
		/* The queue is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		q->stopped = true;
	}

out:
	return err;
}

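/* Handle a TX status report from the firmware: return the packet slot
 * and its buffer space to the queue, hand the skb back to mac80211 and
 * wake the corresponding mac80211 queue if we had stopped it. */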
void b43_pio_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	struct b43_pio_txqueue *q;
	struct b43_pio_txpacket *pack = NULL;
	unsigned int total_len;
	struct ieee80211_tx_info *info;

	q = parse_cookie(dev, status->cookie, &pack);
	if (unlikely(!q))
		return;
	B43_WARN_ON(!pack);

	info = IEEE80211_SKB_CB(pack->skb);

	b43_fill_txstatus_report(dev, info, status);

	total_len = pack->skb->len + b43_txhdr_size(dev);
	total_len = roundup(total_len, 4);
	q->buffer_used -= total_len;
	q->free_packet_slots += 1;

	ieee80211_tx_status(dev->wl->hw, pack->skb);
	pack->skb = NULL;
	list_add(&pack->list, &q->packets_list);

	if (q->stopped) {
		ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
		q->stopped = false;
	}
}

/* Returns whether we should fetch another frame. */
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	u16 len;
	u32 macstat = 0;
	unsigned int i, padding;
	struct sk_buff *skb;
	const char *err_msg = NULL;
	struct b43_rxhdr_fw4 *rxhdr =
		(struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
	size_t rxhdr_size = sizeof(*rxhdr);

	BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
	switch (dev->fw.hdr_format) {
	case B43_FW_HDR_410:
	case B43_FW_HDR_351:
		rxhdr_size -= sizeof(rxhdr->format_598) -
			sizeof(rxhdr->format_351);
		break;
	case B43_FW_HDR_598:
		break;
	}
	memset(rxhdr, 0, rxhdr_size);

	/* Check if we have data and wait for it to get ready. */
	if (q->rev >= 8) {
		u32 ctl;

		ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
		if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write32(q, B43_PIO8_RXCTL,
				  B43_PIO8_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
			if (ctl & B43_PIO8_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	} else {
		u16 ctl;

		ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
		if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write16(q, B43_PIO_RXCTL,
				  B43_PIO_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
			if (ctl & B43_PIO_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	}
	b43dbg(q->dev->wl, "PIO RX timed out\n");
	return 1;
data_ready:

	/* Get the preamble (RX header) */
	if (q->rev >= 8) {
		b43_block_read(dev, rxhdr, rxhdr_size,
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
	} else {
		b43_block_read(dev, rxhdr, rxhdr_size,
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
	}
	/* Sanity checks. */
	len = le16_to_cpu(rxhdr->frame_len);
	if (unlikely(len > 0x700)) {
		err_msg = "len > 0x700";
		goto rx_error;
	}
	if (unlikely(len == 0)) {
		err_msg = "len == 0";
		goto rx_error;
	}

	switch (dev->fw.hdr_format) {
	case B43_FW_HDR_598:
		macstat = le32_to_cpu(rxhdr->format_598.mac_status);
		break;
	case B43_FW_HDR_410:
	case B43_FW_HDR_351:
		macstat = le32_to_cpu(rxhdr->format_351.mac_status);
		break;
	}
	if (macstat & B43_RX_MAC_FCSERR) {
		if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
			/* Drop frames with failed FCS. */
			err_msg = "Frame FCS error";
			goto rx_error;
		}
	}

	/* We always pad 2 bytes, as that's what upstream code expects
	 * due to the RX-header being 30 bytes. In case the frame is
	 * unaligned, we pad another 2 bytes. */
	padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0;
	skb = dev_alloc_skb(len + padding + 2);
	if (unlikely(!skb)) {
		err_msg = "Out of memory";
		goto rx_error;
	}
	skb_reserve(skb, 2);
	skb_put(skb, len + padding);
	if (q->rev >= 8) {
		b43_block_read(dev, skb->data + padding, (len & ~3),
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
		if (len & 3) {
			u8 *tail = wl->pio_tailspace;
			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);

			/* Read the last few bytes. */
			b43_block_read(dev, tail, 4,
				       q->mmio_base + B43_PIO8_RXDATA,
				       sizeof(u32));
			switch (len & 3) {
			case 3:
				skb->data[len + padding - 3] = tail[0];
				skb->data[len + padding - 2] = tail[1];
				skb->data[len + padding - 1] = tail[2];
				break;
			case 2:
				skb->data[len + padding - 2] = tail[0];
				skb->data[len + padding - 1] = tail[1];
				break;
			case 1:
				skb->data[len + padding - 1] = tail[0];
				break;
			}
		}
	} else {
		b43_block_read(dev, skb->data + padding, (len & ~1),
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
		if (len & 1) {
			u8 *tail = wl->pio_tailspace;
			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);

			/* Read the last byte. */
			b43_block_read(dev, tail, 2,
				       q->mmio_base + B43_PIO_RXDATA,
				       sizeof(u16));
			skb->data[len + padding - 1] = tail[0];
		}
	}

	b43_rx(q->dev, skb, rxhdr);

	return 1;

rx_error:
	if (err_msg)
		b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
	if (q->rev >= 8)
		b43_piorx_write32(q, B43_PIO8_RXCTL, B43_PIO8_RXCTL_DATARDY);
	else
		b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);

	return 1;
}

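/* Drain the RX FIFO: keep fetching frames until none is ready. The
 * WARN_ON_ONCE() cap bounds the loop in case the hardware keeps
 * reporting ready frames forever. */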
void b43_pio_rx(struct b43_pio_rxqueue *q)
{
	unsigned int count = 0;
	bool stop;

	while (1) {
		stop = (pio_rx_frame(q) == 0);
		if (stop)
			break;
		cond_resched();
		if (WARN_ON_ONCE(++count > 10000))
			break;
	}
}

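/* Suspend or resume a single TX queue by setting or clearing the
 * SUSPREQ bit in its control register (32-bit register on core
 * rev >= 8, 16-bit otherwise). */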
static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
{
	if (q->rev >= 8) {
		b43_piotx_write32(q, B43_PIO8_TXCTL,
				  b43_piotx_read32(q, B43_PIO8_TXCTL)
				  | B43_PIO8_TXCTL_SUSPREQ);
	} else {
		b43_piotx_write16(q, B43_PIO_TXCTL,
				  b43_piotx_read16(q, B43_PIO_TXCTL)
				  | B43_PIO_TXCTL_SUSPREQ);
	}
}

static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
{
	if (q->rev >= 8) {
		b43_piotx_write32(q, B43_PIO8_TXCTL,
				  b43_piotx_read32(q, B43_PIO8_TXCTL)
				  & ~B43_PIO8_TXCTL_SUSPREQ);
	} else {
		b43_piotx_write16(q, B43_PIO_TXCTL,
				  b43_piotx_read16(q, B43_PIO_TXCTL)
				  & ~B43_PIO_TXCTL_SUSPREQ);
	}
}

void b43_pio_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BK);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BE);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VI);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VO);
	b43_pio_tx_suspend_queue(dev->pio.tx_queue_mcast);
}

void b43_pio_tx_resume(struct b43_wldev *dev)
{
	b43_pio_tx_resume_queue(dev->pio.tx_queue_mcast);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VO);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VI);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BE);
	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}