• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2 
3   Broadcom B43 wireless driver
4 
5   PIO data transfer
6 
7   Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de>
8 
9   This program is free software; you can redistribute it and/or modify
10   it under the terms of the GNU General Public License as published by
11   the Free Software Foundation; either version 2 of the License, or
12   (at your option) any later version.
13 
14   This program is distributed in the hope that it will be useful,
15   but WITHOUT ANY WARRANTY; without even the implied warranty of
16   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17   GNU General Public License for more details.
18 
19   You should have received a copy of the GNU General Public License
20   along with this program; see the file COPYING.  If not, write to
21   the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
22   Boston, MA 02110-1301, USA.
23 
24 */
25 
26 #include "b43.h"
27 #include "pio.h"
28 #include "dma.h"
29 #include "main.h"
30 #include "xmit.h"
31 
32 #include <linux/delay.h>
33 
34 
35 static void b43_pio_rx_work(struct work_struct *work);
36 
37 
generate_cookie(struct b43_pio_txqueue * q,struct b43_pio_txpacket * pack)38 static u16 generate_cookie(struct b43_pio_txqueue *q,
39 			   struct b43_pio_txpacket *pack)
40 {
41 	u16 cookie;
42 
43 	/* Use the upper 4 bits of the cookie as
44 	 * PIO controller ID and store the packet index number
45 	 * in the lower 12 bits.
46 	 * Note that the cookie must never be 0, as this
47 	 * is a special value used in RX path.
48 	 * It can also not be 0xFFFF because that is special
49 	 * for multicast frames.
50 	 */
51 	cookie = (((u16)q->index + 1) << 12);
52 	cookie |= pack->index;
53 
54 	return cookie;
55 }
56 
57 static
parse_cookie(struct b43_wldev * dev,u16 cookie,struct b43_pio_txpacket ** pack)58 struct b43_pio_txqueue * parse_cookie(struct b43_wldev *dev,
59 				      u16 cookie,
60 				      struct b43_pio_txpacket **pack)
61 {
62 	struct b43_pio *pio = &dev->pio;
63 	struct b43_pio_txqueue *q = NULL;
64 	unsigned int pack_index;
65 
66 	switch (cookie & 0xF000) {
67 	case 0x1000:
68 		q = pio->tx_queue_AC_BK;
69 		break;
70 	case 0x2000:
71 		q = pio->tx_queue_AC_BE;
72 		break;
73 	case 0x3000:
74 		q = pio->tx_queue_AC_VI;
75 		break;
76 	case 0x4000:
77 		q = pio->tx_queue_AC_VO;
78 		break;
79 	case 0x5000:
80 		q = pio->tx_queue_mcast;
81 		break;
82 	}
83 	if (B43_WARN_ON(!q))
84 		return NULL;
85 	pack_index = (cookie & 0x0FFF);
86 	if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets)))
87 		return NULL;
88 	*pack = &q->packets[pack_index];
89 
90 	return q;
91 }
92 
index_to_pioqueue_base(struct b43_wldev * dev,unsigned int index)93 static u16 index_to_pioqueue_base(struct b43_wldev *dev,
94 				  unsigned int index)
95 {
96 	static const u16 bases[] = {
97 		B43_MMIO_PIO_BASE0,
98 		B43_MMIO_PIO_BASE1,
99 		B43_MMIO_PIO_BASE2,
100 		B43_MMIO_PIO_BASE3,
101 		B43_MMIO_PIO_BASE4,
102 		B43_MMIO_PIO_BASE5,
103 		B43_MMIO_PIO_BASE6,
104 		B43_MMIO_PIO_BASE7,
105 	};
106 	static const u16 bases_rev11[] = {
107 		B43_MMIO_PIO11_BASE0,
108 		B43_MMIO_PIO11_BASE1,
109 		B43_MMIO_PIO11_BASE2,
110 		B43_MMIO_PIO11_BASE3,
111 		B43_MMIO_PIO11_BASE4,
112 		B43_MMIO_PIO11_BASE5,
113 	};
114 
115 	if (dev->dev->id.revision >= 11) {
116 		B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11));
117 		return bases_rev11[index];
118 	}
119 	B43_WARN_ON(index >= ARRAY_SIZE(bases));
120 	return bases[index];
121 }
122 
pio_txqueue_offset(struct b43_wldev * dev)123 static u16 pio_txqueue_offset(struct b43_wldev *dev)
124 {
125 	if (dev->dev->id.revision >= 11)
126 		return 0x18;
127 	return 0;
128 }
129 
pio_rxqueue_offset(struct b43_wldev * dev)130 static u16 pio_rxqueue_offset(struct b43_wldev *dev)
131 {
132 	if (dev->dev->id.revision >= 11)
133 		return 0x38;
134 	return 8;
135 }
136 
b43_setup_pioqueue_tx(struct b43_wldev * dev,unsigned int index)137 static struct b43_pio_txqueue * b43_setup_pioqueue_tx(struct b43_wldev *dev,
138 						      unsigned int index)
139 {
140 	struct b43_pio_txqueue *q;
141 	struct b43_pio_txpacket *p;
142 	unsigned int i;
143 
144 	q = kzalloc(sizeof(*q), GFP_KERNEL);
145 	if (!q)
146 		return NULL;
147 	spin_lock_init(&q->lock);
148 	q->dev = dev;
149 	q->rev = dev->dev->id.revision;
150 	q->mmio_base = index_to_pioqueue_base(dev, index) +
151 		       pio_txqueue_offset(dev);
152 	q->index = index;
153 
154 	q->free_packet_slots = B43_PIO_MAX_NR_TXPACKETS;
155 	if (q->rev >= 8) {
156 		q->buffer_size = 1920; //FIXME this constant is wrong.
157 	} else {
158 		q->buffer_size = b43_piotx_read16(q, B43_PIO_TXQBUFSIZE);
159 		q->buffer_size -= 80;
160 	}
161 
162 	INIT_LIST_HEAD(&q->packets_list);
163 	for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
164 		p = &(q->packets[i]);
165 		INIT_LIST_HEAD(&p->list);
166 		p->index = i;
167 		p->queue = q;
168 		list_add(&p->list, &q->packets_list);
169 	}
170 
171 	return q;
172 }
173 
b43_setup_pioqueue_rx(struct b43_wldev * dev,unsigned int index)174 static struct b43_pio_rxqueue * b43_setup_pioqueue_rx(struct b43_wldev *dev,
175 						      unsigned int index)
176 {
177 	struct b43_pio_rxqueue *q;
178 
179 	q = kzalloc(sizeof(*q), GFP_KERNEL);
180 	if (!q)
181 		return NULL;
182 	spin_lock_init(&q->lock);
183 	q->dev = dev;
184 	q->rev = dev->dev->id.revision;
185 	q->mmio_base = index_to_pioqueue_base(dev, index) +
186 		       pio_rxqueue_offset(dev);
187 	INIT_WORK(&q->rx_work, b43_pio_rx_work);
188 
189 	/* Enable Direct FIFO RX (PIO) on the engine. */
190 	b43_dma_direct_fifo_rx(dev, index, 1);
191 
192 	return q;
193 }
194 
b43_pio_cancel_tx_packets(struct b43_pio_txqueue * q)195 static void b43_pio_cancel_tx_packets(struct b43_pio_txqueue *q)
196 {
197 	struct b43_pio_txpacket *pack;
198 	unsigned int i;
199 
200 	for (i = 0; i < ARRAY_SIZE(q->packets); i++) {
201 		pack = &(q->packets[i]);
202 		if (pack->skb) {
203 			dev_kfree_skb_any(pack->skb);
204 			pack->skb = NULL;
205 		}
206 	}
207 }
208 
/* Tear down a TX queue: drop any pending skbs, then free the queue.
 * A NULL queue is silently ignored. The name is for diagnostics only. */
static void b43_destroy_pioqueue_tx(struct b43_pio_txqueue *q,
				    const char *name)
{
	if (!q)
		return;
	b43_pio_cancel_tx_packets(q);
	kfree(q);
}
217 
/* Tear down an RX queue. A NULL queue is silently ignored.
 * The name is for diagnostics only. */
static void b43_destroy_pioqueue_rx(struct b43_pio_rxqueue *q,
				    const char *name)
{
	if (!q)
		return;
	kfree(q);
}
225 
/* Destroy one queue member of struct b43_pio (passing its field name
 * as a diagnostic string) and clear the pointer afterwards. */
#define destroy_queue_tx(pio, queue) do {				\
	b43_destroy_pioqueue_tx((pio)->queue, __stringify(queue));	\
	(pio)->queue = NULL;						\
  } while (0)

#define destroy_queue_rx(pio, queue) do {				\
	b43_destroy_pioqueue_rx((pio)->queue, __stringify(queue));	\
	(pio)->queue = NULL;						\
  } while (0)
235 
b43_pio_free(struct b43_wldev * dev)236 void b43_pio_free(struct b43_wldev *dev)
237 {
238 	struct b43_pio *pio;
239 
240 	if (!b43_using_pio_transfers(dev))
241 		return;
242 	pio = &dev->pio;
243 
244 	destroy_queue_rx(pio, rx_queue);
245 	destroy_queue_tx(pio, tx_queue_mcast);
246 	destroy_queue_tx(pio, tx_queue_AC_VO);
247 	destroy_queue_tx(pio, tx_queue_AC_VI);
248 	destroy_queue_tx(pio, tx_queue_AC_BE);
249 	destroy_queue_tx(pio, tx_queue_AC_BK);
250 }
251 
b43_pio_stop(struct b43_wldev * dev)252 void b43_pio_stop(struct b43_wldev *dev)
253 {
254 	if (!b43_using_pio_transfers(dev))
255 		return;
256 	cancel_work_sync(&dev->pio.rx_queue->rx_work);
257 }
258 
b43_pio_init(struct b43_wldev * dev)259 int b43_pio_init(struct b43_wldev *dev)
260 {
261 	struct b43_pio *pio = &dev->pio;
262 	int err = -ENOMEM;
263 
264 	b43_write32(dev, B43_MMIO_MACCTL, b43_read32(dev, B43_MMIO_MACCTL)
265 		    & ~B43_MACCTL_BE);
266 	b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_RXPADOFF, 0);
267 
268 	pio->tx_queue_AC_BK = b43_setup_pioqueue_tx(dev, 0);
269 	if (!pio->tx_queue_AC_BK)
270 		goto out;
271 
272 	pio->tx_queue_AC_BE = b43_setup_pioqueue_tx(dev, 1);
273 	if (!pio->tx_queue_AC_BE)
274 		goto err_destroy_bk;
275 
276 	pio->tx_queue_AC_VI = b43_setup_pioqueue_tx(dev, 2);
277 	if (!pio->tx_queue_AC_VI)
278 		goto err_destroy_be;
279 
280 	pio->tx_queue_AC_VO = b43_setup_pioqueue_tx(dev, 3);
281 	if (!pio->tx_queue_AC_VO)
282 		goto err_destroy_vi;
283 
284 	pio->tx_queue_mcast = b43_setup_pioqueue_tx(dev, 4);
285 	if (!pio->tx_queue_mcast)
286 		goto err_destroy_vo;
287 
288 	pio->rx_queue = b43_setup_pioqueue_rx(dev, 0);
289 	if (!pio->rx_queue)
290 		goto err_destroy_mcast;
291 
292 	b43dbg(dev->wl, "PIO initialized\n");
293 	err = 0;
294 out:
295 	return err;
296 
297 err_destroy_mcast:
298 	destroy_queue_tx(pio, tx_queue_mcast);
299 err_destroy_vo:
300 	destroy_queue_tx(pio, tx_queue_AC_VO);
301 err_destroy_vi:
302 	destroy_queue_tx(pio, tx_queue_AC_VI);
303 err_destroy_be:
304 	destroy_queue_tx(pio, tx_queue_AC_BE);
305 err_destroy_bk:
306 	destroy_queue_tx(pio, tx_queue_AC_BK);
307 	return err;
308 }
309 
310 /* Static mapping of mac80211's queues (priorities) to b43 PIO queues. */
select_queue_by_priority(struct b43_wldev * dev,u8 queue_prio)311 static struct b43_pio_txqueue * select_queue_by_priority(struct b43_wldev *dev,
312 							 u8 queue_prio)
313 {
314 	struct b43_pio_txqueue *q;
315 
316 	if (b43_modparam_qos) {
317 		/* 0 = highest priority */
318 		switch (queue_prio) {
319 		default:
320 			B43_WARN_ON(1);
321 			/* fallthrough */
322 		case 0:
323 			q = dev->pio.tx_queue_AC_VO;
324 			break;
325 		case 1:
326 			q = dev->pio.tx_queue_AC_VI;
327 			break;
328 		case 2:
329 			q = dev->pio.tx_queue_AC_BE;
330 			break;
331 		case 3:
332 			q = dev->pio.tx_queue_AC_BK;
333 			break;
334 		}
335 	} else
336 		q = dev->pio.tx_queue_AC_BE;
337 
338 	return q;
339 }
340 
/* Push data into a 2-byte-wide TX queue and return the updated control
 * word. An odd trailing byte is written with only the low byte lane
 * enabled. */
static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
				u16 ctl,
				const void *_data,
				unsigned int data_len)
{
	struct b43_wldev *dev = q->dev;
	const u8 *buf = _data;

	/* Enable both byte lanes and burst out the even-sized part. */
	ctl |= B43_PIO_TXCTL_WRITELO | B43_PIO_TXCTL_WRITEHI;
	b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
	ssb_block_write(dev->dev, buf, (data_len & ~1),
			q->mmio_base + B43_PIO_TXDATA,
			sizeof(u16));

	if (data_len & 1) {
		/* Write the last byte through the low lane only. */
		ctl &= ~B43_PIO_TXCTL_WRITEHI;
		b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
		b43_piotx_write16(q, B43_PIO_TXDATA, buf[data_len - 1]);
	}

	return ctl;
}
364 
pio_tx_frame_2byte_queue(struct b43_pio_txpacket * pack,const u8 * hdr,unsigned int hdrlen)365 static void pio_tx_frame_2byte_queue(struct b43_pio_txpacket *pack,
366 				     const u8 *hdr, unsigned int hdrlen)
367 {
368 	struct b43_pio_txqueue *q = pack->queue;
369 	const char *frame = pack->skb->data;
370 	unsigned int frame_len = pack->skb->len;
371 	u16 ctl;
372 
373 	ctl = b43_piotx_read16(q, B43_PIO_TXCTL);
374 	ctl |= B43_PIO_TXCTL_FREADY;
375 	ctl &= ~B43_PIO_TXCTL_EOF;
376 
377 	/* Transfer the header data. */
378 	ctl = tx_write_2byte_queue(q, ctl, hdr, hdrlen);
379 	/* Transfer the frame data. */
380 	ctl = tx_write_2byte_queue(q, ctl, frame, frame_len);
381 
382 	ctl |= B43_PIO_TXCTL_EOF;
383 	b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
384 }
385 
/* Push data into a 4-byte-wide TX queue and return the updated control
 * word. The 1-3 trailing bytes are packed into a single final word
 * write with only the matching byte lanes enabled (lowest address in
 * bits 0-7). */
static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
				u32 ctl,
				const void *_data,
				unsigned int data_len)
{
	struct b43_wldev *dev = q->dev;
	const u8 *buf = _data;

	/* Enable all four byte lanes and burst out the aligned part. */
	ctl |= B43_PIO8_TXCTL_0_7 | B43_PIO8_TXCTL_8_15 |
	       B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_24_31;
	b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
	ssb_block_write(dev->dev, buf, (data_len & ~3),
			q->mmio_base + B43_PIO8_TXDATA,
			sizeof(u32));

	if (data_len & 3) {
		u32 word = 0;

		/* Pack the trailing bytes, walking backwards from the
		 * last byte so the lowest address ends up in bits 0-7. */
		ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
			 B43_PIO8_TXCTL_24_31);
		buf = &buf[data_len - 1];
		switch (data_len & 3) {
		case 3:
			ctl |= B43_PIO8_TXCTL_16_23;
			word |= (u32)(*buf) << 16;
			buf--;
			/* fall through */
		case 2:
			ctl |= B43_PIO8_TXCTL_8_15;
			word |= (u32)(*buf) << 8;
			buf--;
			/* fall through */
		case 1:
			word |= (u32)(*buf);
		}
		b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
		b43_piotx_write32(q, B43_PIO8_TXDATA, word);
	}

	return ctl;
}
426 
pio_tx_frame_4byte_queue(struct b43_pio_txpacket * pack,const u8 * hdr,unsigned int hdrlen)427 static void pio_tx_frame_4byte_queue(struct b43_pio_txpacket *pack,
428 				     const u8 *hdr, unsigned int hdrlen)
429 {
430 	struct b43_pio_txqueue *q = pack->queue;
431 	const char *frame = pack->skb->data;
432 	unsigned int frame_len = pack->skb->len;
433 	u32 ctl;
434 
435 	ctl = b43_piotx_read32(q, B43_PIO8_TXCTL);
436 	ctl |= B43_PIO8_TXCTL_FREADY;
437 	ctl &= ~B43_PIO8_TXCTL_EOF;
438 
439 	/* Transfer the header data. */
440 	ctl = tx_write_4byte_queue(q, ctl, hdr, hdrlen);
441 	/* Transfer the frame data. */
442 	ctl = tx_write_4byte_queue(q, ctl, frame, frame_len);
443 
444 	ctl |= B43_PIO8_TXCTL_EOF;
445 	b43_piotx_write32(q, B43_PIO_TXCTL, ctl);
446 }
447 
pio_tx_frame(struct b43_pio_txqueue * q,struct sk_buff * skb)448 static int pio_tx_frame(struct b43_pio_txqueue *q,
449 			struct sk_buff *skb)
450 {
451 	struct b43_pio_txpacket *pack;
452 	struct b43_txhdr txhdr;
453 	u16 cookie;
454 	int err;
455 	unsigned int hdrlen;
456 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
457 
458 	B43_WARN_ON(list_empty(&q->packets_list));
459 	pack = list_entry(q->packets_list.next,
460 			  struct b43_pio_txpacket, list);
461 
462 	cookie = generate_cookie(q, pack);
463 	hdrlen = b43_txhdr_size(q->dev);
464 	err = b43_generate_txhdr(q->dev, (u8 *)&txhdr, skb->data,
465 				 skb->len, info, cookie);
466 	if (err)
467 		return err;
468 
469 	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
470 		/* Tell the firmware about the cookie of the last
471 		 * mcast frame, so it can clear the more-data bit in it. */
472 		b43_shm_write16(q->dev, B43_SHM_SHARED,
473 				B43_SHM_SH_MCASTCOOKIE, cookie);
474 	}
475 
476 	pack->skb = skb;
477 	if (q->rev >= 8)
478 		pio_tx_frame_4byte_queue(pack, (const u8 *)&txhdr, hdrlen);
479 	else
480 		pio_tx_frame_2byte_queue(pack, (const u8 *)&txhdr, hdrlen);
481 
482 	/* Remove it from the list of available packet slots.
483 	 * It will be put back when we receive the status report. */
484 	list_del(&pack->list);
485 
486 	/* Update the queue statistics. */
487 	q->buffer_used += roundup(skb->len + hdrlen, 4);
488 	q->free_packet_slots -= 1;
489 
490 	return 0;
491 }
492 
b43_pio_tx(struct b43_wldev * dev,struct sk_buff * skb)493 int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
494 {
495 	struct b43_pio_txqueue *q;
496 	struct ieee80211_hdr *hdr;
497 	unsigned long flags;
498 	unsigned int hdrlen, total_len;
499 	int err = 0;
500 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
501 
502 	hdr = (struct ieee80211_hdr *)skb->data;
503 
504 	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
505 		/* The multicast queue will be sent after the DTIM. */
506 		q = dev->pio.tx_queue_mcast;
507 		/* Set the frame More-Data bit. Ucode will clear it
508 		 * for us on the last frame. */
509 		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
510 	} else {
511 		/* Decide by priority where to put this frame. */
512 		q = select_queue_by_priority(dev, skb_get_queue_mapping(skb));
513 	}
514 
515 	spin_lock_irqsave(&q->lock, flags);
516 
517 	hdrlen = b43_txhdr_size(dev);
518 	total_len = roundup(skb->len + hdrlen, 4);
519 
520 	if (unlikely(total_len > q->buffer_size)) {
521 		err = -ENOBUFS;
522 		b43dbg(dev->wl, "PIO: TX packet longer than queue.\n");
523 		goto out_unlock;
524 	}
525 	if (unlikely(q->free_packet_slots == 0)) {
526 		err = -ENOBUFS;
527 		b43warn(dev->wl, "PIO: TX packet overflow.\n");
528 		goto out_unlock;
529 	}
530 	B43_WARN_ON(q->buffer_used > q->buffer_size);
531 
532 	if (total_len > (q->buffer_size - q->buffer_used)) {
533 		/* Not enough memory on the queue. */
534 		err = -EBUSY;
535 		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
536 		q->stopped = 1;
537 		goto out_unlock;
538 	}
539 
540 	/* Assign the queue number to the ring (if not already done before)
541 	 * so TX status handling can use it. The mac80211-queue to b43-queue
542 	 * mapping is static, so we don't need to store it per frame. */
543 	q->queue_prio = skb_get_queue_mapping(skb);
544 
545 	err = pio_tx_frame(q, skb);
546 	if (unlikely(err == -ENOKEY)) {
547 		/* Drop this packet, as we don't have the encryption key
548 		 * anymore and must not transmit it unencrypted. */
549 		dev_kfree_skb_any(skb);
550 		err = 0;
551 		goto out_unlock;
552 	}
553 	if (unlikely(err)) {
554 		b43err(dev->wl, "PIO transmission failure\n");
555 		goto out_unlock;
556 	}
557 	q->nr_tx_packets++;
558 
559 	B43_WARN_ON(q->buffer_used > q->buffer_size);
560 	if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
561 	    (q->free_packet_slots == 0)) {
562 		/* The queue is full. */
563 		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
564 		q->stopped = 1;
565 	}
566 
567 out_unlock:
568 	spin_unlock_irqrestore(&q->lock, flags);
569 
570 	return err;
571 }
572 
573 /* Called with IRQs disabled. */
b43_pio_handle_txstatus(struct b43_wldev * dev,const struct b43_txstatus * status)574 void b43_pio_handle_txstatus(struct b43_wldev *dev,
575 			     const struct b43_txstatus *status)
576 {
577 	struct b43_pio_txqueue *q;
578 	struct b43_pio_txpacket *pack = NULL;
579 	unsigned int total_len;
580 	struct ieee80211_tx_info *info;
581 
582 	q = parse_cookie(dev, status->cookie, &pack);
583 	if (unlikely(!q))
584 		return;
585 	B43_WARN_ON(!pack);
586 
587 	spin_lock(&q->lock); /* IRQs are already disabled. */
588 
589 	info = IEEE80211_SKB_CB(pack->skb);
590 
591 	b43_fill_txstatus_report(dev, info, status);
592 
593 	total_len = pack->skb->len + b43_txhdr_size(dev);
594 	total_len = roundup(total_len, 4);
595 	q->buffer_used -= total_len;
596 	q->free_packet_slots += 1;
597 
598 	ieee80211_tx_status_irqsafe(dev->wl->hw, pack->skb);
599 	pack->skb = NULL;
600 	list_add(&pack->list, &q->packets_list);
601 
602 	if (q->stopped) {
603 		ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
604 		q->stopped = 0;
605 	}
606 
607 	spin_unlock(&q->lock);
608 }
609 
b43_pio_get_tx_stats(struct b43_wldev * dev,struct ieee80211_tx_queue_stats * stats)610 void b43_pio_get_tx_stats(struct b43_wldev *dev,
611 			  struct ieee80211_tx_queue_stats *stats)
612 {
613 	const int nr_queues = dev->wl->hw->queues;
614 	struct b43_pio_txqueue *q;
615 	unsigned long flags;
616 	int i;
617 
618 	for (i = 0; i < nr_queues; i++) {
619 		q = select_queue_by_priority(dev, i);
620 
621 		spin_lock_irqsave(&q->lock, flags);
622 		stats[i].len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots;
623 		stats[i].limit = B43_PIO_MAX_NR_TXPACKETS;
624 		stats[i].count = q->nr_tx_packets;
625 		spin_unlock_irqrestore(&q->lock, flags);
626 	}
627 }
628 
629 /* Returns whether we should fetch another frame. */
pio_rx_frame(struct b43_pio_rxqueue * q)630 static bool pio_rx_frame(struct b43_pio_rxqueue *q)
631 {
632 	struct b43_wldev *dev = q->dev;
633 	struct b43_rxhdr_fw4 rxhdr;
634 	u16 len;
635 	u32 macstat;
636 	unsigned int i, padding;
637 	struct sk_buff *skb;
638 	const char *err_msg = NULL;
639 
640 	memset(&rxhdr, 0, sizeof(rxhdr));
641 
642 	/* Check if we have data and wait for it to get ready. */
643 	if (q->rev >= 8) {
644 		u32 ctl;
645 
646 		ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
647 		if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
648 			return 0;
649 		b43_piorx_write32(q, B43_PIO8_RXCTL,
650 				  B43_PIO8_RXCTL_FRAMERDY);
651 		for (i = 0; i < 10; i++) {
652 			ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
653 			if (ctl & B43_PIO8_RXCTL_DATARDY)
654 				goto data_ready;
655 			udelay(10);
656 		}
657 	} else {
658 		u16 ctl;
659 
660 		ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
661 		if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
662 			return 0;
663 		b43_piorx_write16(q, B43_PIO_RXCTL,
664 				  B43_PIO_RXCTL_FRAMERDY);
665 		for (i = 0; i < 10; i++) {
666 			ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
667 			if (ctl & B43_PIO_RXCTL_DATARDY)
668 				goto data_ready;
669 			udelay(10);
670 		}
671 	}
672 	b43dbg(q->dev->wl, "PIO RX timed out\n");
673 	return 1;
674 data_ready:
675 
676 	/* Get the preamble (RX header) */
677 	if (q->rev >= 8) {
678 		ssb_block_read(dev->dev, &rxhdr, sizeof(rxhdr),
679 			       q->mmio_base + B43_PIO8_RXDATA,
680 			       sizeof(u32));
681 	} else {
682 		ssb_block_read(dev->dev, &rxhdr, sizeof(rxhdr),
683 			       q->mmio_base + B43_PIO_RXDATA,
684 			       sizeof(u16));
685 	}
686 	/* Sanity checks. */
687 	len = le16_to_cpu(rxhdr.frame_len);
688 	if (unlikely(len > 0x700)) {
689 		err_msg = "len > 0x700";
690 		goto rx_error;
691 	}
692 	if (unlikely(len == 0)) {
693 		err_msg = "len == 0";
694 		goto rx_error;
695 	}
696 
697 	macstat = le32_to_cpu(rxhdr.mac_status);
698 	if (macstat & B43_RX_MAC_FCSERR) {
699 		if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
700 			/* Drop frames with failed FCS. */
701 			err_msg = "Frame FCS error";
702 			goto rx_error;
703 		}
704 	}
705 
706 	/* We always pad 2 bytes, as that's what upstream code expects
707 	 * due to the RX-header being 30 bytes. In case the frame is
708 	 * unaligned, we pad another 2 bytes. */
709 	padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0;
710 	skb = dev_alloc_skb(len + padding + 2);
711 	if (unlikely(!skb)) {
712 		err_msg = "Out of memory";
713 		goto rx_error;
714 	}
715 	skb_reserve(skb, 2);
716 	skb_put(skb, len + padding);
717 	if (q->rev >= 8) {
718 		ssb_block_read(dev->dev, skb->data + padding, (len & ~3),
719 			       q->mmio_base + B43_PIO8_RXDATA,
720 			       sizeof(u32));
721 		if (len & 3) {
722 			u32 value;
723 			char *data;
724 
725 			/* Read the last few bytes. */
726 			value = b43_piorx_read32(q, B43_PIO8_RXDATA);
727 			data = &(skb->data[len + padding - 1]);
728 			switch (len & 3) {
729 			case 3:
730 				*data = (value >> 16);
731 				data--;
732 			case 2:
733 				*data = (value >> 8);
734 				data--;
735 			case 1:
736 				*data = value;
737 			}
738 		}
739 	} else {
740 		ssb_block_read(dev->dev, skb->data + padding, (len & ~1),
741 			       q->mmio_base + B43_PIO_RXDATA,
742 			       sizeof(u16));
743 		if (len & 1) {
744 			u16 value;
745 
746 			/* Read the last byte. */
747 			value = b43_piorx_read16(q, B43_PIO_RXDATA);
748 			skb->data[len + padding - 1] = value;
749 		}
750 	}
751 
752 	b43_rx(q->dev, skb, &rxhdr);
753 
754 	return 1;
755 
756 rx_error:
757 	if (err_msg)
758 		b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
759 	b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);
760 	return 1;
761 }
762 
763 /* RX workqueue. We can sleep, yay! */
b43_pio_rx_work(struct work_struct * work)764 static void b43_pio_rx_work(struct work_struct *work)
765 {
766 	struct b43_pio_rxqueue *q = container_of(work, struct b43_pio_rxqueue,
767 						 rx_work);
768 	unsigned int budget = 50;
769 	bool stop;
770 
771 	do {
772 		spin_lock_irq(&q->lock);
773 		stop = (pio_rx_frame(q) == 0);
774 		spin_unlock_irq(&q->lock);
775 		cond_resched();
776 		if (stop)
777 			break;
778 	} while (--budget);
779 }
780 
781 /* Called with IRQs disabled. */
b43_pio_rx(struct b43_pio_rxqueue * q)782 void b43_pio_rx(struct b43_pio_rxqueue *q)
783 {
784 	/* Due to latency issues we must run the RX path in
785 	 * a workqueue to be able to schedule between packets. */
786 	queue_work(q->dev->wl->hw->workqueue, &q->rx_work);
787 }
788 
b43_pio_tx_suspend_queue(struct b43_pio_txqueue * q)789 static void b43_pio_tx_suspend_queue(struct b43_pio_txqueue *q)
790 {
791 	unsigned long flags;
792 
793 	spin_lock_irqsave(&q->lock, flags);
794 	if (q->rev >= 8) {
795 		b43_piotx_write32(q, B43_PIO8_TXCTL,
796 				  b43_piotx_read32(q, B43_PIO8_TXCTL)
797 				  | B43_PIO8_TXCTL_SUSPREQ);
798 	} else {
799 		b43_piotx_write16(q, B43_PIO_TXCTL,
800 				  b43_piotx_read16(q, B43_PIO_TXCTL)
801 				  | B43_PIO_TXCTL_SUSPREQ);
802 	}
803 	spin_unlock_irqrestore(&q->lock, flags);
804 }
805 
b43_pio_tx_resume_queue(struct b43_pio_txqueue * q)806 static void b43_pio_tx_resume_queue(struct b43_pio_txqueue *q)
807 {
808 	unsigned long flags;
809 
810 	spin_lock_irqsave(&q->lock, flags);
811 	if (q->rev >= 8) {
812 		b43_piotx_write32(q, B43_PIO8_TXCTL,
813 				  b43_piotx_read32(q, B43_PIO8_TXCTL)
814 				  & ~B43_PIO8_TXCTL_SUSPREQ);
815 	} else {
816 		b43_piotx_write16(q, B43_PIO_TXCTL,
817 				  b43_piotx_read16(q, B43_PIO_TXCTL)
818 				  & ~B43_PIO_TXCTL_SUSPREQ);
819 	}
820 	spin_unlock_irqrestore(&q->lock, flags);
821 }
822 
b43_pio_tx_suspend(struct b43_wldev * dev)823 void b43_pio_tx_suspend(struct b43_wldev *dev)
824 {
825 	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
826 	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BK);
827 	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_BE);
828 	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VI);
829 	b43_pio_tx_suspend_queue(dev->pio.tx_queue_AC_VO);
830 	b43_pio_tx_suspend_queue(dev->pio.tx_queue_mcast);
831 }
832 
b43_pio_tx_resume(struct b43_wldev * dev)833 void b43_pio_tx_resume(struct b43_wldev *dev)
834 {
835 	b43_pio_tx_resume_queue(dev->pio.tx_queue_mcast);
836 	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VO);
837 	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_VI);
838 	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BE);
839 	b43_pio_tx_resume_queue(dev->pio.tx_queue_AC_BK);
840 	b43_power_saving_ctl_bits(dev, 0);
841 }
842