/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <mb@bu3sch.de>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <asm/div64.h>


/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

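/* Write one 32-bit hardware descriptor. The DMA bus address is split into
 * the descriptor address word (with the SSB translation bits ORed in) and
 * the ADDREXT field, and the control word carries the byte count plus the
 * table-end, frame-start/end and IRQ flags. The 64-bit variant below uses
 * the same scheme with separate low/high address words. */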
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32) (dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32) (dmaaddr & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ssb_dma_translation(ring->dev->dev);
	ctl = (bufsize - ring->frameoffset)
	    & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = (u32) (dmaaddr & 0xFFFFFFFF);
	addrhi = (((u64) dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (((u64) dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
	    >> SSB_DMA_TRANSLATION_SHIFT;
	addrhi |= (ssb_dma_translation(ring->dev->dev) << 1);
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= (bufsize - ring->frameoffset)
	    & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};
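/* Both descriptor formats are driven through the same b43_dma_ops interface.
 * b43_setup_dmaring() picks dma32_ops or dma64_ops once per ring, so the
 * rest of this file stays engine-agnostic via ring->ops. */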

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
    void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
    dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			      unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = ssb_dma_map_single(ring->dev->dev,
					     buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = ssb_dma_map_single(ring->dev->dev,
					     buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
    void unmap_descbuffer(struct b43_dmaring *ring,
			  dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		ssb_dma_unmap_single(ring->dev->dev,
				     addr, len, DMA_TO_DEVICE);
	} else {
		ssb_dma_unmap_single(ring->dev->dev,
				     addr, len, DMA_FROM_DEVICE);
	}
}

static inline
    void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
				 dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	ssb_dma_sync_single_for_cpu(ring->dev->dev,
				    addr, len, DMA_FROM_DEVICE);
}

static inline
    void sync_descbuffer_for_device(struct b43_dmaring *ring,
				    dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	ssb_dma_sync_single_for_device(ring->dev->dev,
				       addr, len, DMA_FROM_DEVICE);
}

static inline
    void free_descriptor_buffer(struct b43_dmaring *ring,
				struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
	 * which accounts for the GFP_DMA flag below.
	 *
	 * The flags here must match the flags in free_ringmemory below!
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = ssb_dma_alloc_consistent(ring->dev->dev,
						  B43_DMA_RINGMEMSIZE,
						  &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;

	ssb_dma_free_consistent(ring->dev->dev, B43_DMA_RINGMEMSIZE,
				ring->descbase, ring->dmabase, flags);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(ssb_dma_mapping_error(ring->dev->dev, addr)))
		return 1;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return 0;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return 1;
}

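/* Allocate and map a receive buffer for one RX slot and program its
 * descriptor. If the first mapping falls outside the supported DMA address
 * range, the buffer is reallocated from ZONE_DMA and mapped again. */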
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	struct b43_rxhdr_fw4 *rxhdr;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
      out:
	return err;

      err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ssb_dma_translation(ring->dev->dev);

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_TXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);

			addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO,
				      (ringbase & 0xFFFFFFFF));
			b43_dma_write(ring, B43_DMA64_RXRINGHI,
				      ((ringbase >> 32) &
				       ~SSB_DMA_TRANSLATION_MASK)
				      | (trans << 1));
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);

			addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			    >> SSB_DMA_TRANSLATION_SHIFT;
			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING,
				      (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				      | trans);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

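/* Probe the widest DMA address width the core supports: the DMA64 capability
 * bit in SSB_TMSHIGH means 64-bit, otherwise a writable ADDREXT field in the
 * 32-bit TX control register means 32-bit, and 30-bit DMA is the fallback. */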
static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	tmp = b43_read32(dev, SSB_TMSHIGH);
	if (tmp & SSB_TMSHIGH_DMA64)
		return DMA_64BIT_MASK;
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_32BIT_MASK;

	return DMA_30BIT_MASK;
}

static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	if (dmamask == DMA_30BIT_MASK)
		return B43_DMA_30BIT;
	if (dmamask == DMA_32BIT_MASK)
		return B43_DMA_32BIT;
	if (dmamask == DMA_64BIT_MASK)
		return B43_DMA_64BIT;
	B43_WARN_ON(1);
	return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = 1;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43_DMA3_RX_FRAMEOFFSET;
		} else
			B43_WARN_ON(1);
	}
	spin_lock_init(&ring->lock);
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		ring->txhdr_cache = kcalloc(ring->nr_slots,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = ssb_dma_map_single(dev->dev,
					      ring->txhdr_cache,
					      b43_txhdr_size(dev),
					      DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = ssb_dma_map_single(dev->dev,
						      ring->txhdr_cache,
						      b43_txhdr_size(dev),
						      DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {

				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		ssb_dma_unmap_single(dev->dev,
				     dma_test, b43_txhdr_size(dev),
				     DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

      out:
	return ring;

      err_free_ringmemory:
	free_ringmemory(ring);
      err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
      err_kfree_meta:
	kfree(ring->meta);
      err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })
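/* do_div() divides the 64-bit __a by a 32-bit divisor in place and evaluates
 * to the remainder, so divide() yields the quotient and modulo() the
 * remainder of a 64-bit division that is safe on 32-bit architectures.
 * For illustration: divide(1234ULL, 100) == 12, modulo(1234ULL, 100) == 34. */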

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = 0;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = ssb_dma_set_mask(dev->dev, mask);
		if (!err)
			break;
		if (mask == DMA_64BIT_MASK) {
			mask = DMA_32BIT_MASK;
			fallback = 1;
			continue;
		}
		if (mask == DMA_32BIT_MASK) {
			mask = DMA_30BIT_MASK;
			fallback = 1;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	int err;
	u64 dmamask;
	enum b43_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43_dma_set_mask(dev, dmamask);
	if (err)
		return err;

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->id.revision < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}
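/* Worked example: for the AC_VI ring (controller index 2) and slot 10, the
 * cookie is ((2 + 1) << 12) | 10 == 0x300A. parse_cookie() below reverses
 * this by mapping the 0x3000 nibble back to tx_ring_AC_VI and masking the
 * slot with 0x0FFF. */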

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	default:
		B43_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

#define SLOTS_PER_PACKET  2
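/* Each frame consumes two descriptor slots: one for the device TX header
 * generated below and one for the 802.11 payload skb. */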

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = 1;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring * select_ring_by_priority(struct b43_wldev *dev,
						    u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (b43_modparam_qos) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			/* fallthrough */
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	unsigned long flags;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	if (unlikely(free_slots(ring) < SLOTS_PER_PACKET)) {
		b43warn(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out_unlock;
	}
	/* Check if the queue was stopped in mac80211,
	 * but we got called nevertheless.
	 * That would be a mac80211 bug. */
	B43_WARN_ON(ring->stopped);

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		err = 0;
		goto out_unlock;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out_unlock;
	}
	ring->nr_tx_packets++;
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
		ring->stopped = 1;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&ring->lock, flags);

	return err;
}

/* Called with IRQs disabled. */
void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	int slot;
	bool frame_succeed;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;

	spin_lock(&ring->lock); /* IRQs are already disabled. */

	B43_WARN_ON(!ring->tx);
	ops = ring->ops;
	while (1) {
		B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		desc = ops->idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len,
					 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			BUG_ON(!meta->skb);

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission.
			 */
			frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);

			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43_WARN_ON(meta->skb);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		ring->stopped = 0;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}

	spin_unlock(&ring->lock);
}

void b43_dma_get_tx_stats(struct b43_wldev *dev,
			  struct ieee80211_tx_queue_stats *stats)
{
	const int nr_queues = dev->wl->hw->queues;
	struct b43_dmaring *ring;
	unsigned long flags;
	int i;

	for (i = 0; i < nr_queues; i++) {
		ring = select_ring_by_priority(dev, i);

		spin_lock_irqsave(&ring->lock, flags);
		stats[i].len = ring->used_slots / SLOTS_PER_PACKET;
		stats[i].limit = ring->nr_slots / SLOTS_PER_PACKET;
		stats[i].count = ring->nr_tx_packets;
		spin_unlock_irqrestore(&ring->lock, flags);
	}
}

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}

#ifdef CONFIG_B43_PIO
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = dma_mask_to_engine_type(supported_dma_mask(dev));

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}
#endif /* CONFIG_B43_PIO */