/*
 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
 * Copyright (c) 2004-2005 Atheros Communications, Inc.
 * Copyright (c) 2006 Devicescape Software, Inc.
 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
 *
 * Modified for gPXE, July 2009, by Joshua Oreman <oremanj@rwcr.net>
 * Original from Linux kernel 2.6.30.
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 *
 */

FILE_LICENCE ( BSD3 );

#include <stdlib.h>
#include <gpxe/malloc.h>
#include <gpxe/timer.h>
#include <gpxe/netdevice.h>
#include <gpxe/pci.h>
#include <gpxe/pci_io.h>

#include "base.h"
#include "reg.h"

#define ATH5K_CALIB_INTERVAL	10 /* Calibrate PHY every 10 seconds */
#define ATH5K_RETRIES		4  /* Number of times to retry packet sends */
#define ATH5K_DESC_ALIGN	16 /* Alignment for TX/RX descriptors */

/******************\
* Internal defines *
\******************/

/* Known PCI ids */
static struct pci_device_id ath5k_nics[] = {
	PCI_ROM(0x168c, 0x0207, "ath5210e", "Atheros 5210 early", AR5K_AR5210),
	PCI_ROM(0x168c, 0x0007, "ath5210", "Atheros 5210", AR5K_AR5210),
	PCI_ROM(0x168c, 0x0011, "ath5311", "Atheros 5311 (AHB)", AR5K_AR5211),
	PCI_ROM(0x168c, 0x0012, "ath5211", "Atheros 5211", AR5K_AR5211),
	PCI_ROM(0x168c, 0x0013, "ath5212", "Atheros 5212", AR5K_AR5212),
	PCI_ROM(0xa727, 0x0013, "ath5212c", "3com Ath 5212", AR5K_AR5212),
	PCI_ROM(0x10b7, 0x0013, "rdag675", "3com 3CRDAG675", AR5K_AR5212),
	PCI_ROM(0x168c, 0x1014, "ath5212m", "Ath 5212 miniPCI", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0014, "ath5212x14", "Atheros 5212 x14", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0015, "ath5212x15", "Atheros 5212 x15", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0016, "ath5212x16", "Atheros 5212 x16", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0017, "ath5212x17", "Atheros 5212 x17", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0018, "ath5212x18", "Atheros 5212 x18", AR5K_AR5212),
	PCI_ROM(0x168c, 0x0019, "ath5212x19", "Atheros 5212 x19", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001a, "ath2413", "Atheros 2413 Griffin", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001b, "ath5413", "Atheros 5413 Eagle", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001c, "ath5212e", "Atheros 5212 PCI-E", AR5K_AR5212),
	PCI_ROM(0x168c, 0x001d, "ath2417", "Atheros 2417 Nala", AR5K_AR5212),
};

/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
	{ "5210",	AR5K_VERSION_MAC,	AR5K_SREV_AR5210 },
	{ "5311",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311 },
	{ "5311A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311A },
	{ "5311B",	AR5K_VERSION_MAC,	AR5K_SREV_AR5311B },
	{ "5211",	AR5K_VERSION_MAC,	AR5K_SREV_AR5211 },
	{ "5212",	AR5K_VERSION_MAC,	AR5K_SREV_AR5212 },
	{ "5213",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213 },
	{ "5213A",	AR5K_VERSION_MAC,	AR5K_SREV_AR5213A },
	{ "2413",	AR5K_VERSION_MAC,	AR5K_SREV_AR2413 },
	{ "2414",	AR5K_VERSION_MAC,	AR5K_SREV_AR2414 },
	{ "5424",	AR5K_VERSION_MAC,	AR5K_SREV_AR5424 },
	{ "5413",	AR5K_VERSION_MAC,	AR5K_SREV_AR5413 },
	{ "5414",	AR5K_VERSION_MAC,	AR5K_SREV_AR5414 },
	{ "2415",	AR5K_VERSION_MAC,	AR5K_SREV_AR2415 },
	{ "5416",	AR5K_VERSION_MAC,	AR5K_SREV_AR5416 },
	{ "5418",	AR5K_VERSION_MAC,	AR5K_SREV_AR5418 },
	{ "2425",	AR5K_VERSION_MAC,	AR5K_SREV_AR2425 },
	{ "2417",	AR5K_VERSION_MAC,	AR5K_SREV_AR2417 },
	{ "xxxxx",	AR5K_VERSION_MAC,	AR5K_SREV_UNKNOWN },
	{ "5110",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5110 },
	{ "5111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111 },
	{ "5111A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5111A },
	{ "2111",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2111 },
	{ "5112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112 },
	{ "5112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112A },
	{ "5112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5112B },
	{ "2112",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112 },
	{ "2112A",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112A },
	{ "2112B",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2112B },
	{ "2413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2413 },
	{ "5413",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5413 },
	{ "2316",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2316 },
	{ "2317",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_2317 },
	{ "5424",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5424 },
	{ "5133",	AR5K_VERSION_RAD,	AR5K_SREV_RAD_5133 },
	{ "xxxxx",	AR5K_VERSION_RAD,	AR5K_SREV_UNKNOWN },
};

#define ATH5K_SPMBL_NO   1
#define ATH5K_SPMBL_YES  2
#define ATH5K_SPMBL_BOTH 3
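
/*
 * These preamble flags are bits: ATH5K_SPMBL_BOTH ==
 * (ATH5K_SPMBL_NO | ATH5K_SPMBL_YES), so an entry in the rate table
 * below can be matched against the current preamble setting with a
 * simple bitwise AND; see the rate lookup loop in ath5k_config().
 */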

static const struct {
	u16 bitrate;
	u8 short_pmbl;
	u8 hw_code;
} ath5k_rates[] = {
	{ 10, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_1M },
	{ 20, ATH5K_SPMBL_NO, ATH5K_RATE_CODE_2M },
	{ 55, ATH5K_SPMBL_NO, ATH5K_RATE_CODE_5_5M },
	{ 110, ATH5K_SPMBL_NO, ATH5K_RATE_CODE_11M },
	{ 60, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_6M },
	{ 90, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_9M },
	{ 120, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_12M },
	{ 180, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_18M },
	{ 240, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_24M },
	{ 360, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_36M },
	{ 480, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_48M },
	{ 540, ATH5K_SPMBL_BOTH, ATH5K_RATE_CODE_54M },
	{ 20, ATH5K_SPMBL_YES, ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE },
	{ 55, ATH5K_SPMBL_YES, ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE },
	{ 110, ATH5K_SPMBL_YES, ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE },
	{ 0, 0, 0 },
};
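
/*
 * The bitrate column above is in units of 100 kbps: 10 is 1 Mbps CCK,
 * 540 is 54 Mbps OFDM. The final three entries repeat the 2, 5.5 and
 * 11 Mbps CCK rates with AR5K_SET_SHORT_PREAMBLE or'd into the hardware
 * code, which is why ATH5K_NR_RATES below is 15 rather than 12.
 */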

#define ATH5K_NR_RATES 15

/*
 * Prototypes - PCI stack related functions
 */
static int ath5k_probe(struct pci_device *pdev,
		       const struct pci_device_id *id);
static void ath5k_remove(struct pci_device *pdev);

struct pci_driver ath5k_pci_driver __pci_driver = {
	.ids = ath5k_nics,
	.id_count = sizeof(ath5k_nics) / sizeof(ath5k_nics[0]),
	.probe = ath5k_probe,
	.remove = ath5k_remove,
};



/*
 * Prototypes - MAC 802.11 stack related functions
 */
static int ath5k_tx(struct net80211_device *dev, struct io_buffer *iob);
static int ath5k_reset(struct ath5k_softc *sc, struct net80211_channel *chan);
static int ath5k_reset_wake(struct ath5k_softc *sc);
static int ath5k_start(struct net80211_device *dev);
static void ath5k_stop(struct net80211_device *dev);
static int ath5k_config(struct net80211_device *dev, int changed);
static void ath5k_poll(struct net80211_device *dev);
static void ath5k_irq(struct net80211_device *dev, int enable);

static struct net80211_device_operations ath5k_ops = {
	.open = ath5k_start,
	.close = ath5k_stop,
	.transmit = ath5k_tx,
	.poll = ath5k_poll,
	.irq = ath5k_irq,
	.config = ath5k_config,
};

/*
 * Prototypes - Internal functions
 */
/* Attach detach */
static int ath5k_attach(struct net80211_device *dev);
static void ath5k_detach(struct net80211_device *dev);
/* Channel/mode setup */
static unsigned int ath5k_copy_channels(struct ath5k_hw *ah,
					struct net80211_channel *channels,
					unsigned int mode,
					unsigned int max);
static int ath5k_setup_bands(struct net80211_device *dev);
static int ath5k_chan_set(struct ath5k_softc *sc,
			  struct net80211_channel *chan);
static void ath5k_setcurmode(struct ath5k_softc *sc,
			     unsigned int mode);
static void ath5k_mode_setup(struct ath5k_softc *sc);

/* Descriptor setup */
static int ath5k_desc_alloc(struct ath5k_softc *sc);
static void ath5k_desc_free(struct ath5k_softc *sc);
/* Buffers setup */
static int ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf);
static int ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf);

static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
				    struct ath5k_buf *bf)
{
	if (!bf->iob)
		return;

	net80211_tx_complete(sc->dev, bf->iob, 0, ECANCELED);
	bf->iob = NULL;
}

static inline void ath5k_rxbuf_free(struct ath5k_softc *sc __unused,
				    struct ath5k_buf *bf)
{
	free_iob(bf->iob);
	bf->iob = NULL;
}

/* Queues setup */
static int ath5k_txq_setup(struct ath5k_softc *sc,
			   int qtype, int subtype);
static void ath5k_txq_drainq(struct ath5k_softc *sc,
			     struct ath5k_txq *txq);
static void ath5k_txq_cleanup(struct ath5k_softc *sc);
static void ath5k_txq_release(struct ath5k_softc *sc);
/* Rx handling */
static int ath5k_rx_start(struct ath5k_softc *sc);
static void ath5k_rx_stop(struct ath5k_softc *sc);
/* Tx handling */
static void ath5k_tx_processq(struct ath5k_softc *sc,
			      struct ath5k_txq *txq);

/* Interrupt handling */
static int ath5k_init(struct ath5k_softc *sc);
static int ath5k_stop_hw(struct ath5k_softc *sc);

static void ath5k_calibrate(struct ath5k_softc *sc);

/* Filter */
static void ath5k_configure_filter(struct ath5k_softc *sc);

/********************\
* PCI Initialization *
\********************/

#if DBGLVL_MAX
static const char *
ath5k_chip_name(enum ath5k_srev_type type, u16 val)
{
	const char *name = "xxxxx";
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
		if (srev_names[i].sr_type != type)
			continue;

		if ((val & 0xf0) == srev_names[i].sr_val)
			name = srev_names[i].sr_name;

		if ((val & 0xff) == srev_names[i].sr_val) {
			name = srev_names[i].sr_name;
			break;
		}
	}

	return name;
}
#endif

static int ath5k_probe(struct pci_device *pdev,
		       const struct pci_device_id *id)
{
	void *mem;
	struct ath5k_softc *sc;
	struct net80211_device *dev;
	int ret;
	u8 csz;

	adjust_pci_device(pdev);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
	if (csz == 0) {
		/*
		 * We must have this setup properly for rx buffer
		 * DMA to work so force a reasonable value here if it
		 * comes up zero.
		 */
		csz = 16;
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
	}
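
	/*
	 * Note: PCI_CACHE_LINE_SIZE is expressed in units of 32-bit
	 * words, so the fallback value of 16 written above corresponds
	 * to a 64-byte cache line; the byte count used by the driver is
	 * derived below as csz * 4.
	 */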
	/*
	 * The default setting of latency timer yields poor results,
	 * set it to the value used by other systems. It may be worth
	 * tweaking this setting more.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);

	/*
	 * Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, 0x41, 0);

	mem = ioremap(pdev->membase, 0x10000);
	if (!mem) {
		DBG("ath5k: cannot remap PCI memory region\n");
		ret = -EIO;
		goto err;
	}

	/*
	 * Allocate dev (net80211 main struct)
	 * and dev->priv (driver private data)
	 */
	dev = net80211_alloc(sizeof(*sc));
	if (!dev) {
		DBG("ath5k: cannot allocate 802.11 device\n");
		ret = -ENOMEM;
		goto err_map;
	}

	/* Initialize driver private data */
	sc = dev->priv;
	sc->dev = dev;
	sc->pdev = pdev;

	sc->hwinfo = zalloc(sizeof(*sc->hwinfo));
	if (!sc->hwinfo) {
		DBG("ath5k: cannot allocate 802.11 hardware info structure\n");
		ret = -ENOMEM;
		goto err_free;
	}

	sc->hwinfo->flags = NET80211_HW_RX_HAS_FCS;
	sc->hwinfo->signal_type = NET80211_SIGNAL_DB;
	sc->hwinfo->signal_max = 40; /* 35dB should give perfect 54Mbps */
	sc->hwinfo->channel_change_time = 5000;

	/* Avoid working with the device until setup is complete */
	sc->status |= ATH_STAT_INVALID;

	sc->iobase = mem;
	sc->cachelsz = csz * 4; /* convert to bytes */

	DBG("ath5k: register base at %p (%08lx)\n", sc->iobase, pdev->membase);
	DBG("ath5k: cache line size %d\n", sc->cachelsz);

	/* Set private data */
	pci_set_drvdata(pdev, dev);
	dev->netdev->dev = (struct device *)pdev;

	/* Initialize device */
	ret = ath5k_hw_attach(sc, id->driver_data, &sc->ah);
	if (ret)
		goto err_free_hwinfo;

	/* Finish private driver data initialization */
	ret = ath5k_attach(dev);
	if (ret)
		goto err_ah;

#if DBGLVL_MAX
	DBG("Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
	    ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
	    sc->ah->ah_mac_srev, sc->ah->ah_phy_revision);

	if (!sc->ah->ah_single_chip) {
		/* Single chip radio (!RF5111) */
		if (sc->ah->ah_radio_5ghz_revision &&
		    !sc->ah->ah_radio_2ghz_revision) {
			/* No 5GHz support -> report 2GHz radio */
			if (!(sc->ah->ah_capabilities.cap_mode &
			      AR5K_MODE_BIT_11A)) {
				DBG("RF%s 2GHz radio found (0x%x)\n",
				    ath5k_chip_name(AR5K_VERSION_RAD,
						    sc->ah->ah_radio_5ghz_revision),
				    sc->ah->ah_radio_5ghz_revision);
			/* No 2GHz support (5110 and some
			 * 5GHz only cards) -> report 5GHz radio */
			} else if (!(sc->ah->ah_capabilities.cap_mode &
				     AR5K_MODE_BIT_11B)) {
				DBG("RF%s 5GHz radio found (0x%x)\n",
				    ath5k_chip_name(AR5K_VERSION_RAD,
						    sc->ah->ah_radio_5ghz_revision),
				    sc->ah->ah_radio_5ghz_revision);
			/* Multiband radio */
			} else {
				DBG("RF%s multiband radio found (0x%x)\n",
				    ath5k_chip_name(AR5K_VERSION_RAD,
						    sc->ah->ah_radio_5ghz_revision),
				    sc->ah->ah_radio_5ghz_revision);
			}
		}
		/* Multi chip radio (RF5111 - RF2111) ->
		 * report both 2GHz/5GHz radios */
		else if (sc->ah->ah_radio_5ghz_revision &&
			 sc->ah->ah_radio_2ghz_revision) {
			DBG("RF%s 5GHz radio found (0x%x)\n",
			    ath5k_chip_name(AR5K_VERSION_RAD,
					    sc->ah->ah_radio_5ghz_revision),
			    sc->ah->ah_radio_5ghz_revision);
			DBG("RF%s 2GHz radio found (0x%x)\n",
			    ath5k_chip_name(AR5K_VERSION_RAD,
					    sc->ah->ah_radio_2ghz_revision),
			    sc->ah->ah_radio_2ghz_revision);
		}
	}
#endif

	/* Ready to go */
	sc->status &= ~ATH_STAT_INVALID;

	return 0;
err_ah:
	ath5k_hw_detach(sc->ah);
err_free_hwinfo:
	free(sc->hwinfo);
err_free:
	net80211_free(dev);
err_map:
	iounmap(mem);
err:
	return ret;
}

static void ath5k_remove(struct pci_device *pdev)
{
	struct net80211_device *dev = pci_get_drvdata(pdev);
	struct ath5k_softc *sc = dev->priv;

	ath5k_detach(dev);
	ath5k_hw_detach(sc->ah);
	iounmap(sc->iobase);
	free(sc->hwinfo);
	net80211_free(dev);
}


/***********************\
* Driver Initialization *
\***********************/

static int
ath5k_attach(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	int ret;

	/*
	 * Collect the channel list. The 802.11 layer
	 * is responsible for filtering this list based
	 * on settings like the phy mode and regulatory
	 * domain restrictions.
	 */
	ret = ath5k_setup_bands(dev);
	if (ret) {
		DBG("ath5k: can't get channels\n");
		goto err;
	}

	/* NB: setup here so ath5k_rate_update is happy */
	if (ah->ah_modes & AR5K_MODE_BIT_11A)
		ath5k_setcurmode(sc, AR5K_MODE_11A);
	else
		ath5k_setcurmode(sc, AR5K_MODE_11B);

	/*
	 * Allocate tx+rx descriptors and populate the lists.
	 */
	ret = ath5k_desc_alloc(sc);
	if (ret) {
		DBG("ath5k: can't allocate descriptors\n");
		goto err;
	}

	/*
	 * Allocate hardware transmit queues. Note that hw functions
	 * handle resetting these queues at the needed time.
	 */
	ret = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
	if (ret) {
		DBG("ath5k: can't setup xmit queue\n");
		goto err_desc;
	}

	sc->last_calib_ticks = currticks();

	ret = ath5k_eeprom_read_mac(ah, sc->hwinfo->hwaddr);
	if (ret) {
		DBG("ath5k: unable to read address from EEPROM: 0x%04x\n",
		    sc->pdev->device);
		goto err_queues;
	}

	memset(sc->bssidmask, 0xff, ETH_ALEN);
	ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);

	ret = net80211_register(sc->dev, &ath5k_ops, sc->hwinfo);
	if (ret) {
		DBG("ath5k: can't register ieee80211 hw\n");
		goto err_queues;
	}

	return 0;
err_queues:
	ath5k_txq_release(sc);
err_desc:
	ath5k_desc_free(sc);
err:
	return ret;
}

static void
ath5k_detach(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;

	net80211_unregister(dev);
	ath5k_desc_free(sc);
	ath5k_txq_release(sc);
}




/********************\
* Channel/mode setup *
\********************/

/*
 * Convert IEEE channel number to MHz frequency.
 */
static inline short
ath5k_ieee2mhz(short chan)
{
	if (chan < 14)
		return 2407 + 5 * chan;
	if (chan == 14)
		return 2484;
	if (chan < 27)
		return 2212 + 20 * chan;
	return 5000 + 5 * chan;
}
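
/*
 * Worked examples: channel 1 -> 2407 + 5*1 = 2412 MHz; channel 14 is the
 * special-cased 2484 MHz; channels 15-26 fall in the 2512-2732 MHz range
 * at 20 MHz spacing; channel 36 -> 5000 + 5*36 = 5180 MHz.
 */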

static unsigned int
ath5k_copy_channels(struct ath5k_hw *ah,
		    struct net80211_channel *channels,
		    unsigned int mode, unsigned int max)
{
	unsigned int i, count, size, chfreq, freq, ch;

	if (!(ah->ah_modes & (1 << mode)))
		return 0;

	switch (mode) {
	case AR5K_MODE_11A:
	case AR5K_MODE_11A_TURBO:
		/* 1..220, but 2GHz frequencies are filtered by
		 * ath5k_channel_ok() below */
		size = 220;
		chfreq = CHANNEL_5GHZ;
		break;
	case AR5K_MODE_11B:
	case AR5K_MODE_11G:
	case AR5K_MODE_11G_TURBO:
		size = 26;
		chfreq = CHANNEL_2GHZ;
		break;
	default:
		return 0;
	}

	for (i = 0, count = 0; i < size && max > 0; i++) {
		ch = i + 1;
		freq = ath5k_ieee2mhz(ch);

		/* Check if channel is supported by the chipset */
		if (!ath5k_channel_ok(ah, freq, chfreq))
			continue;

		/* Write channel info and increment counter */
		channels[count].center_freq = freq;
		channels[count].maxpower = 0; /* use regulatory */
		channels[count].band = (chfreq == CHANNEL_2GHZ) ?
			NET80211_BAND_2GHZ : NET80211_BAND_5GHZ;
		switch (mode) {
		case AR5K_MODE_11A:
		case AR5K_MODE_11G:
			channels[count].hw_value = chfreq | CHANNEL_OFDM;
			break;
		case AR5K_MODE_11A_TURBO:
		case AR5K_MODE_11G_TURBO:
			channels[count].hw_value = chfreq |
				CHANNEL_OFDM | CHANNEL_TURBO;
			break;
		case AR5K_MODE_11B:
			channels[count].hw_value = CHANNEL_B;
		}

		count++;
		max--;
	}

	return count;
}
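
/*
 * For example, an 802.11g channel comes out of the loop above with
 * hw_value == (CHANNEL_2GHZ | CHANNEL_OFDM), an 802.11b channel with
 * plain CHANNEL_B, and the turbo modes additionally set CHANNEL_TURBO.
 */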

static int
ath5k_setup_bands(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	int max_c, count_c = 0;
	int i;
	int band;

	max_c = sizeof(sc->hwinfo->channels) / sizeof(sc->hwinfo->channels[0]);

	/* 2GHz band */
	if (sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11G) {
		/* G mode */
		band = NET80211_BAND_2GHZ;
		sc->hwinfo->bands = NET80211_BAND_BIT_2GHZ;
		sc->hwinfo->modes = (NET80211_MODE_G | NET80211_MODE_B);

		for (i = 0; i < 12; i++)
			sc->hwinfo->rates[band][i] = ath5k_rates[i].bitrate;
		sc->hwinfo->nr_rates[band] = 12;

		sc->hwinfo->nr_channels =
			ath5k_copy_channels(ah, sc->hwinfo->channels,
					    AR5K_MODE_11G, max_c);
		count_c = sc->hwinfo->nr_channels;
		max_c -= count_c;
	} else if (sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11B) {
		/* B mode */
		band = NET80211_BAND_2GHZ;
		sc->hwinfo->bands = NET80211_BAND_BIT_2GHZ;
		sc->hwinfo->modes = NET80211_MODE_B;

		for (i = 0; i < 4; i++)
			sc->hwinfo->rates[band][i] = ath5k_rates[i].bitrate;
		sc->hwinfo->nr_rates[band] = 4;

		sc->hwinfo->nr_channels =
			ath5k_copy_channels(ah, sc->hwinfo->channels,
					    AR5K_MODE_11B, max_c);
		count_c = sc->hwinfo->nr_channels;
		max_c -= count_c;
	}

	/* 5GHz band, A mode */
	if (sc->ah->ah_capabilities.cap_mode & AR5K_MODE_BIT_11A) {
		band = NET80211_BAND_5GHZ;
		sc->hwinfo->bands |= NET80211_BAND_BIT_5GHZ;
		sc->hwinfo->modes |= NET80211_MODE_A;

		for (i = 0; i < 8; i++)
			sc->hwinfo->rates[band][i] = ath5k_rates[i+4].bitrate;
		sc->hwinfo->nr_rates[band] = 8;

		/* Append 5GHz channels after any 2GHz channels already
		 * collected above */
		sc->hwinfo->nr_channels +=
			ath5k_copy_channels(ah,
					    sc->hwinfo->channels
					    + sc->hwinfo->nr_channels,
					    AR5K_MODE_11A, max_c);
		count_c = sc->hwinfo->nr_channels;
		max_c -= count_c;
	}

	return 0;
}

/*
 * Set/change channels. If the channel is really being changed, it's done
 * by resetting the chip. To accomplish this we must first clean up any
 * pending DMA, then restart everything along the same path as
 * ath5k_init.
 */
static int
ath5k_chan_set(struct ath5k_softc *sc, struct net80211_channel *chan)
{
	if (chan->center_freq != sc->curchan->center_freq ||
	    chan->hw_value != sc->curchan->hw_value) {
		/*
		 * To switch channels clear any pending DMA operations;
		 * wait long enough for the RX fifo to drain, reset the
		 * hardware at the new frequency, and then re-enable
		 * the relevant bits of the h/w.
		 */
		DBG2("ath5k: resetting for channel change (%d -> %d MHz)\n",
		     sc->curchan->center_freq, chan->center_freq);
		return ath5k_reset(sc, chan);
	}

	return 0;
}

static void
ath5k_setcurmode(struct ath5k_softc *sc, unsigned int mode)
{
	sc->curmode = mode;

	if (mode == AR5K_MODE_11A) {
		sc->curband = NET80211_BAND_5GHZ;
	} else {
		sc->curband = NET80211_BAND_2GHZ;
	}
}

static void
ath5k_mode_setup(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	u32 rfilt;

	/* configure rx filter */
	rfilt = sc->filter_flags;
	ath5k_hw_set_rx_filter(ah, rfilt);

	if (ath5k_hw_hasbssidmask(ah))
		ath5k_hw_set_bssid_mask(ah, sc->bssidmask);

	/* configure operational mode */
	ath5k_hw_set_opmode(ah);

	ath5k_hw_set_mcast_filter(ah, 0, 0);
}

static inline int
ath5k_hw_rix_to_bitrate(int hw_rix)
{
	int i;

	for (i = 0; i < ATH5K_NR_RATES; i++) {
		if (ath5k_rates[i].hw_code == hw_rix)
			return ath5k_rates[i].bitrate;
	}

	DBG("ath5k: invalid rix %02x\n", hw_rix);
	return 10; /* use lowest rate */
}

int ath5k_bitrate_to_hw_rix(int bitrate)
{
	int i;

	for (i = 0; i < ATH5K_NR_RATES; i++) {
		if (ath5k_rates[i].bitrate == bitrate)
			return ath5k_rates[i].hw_code;
	}

	DBG("ath5k: invalid bitrate %d\n", bitrate);
	return ATH5K_RATE_CODE_1M; /* use lowest rate */
}
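
/*
 * Both lookups scan the rate table in order, so for the duplicated CCK
 * bitrates (20, 55, 110) ath5k_bitrate_to_hw_rix() returns the
 * long-preamble hardware code, while ath5k_hw_rix_to_bitrate() maps the
 * long- and short-preamble codes back to the same bitrate.
 */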

/***************\
* Buffers setup *
\***************/

static struct io_buffer *
ath5k_rx_iob_alloc(struct ath5k_softc *sc, u32 *iob_addr)
{
	struct io_buffer *iob;
	unsigned int off;

	/*
	 * Allocate buffer with headroom_needed space for the
	 * fake physical layer header at the start.
	 */
	iob = alloc_iob(sc->rxbufsize + sc->cachelsz - 1);

	if (!iob) {
		DBG("ath5k: can't alloc iobuf of size %d\n",
		    sc->rxbufsize + sc->cachelsz - 1);
		return NULL;
	}

	*iob_addr = virt_to_bus(iob->data);

	/*
	 * Cache-line-align. This is important (for the
	 * 5210 at least) as not doing so causes bogus data
	 * in rx'd frames.
	 */
	off = *iob_addr % sc->cachelsz;
	if (off != 0) {
		iob_reserve(iob, sc->cachelsz - off);
		*iob_addr += sc->cachelsz - off;
	}

	return iob;
}
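
/*
 * Alignment example: with a 64-byte cache line, a buffer whose DMA
 * address ends in 0x28 gives off == 0x28, so iob_reserve() skips
 * 0x40 - 0x28 = 0x18 bytes and *iob_addr is rounded up to the next
 * 64-byte boundary.
 */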

static int
ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct io_buffer *iob = bf->iob;
	struct ath5k_desc *ds;

	if (!iob) {
		iob = ath5k_rx_iob_alloc(sc, &bf->iobaddr);
		if (!iob)
			return -ENOMEM;
		bf->iob = iob;
	}

	/*
	 * Setup descriptors. For receive we always terminate
	 * the descriptor list with a self-linked entry so we'll
	 * not get overrun under high load (as can happen with a
	 * 5212 when ANI processing enables PHY error frames).
	 *
	 * To ensure the last descriptor is self-linked we create
	 * each descriptor as self-linked and add it to the end. As
	 * each additional descriptor is added the previous self-linked
	 * entry is ``fixed'' naturally. This should be safe even
	 * if DMA is happening. When processing RX interrupts we
	 * never remove/process the last, self-linked, entry on the
	 * descriptor list. This ensures the hardware always has
	 * someplace to write a new frame.
	 */
	ds = bf->desc;
	ds->ds_link = bf->daddr;	/* link to self */
	ds->ds_data = bf->iobaddr;
	if (ah->ah_setup_rx_desc(ah, ds,
				 iob_tailroom(iob),	/* buffer size */
				 0) != 0) {
		DBG("ath5k: error setting up RX descriptor for %d bytes\n",
		    iob_tailroom(iob));
		return -EINVAL;
	}

	if (sc->rxlink != NULL)
		*sc->rxlink = bf->daddr;
	sc->rxlink = &ds->ds_link;
	return 0;
}
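
/*
 * The resulting RX chain, including the self-linked tail described
 * above, looks like this:
 *
 *   rxdp -> desc[0] -> desc[1] -> ... -> desc[n] --+
 *                                           ^------+
 *
 * so the DMA engine always has a valid ds_link to follow.
 */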

static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq *txq = &sc->txq;
	struct ath5k_desc *ds = bf->desc;
	struct io_buffer *iob = bf->iob;
	unsigned int pktlen, flags;
	int ret;
	u16 duration = 0;
	u16 cts_rate = 0;

	flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
	bf->iobaddr = virt_to_bus(iob->data);
	pktlen = iob_len(iob);

	/* FIXME: If we are in g mode and rate is a CCK rate
	 * subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
	 * from tx power (value is in dB units already) */
	if (sc->dev->phy_flags & NET80211_PHY_USE_PROTECTION) {
		struct net80211_device *dev = sc->dev;

		flags |= AR5K_TXDESC_CTSENA;
		cts_rate = sc->hw_rtscts_rate;
		duration = net80211_cts_duration(dev, pktlen);
	}
	ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
				   IEEE80211_TYP_FRAME_HEADER_LEN,
				   AR5K_PKT_TYPE_NORMAL, sc->power_level * 2,
				   sc->hw_rate, ATH5K_RETRIES,
				   AR5K_TXKEYIX_INVALID, 0, flags,
				   cts_rate, duration);
	if (ret)
		return ret;

	ds->ds_link = 0;
	ds->ds_data = bf->iobaddr;

	list_add_tail(&bf->list, &txq->q);
	if (txq->link == NULL) /* is this first packet? */
		ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
	else /* no, so only link it */
		*txq->link = bf->daddr;

	txq->link = &ds->ds_link;
	ath5k_hw_start_tx_dma(ah, txq->qnum);
	mb();

	return 0;
}
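
/*
 * TX descriptors are chained similarly: the first buffer queued is handed
 * to the hardware with ath5k_hw_set_txdp(), while later buffers are only
 * linked into the previous descriptor's ds_link; ath5k_hw_start_tx_dma()
 * is issued each time in case the queue had previously drained.
 */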

/*******************\
* Descriptors setup *
\*******************/

static int
ath5k_desc_alloc(struct ath5k_softc *sc)
{
	struct ath5k_desc *ds;
	struct ath5k_buf *bf;
	u32 da;
	unsigned int i;
	int ret;

	/* allocate descriptors */
	sc->desc_len = sizeof(struct ath5k_desc) * (ATH_TXBUF + ATH_RXBUF + 1);
	sc->desc = malloc_dma(sc->desc_len, ATH5K_DESC_ALIGN);
	if (sc->desc == NULL) {
		DBG("ath5k: can't allocate descriptors\n");
		ret = -ENOMEM;
		goto err;
	}
	memset(sc->desc, 0, sc->desc_len);
	sc->desc_daddr = virt_to_bus(sc->desc);

	ds = sc->desc;
	da = sc->desc_daddr;

	bf = calloc(ATH_TXBUF + ATH_RXBUF + 1, sizeof(struct ath5k_buf));
	if (bf == NULL) {
		DBG("ath5k: can't allocate buffer pointers\n");
		ret = -ENOMEM;
		goto err_free;
	}
	sc->bufptr = bf;

	INIT_LIST_HEAD(&sc->rxbuf);
	for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->rxbuf);
	}

	INIT_LIST_HEAD(&sc->txbuf);
	sc->txbuf_len = ATH_TXBUF;
	for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
		bf->desc = ds;
		bf->daddr = da;
		list_add_tail(&bf->list, &sc->txbuf);
	}

	return 0;

err_free:
	free_dma(sc->desc, sc->desc_len);
err:
	sc->desc = NULL;
	return ret;
}

static void
ath5k_desc_free(struct ath5k_softc *sc)
{
	struct ath5k_buf *bf;

	list_for_each_entry(bf, &sc->txbuf, list)
		ath5k_txbuf_free(sc, bf);
	list_for_each_entry(bf, &sc->rxbuf, list)
		ath5k_rxbuf_free(sc, bf);

	/* Free memory associated with all descriptors */
	free_dma(sc->desc, sc->desc_len);

	free(sc->bufptr);
	sc->bufptr = NULL;
}




/**************\
* Queues setup *
\**************/

static int
ath5k_txq_setup(struct ath5k_softc *sc, int qtype, int subtype)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_txq *txq;
	struct ath5k_txq_info qi = {
		.tqi_subtype = subtype,
		.tqi_aifs = AR5K_TXQ_USEDEFAULT,
		.tqi_cw_min = AR5K_TXQ_USEDEFAULT,
		.tqi_cw_max = AR5K_TXQ_USEDEFAULT
	};
	int qnum;

	/*
	 * Enable interrupts only for EOL and DESC conditions.
	 * We mark tx descriptors to receive a DESC interrupt
	 * when a tx queue gets deep; otherwise we wait for the
	 * EOL to reap descriptors. Note that this is done to
	 * reduce interrupt load and this only defers reaping
	 * descriptors, never transmitting frames. Aside from
	 * reducing interrupts this also permits more concurrency.
	 * The only potential downside is if the tx queue backs
	 * up, in which case the top half of the kernel may back up
	 * due to a lack of tx descriptors.
	 */
	qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
		       AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
	qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
	if (qnum < 0) {
		DBG("ath5k: can't set up a TX queue\n");
		return -EIO;
	}

	txq = &sc->txq;
	if (!txq->setup) {
		txq->qnum = qnum;
		txq->link = NULL;
		INIT_LIST_HEAD(&txq->q);
		txq->setup = 1;
	}
	return 0;
}

static void
ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
{
	struct ath5k_buf *bf, *bf0;

	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
		ath5k_txbuf_free(sc, bf);

		list_del(&bf->list);
		list_add_tail(&bf->list, &sc->txbuf);
		sc->txbuf_len++;
	}
	txq->link = NULL;
}

/*
 * Drain the transmit queues and reclaim resources.
 */
static void
ath5k_txq_cleanup(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	if (!(sc->status & ATH_STAT_INVALID)) {
		/* don't touch the hardware if marked invalid */
		if (sc->txq.setup) {
			ath5k_hw_stop_tx_dma(ah, sc->txq.qnum);
			DBG("ath5k: txq [%d] %x, link %p\n",
			    sc->txq.qnum,
			    ath5k_hw_get_txdp(ah, sc->txq.qnum),
			    sc->txq.link);
		}
	}

	if (sc->txq.setup)
		ath5k_txq_drainq(sc, &sc->txq);
}

static void
ath5k_txq_release(struct ath5k_softc *sc)
{
	if (sc->txq.setup) {
		ath5k_hw_release_tx_queue(sc->ah);
		sc->txq.setup = 0;
	}
}




/*************\
* RX Handling *
\*************/

/*
 * Enable the receive h/w following a reset.
 */
static int
ath5k_rx_start(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	struct ath5k_buf *bf;
	int ret;

	sc->rxbufsize = IEEE80211_MAX_LEN;
	if (sc->rxbufsize % sc->cachelsz != 0)
		sc->rxbufsize += sc->cachelsz - (sc->rxbufsize % sc->cachelsz);
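	/* For example, assuming IEEE80211_MAX_LEN is 2352 and the cache
	 * line is 64 bytes, rxbufsize is rounded up to 2368 here. */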

	sc->rxlink = NULL;

	list_for_each_entry(bf, &sc->rxbuf, list) {
		ret = ath5k_rxbuf_setup(sc, bf);
		if (ret != 0)
			return ret;
	}

	bf = list_entry(sc->rxbuf.next, struct ath5k_buf, list);

	ath5k_hw_set_rxdp(ah, bf->daddr);
	ath5k_hw_start_rx_dma(ah);	/* enable recv descriptors */
	ath5k_mode_setup(sc);		/* set filters, etc. */
	ath5k_hw_start_rx_pcu(ah);	/* re-enable PCU/DMA engine */

	return 0;
}

/*
 * Disable the receive h/w in preparation for a reset.
 */
static void
ath5k_rx_stop(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	ath5k_hw_stop_rx_pcu(ah);	/* disable PCU */
	ath5k_hw_set_rx_filter(ah, 0);	/* clear recv filter */
	ath5k_hw_stop_rx_dma(ah);	/* disable DMA engine */

	sc->rxlink = NULL;		/* just in case */
}

static void
ath5k_handle_rx(struct ath5k_softc *sc)
{
	struct ath5k_rx_status rs;
	struct io_buffer *iob, *next_iob;
	u32 next_iob_addr;
	struct ath5k_buf *bf, *bf_last;
	struct ath5k_desc *ds;
	int ret;

	memset(&rs, 0, sizeof(rs));

	if (list_empty(&sc->rxbuf)) {
		DBG("ath5k: empty rx buf pool\n");
		return;
	}

	bf_last = list_entry(sc->rxbuf.prev, struct ath5k_buf, list);

	do {
		bf = list_entry(sc->rxbuf.next, struct ath5k_buf, list);
		assert(bf->iob != NULL);
		iob = bf->iob;
		ds = bf->desc;

		/*
		 * The last buffer must not be freed to ensure proper
		 * hardware operation. Once the hardware has also finished
		 * the packet after it, we can be sure it no longer uses
		 * this buffer and can move on.
		 */
		if (bf_last == bf)
			bf->flags |= 1;
		if (bf->flags) {
			struct ath5k_buf *bf_next = list_entry(bf->list.next,
							       struct ath5k_buf, list);
			ret = sc->ah->ah_proc_rx_desc(sc->ah, bf_next->desc,
						      &rs);
			if (ret)
				break;
			bf->flags &= ~1;
			/* skip the overwritten one (its status is not
			 * meaningful anyway) */
			goto next;
		}

		ret = sc->ah->ah_proc_rx_desc(sc->ah, ds, &rs);
		if (ret) {
			if (ret != -EINPROGRESS) {
				DBG("ath5k: error in processing rx desc: %s\n",
				    strerror(ret));
				net80211_rx_err(sc->dev, NULL, -ret);
			} else {
				/* normal return, reached end of
				   available descriptors */
			}
			return;
		}

		if (rs.rs_more) {
			DBG("ath5k: unsupported fragmented rx\n");
			goto next;
		}

		if (rs.rs_status) {
			if (rs.rs_status & AR5K_RXERR_PHY) {
				/* These are uncommon, and may indicate a
				 * real problem. */
				net80211_rx_err(sc->dev, NULL, EIO);
				goto next;
			}
			if (rs.rs_status & AR5K_RXERR_CRC) {
				/* These occur *all the time*. */
				goto next;
			}
			if (rs.rs_status & AR5K_RXERR_DECRYPT) {
				/*
				 * Decrypt error. If the error occurred
				 * because there was no hardware key, then
				 * let the frame through so the upper layers
				 * can process it. This is necessary for 5210
				 * parts which have no way to setup a ``clear''
				 * key cache entry.
				 *
				 * XXX do key cache faulting
				 */
				if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
				    !(rs.rs_status & AR5K_RXERR_CRC))
					goto accept;
			}

			/* any other error, unhandled */
			DBG("ath5k: packet rx status %x\n", rs.rs_status);
			goto next;
		}
accept:
		next_iob = ath5k_rx_iob_alloc(sc, &next_iob_addr);

		/*
		 * If we can't replace bf->iob with a new iob under memory
		 * pressure, just skip this packet
		 */
		if (!next_iob) {
			DBG("ath5k: dropping packet under memory pressure\n");
			goto next;
		}

		iob_put(iob, rs.rs_datalen);

		/* The MAC header is padded to a 32-bit boundary if the
		 * packet payload is non-zero. However, gPXE only
		 * supports standard 802.11 packets with a 24-byte
		 * header, so no padding correction should be needed.
		 */

		DBG2("ath5k: rx %d bytes, signal %d\n", rs.rs_datalen,
		     rs.rs_rssi);

		net80211_rx(sc->dev, iob, rs.rs_rssi,
			    ath5k_hw_rix_to_bitrate(rs.rs_rate));

		bf->iob = next_iob;
		bf->iobaddr = next_iob_addr;
next:
		list_del(&bf->list);
		list_add_tail(&bf->list, &sc->rxbuf);
	} while (ath5k_rxbuf_setup(sc, bf) == 0);
}




/*************\
* TX Handling *
\*************/

static void
ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
{
	struct ath5k_tx_status ts;
	struct ath5k_buf *bf, *bf0;
	struct ath5k_desc *ds;
	struct io_buffer *iob;
	int ret;

	memset(&ts, 0, sizeof(ts));

	list_for_each_entry_safe(bf, bf0, &txq->q, list) {
		ds = bf->desc;

		ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
		if (ret) {
			if (ret != -EINPROGRESS) {
				DBG("ath5k: error in processing tx desc: %s\n",
				    strerror(ret));
			} else {
				/* normal return, reached end of tx
				   completions */
			}
			break;
		}

		iob = bf->iob;
		bf->iob = NULL;

		DBG2("ath5k: tx %d bytes complete, %d retries\n",
		     iob_len(iob), ts.ts_retry[0]);

		net80211_tx_complete(sc->dev, iob, ts.ts_retry[0],
				     ts.ts_status ? EIO : 0);

		list_del(&bf->list);
		list_add_tail(&bf->list, &sc->txbuf);
		sc->txbuf_len++;
	}

	if (list_empty(&txq->q))
		txq->link = NULL;
}

static void
ath5k_handle_tx(struct ath5k_softc *sc)
{
	ath5k_tx_processq(sc, &sc->txq);
}


/********************\
* Interrupt handling *
\********************/

static void
ath5k_irq(struct net80211_device *dev, int enable)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;

	sc->irq_ena = enable;
	ah->ah_ier = enable ? AR5K_IER_ENABLE : AR5K_IER_DISABLE;

	ath5k_hw_reg_write(ah, ah->ah_ier, AR5K_IER);
	ath5k_hw_set_imr(ah, sc->imask);
}
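
/*
 * AR5K_IER acts as the global interrupt on/off switch, while the mask
 * programmed via ath5k_hw_set_imr() (sc->imask, built in ath5k_init())
 * selects which individual interrupt sources are allowed through.
 */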

static int
ath5k_init(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	int ret, i;

	/*
	 * Stop anything previously setup. This is safe
	 * whether or not this is the first time through.
	 */
	ath5k_stop_hw(sc);

	/*
	 * The basic interface to setting the hardware in a good
	 * state is ``reset''. On return the hardware is known to
	 * be powered up and with interrupts disabled. This must
	 * be followed by initialization of the appropriate bits
	 * and then setup of the interrupt mask.
	 */
	sc->curchan = sc->dev->channels + sc->dev->channel;
	sc->curband = sc->curchan->band;
	sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
		    AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
		    AR5K_INT_FATAL | AR5K_INT_GLOBAL;
	ret = ath5k_reset(sc, NULL);
	if (ret)
		goto done;

	ath5k_rfkill_hw_start(ah);

	/*
	 * Reset the key cache since some parts do not reset the
	 * contents on initial power up or resume from suspend.
	 */
	for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
		ath5k_hw_reset_key(ah, i);

	/* Set ack to be sent at low bit-rates */
	ath5k_hw_set_ack_bitrate_high(ah, 0);

	ret = 0;
done:
	mb();
	return ret;
}

static int
ath5k_stop_hw(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	/*
	 * Shutdown the hardware and driver:
	 *    stop output from above
	 *    disable interrupts
	 *    turn off timers
	 *    turn off the radio
	 *    clear transmit machinery
	 *    clear receive machinery
	 *    drain and release tx queues
	 *    reclaim beacon resources
	 *    power down hardware
	 *
	 * Note that some of this work is not possible if the
	 * hardware is gone (invalid).
	 */

	if (!(sc->status & ATH_STAT_INVALID)) {
		ath5k_hw_set_imr(ah, 0);
	}
	ath5k_txq_cleanup(sc);
	if (!(sc->status & ATH_STAT_INVALID)) {
		ath5k_rx_stop(sc);
		ath5k_hw_phy_disable(ah);
	} else
		sc->rxlink = NULL;

	ath5k_rfkill_hw_stop(sc->ah);

	return 0;
}

static void
ath5k_poll(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	enum ath5k_int status;
	unsigned int counter = 1000;

	if (currticks() - sc->last_calib_ticks >
	    ATH5K_CALIB_INTERVAL * ticks_per_sec()) {
		ath5k_calibrate(sc);
		sc->last_calib_ticks = currticks();
	}

	if ((sc->status & ATH_STAT_INVALID) ||
	    (sc->irq_ena && !ath5k_hw_is_intr_pending(ah)))
		return;

	do {
		ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
		DBGP("ath5k: status %#x/%#x\n", status, sc->imask);
		if (status & AR5K_INT_FATAL) {
			/*
			 * Fatal errors are unrecoverable.
			 * Typically these are caused by DMA errors.
			 */
			DBG("ath5k: fatal error, resetting\n");
			ath5k_reset_wake(sc);
		} else if (status & AR5K_INT_RXORN) {
			DBG("ath5k: rx overrun, resetting\n");
			ath5k_reset_wake(sc);
		} else {
			if (status & AR5K_INT_RXEOL) {
				/*
				 * NB: the hardware should re-read the link
				 * when the RXE bit is written, but it doesn't
				 * work, at least on older hardware revs.
				 */
				DBG("ath5k: rx EOL\n");
				sc->rxlink = NULL;
			}
			if (status & AR5K_INT_TXURN) {
				/* bump tx trigger level */
				DBG("ath5k: tx underrun\n");
				ath5k_hw_update_tx_triglevel(ah, 1);
			}
			if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
				ath5k_handle_rx(sc);
			if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
				      | AR5K_INT_TXERR | AR5K_INT_TXEOL))
				ath5k_handle_tx(sc);
		}
	} while (ath5k_hw_is_intr_pending(ah) && counter-- > 0);

	if (!counter)
		DBG("ath5k: too many interrupts, giving up for now\n");
}

/*
 * Periodically recalibrate the PHY to account
 * for temperature/environment changes.
 */
static void
ath5k_calibrate(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;

	if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
		/*
		 * Rfgain is out of bounds, reset the chip
		 * to load new gain values.
		 */
		DBG("ath5k: resetting for calibration\n");
		ath5k_reset_wake(sc);
	}
	if (ath5k_hw_phy_calibrate(ah, sc->curchan))
		DBG("ath5k: calibration of channel %d failed\n",
		    sc->curchan->channel_nr);
}


/********************\
* Net80211 functions *
\********************/

static int
ath5k_tx(struct net80211_device *dev, struct io_buffer *iob)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_buf *bf;
	int rc;

	/*
	 * The hardware expects the header padded to 4 byte boundaries.
	 * gPXE only ever sends 24-byte headers, so no action necessary.
	 */

	if (list_empty(&sc->txbuf)) {
		DBG("ath5k: dropping packet because no tx bufs available\n");
		return -ENOBUFS;
	}

	bf = list_entry(sc->txbuf.next, struct ath5k_buf, list);
	list_del(&bf->list);
	sc->txbuf_len--;

	bf->iob = iob;

	if ((rc = ath5k_txbuf_setup(sc, bf)) != 0) {
		bf->iob = NULL;
		list_add_tail(&bf->list, &sc->txbuf);
		sc->txbuf_len++;
		return rc;
	}
	return 0;
}

/*
 * Reset the hardware. If chan is not NULL, then also pause rx/tx
 * and change to the given channel.
 */
static int
ath5k_reset(struct ath5k_softc *sc, struct net80211_channel *chan)
{
	struct ath5k_hw *ah = sc->ah;
	int ret;

	if (chan) {
		ath5k_hw_set_imr(ah, 0);
		ath5k_txq_cleanup(sc);
		ath5k_rx_stop(sc);

		sc->curchan = chan;
		sc->curband = chan->band;
	}

	ret = ath5k_hw_reset(ah, sc->curchan, 1);
	if (ret) {
		DBG("ath5k: can't reset hardware: %s\n", strerror(ret));
		return ret;
	}

	ret = ath5k_rx_start(sc);
	if (ret) {
		DBG("ath5k: can't start rx logic: %s\n", strerror(ret));
		return ret;
	}

	/*
	 * Change channels and update the h/w rate map if we're switching;
	 * e.g. 11a to 11b/g.
	 *
	 * We may be doing a reset in response to an ioctl that changes the
	 * channel so update any state that might change as a result.
	 *
	 * XXX needed?
	 */
	/* ath5k_chan_change(sc, c); */

	/* Reenable interrupts if necessary */
	ath5k_irq(sc->dev, sc->irq_ena);

	return 0;
}

static int ath5k_reset_wake(struct ath5k_softc *sc)
{
	return ath5k_reset(sc, sc->curchan);
}

static int ath5k_start(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	int ret;

	if ((ret = ath5k_init(sc)) != 0)
		return ret;

	sc->assoc = 0;
	ath5k_configure_filter(sc);
	ath5k_hw_set_lladdr(sc->ah, dev->netdev->ll_addr);

	return 0;
}

static void ath5k_stop(struct net80211_device *dev)
{
	struct ath5k_softc *sc = dev->priv;
	u8 mac[ETH_ALEN] = {};

	ath5k_hw_set_lladdr(sc->ah, mac);

	ath5k_stop_hw(sc);
}

static int
ath5k_config(struct net80211_device *dev, int changed)
{
	struct ath5k_softc *sc = dev->priv;
	struct ath5k_hw *ah = sc->ah;
	struct net80211_channel *chan = &dev->channels[dev->channel];
	int ret;

	if (changed & NET80211_CFG_CHANNEL) {
		sc->power_level = chan->maxpower;
		if ((ret = ath5k_chan_set(sc, chan)) != 0)
			return ret;
	}

	if ((changed & NET80211_CFG_RATE) ||
	    (changed & NET80211_CFG_PHY_PARAMS)) {
		int spmbl = ATH5K_SPMBL_NO;
		u16 rate = dev->rates[dev->rate];
		u16 slowrate = dev->rates[dev->rtscts_rate];
		int i;

		if (dev->phy_flags & NET80211_PHY_USE_SHORT_PREAMBLE)
			spmbl = ATH5K_SPMBL_YES;

		for (i = 0; i < ATH5K_NR_RATES; i++) {
			if (ath5k_rates[i].bitrate == rate &&
			    (ath5k_rates[i].short_pmbl & spmbl))
				sc->hw_rate = ath5k_rates[i].hw_code;

			if (ath5k_rates[i].bitrate == slowrate &&
			    (ath5k_rates[i].short_pmbl & spmbl))
				sc->hw_rtscts_rate = ath5k_rates[i].hw_code;
		}
	}

	if (changed & NET80211_CFG_ASSOC) {
		sc->assoc = !!(dev->state & NET80211_ASSOCIATED);
		if (sc->assoc) {
			memcpy(ah->ah_bssid, dev->bssid, ETH_ALEN);
		} else {
			memset(ah->ah_bssid, 0xff, ETH_ALEN);
		}
		ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
	}

	return 0;
}

/*
 * o always accept unicast, broadcast, and multicast traffic
 * o multicast traffic for all BSSIDs will be enabled if the 802.11
 *   layer says it should be
 * o maintain current state of phy ofdm or phy cck error reception.
 *   If the hardware detects any of these types of errors then
 *   ath5k_hw_get_rx_filter() will pass to us the respective
 *   hardware filters to be able to receive these types of frames.
 * o probe request frames are accepted only when operating in
 *   hostap, adhoc, or monitor modes
 * o enable promiscuous mode according to the interface state
 * o accept beacons:
 *   - when operating in adhoc mode so the 802.11 layer creates
 *     node table entries for peers,
 *   - when operating in station mode for collecting rssi data when
 *     the station is otherwise quiet, or
 *   - when scanning
 */
static void ath5k_configure_filter(struct ath5k_softc *sc)
{
	struct ath5k_hw *ah = sc->ah;
	u32 mfilt[2], rfilt;

	/* Enable all multicast */
	mfilt[0] = ~0;
	mfilt[1] = ~0;

	/* Enable data frames and beacons */
	rfilt = (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
		 AR5K_RX_FILTER_MCAST | AR5K_RX_FILTER_BEACON);

	/* Set filters */
	ath5k_hw_set_rx_filter(ah, rfilt);

	/* Set multicast bits */
	ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);

	/* Cache the hw filter flags; this is what will later actually
	 * be set in HW */
	sc->filter_flags = rfilt;
}
