/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/ath9k_platform.h>
#include <linux/module.h>
#include <linux/relay.h>

#include "ath9k.h"

struct ath9k_eeprom_ctx {
	struct completion complete;
	struct ath_hw *ah;
};

static char *dev_info = "ath9k";

MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");

static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
module_param_named(debug, ath9k_debug, uint, 0);
MODULE_PARM_DESC(debug, "Debugging mask");

int ath9k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");

int led_blink;
module_param_named(blink, led_blink, int, 0444);
MODULE_PARM_DESC(blink, "Enable LED blink on activity");

static int ath9k_btcoex_enable;
module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");

static int ath9k_enable_diversity;
module_param_named(enable_diversity, ath9k_enable_diversity, int, 0444);
MODULE_PARM_DESC(enable_diversity, "Enable Antenna diversity for AR9565");

bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */

#define CHAN2G(_freq, _idx)  { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

#define CHAN5G(_freq, _idx) { \
	.band = IEEE80211_BAND_5GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_idx), \
	.max_power = 20, \
}

/* Some 2 GHz radios are actually tunable on 2312-2732 MHz
 * in 5 MHz steps; we only support the channels for which we
 * know we have calibration data on all cards, so that this
 * table can stay static */
static const struct ieee80211_channel ath9k_2ghz_chantable[] = {
	CHAN2G(2412, 0), /* Channel 1 */
	CHAN2G(2417, 1), /* Channel 2 */
	CHAN2G(2422, 2), /* Channel 3 */
	CHAN2G(2427, 3), /* Channel 4 */
	CHAN2G(2432, 4), /* Channel 5 */
	CHAN2G(2437, 5), /* Channel 6 */
	CHAN2G(2442, 6), /* Channel 7 */
	CHAN2G(2447, 7), /* Channel 8 */
	CHAN2G(2452, 8), /* Channel 9 */
	CHAN2G(2457, 9), /* Channel 10 */
	CHAN2G(2462, 10), /* Channel 11 */
	CHAN2G(2467, 11), /* Channel 12 */
	CHAN2G(2472, 12), /* Channel 13 */
	CHAN2G(2484, 13), /* Channel 14 */
};

/* Some 5 GHz radios are actually tunable on XXXX-YYYY
 * in 5 MHz steps; we only support the channels for which we
 * know we have calibration data on all cards, so that this
 * table can stay static */
static const struct ieee80211_channel ath9k_5ghz_chantable[] = {
	/* _We_ call this UNII 1 */
	CHAN5G(5180, 14), /* Channel 36 */
	CHAN5G(5200, 15), /* Channel 40 */
	CHAN5G(5220, 16), /* Channel 44 */
	CHAN5G(5240, 17), /* Channel 48 */
	/* _We_ call this UNII 2 */
	CHAN5G(5260, 18), /* Channel 52 */
	CHAN5G(5280, 19), /* Channel 56 */
	CHAN5G(5300, 20), /* Channel 60 */
	CHAN5G(5320, 21), /* Channel 64 */
	/* _We_ call this "Middle band" */
	CHAN5G(5500, 22), /* Channel 100 */
	CHAN5G(5520, 23), /* Channel 104 */
	CHAN5G(5540, 24), /* Channel 108 */
	CHAN5G(5560, 25), /* Channel 112 */
	CHAN5G(5580, 26), /* Channel 116 */
	CHAN5G(5600, 27), /* Channel 120 */
	CHAN5G(5620, 28), /* Channel 124 */
	CHAN5G(5640, 29), /* Channel 128 */
	CHAN5G(5660, 30), /* Channel 132 */
	CHAN5G(5680, 31), /* Channel 136 */
	CHAN5G(5700, 32), /* Channel 140 */
	/* _We_ call this UNII 3 */
	CHAN5G(5745, 33), /* Channel 149 */
	CHAN5G(5765, 34), /* Channel 153 */
	CHAN5G(5785, 35), /* Channel 157 */
	CHAN5G(5805, 36), /* Channel 161 */
	CHAN5G(5825, 37), /* Channel 165 */
};

/* Atheros hardware rate code addition for short preamble */
#define SHPCHECK(__hw_rate, __flags) \
	((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)

#define RATE(_bitrate, _hw_rate, _flags) {              \
	.bitrate        = (_bitrate),                   \
	.flags          = (_flags),                     \
	.hw_value       = (_hw_rate),                   \
	.hw_value_short = (SHPCHECK(_hw_rate, _flags))  \
}

static struct ieee80211_rate ath9k_legacy_rates[] = {
	RATE(10, 0x1b, 0),
	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
	RATE(60, 0x0b, 0),
	RATE(90, 0x0f, 0),
	RATE(120, 0x0a, 0),
	RATE(180, 0x0e, 0),
	RATE(240, 0x09, 0),
	RATE(360, 0x0d, 0),
	RATE(480, 0x08, 0),
	RATE(540, 0x0c, 0),
};

#ifdef CONFIG_MAC80211_LEDS
static const struct ieee80211_tpt_blink ath9k_tpt_blink[] = {
	{ .throughput = 0 * 1024, .blink_time = 334 },
	{ .throughput = 1 * 1024, .blink_time = 260 },
	{ .throughput = 5 * 1024, .blink_time = 220 },
	{ .throughput = 10 * 1024, .blink_time = 190 },
	{ .throughput = 20 * 1024, .blink_time = 170 },
	{ .throughput = 50 * 1024, .blink_time = 150 },
	{ .throughput = 70 * 1024, .blink_time = 130 },
	{ .throughput = 100 * 1024, .blink_time = 110 },
	{ .throughput = 200 * 1024, .blink_time = 80 },
	{ .throughput = 300 * 1024, .blink_time = 50 },
};
#endif

static void ath9k_deinit_softc(struct ath_softc *sc);

/*
 * Reads and writes share the same lock. We do this to serialize
 * register accesses on Atheros 802.11n PCI devices only. This is
 * required because the FIFO on these devices can sanely accept only
 * two outstanding requests.
 */

static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;

	if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		iowrite32(val, sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		iowrite32(val, sc->mem + reg_offset);
}

static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	u32 val;

	if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
		unsigned long flags;
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = ioread32(sc->mem + reg_offset);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = ioread32(sc->mem + reg_offset);
	return val;
}

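/*
 * Unlocked read-modify-write helper; callers that need serialized
 * register access must hold sc->sc_serial_rw around this call.
 */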
static unsigned int __ath9k_reg_rmw(struct ath_softc *sc, u32 reg_offset,
				    u32 set, u32 clr)
{
	u32 val;

	val = ioread32(sc->mem + reg_offset);
	val &= ~clr;
	val |= set;
	iowrite32(val, sc->mem + reg_offset);

	return val;
}

static unsigned int ath9k_reg_rmw(void *hw_priv, u32 reg_offset, u32 set, u32 clr)
{
	struct ath_hw *ah = (struct ath_hw *) hw_priv;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_softc *sc = (struct ath_softc *) common->priv;
	unsigned long uninitialized_var(flags);
	u32 val;

	if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_ON) {
		spin_lock_irqsave(&sc->sc_serial_rw, flags);
		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);
		spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
	} else
		val = __ath9k_reg_rmw(sc, reg_offset, set, clr);

	return val;
}

/**************************/
/*     Initialization     */
/**************************/

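/*
 * Advertise this device's HT capabilities to mac80211, based on the
 * chip revision and the configured TX/RX chainmasks.
 */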
static void setup_ht_cap(struct ath_softc *sc,
			 struct ieee80211_sta_ht_cap *ht_info)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_streams, rx_streams;
	int i, max_streams;

	ht_info->ht_supported = true;
	ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
		       IEEE80211_HT_CAP_SM_PS |
		       IEEE80211_HT_CAP_SGI_40 |
		       IEEE80211_HT_CAP_DSSSCCK40;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_LDPC)
		ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
		ht_info->cap |= IEEE80211_HT_CAP_SGI_20;

	ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
	ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;

	if (AR_SREV_9330(ah) || AR_SREV_9485(ah) || AR_SREV_9565(ah))
		max_streams = 1;
	else if (AR_SREV_9462(ah))
		max_streams = 2;
	else if (AR_SREV_9300_20_OR_LATER(ah))
		max_streams = 3;
	else
		max_streams = 2;

	if (AR_SREV_9280_20_OR_LATER(ah)) {
		if (max_streams >= 2)
			ht_info->cap |= IEEE80211_HT_CAP_TX_STBC;
		ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
	}

	/* set up supported mcs set */
	memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
	tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
	rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);

	ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
		tx_streams, rx_streams);

	if (tx_streams != rx_streams) {
		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
		ht_info->mcs.tx_params |= ((tx_streams - 1) <<
				IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
	}

	for (i = 0; i < rx_streams; i++)
		ht_info->mcs.rx_mask[i] = 0xff;

	ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
}

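/*
 * Regulatory notifier: apply the new regulatory settings and, if a
 * channel is active, update the TX power limit and the DFS detector
 * domain accordingly.
 */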
static void ath9k_reg_notifier(struct wiphy *wiphy,
			       struct regulatory_request *request)
{
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct ath_softc *sc = hw->priv;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_regulatory *reg = ath9k_hw_regulatory(ah);

	ath_reg_notifier_apply(wiphy, request, reg);

	/* Set tx power */
	if (ah->curchan) {
		sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
		ath9k_ps_wakeup(sc);
		ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
		sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
		/* synchronize DFS detector if regulatory domain changed */
		if (sc->dfs_detector != NULL)
			sc->dfs_detector->set_dfs_domain(sc->dfs_detector,
							 request->dfs_region);
		ath9k_ps_restore(sc);
	}
}

/*
 * Allocate both the DMA descriptor memory and the list of ath_buf
 * entries that point into it; these hold the descriptors handed to
 * the hardware.
 */
int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
		      struct list_head *head, const char *name,
		      int nbuf, int ndesc, bool is_tx)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	u8 *ds;
	struct ath_buf *bf;
	int i, bsize, desc_len;

	ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
		name, nbuf, ndesc);

	INIT_LIST_HEAD(head);

	if (is_tx)
		desc_len = sc->sc_ah->caps.tx_desc_len;
	else
		desc_len = sizeof(struct ath_desc);

	/* ath_desc must be a multiple of DWORDs */
	if ((desc_len % 4) != 0) {
		ath_err(common, "ath_desc not DWORD aligned\n");
		BUG_ON((desc_len % 4) != 0);
		return -ENOMEM;
	}

	dd->dd_desc_len = desc_len * nbuf * ndesc;

	/*
	 * Need additional DMA memory because we can't use
	 * descriptors that cross the 4K page boundary. Assume
	 * one skipped descriptor per 4K page.
	 */
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
		u32 ndesc_skipped =
			ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
		u32 dma_len;

		while (ndesc_skipped) {
			dma_len = ndesc_skipped * desc_len;
			dd->dd_desc_len += dma_len;

			ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
		}
	}

	/* allocate descriptors */
	dd->dd_desc = dmam_alloc_coherent(sc->dev, dd->dd_desc_len,
					  &dd->dd_desc_paddr, GFP_KERNEL);
	if (!dd->dd_desc)
		return -ENOMEM;

	ds = (u8 *) dd->dd_desc;
	ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
		name, ds, (u32) dd->dd_desc_len,
		ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);

	/* allocate buffers */
	bsize = sizeof(struct ath_buf) * nbuf;
	bf = devm_kzalloc(sc->dev, bsize, GFP_KERNEL);
	if (!bf)
		return -ENOMEM;

	for (i = 0; i < nbuf; i++, bf++, ds += (desc_len * ndesc)) {
		bf->bf_desc = ds;
		bf->bf_daddr = DS2PHYS(dd, ds);

		if (!(sc->sc_ah->caps.hw_caps &
		      ATH9K_HW_CAP_4KB_SPLITTRANS)) {
			/*
			 * Skip descriptor addresses which can cause 4KB
			 * boundary crossing (addr + length) with a 32 dword
			 * descriptor fetch.
			 */
			while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
				BUG_ON((caddr_t) bf->bf_desc >=
				       ((caddr_t) dd->dd_desc +
					dd->dd_desc_len));

				ds += (desc_len * ndesc);
				bf->bf_desc = ds;
				bf->bf_daddr = DS2PHYS(dd, ds);
			}
		}
		list_add_tail(&bf->list, head);
	}
	return 0;
}

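/*
 * Set up the beacon queue, the CAB (content-after-beacon) queue and
 * one data TX queue per mac80211 access category.
 */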
static int ath9k_init_queues(struct ath_softc *sc)
{
	int i = 0;

	sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
	sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);

	sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
	ath_cabq_update(sc);

	for (i = 0; i < IEEE80211_NUM_ACS; i++) {
		sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
		sc->tx.txq_map[i]->mac80211_qnum = i;
		sc->tx.txq_max_pending[i] = ATH_MAX_QDEPTH;
	}
	return 0;
}

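/*
 * Copy the static channel tables into per-device memory for each band
 * the hardware supports and attach the legacy bitrate table (the 5 GHz
 * band skips the four CCK rates).
 */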
static int ath9k_init_channels_rates(struct ath_softc *sc)
{
	void *channels;

	BUILD_BUG_ON(ARRAY_SIZE(ath9k_2ghz_chantable) +
		     ARRAY_SIZE(ath9k_5ghz_chantable) !=
		     ATH9K_NUM_CHANNELS);

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ) {
		channels = devm_kzalloc(sc->dev,
			sizeof(ath9k_2ghz_chantable), GFP_KERNEL);
		if (!channels)
			return -ENOMEM;

		memcpy(channels, ath9k_2ghz_chantable,
		       sizeof(ath9k_2ghz_chantable));
		sc->sbands[IEEE80211_BAND_2GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
		sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
			ARRAY_SIZE(ath9k_2ghz_chantable);
		sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
		sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates);
	}

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ) {
		channels = devm_kzalloc(sc->dev,
			sizeof(ath9k_5ghz_chantable), GFP_KERNEL);
		if (!channels)
			return -ENOMEM;

		memcpy(channels, ath9k_5ghz_chantable,
		       sizeof(ath9k_5ghz_chantable));
		sc->sbands[IEEE80211_BAND_5GHZ].channels = channels;
		sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
		sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
			ARRAY_SIZE(ath9k_5ghz_chantable);
		sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
			ath9k_legacy_rates + 4;
		sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
			ARRAY_SIZE(ath9k_legacy_rates) - 4;
	}
	return 0;
}

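/*
 * Miscellaneous state: ANI calibration timer, default TX power, BSSID
 * mask, beacon slots and spectral scan defaults.
 */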
static void ath9k_init_misc(struct ath_softc *sc)
{
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	int i = 0;

	setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);

	sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
	sc->config.txpowlimit = ATH_TXPOWER_MAX;
	memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
	sc->beacon.slottime = ATH9K_SLOT_TIME_9;

	for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++)
		sc->beacon.bslot[i] = NULL;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		sc->ant_comb.count = ATH_ANT_DIV_COMB_INIT_COUNT;

	sc->spec_config.enabled = 0;
	sc->spec_config.short_repeat = true;
	sc->spec_config.count = 8;
	sc->spec_config.endless = false;
	sc->spec_config.period = 0xFF;
	sc->spec_config.fft_period = 0xF;
}

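/* Completion callback for the asynchronous EEPROM firmware request. */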
static void ath9k_eeprom_request_cb(const struct firmware *eeprom_blob,
				    void *ctx)
{
	struct ath9k_eeprom_ctx *ec = ctx;

	if (eeprom_blob)
		ec->ah->eeprom_blob = eeprom_blob;

	complete(&ec->complete);
}

static int ath9k_eeprom_request(struct ath_softc *sc, const char *name)
{
	struct ath9k_eeprom_ctx ec;
	struct ath_hw *ah = sc->sc_ah;
	int err;

	/* try to load the EEPROM content asynchronously */
	init_completion(&ec.complete);
	ec.ah = sc->sc_ah;

	err = request_firmware_nowait(THIS_MODULE, 1, name, sc->dev, GFP_KERNEL,
				      &ec, ath9k_eeprom_request_cb);
	if (err < 0) {
		ath_err(ath9k_hw_common(ah),
			"EEPROM request failed\n");
		return err;
	}

	wait_for_completion(&ec.complete);

	if (!ah->eeprom_blob) {
		ath_err(ath9k_hw_common(ah),
			"Unable to load EEPROM file %s\n", name);
		return -EINVAL;
	}

	return 0;
}

static void ath9k_eeprom_release(struct ath_softc *sc)
{
	release_firmware(sc->sc_ah->eeprom_blob);
}

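/*
 * Allocate the ath_hw instance and initialize the software state:
 * register ops, platform data overrides, locks, tasklets, work items,
 * TX queues and channel/rate tables, then bring up the hardware.
 */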
static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
			    const struct ath_bus_ops *bus_ops)
{
	struct ath9k_platform_data *pdata = sc->dev->platform_data;
	struct ath_hw *ah = NULL;
	struct ath_common *common;
	int ret = 0, i;
	int csz = 0;

	ah = devm_kzalloc(sc->dev, sizeof(struct ath_hw), GFP_KERNEL);
	if (!ah)
		return -ENOMEM;

	ah->dev = sc->dev;
	ah->hw = sc->hw;
	ah->hw_version.devid = devid;
	ah->reg_ops.read = ath9k_ioread32;
	ah->reg_ops.write = ath9k_iowrite32;
	ah->reg_ops.rmw = ath9k_reg_rmw;
	atomic_set(&ah->intr_ref_cnt, -1);
	sc->sc_ah = ah;

	sc->dfs_detector = dfs_pattern_detector_init(ah, NL80211_DFS_UNSET);

	if (!pdata) {
		ah->ah_flags |= AH_USE_EEPROM;
		sc->sc_ah->led_pin = -1;
	} else {
		sc->sc_ah->gpio_mask = pdata->gpio_mask;
		sc->sc_ah->gpio_val = pdata->gpio_val;
		sc->sc_ah->led_pin = pdata->led_pin;
		ah->is_clk_25mhz = pdata->is_clk_25mhz;
		ah->get_mac_revision = pdata->get_mac_revision;
		ah->external_reset = pdata->external_reset;
	}

	common = ath9k_hw_common(ah);
	common->ops = &ah->reg_ops;
	common->bus_ops = bus_ops;
	common->ah = ah;
	common->hw = sc->hw;
	common->priv = sc;
	common->debug_mask = ath9k_debug;
	common->btcoex_enabled = ath9k_btcoex_enable == 1;
	common->disable_ani = false;

	/*
	 * Enable Antenna diversity only when BTCOEX is disabled
	 * and the user manually requests the feature.
	 */
	if (!common->btcoex_enabled && ath9k_enable_diversity)
		common->antenna_diversity = 1;

	spin_lock_init(&common->cc_lock);

	spin_lock_init(&sc->sc_serial_rw);
	spin_lock_init(&sc->sc_pm_lock);
	mutex_init(&sc->mutex);
#ifdef CONFIG_ATH9K_MAC_DEBUG
	spin_lock_init(&sc->debug.samp_lock);
#endif
	tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
	tasklet_init(&sc->bcon_tasklet, ath9k_beacon_tasklet,
		     (unsigned long)sc);

	INIT_WORK(&sc->hw_reset_work, ath_reset_work);
	INIT_WORK(&sc->hw_check_work, ath_hw_check);
	INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
	INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
	setup_timer(&sc->rx_poll_timer, ath_rx_poll, (unsigned long)sc);

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	ath_read_cachesize(common, &csz);
	common->cachelsz = csz << 2; /* convert to bytes */

	if (pdata && pdata->eeprom_name) {
		ret = ath9k_eeprom_request(sc, pdata->eeprom_name);
		if (ret)
			return ret;
	}

	/* Initializes the hardware for all supported chipsets */
	ret = ath9k_hw_init(ah);
	if (ret)
		goto err_hw;

	if (pdata && pdata->macaddr)
		memcpy(common->macaddr, pdata->macaddr, ETH_ALEN);

	ret = ath9k_init_queues(sc);
	if (ret)
		goto err_queues;

	ret = ath9k_init_btcoex(sc);
	if (ret)
		goto err_btcoex;

	ret = ath9k_init_channels_rates(sc);
	if (ret)
		goto err_btcoex;

	ath9k_cmn_init_crypto(sc->sc_ah);
	ath9k_init_misc(sc);
	ath_fill_led_pin(sc);

	if (common->bus_ops->aspm_init)
		common->bus_ops->aspm_init(common);

	return 0;

err_btcoex:
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);
err_queues:
	ath9k_hw_deinit(ah);
err_hw:
	ath9k_eeprom_release(sc);
	return ret;
}

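/*
 * Walk every channel in the given band once so that the per-channel
 * TX power limits get initialized.
 */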
static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
{
	struct ieee80211_supported_band *sband;
	struct ieee80211_channel *chan;
	struct ath_hw *ah = sc->sc_ah;
	int i;

	sband = &sc->sbands[band];
	for (i = 0; i < sband->n_channels; i++) {
		chan = &sband->channels[i];
		ah->curchan = &ah->channels[chan->hw_value];
		ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
		ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
	}
}

static void ath9k_init_txpower_limits(struct ath_softc *sc)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath9k_channel *curchan = ah->curchan;

	if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
	if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);

	ah->curchan = curchan;
}

void ath9k_reload_chainmask_settings(struct ath_softc *sc)
{
	if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT))
		return;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
}

static const struct ieee80211_iface_limit if_limits[] = {
	{ .max = 2048,	.types = BIT(NL80211_IFTYPE_STATION) |
				 BIT(NL80211_IFTYPE_P2P_CLIENT) |
				 BIT(NL80211_IFTYPE_WDS) },
	{ .max = 8,	.types =
#ifdef CONFIG_MAC80211_MESH
				 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
				 BIT(NL80211_IFTYPE_AP) |
				 BIT(NL80211_IFTYPE_P2P_GO) },
};


static const struct ieee80211_iface_limit if_dfs_limits[] = {
	{ .max = 1,	.types = BIT(NL80211_IFTYPE_AP) },
};

static const struct ieee80211_iface_combination if_comb[] = {
	{
		.limits = if_limits,
		.n_limits = ARRAY_SIZE(if_limits),
		.max_interfaces = 2048,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
	},
	{
		.limits = if_dfs_limits,
		.n_limits = ARRAY_SIZE(if_dfs_limits),
		.max_interfaces = 1,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
		.radar_detect_widths =	BIT(NL80211_CHAN_NO_HT) |
					BIT(NL80211_CHAN_HT20),
	}
};

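/*
 * Describe the device's capabilities to mac80211 before registration:
 * hardware flags, interface modes and combinations, WoWLAN support,
 * antennas and the supported bands.
 */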
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
{
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
		IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_PS_NULLFUNC_STACK |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
		IEEE80211_HW_SUPPORTS_RC_TABLE;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;

	if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
		hw->flags |= IEEE80211_HW_MFP_CAPABLE;

	hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_WDS) |
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	hw->wiphy->iface_combinations = if_comb;
	hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);

	hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
	hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;

#ifdef CONFIG_PM_SLEEP

	if ((ah->caps.hw_caps & ATH9K_HW_WOW_DEVICE_CAPABLE) &&
	    device_can_wakeup(sc->dev)) {

		hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
					  WIPHY_WOWLAN_DISCONNECT;
		hw->wiphy->wowlan.n_patterns = MAX_NUM_USER_PATTERN;
		hw->wiphy->wowlan.pattern_min_len = 1;
		hw->wiphy->wowlan.pattern_max_len = MAX_PATTERN_SIZE;

	}

	atomic_set(&sc->wow_sleep_proc_intr, -1);
	atomic_set(&sc->wow_got_bmiss_intr, -1);

#endif

	hw->queues = 4;
	hw->max_rates = 4;
	hw->channel_change_time = 5000;
	hw->max_listen_interval = 1;
	hw->max_rate_tries = 10;
	hw->sta_data_size = sizeof(struct ath_node);
	hw->vif_data_size = sizeof(struct ath_vif);

	hw->wiphy->available_antennas_rx = BIT(ah->caps.max_rxchains) - 1;
	hw->wiphy->available_antennas_tx = BIT(ah->caps.max_txchains) - 1;

	/* single chain devices with rx diversity */
	if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
		hw->wiphy->available_antennas_rx = BIT(0) | BIT(1);

	sc->ant_rx = hw->wiphy->available_antennas_rx;
	sc->ant_tx = hw->wiphy->available_antennas_tx;

	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
		hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
			&sc->sbands[IEEE80211_BAND_2GHZ];
	if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
		hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
			&sc->sbands[IEEE80211_BAND_5GHZ];

	ath9k_reload_chainmask_settings(sc);

	SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
}

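/*
 * Top-level device bring-up: initialize the software state, regulatory,
 * TX/RX DMA and LEDs, then register the device with mac80211.
 */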
int ath9k_init_device(u16 devid, struct ath_softc *sc,
		    const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw = sc->hw;
	struct ath_common *common;
	struct ath_hw *ah;
	int error = 0;
	struct ath_regulatory *reg;

	/* Bring up device */
	error = ath9k_init_softc(devid, sc, bus_ops);
	if (error)
		return error;

	ah = sc->sc_ah;
	common = ath9k_hw_common(ah);
	ath9k_set_hw_capab(sc, hw);

	/* Initialize regulatory */
	error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
			      ath9k_reg_notifier);
	if (error)
		goto deinit;

	reg = &common->regulatory;

	/* Setup TX DMA */
	error = ath_tx_init(sc, ATH_TXBUF);
	if (error != 0)
		goto deinit;

	/* Setup RX DMA */
	error = ath_rx_init(sc, ATH_RXBUF);
	if (error != 0)
		goto deinit;

	ath9k_init_txpower_limits(sc);

#ifdef CONFIG_MAC80211_LEDS
	/* must be initialized before ieee80211_register_hw */
	sc->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(sc->hw,
		IEEE80211_TPT_LEDTRIG_FL_RADIO, ath9k_tpt_blink,
		ARRAY_SIZE(ath9k_tpt_blink));
#endif

	/* Register with mac80211 */
	error = ieee80211_register_hw(hw);
	if (error)
		goto rx_cleanup;

	error = ath9k_init_debug(ah);
	if (error) {
		ath_err(common, "Unable to create debugfs files\n");
		goto unregister;
	}

	/* Handle world regulatory */
	if (!ath_is_world_regd(reg)) {
		error = regulatory_hint(hw->wiphy, reg->alpha2);
		if (error)
			goto debug_cleanup;
	}

	ath_init_leds(sc);
	ath_start_rfkill_poll(sc);

	return 0;

debug_cleanup:
	ath9k_deinit_debug(sc);
unregister:
	ieee80211_unregister_hw(hw);
rx_cleanup:
	ath_rx_cleanup(sc);
deinit:
	ath9k_deinit_softc(sc);
	return error;
}

/*****************************/
/*     De-Initialization     */
/*****************************/

static void ath9k_deinit_softc(struct ath_softc *sc)
{
	int i = 0;

	ath9k_deinit_btcoex(sc);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
		if (ATH_TXQ_SETUP(sc, i))
			ath_tx_cleanupq(sc, &sc->tx.txq[i]);

	ath9k_hw_deinit(sc->sc_ah);
	if (sc->dfs_detector != NULL)
		sc->dfs_detector->exit(sc->dfs_detector);

	ath9k_eeprom_release(sc);
}

void ath9k_deinit_device(struct ath_softc *sc)
{
	struct ieee80211_hw *hw = sc->hw;

	ath9k_ps_wakeup(sc);

	wiphy_rfkill_stop_polling(sc->hw->wiphy);
	ath_deinit_leds(sc);

	ath9k_ps_restore(sc);

	ath9k_deinit_debug(sc);
	ieee80211_unregister_hw(hw);
	ath_rx_cleanup(sc);
	ath9k_deinit_softc(sc);
}

/************************/
/*     Module Hooks     */
/************************/

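/*
 * Module init: register the rate control algorithm, then the PCI and
 * AHB bus glue.
 */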
static int __init ath9k_init(void)
{
	int error;

	/* Register rate control algorithm */
	error = ath_rate_control_register();
	if (error != 0) {
		pr_err("Unable to register rate control algorithm: %d\n",
		       error);
		goto err_out;
	}

	error = ath_pci_init();
	if (error < 0) {
		pr_err("No PCI devices found, driver not installed\n");
		error = -ENODEV;
		goto err_rate_unregister;
	}

	error = ath_ahb_init();
	if (error < 0) {
		error = -ENODEV;
		goto err_pci_exit;
	}

	return 0;

 err_pci_exit:
	ath_pci_exit();

 err_rate_unregister:
	ath_rate_control_unregister();
 err_out:
	return error;
}
module_init(ath9k_init);

static void __exit ath9k_exit(void)
{
	is_ath9k_unloaded = true;
	ath_ahb_exit();
	ath_pci_exit();
	ath_rate_control_unregister();
	pr_info("%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);