1 /*
2  * Atheros CARL9170 driver
3  *
4  * mac80211 interaction code
5  *
6  * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7  * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; see the file COPYING.  If not, see
21  * http://www.gnu.org/licenses/.
22  *
23  * This file incorporates work covered by the following copyright and
24  * permission notice:
25  *    Copyright (c) 2007-2008 Atheros Communications, Inc.
26  *
27  *    Permission to use, copy, modify, and/or distribute this software for any
28  *    purpose with or without fee is hereby granted, provided that the above
29  *    copyright notice and this permission notice appear in all copies.
30  *
31  *    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32  *    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33  *    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34  *    ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35  *    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36  *    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37  *    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38  */
39 
40 #include <linux/init.h>
41 #include <linux/slab.h>
42 #include <linux/module.h>
43 #include <linux/etherdevice.h>
44 #include <linux/random.h>
45 #include <net/mac80211.h>
46 #include <net/cfg80211.h>
47 #include "hw.h"
48 #include "carl9170.h"
49 #include "cmd.h"
50 
51 static bool modparam_nohwcrypt;
52 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
53 MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
54 
55 int modparam_noht;
56 module_param_named(noht, modparam_noht, int, S_IRUGO);
57 MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
58 
59 #define RATE(_bitrate, _hw_rate, _txpidx, _flags) {	\
60 	.bitrate	= (_bitrate),			\
61 	.flags		= (_flags),			\
62 	.hw_value	= (_hw_rate) | (_txpidx) << 4,	\
63 }
64 
65 struct ieee80211_rate __carl9170_ratetable[] = {
66 	RATE(10, 0, 0, 0),
67 	RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
68 	RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
69 	RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
70 	RATE(60, 0xb, 0, 0),
71 	RATE(90, 0xf, 0, 0),
72 	RATE(120, 0xa, 0, 0),
73 	RATE(180, 0xe, 0, 0),
74 	RATE(240, 0x9, 0, 0),
75 	RATE(360, 0xd, 1, 0),
76 	RATE(480, 0x8, 2, 0),
77 	RATE(540, 0xc, 3, 0),
78 };
79 #undef RATE
80 
81 #define carl9170_g_ratetable	(__carl9170_ratetable + 0)
82 #define carl9170_g_ratetable_size	12
83 #define carl9170_a_ratetable	(__carl9170_ratetable + 4)
84 #define carl9170_a_ratetable_size	8
85 
86 /*
87  * NB: The hw_value is used as an index into the carl9170_phy_freq_params
88  *     array in phy.c so that we don't have to do frequency lookups!
89  */
90 #define CHAN(_freq, _idx) {		\
91 	.center_freq	= (_freq),	\
92 	.hw_value	= (_idx),	\
93 	.max_power	= 18, /* XXX */	\
94 }
95 
96 static struct ieee80211_channel carl9170_2ghz_chantable[] = {
97 	CHAN(2412,  0),
98 	CHAN(2417,  1),
99 	CHAN(2422,  2),
100 	CHAN(2427,  3),
101 	CHAN(2432,  4),
102 	CHAN(2437,  5),
103 	CHAN(2442,  6),
104 	CHAN(2447,  7),
105 	CHAN(2452,  8),
106 	CHAN(2457,  9),
107 	CHAN(2462, 10),
108 	CHAN(2467, 11),
109 	CHAN(2472, 12),
110 	CHAN(2484, 13),
111 };
112 
113 static struct ieee80211_channel carl9170_5ghz_chantable[] = {
114 	CHAN(4920, 14),
115 	CHAN(4940, 15),
116 	CHAN(4960, 16),
117 	CHAN(4980, 17),
118 	CHAN(5040, 18),
119 	CHAN(5060, 19),
120 	CHAN(5080, 20),
121 	CHAN(5180, 21),
122 	CHAN(5200, 22),
123 	CHAN(5220, 23),
124 	CHAN(5240, 24),
125 	CHAN(5260, 25),
126 	CHAN(5280, 26),
127 	CHAN(5300, 27),
128 	CHAN(5320, 28),
129 	CHAN(5500, 29),
130 	CHAN(5520, 30),
131 	CHAN(5540, 31),
132 	CHAN(5560, 32),
133 	CHAN(5580, 33),
134 	CHAN(5600, 34),
135 	CHAN(5620, 35),
136 	CHAN(5640, 36),
137 	CHAN(5660, 37),
138 	CHAN(5680, 38),
139 	CHAN(5700, 39),
140 	CHAN(5745, 40),
141 	CHAN(5765, 41),
142 	CHAN(5785, 42),
143 	CHAN(5805, 43),
144 	CHAN(5825, 44),
145 	CHAN(5170, 45),
146 	CHAN(5190, 46),
147 	CHAN(5210, 47),
148 	CHAN(5230, 48),
149 };
150 #undef CHAN
151 
152 #define CARL9170_HT_CAP							\
153 {									\
154 	.ht_supported	= true,						\
155 	.cap		= IEEE80211_HT_CAP_MAX_AMSDU |			\
156 			  IEEE80211_HT_CAP_SUP_WIDTH_20_40 |		\
157 			  IEEE80211_HT_CAP_SGI_40 |			\
158 			  IEEE80211_HT_CAP_DSSSCCK40 |			\
159 			  IEEE80211_HT_CAP_SM_PS,			\
160 	.ampdu_factor	= IEEE80211_HT_MAX_AMPDU_64K,			\
161 	.ampdu_density	= IEEE80211_HT_MPDU_DENSITY_8,			\
162 	.mcs		= {						\
163 		.rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, },	\
164 		.rx_highest = cpu_to_le16(300),				\
165 		.tx_params = IEEE80211_HT_MCS_TX_DEFINED,		\
166 	},								\
167 }
168 
169 static struct ieee80211_supported_band carl9170_band_2GHz = {
170 	.channels	= carl9170_2ghz_chantable,
171 	.n_channels	= ARRAY_SIZE(carl9170_2ghz_chantable),
172 	.bitrates	= carl9170_g_ratetable,
173 	.n_bitrates	= carl9170_g_ratetable_size,
174 	.ht_cap		= CARL9170_HT_CAP,
175 };
176 
177 static struct ieee80211_supported_band carl9170_band_5GHz = {
178 	.channels	= carl9170_5ghz_chantable,
179 	.n_channels	= ARRAY_SIZE(carl9170_5ghz_chantable),
180 	.bitrates	= carl9170_a_ratetable,
181 	.n_bitrates	= carl9170_a_ratetable_size,
182 	.ht_cap		= CARL9170_HT_CAP,
183 };
184 
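/* Garbage-collect A-MPDU TID sessions that have reached the SHUTDOWN
 * state: unlink them from tx_ampdu_list under the list lock, wait for
 * an RCU grace period, then report their queued frames as failed and
 * free the per-TID state.
 */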
185 static void carl9170_ampdu_gc(struct ar9170 *ar)
186 {
187 	struct carl9170_sta_tid *tid_info;
188 	LIST_HEAD(tid_gc);
189 
190 	rcu_read_lock();
191 	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
192 		spin_lock_bh(&ar->tx_ampdu_list_lock);
193 		if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
194 			tid_info->state = CARL9170_TID_STATE_KILLED;
195 			list_del_rcu(&tid_info->list);
196 			ar->tx_ampdu_list_len--;
197 			list_add_tail(&tid_info->tmp_list, &tid_gc);
198 		}
199 		spin_unlock_bh(&ar->tx_ampdu_list_lock);
200 
201 	}
202 	rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
203 	rcu_read_unlock();
204 
205 	synchronize_rcu();
206 
207 	while (!list_empty(&tid_gc)) {
208 		struct sk_buff *skb;
209 		tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
210 					    tmp_list);
211 
212 		while ((skb = __skb_dequeue(&tid_info->queue)))
213 			carl9170_tx_status(ar, skb, false);
214 
215 		list_del_init(&tid_info->tmp_list);
216 		kfree(tid_info);
217 	}
218 }
219 
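/* Drain the transmit path. When drop_queued is set, every frame still
 * sitting in the software tx_pending queues is reported as failed
 * (frames already uploaded to the device cannot be dropped); afterwards
 * wait up to one second for the remaining in-flight frames to complete.
 */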
220 static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
221 {
222 	if (drop_queued) {
223 		int i;
224 
225 		/*
226 		 * We can only drop frames which have not been uploaded
227 		 * to the device yet.
228 		 */
229 
230 		for (i = 0; i < ar->hw->queues; i++) {
231 			struct sk_buff *skb;
232 
233 			while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
234 				struct ieee80211_tx_info *info;
235 
236 				info = IEEE80211_SKB_CB(skb);
237 				if (info->flags & IEEE80211_TX_CTL_AMPDU)
238 					atomic_dec(&ar->tx_ampdu_upload);
239 
240 				carl9170_tx_status(ar, skb, false);
241 			}
242 		}
243 	}
244 
245 	/* Wait for all other outstanding frames to time out. */
246 	if (atomic_read(&ar->tx_total_queued))
247 		WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
248 }
249 
250 static void carl9170_flush_ba(struct ar9170 *ar)
251 {
252 	struct sk_buff_head free;
253 	struct carl9170_sta_tid *tid_info;
254 	struct sk_buff *skb;
255 
256 	__skb_queue_head_init(&free);
257 
258 	rcu_read_lock();
259 	spin_lock_bh(&ar->tx_ampdu_list_lock);
260 	list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
261 		if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
262 			tid_info->state = CARL9170_TID_STATE_SUSPEND;
263 
264 			spin_lock(&tid_info->lock);
265 			while ((skb = __skb_dequeue(&tid_info->queue)))
266 				__skb_queue_tail(&free, skb);
267 			spin_unlock(&tid_info->lock);
268 		}
269 	}
270 	spin_unlock_bh(&ar->tx_ampdu_list_lock);
271 	rcu_read_unlock();
272 
273 	while ((skb = __skb_dequeue(&free)))
274 		carl9170_tx_status(ar, skb, false);
275 }
276 
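/* Reset the whole transmit state: garbage-collect aggregation sessions,
 * flush block-ack and pending queues, drop frames still awaiting a
 * status report, reinitialize the queue statistics and firmware memory
 * bitmap, and discard the cached beacon of every vif.
 */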
277 static void carl9170_zap_queues(struct ar9170 *ar)
278 {
279 	struct carl9170_vif_info *cvif;
280 	unsigned int i;
281 
282 	carl9170_ampdu_gc(ar);
283 
284 	carl9170_flush_ba(ar);
285 	carl9170_flush(ar, true);
286 
287 	for (i = 0; i < ar->hw->queues; i++) {
288 		spin_lock_bh(&ar->tx_status[i].lock);
289 		while (!skb_queue_empty(&ar->tx_status[i])) {
290 			struct sk_buff *skb;
291 
292 			skb = skb_peek(&ar->tx_status[i]);
293 			carl9170_tx_get_skb(skb);
294 			spin_unlock_bh(&ar->tx_status[i].lock);
295 			carl9170_tx_drop(ar, skb);
296 			spin_lock_bh(&ar->tx_status[i].lock);
297 			carl9170_tx_put_skb(skb);
298 		}
299 		spin_unlock_bh(&ar->tx_status[i].lock);
300 	}
301 
302 	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
303 	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
304 	BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);
305 
306 	/* reinitialize queue statistics */
307 	memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
308 	for (i = 0; i < ar->hw->queues; i++)
309 		ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
310 
311 	for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
312 		ar->mem_bitmap[i] = 0;
313 
314 	rcu_read_lock();
315 	list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
316 		spin_lock_bh(&ar->beacon_lock);
317 		dev_kfree_skb_any(cvif->beacon);
318 		cvif->beacon = NULL;
319 		spin_unlock_bh(&ar->beacon_lock);
320 	}
321 	rcu_read_unlock();
322 
323 	atomic_set(&ar->tx_ampdu_upload, 0);
324 	atomic_set(&ar->tx_ampdu_scheduler, 0);
325 	atomic_set(&ar->tx_total_pending, 0);
326 	atomic_set(&ar->tx_total_queued, 0);
327 	atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
328 }
329 
330 #define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop)		\
331 do {									\
332 	queue.aifs = ai_fs;						\
333 	queue.cw_min = cwmin;						\
334 	queue.cw_max = cwmax;						\
335 	queue.txop = _txop;						\
336 } while (0)
337 
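/* mac80211 start callback: wipe the queues, restore the default EDCA
 * parameters, open the USB transport, (re-)initialize the MAC and QoS
 * settings, enable the RX DMA queue, clear the entire key cache and
 * finally move the device from IDLE to STARTED.
 */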
338 static int carl9170_op_start(struct ieee80211_hw *hw)
339 {
340 	struct ar9170 *ar = hw->priv;
341 	int err, i;
342 
343 	mutex_lock(&ar->mutex);
344 
345 	carl9170_zap_queues(ar);
346 
347 	/* reset QoS defaults */
348 	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3,     7, 47);
349 	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7,    15, 94);
350 	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023,  0);
351 	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023,  0);
352 	CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);
353 
354 	ar->current_factor = ar->current_density = -1;
355 	/* "The first key is unique." */
356 	ar->usedkeys = 1;
357 	ar->filter_state = 0;
358 	ar->ps.last_action = jiffies;
359 	ar->ps.last_slept = jiffies;
360 	ar->erp_mode = CARL9170_ERP_AUTO;
361 
362 	/* Set "disable hw crypto offload" whenever the module parameter
363 	 * nohwcrypt is true or if the firmware does not support it.
364 	 */
365 	ar->disable_offload = modparam_nohwcrypt |
366 		ar->fw.disable_offload_fw;
367 	ar->rx_software_decryption = ar->disable_offload;
368 
369 	for (i = 0; i < ar->hw->queues; i++) {
370 		ar->queue_stop_timeout[i] = jiffies;
371 		ar->max_queue_stop_timeout[i] = 0;
372 	}
373 
374 	atomic_set(&ar->mem_allocs, 0);
375 
376 	err = carl9170_usb_open(ar);
377 	if (err)
378 		goto out;
379 
380 	err = carl9170_init_mac(ar);
381 	if (err)
382 		goto out;
383 
384 	err = carl9170_set_qos(ar);
385 	if (err)
386 		goto out;
387 
388 	if (ar->fw.rx_filter) {
389 		err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
390 			CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
391 		if (err)
392 			goto out;
393 	}
394 
395 	err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
396 				 AR9170_DMA_TRIGGER_RXQ);
397 	if (err)
398 		goto out;
399 
400 	/* Clear key-cache */
401 	for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
402 		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
403 					  0, NULL, 0);
404 		if (err)
405 			goto out;
406 
407 		err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
408 					  1, NULL, 0);
409 		if (err)
410 			goto out;
411 
412 		if (i < AR9170_CAM_MAX_USER) {
413 			err = carl9170_disable_key(ar, i);
414 			if (err)
415 				goto out;
416 		}
417 	}
418 
419 	carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
420 
421 	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
422 		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
423 
424 	ieee80211_wake_queues(ar->hw);
425 	err = 0;
426 
427 out:
428 	mutex_unlock(&ar->mutex);
429 	return err;
430 }
431 
432 static void carl9170_cancel_worker(struct ar9170 *ar)
433 {
434 	cancel_delayed_work_sync(&ar->stat_work);
435 	cancel_delayed_work_sync(&ar->tx_janitor);
436 #ifdef CONFIG_CARL9170_LEDS
437 	cancel_delayed_work_sync(&ar->led_work);
438 #endif /* CONFIG_CARL9170_LEDS */
439 	cancel_work_sync(&ar->ps_work);
440 	cancel_work_sync(&ar->ping_work);
441 	cancel_work_sync(&ar->ampdu_work);
442 }
443 
444 static void carl9170_op_stop(struct ieee80211_hw *hw)
445 {
446 	struct ar9170 *ar = hw->priv;
447 
448 	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
449 
450 	ieee80211_stop_queues(ar->hw);
451 
452 	mutex_lock(&ar->mutex);
453 	if (IS_ACCEPTING_CMD(ar)) {
454 		RCU_INIT_POINTER(ar->beacon_iter, NULL);
455 
456 		carl9170_led_set_state(ar, 0);
457 
458 		/* stop DMA */
459 		carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
460 		carl9170_usb_stop(ar);
461 	}
462 
463 	carl9170_zap_queues(ar);
464 	mutex_unlock(&ar->mutex);
465 
466 	carl9170_cancel_worker(ar);
467 }
468 
469 static void carl9170_restart_work(struct work_struct *work)
470 {
471 	struct ar9170 *ar = container_of(work, struct ar9170,
472 					 restart_work);
473 	int err = -EIO;
474 
475 	ar->usedkeys = 0;
476 	ar->filter_state = 0;
477 	carl9170_cancel_worker(ar);
478 
479 	mutex_lock(&ar->mutex);
480 	if (!ar->force_usb_reset) {
481 		err = carl9170_usb_restart(ar);
482 		if (net_ratelimit()) {
483 			if (err)
484 				dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
485 			else
486 				dev_info(&ar->udev->dev, "device restarted successfully.\n");
487 		}
488 	}
489 	carl9170_zap_queues(ar);
490 	mutex_unlock(&ar->mutex);
491 
492 	if (!err && !ar->force_usb_reset) {
493 		ar->restart_counter++;
494 		atomic_set(&ar->pending_restarts, 0);
495 
496 		ieee80211_restart_hw(ar->hw);
497 	} else {
498 		/*
499 		 * The reset was unsuccessful and the device seems to
500 		 * be dead. But there's still one option: a low-level
501 		 * usb subsystem reset...
502 		 */
503 
504 		carl9170_usb_reset(ar);
505 	}
506 }
507 
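/* Request a device restart. Surplus requests that arrive while a
 * restart is already pending are ignored; a full low-level USB reset
 * is forced when the device no longer accepts commands or a full
 * reset has been requested.
 */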
508 void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
509 {
510 	carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
511 
512 	/*
513 	 * Sometimes, an error can trigger several different reset events.
514 	 * By ignoring these *surplus* reset events, the device won't be
515 	 * killed again, right after it has recovered.
516 	 */
517 	if (atomic_inc_return(&ar->pending_restarts) > 1) {
518 		dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
519 		return;
520 	}
521 
522 	ieee80211_stop_queues(ar->hw);
523 
524 	dev_err(&ar->udev->dev, "restart device (%d)\n", r);
525 
526 	if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
527 	    !WARN_ON(r >= __CARL9170_RR_LAST))
528 		ar->last_reason = r;
529 
530 	if (!ar->registered)
531 		return;
532 
533 	if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
534 		ar->force_usb_reset = true;
535 
536 	ieee80211_queue_work(ar->hw, &ar->restart_work);
537 
538 	/*
539 	 * At this point, the device instance might have vanished or been
540 	 * disabled. So don't add any code that accesses the ar9170 struct
541 	 * without proper protection.
542 	 */
543 }
544 
545 static void carl9170_ping_work(struct work_struct *work)
546 {
547 	struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
548 	int err;
549 
550 	if (!IS_STARTED(ar))
551 		return;
552 
553 	mutex_lock(&ar->mutex);
554 	err = carl9170_echo_test(ar, 0xdeadbeef);
555 	if (err)
556 		carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
557 	mutex_unlock(&ar->mutex);
558 }
559 
560 static int carl9170_init_interface(struct ar9170 *ar,
561 				   struct ieee80211_vif *vif)
562 {
563 	struct ath_common *common = &ar->common;
564 	int err;
565 
566 	if (!vif) {
567 		WARN_ON_ONCE(IS_STARTED(ar));
568 		return 0;
569 	}
570 
571 	memcpy(common->macaddr, vif->addr, ETH_ALEN);
572 
573 	/* We have to fall back to software crypto, whenever
574 	 * the user chooses to participate in an IBSS. HW
575 	 * offload for IBSS RSN is not supported by this driver.
576 	 *
577 	 * NOTE: If the previous main interface has already
578 	 * disabled hw crypto offload, we have to keep this
579 	 * previous disable_offload setting as it was.
580 	 * Although ideally, we should notify mac80211 and tell
581 	 * it to forget about any HW crypto offload for now.
582 	 */
583 	ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
584 	    (vif->type != NL80211_IFTYPE_AP));
585 
586 	/* While the driver supports HW offload in a single
587 	 * P2P client configuration, it doesn't support HW
588 	 * offload in the favourite, concurrent P2P GO+CLIENT
589 	 * configuration. Hence, HW offload will always be
590 	 * disabled for P2P.
591 	 */
592 	ar->disable_offload |= vif->p2p;
593 
594 	ar->rx_software_decryption = ar->disable_offload;
595 
596 	err = carl9170_set_operating_mode(ar);
597 	return err;
598 }
599 
600 static int carl9170_op_add_interface(struct ieee80211_hw *hw,
601 				     struct ieee80211_vif *vif)
602 {
603 	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
604 	struct ieee80211_vif *main_vif, *old_main = NULL;
605 	struct ar9170 *ar = hw->priv;
606 	int vif_id = -1, err = 0;
607 
608 	mutex_lock(&ar->mutex);
609 	rcu_read_lock();
610 	if (vif_priv->active) {
611 		/*
612 		 * Skip the interface structure initialization,
613 		 * if the vif survived the _restart call.
614 		 */
615 		vif_id = vif_priv->id;
616 		vif_priv->enable_beacon = false;
617 
618 		spin_lock_bh(&ar->beacon_lock);
619 		dev_kfree_skb_any(vif_priv->beacon);
620 		vif_priv->beacon = NULL;
621 		spin_unlock_bh(&ar->beacon_lock);
622 
623 		goto init;
624 	}
625 
626 	/* Because the AR9170 HW's MAC doesn't provide full support for
627 	 * multiple, independent interfaces [of different operation modes],
628 	 * we have to select ONE main interface [main mode of HW], but we
629 	 * can have multiple slaves [AKA: entry in the ACK-table].
630 	 *
631 	 * The first (from HEAD/TOP) interface in the ar->vif_list is
632 	 * always the main intf. All following intfs in this list
633 	 * are considered to be slave intfs.
634 	 */
635 	main_vif = carl9170_get_main_vif(ar);
636 
637 	if (main_vif) {
638 		switch (main_vif->type) {
639 		case NL80211_IFTYPE_STATION:
640 			if (vif->type == NL80211_IFTYPE_STATION)
641 				break;
642 
643 			/* P2P GO [master] use-case
644 			 * Because the P2P GO station is selected dynamically
645 			 * by all participating peers of a WIFI Direct network,
646 			 * the driver has to be able to change the main interface
647 			 * operating mode on the fly.
648 			 */
649 			if (main_vif->p2p && vif->p2p &&
650 			    vif->type == NL80211_IFTYPE_AP) {
651 				old_main = main_vif;
652 				break;
653 			}
654 
655 			err = -EBUSY;
656 			rcu_read_unlock();
657 
658 			goto unlock;
659 
660 		case NL80211_IFTYPE_MESH_POINT:
661 		case NL80211_IFTYPE_AP:
662 			if ((vif->type == NL80211_IFTYPE_STATION) ||
663 			    (vif->type == NL80211_IFTYPE_WDS) ||
664 			    (vif->type == NL80211_IFTYPE_AP) ||
665 			    (vif->type == NL80211_IFTYPE_MESH_POINT))
666 				break;
667 
668 			err = -EBUSY;
669 			rcu_read_unlock();
670 			goto unlock;
671 
672 		default:
673 			rcu_read_unlock();
674 			goto unlock;
675 		}
676 	}
677 
678 	vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
679 
680 	if (vif_id < 0) {
681 		rcu_read_unlock();
682 
683 		err = -ENOSPC;
684 		goto unlock;
685 	}
686 
687 	BUG_ON(ar->vif_priv[vif_id].id != vif_id);
688 
689 	vif_priv->active = true;
690 	vif_priv->id = vif_id;
691 	vif_priv->enable_beacon = false;
692 	ar->vifs++;
693 	if (old_main) {
694 		/* We end up in here, if the main interface is being replaced.
695 		 * Put the new main interface at the HEAD of the list and the
696 		 * previous interface will automatically become second in line.
697 		 */
698 		list_add_rcu(&vif_priv->list, &ar->vif_list);
699 	} else {
700 		/* Add the new interface. If the list is empty, it will become
701 		 * the main interface, otherwise it will be a slave.
702 		 */
703 		list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
704 	}
705 	rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
706 
707 init:
708 	main_vif = carl9170_get_main_vif(ar);
709 
710 	if (main_vif == vif) {
711 		rcu_assign_pointer(ar->beacon_iter, vif_priv);
712 		rcu_read_unlock();
713 
714 		if (old_main) {
715 			struct carl9170_vif_info *old_main_priv =
716 				(void *) old_main->drv_priv;
717 			/* downgrade old main intf to slave intf.
718 			 * NOTE: We are no longer under rcu_read_lock.
719 			 * But we are still holding ar->mutex, so the
720 			 * vif data [id, addr] is safe.
721 			 */
722 			err = carl9170_mod_virtual_mac(ar, old_main_priv->id,
723 						       old_main->addr);
724 			if (err)
725 				goto unlock;
726 		}
727 
728 		err = carl9170_init_interface(ar, vif);
729 		if (err)
730 			goto unlock;
731 	} else {
732 		rcu_read_unlock();
733 		err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
734 
735 		if (err)
736 			goto unlock;
737 	}
738 
739 	if (ar->fw.tx_seq_table) {
740 		err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
741 					 0);
742 		if (err)
743 			goto unlock;
744 	}
745 
746 unlock:
747 	if (err && (vif_id >= 0)) {
748 		vif_priv->active = false;
749 		bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
750 		ar->vifs--;
751 		RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
752 		list_del_rcu(&vif_priv->list);
753 		mutex_unlock(&ar->mutex);
754 		synchronize_rcu();
755 	} else {
756 		if (ar->vifs > 1)
757 			ar->ps.off_override |= PS_OFF_VIF;
758 
759 		mutex_unlock(&ar->mutex);
760 	}
761 
762 	return err;
763 }
764 
765 static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
766 					 struct ieee80211_vif *vif)
767 {
768 	struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
769 	struct ieee80211_vif *main_vif;
770 	struct ar9170 *ar = hw->priv;
771 	unsigned int id;
772 
773 	mutex_lock(&ar->mutex);
774 
775 	if (WARN_ON_ONCE(!vif_priv->active))
776 		goto unlock;
777 
778 	ar->vifs--;
779 
780 	rcu_read_lock();
781 	main_vif = carl9170_get_main_vif(ar);
782 
783 	id = vif_priv->id;
784 
785 	vif_priv->active = false;
786 	WARN_ON(vif_priv->enable_beacon);
787 	vif_priv->enable_beacon = false;
788 	list_del_rcu(&vif_priv->list);
789 	RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
790 
791 	if (vif == main_vif) {
792 		rcu_read_unlock();
793 
794 		if (ar->vifs) {
795 			WARN_ON(carl9170_init_interface(ar,
796 					carl9170_get_main_vif(ar)));
797 		} else {
798 			carl9170_set_operating_mode(ar);
799 		}
800 	} else {
801 		rcu_read_unlock();
802 
803 		WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
804 	}
805 
806 	carl9170_update_beacon(ar, false);
807 	carl9170_flush_cab(ar, id);
808 
809 	spin_lock_bh(&ar->beacon_lock);
810 	dev_kfree_skb_any(vif_priv->beacon);
811 	vif_priv->beacon = NULL;
812 	spin_unlock_bh(&ar->beacon_lock);
813 
814 	bitmap_release_region(&ar->vif_bitmap, id, 0);
815 
816 	carl9170_set_beacon_timers(ar);
817 
818 	if (ar->vifs == 1)
819 		ar->ps.off_override &= ~PS_OFF_VIF;
820 
821 unlock:
822 	mutex_unlock(&ar->mutex);
823 
824 	synchronize_rcu();
825 }
826 
827 void carl9170_ps_check(struct ar9170 *ar)
828 {
829 	ieee80211_queue_work(ar->hw, &ar->ps_work);
830 }
831 
832 /* caller must hold ar->mutex */
833 static int carl9170_ps_update(struct ar9170 *ar)
834 {
835 	bool ps = false;
836 	int err = 0;
837 
838 	if (!ar->ps.off_override)
839 		ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
840 
841 	if (ps != ar->ps.state) {
842 		err = carl9170_powersave(ar, ps);
843 		if (err)
844 			return err;
845 
846 		if (ar->ps.state && !ps) {
847 			ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
848 				ar->ps.last_action);
849 		}
850 
851 		if (ps)
852 			ar->ps.last_slept = jiffies;
853 
854 		ar->ps.last_action = jiffies;
855 		ar->ps.state = ps;
856 	}
857 
858 	return 0;
859 }
860 
861 static void carl9170_ps_work(struct work_struct *work)
862 {
863 	struct ar9170 *ar = container_of(work, struct ar9170,
864 					 ps_work);
865 	mutex_lock(&ar->mutex);
866 	if (IS_STARTED(ar))
867 		WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
868 	mutex_unlock(&ar->mutex);
869 }
870 
871 static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
872 {
873 	int err;
874 
875 	if (noise) {
876 		err = carl9170_get_noisefloor(ar);
877 		if (err)
878 			return err;
879 	}
880 
881 	if (ar->fw.hw_counters) {
882 		err = carl9170_collect_tally(ar);
883 		if (err)
884 			return err;
885 	}
886 
887 	if (flush)
888 		memset(&ar->tally, 0, sizeof(ar->tally));
889 
890 	return 0;
891 }
892 
893 static void carl9170_stat_work(struct work_struct *work)
894 {
895 	struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
896 	int err;
897 
898 	mutex_lock(&ar->mutex);
899 	err = carl9170_update_survey(ar, false, true);
900 	mutex_unlock(&ar->mutex);
901 
902 	if (err)
903 		return;
904 
905 	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
906 		round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
907 }
908 
909 static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
910 {
911 	struct ar9170 *ar = hw->priv;
912 	int err = 0;
913 
914 	mutex_lock(&ar->mutex);
915 	if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
916 		/* TODO */
917 		err = 0;
918 	}
919 
920 	if (changed & IEEE80211_CONF_CHANGE_PS) {
921 		err = carl9170_ps_update(ar);
922 		if (err)
923 			goto out;
924 	}
925 
926 	if (changed & IEEE80211_CONF_CHANGE_SMPS) {
927 		/* TODO */
928 		err = 0;
929 	}
930 
931 	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
932 		enum nl80211_channel_type channel_type =
933 			cfg80211_get_chandef_type(&hw->conf.chandef);
934 
935 		/* adjust slot time for 5 GHz */
936 		err = carl9170_set_slot_time(ar);
937 		if (err)
938 			goto out;
939 
940 		err = carl9170_update_survey(ar, true, false);
941 		if (err)
942 			goto out;
943 
944 		err = carl9170_set_channel(ar, hw->conf.chandef.chan,
945 					   channel_type);
946 		if (err)
947 			goto out;
948 
949 		err = carl9170_update_survey(ar, false, true);
950 		if (err)
951 			goto out;
952 
953 		err = carl9170_set_dyn_sifs_ack(ar);
954 		if (err)
955 			goto out;
956 
957 		err = carl9170_set_rts_cts_rate(ar);
958 		if (err)
959 			goto out;
960 	}
961 
962 	if (changed & IEEE80211_CONF_CHANGE_POWER) {
963 		err = carl9170_set_mac_tpc(ar, ar->hw->conf.chandef.chan);
964 		if (err)
965 			goto out;
966 	}
967 
968 out:
969 	mutex_unlock(&ar->mutex);
970 	return err;
971 }
972 
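/* Build the 64-bit multicast hash filter for the hardware: each
 * address sets the bit indexed by the upper six bits of its last
 * octet, and the broadcast bit (0xff >> 2 == 63) is always set.
 */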
973 static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
974 					 struct netdev_hw_addr_list *mc_list)
975 {
976 	struct netdev_hw_addr *ha;
977 	u64 mchash;
978 
979 	/* always get broadcast frames */
980 	mchash = 1ULL << (0xff >> 2);
981 
982 	netdev_hw_addr_list_for_each(ha, mc_list)
983 		mchash |= 1ULL << (ha->addr[5] >> 2);
984 
985 	return mchash;
986 }
987 
988 static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
989 					 unsigned int changed_flags,
990 					 unsigned int *new_flags,
991 					 u64 multicast)
992 {
993 	struct ar9170 *ar = hw->priv;
994 
995 	/* mask supported flags */
996 	*new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;
997 
998 	if (!IS_ACCEPTING_CMD(ar))
999 		return;
1000 
1001 	mutex_lock(&ar->mutex);
1002 
1003 	ar->filter_state = *new_flags;
1004 	/*
1005 	 * We can support more by setting the sniffer bit and
1006 	 * then checking the error flags, later.
1007 	 */
1008 
1009 	if (*new_flags & FIF_ALLMULTI)
1010 		multicast = ~0ULL;
1011 
1012 	if (multicast != ar->cur_mc_hash)
1013 		WARN_ON(carl9170_update_multicast(ar, multicast));
1014 
1015 	if (changed_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS)) {
1016 		ar->sniffer_enabled = !!(*new_flags &
1017 			(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS));
1018 
1019 		WARN_ON(carl9170_set_operating_mode(ar));
1020 	}
1021 
1022 	if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
1023 		u32 rx_filter = 0;
1024 
1025 		if (!ar->fw.ba_filter)
1026 			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1027 
1028 		if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
1029 			rx_filter |= CARL9170_RX_FILTER_BAD;
1030 
1031 		if (!(*new_flags & FIF_CONTROL))
1032 			rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1033 
1034 		if (!(*new_flags & FIF_PSPOLL))
1035 			rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
1036 
1037 		if (!(*new_flags & (FIF_OTHER_BSS | FIF_PROMISC_IN_BSS))) {
1038 			rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
1039 			rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
1040 		}
1041 
1042 		WARN_ON(carl9170_rx_filter(ar, rx_filter));
1043 	}
1044 
1045 	mutex_unlock(&ar->mutex);
1046 }
1047 
1048 
1049 static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
1050 					 struct ieee80211_vif *vif,
1051 					 struct ieee80211_bss_conf *bss_conf,
1052 					 u32 changed)
1053 {
1054 	struct ar9170 *ar = hw->priv;
1055 	struct ath_common *common = &ar->common;
1056 	int err = 0;
1057 	struct carl9170_vif_info *vif_priv;
1058 	struct ieee80211_vif *main_vif;
1059 
1060 	mutex_lock(&ar->mutex);
1061 	vif_priv = (void *) vif->drv_priv;
1062 	main_vif = carl9170_get_main_vif(ar);
1063 	if (WARN_ON(!main_vif))
1064 		goto out;
1065 
1066 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
1067 		struct carl9170_vif_info *iter;
1068 		int i = 0;
1069 
1070 		vif_priv->enable_beacon = bss_conf->enable_beacon;
1071 		rcu_read_lock();
1072 		list_for_each_entry_rcu(iter, &ar->vif_list, list) {
1073 			if (iter->active && iter->enable_beacon)
1074 				i++;
1075 
1076 		}
1077 		rcu_read_unlock();
1078 
1079 		ar->beacon_enabled = i;
1080 	}
1081 
1082 	if (changed & BSS_CHANGED_BEACON) {
1083 		err = carl9170_update_beacon(ar, false);
1084 		if (err)
1085 			goto out;
1086 	}
1087 
1088 	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
1089 		       BSS_CHANGED_BEACON_INT)) {
1090 
1091 		if (main_vif != vif) {
1092 			bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
1093 			bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
1094 		}
1095 
1096 		/* Buffered broadcast traffic is only released once per DTIM;
1097 		 * a DTIM interval close to the queue-stuck timeout would
1098 		 * trigger false alarms, hence this hard limit.
1099 		 */
1100 		if (vif->type != NL80211_IFTYPE_STATION &&
1101 		    (bss_conf->beacon_int * bss_conf->dtim_period >=
1102 		     (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
1103 			err = -EINVAL;
1104 			goto out;
1105 		}
1106 
1107 		err = carl9170_set_beacon_timers(ar);
1108 		if (err)
1109 			goto out;
1110 	}
1111 
1112 	if (changed & BSS_CHANGED_HT) {
1113 		/* TODO */
1114 		err = 0;
1115 		if (err)
1116 			goto out;
1117 	}
1118 
1119 	if (main_vif != vif)
1120 		goto out;
1121 
1122 	/*
1123 	 * The following settings can only be changed by the
1124 	 * master interface.
1125 	 */
1126 
1127 	if (changed & BSS_CHANGED_BSSID) {
1128 		memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1129 		err = carl9170_set_operating_mode(ar);
1130 		if (err)
1131 			goto out;
1132 	}
1133 
1134 	if (changed & BSS_CHANGED_ASSOC) {
1135 		ar->common.curaid = bss_conf->aid;
1136 		err = carl9170_set_beacon_timers(ar);
1137 		if (err)
1138 			goto out;
1139 	}
1140 
1141 	if (changed & BSS_CHANGED_ERP_SLOT) {
1142 		err = carl9170_set_slot_time(ar);
1143 		if (err)
1144 			goto out;
1145 	}
1146 
1147 	if (changed & BSS_CHANGED_BASIC_RATES) {
1148 		err = carl9170_set_mac_rates(ar);
1149 		if (err)
1150 			goto out;
1151 	}
1152 
1153 out:
1154 	WARN_ON_ONCE(err && IS_STARTED(ar));
1155 	mutex_unlock(&ar->mutex);
1156 }
1157 
1158 static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
1159 			       struct ieee80211_vif *vif)
1160 {
1161 	struct ar9170 *ar = hw->priv;
1162 	struct carl9170_tsf_rsp tsf;
1163 	int err;
1164 
1165 	mutex_lock(&ar->mutex);
1166 	err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
1167 				0, NULL, sizeof(tsf), &tsf);
1168 	mutex_unlock(&ar->mutex);
1169 	if (WARN_ON(err))
1170 		return 0;
1171 
1172 	return le64_to_cpu(tsf.tsf_64);
1173 }
1174 
1175 static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1176 			       struct ieee80211_vif *vif,
1177 			       struct ieee80211_sta *sta,
1178 			       struct ieee80211_key_conf *key)
1179 {
1180 	struct ar9170 *ar = hw->priv;
1181 	int err = 0, i;
1182 	u8 ktype;
1183 
1184 	if (ar->disable_offload || !vif)
1185 		return -EOPNOTSUPP;
1186 
1187 	/* Fall back to software encryption whenever the driver is connected
1188 	 * to more than one network.
1189 	 *
1190 	 * This is very unfortunate, because some machines cannot handle
1191 	 * the high throughput speeds in 802.11n networks.
1192 	 */
1193 
1194 	if (!is_main_vif(ar, vif)) {
1195 		mutex_lock(&ar->mutex);
1196 		goto err_softw;
1197 	}
1198 
1199 	/*
1200 	 * While the hardware supports a *catch-all* key for offloading
1201 	 * group-key en-/decryption, how the hardware decides which
1202 	 * keyidx maps to which key remains a mystery...
1203 	 */
1204 	if ((vif->type != NL80211_IFTYPE_STATION &&
1205 	     vif->type != NL80211_IFTYPE_ADHOC) &&
1206 	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1207 		return -EOPNOTSUPP;
1208 
1209 	switch (key->cipher) {
1210 	case WLAN_CIPHER_SUITE_WEP40:
1211 		ktype = AR9170_ENC_ALG_WEP64;
1212 		break;
1213 	case WLAN_CIPHER_SUITE_WEP104:
1214 		ktype = AR9170_ENC_ALG_WEP128;
1215 		break;
1216 	case WLAN_CIPHER_SUITE_TKIP:
1217 		ktype = AR9170_ENC_ALG_TKIP;
1218 		break;
1219 	case WLAN_CIPHER_SUITE_CCMP:
1220 		ktype = AR9170_ENC_ALG_AESCCMP;
1221 		key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1222 		break;
1223 	default:
1224 		return -EOPNOTSUPP;
1225 	}
1226 
1227 	mutex_lock(&ar->mutex);
1228 	if (cmd == SET_KEY) {
1229 		if (!IS_STARTED(ar)) {
1230 			err = -EOPNOTSUPP;
1231 			goto out;
1232 		}
1233 
1234 		if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1235 			sta = NULL;
1236 
1237 			i = 64 + key->keyidx;
1238 		} else {
1239 			for (i = 0; i < 64; i++)
1240 				if (!(ar->usedkeys & BIT(i)))
1241 					break;
1242 			if (i == 64)
1243 				goto err_softw;
1244 		}
1245 
1246 		key->hw_key_idx = i;
1247 
1248 		err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
1249 					  ktype, 0, key->key,
1250 					  min_t(u8, 16, key->keylen));
1251 		if (err)
1252 			goto out;
1253 
1254 		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1255 			err = carl9170_upload_key(ar, i, sta ? sta->addr :
1256 						  NULL, ktype, 1,
1257 						  key->key + 16, 16);
1258 			if (err)
1259 				goto out;
1260 
1261 			/*
1262 			 * The hardware is not capable of generating
1263 			 * the MMIC for fragmented frames!
1264 			 */
1265 			key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1266 		}
1267 
1268 		if (i < 64)
1269 			ar->usedkeys |= BIT(i);
1270 
1271 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1272 	} else {
1273 		if (!IS_STARTED(ar)) {
1274 			/* The device is gone... together with the key ;-) */
1275 			err = 0;
1276 			goto out;
1277 		}
1278 
1279 		if (key->hw_key_idx < 64) {
1280 			ar->usedkeys &= ~BIT(key->hw_key_idx);
1281 		} else {
1282 			err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
1283 						  AR9170_ENC_ALG_NONE, 0,
1284 						  NULL, 0);
1285 			if (err)
1286 				goto out;
1287 
1288 			if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1289 				err = carl9170_upload_key(ar, key->hw_key_idx,
1290 							  NULL,
1291 							  AR9170_ENC_ALG_NONE,
1292 							  1, NULL, 0);
1293 				if (err)
1294 					goto out;
1295 			}
1296 
1297 		}
1298 
1299 		err = carl9170_disable_key(ar, key->hw_key_idx);
1300 		if (err)
1301 			goto out;
1302 	}
1303 
1304 out:
1305 	mutex_unlock(&ar->mutex);
1306 	return err;
1307 
1308 err_softw:
1309 	if (!ar->rx_software_decryption) {
1310 		ar->rx_software_decryption = true;
1311 		carl9170_set_operating_mode(ar);
1312 	}
1313 	mutex_unlock(&ar->mutex);
1314 	return -ENOSPC;
1315 }
1316 
1317 static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1318 			       struct ieee80211_vif *vif,
1319 			       struct ieee80211_sta *sta)
1320 {
1321 	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1322 	unsigned int i;
1323 
1324 	atomic_set(&sta_info->pending_frames, 0);
1325 
1326 	if (sta->ht_cap.ht_supported) {
1327 		if (sta->ht_cap.ampdu_density > 6) {
1328 			/*
1329 			 * The HW does not support a 16us AMPDU density;
1330 			 * no HT-Xmit for this station.
1331 			 */
1332 
1333 			return 0;
1334 		}
1335 
1336 		for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
1337 			RCU_INIT_POINTER(sta_info->agg[i], NULL);
1338 
1339 		sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
1340 		sta_info->ht_sta = true;
1341 	}
1342 
1343 	return 0;
1344 }
1345 
1346 static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
1347 				struct ieee80211_vif *vif,
1348 				struct ieee80211_sta *sta)
1349 {
1350 	struct ar9170 *ar = hw->priv;
1351 	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1352 	unsigned int i;
1353 	bool cleanup = false;
1354 
1355 	if (sta->ht_cap.ht_supported) {
1356 
1357 		sta_info->ht_sta = false;
1358 
1359 		rcu_read_lock();
1360 		for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) {
1361 			struct carl9170_sta_tid *tid_info;
1362 
1363 			tid_info = rcu_dereference(sta_info->agg[i]);
1364 			RCU_INIT_POINTER(sta_info->agg[i], NULL);
1365 
1366 			if (!tid_info)
1367 				continue;
1368 
1369 			spin_lock_bh(&ar->tx_ampdu_list_lock);
1370 			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1371 				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1372 			spin_unlock_bh(&ar->tx_ampdu_list_lock);
1373 			cleanup = true;
1374 		}
1375 		rcu_read_unlock();
1376 
1377 		if (cleanup)
1378 			carl9170_ampdu_gc(ar);
1379 	}
1380 
1381 	return 0;
1382 }
1383 
1384 static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
1385 			       struct ieee80211_vif *vif, u16 queue,
1386 			       const struct ieee80211_tx_queue_params *param)
1387 {
1388 	struct ar9170 *ar = hw->priv;
1389 	int ret;
1390 
1391 	mutex_lock(&ar->mutex);
1392 	if (queue < ar->hw->queues) {
1393 		memcpy(&ar->edcf[ar9170_qmap[queue]], param, sizeof(*param));
1394 		ret = carl9170_set_qos(ar);
1395 	} else {
1396 		ret = -EINVAL;
1397 	}
1398 
1399 	mutex_unlock(&ar->mutex);
1400 	return ret;
1401 }
1402 
1403 static void carl9170_ampdu_work(struct work_struct *work)
1404 {
1405 	struct ar9170 *ar = container_of(work, struct ar9170,
1406 					 ampdu_work);
1407 
1408 	if (!IS_STARTED(ar))
1409 		return;
1410 
1411 	mutex_lock(&ar->mutex);
1412 	carl9170_ampdu_gc(ar);
1413 	mutex_unlock(&ar->mutex);
1414 }
1415 
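/* mac80211 A-MPDU session handling: TX_START allocates the per-TID
 * state and links it into tx_ampdu_list, the TX_STOP variants mark it
 * for shutdown and defer the cleanup to ampdu_work, TX_OPERATIONAL
 * resets the block-ack window, and RX sessions are handled entirely
 * by the hardware.
 */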
1416 static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1417 				    struct ieee80211_vif *vif,
1418 				    enum ieee80211_ampdu_mlme_action action,
1419 				    struct ieee80211_sta *sta,
1420 				    u16 tid, u16 *ssn, u8 buf_size)
1421 {
1422 	struct ar9170 *ar = hw->priv;
1423 	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1424 	struct carl9170_sta_tid *tid_info;
1425 
1426 	if (modparam_noht)
1427 		return -EOPNOTSUPP;
1428 
1429 	switch (action) {
1430 	case IEEE80211_AMPDU_TX_START:
1431 		if (!sta_info->ht_sta)
1432 			return -EOPNOTSUPP;
1433 
1434 		rcu_read_lock();
1435 		if (rcu_dereference(sta_info->agg[tid])) {
1436 			rcu_read_unlock();
1437 			return -EBUSY;
1438 		}
1439 
1440 		tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
1441 				   GFP_ATOMIC);
1442 		if (!tid_info) {
1443 			rcu_read_unlock();
1444 			return -ENOMEM;
1445 		}
1446 
1447 		tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
1448 		tid_info->state = CARL9170_TID_STATE_PROGRESS;
1449 		tid_info->tid = tid;
1450 		tid_info->max = sta_info->ampdu_max_len;
1451 
1452 		INIT_LIST_HEAD(&tid_info->list);
1453 		INIT_LIST_HEAD(&tid_info->tmp_list);
1454 		skb_queue_head_init(&tid_info->queue);
1455 		spin_lock_init(&tid_info->lock);
1456 
1457 		spin_lock_bh(&ar->tx_ampdu_list_lock);
1458 		ar->tx_ampdu_list_len++;
1459 		list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
1460 		rcu_assign_pointer(sta_info->agg[tid], tid_info);
1461 		spin_unlock_bh(&ar->tx_ampdu_list_lock);
1462 		rcu_read_unlock();
1463 
1464 		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1465 		break;
1466 
1467 	case IEEE80211_AMPDU_TX_STOP_CONT:
1468 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
1469 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
1470 		rcu_read_lock();
1471 		tid_info = rcu_dereference(sta_info->agg[tid]);
1472 		if (tid_info) {
1473 			spin_lock_bh(&ar->tx_ampdu_list_lock);
1474 			if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1475 				tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1476 			spin_unlock_bh(&ar->tx_ampdu_list_lock);
1477 		}
1478 
1479 		RCU_INIT_POINTER(sta_info->agg[tid], NULL);
1480 		rcu_read_unlock();
1481 
1482 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1483 		ieee80211_queue_work(ar->hw, &ar->ampdu_work);
1484 		break;
1485 
1486 	case IEEE80211_AMPDU_TX_OPERATIONAL:
1487 		rcu_read_lock();
1488 		tid_info = rcu_dereference(sta_info->agg[tid]);
1489 
1490 		sta_info->stats[tid].clear = true;
1491 		sta_info->stats[tid].req = false;
1492 
1493 		if (tid_info) {
1494 			bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
1495 			tid_info->state = CARL9170_TID_STATE_IDLE;
1496 		}
1497 		rcu_read_unlock();
1498 
1499 		if (WARN_ON_ONCE(!tid_info))
1500 			return -EFAULT;
1501 
1502 		break;
1503 
1504 	case IEEE80211_AMPDU_RX_START:
1505 	case IEEE80211_AMPDU_RX_STOP:
1506 		/* Handled by hardware */
1507 		break;
1508 
1509 	default:
1510 		return -EOPNOTSUPP;
1511 	}
1512 
1513 	return 0;
1514 }
1515 
1516 #ifdef CONFIG_CARL9170_WPC
1517 static int carl9170_register_wps_button(struct ar9170 *ar)
1518 {
1519 	struct input_dev *input;
1520 	int err;
1521 
1522 	if (!(ar->features & CARL9170_WPS_BUTTON))
1523 		return 0;
1524 
1525 	input = input_allocate_device();
1526 	if (!input)
1527 		return -ENOMEM;
1528 
1529 	snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1530 		 wiphy_name(ar->hw->wiphy));
1531 
1532 	snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1533 		 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1534 
1535 	input->name = ar->wps.name;
1536 	input->phys = ar->wps.phys;
1537 	input->id.bustype = BUS_USB;
1538 	input->dev.parent = &ar->hw->wiphy->dev;
1539 
1540 	input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1541 
1542 	err = input_register_device(input);
1543 	if (err) {
1544 		input_free_device(input);
1545 		return err;
1546 	}
1547 
1548 	ar->wps.pbc = input;
1549 	return 0;
1550 }
1551 #endif /* CONFIG_CARL9170_WPC */
1552 
1553 #ifdef CONFIG_CARL9170_HWRNG
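/* Refill the RNG cache by issuing CARL9170_CMD_RREG commands that read
 * the hardware random number register (AR9170_RAND_REG_NUM) several
 * words at a time until the cache is full again.
 */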
1554 static int carl9170_rng_get(struct ar9170 *ar)
1555 {
1556 
1557 #define RW	(CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
1558 #define RB	(CARL9170_MAX_CMD_PAYLOAD_LEN)
1559 
1560 	static const __le32 rng_load[RW] = {
1561 		[0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};
1562 
1563 	u32 buf[RW];
1564 
1565 	unsigned int i, off = 0, transfer, count;
1566 	int err;
1567 
1568 	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
1569 
1570 	if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
1571 		return -EAGAIN;
1572 
1573 	count = ARRAY_SIZE(ar->rng.cache);
1574 	while (count) {
1575 		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1576 					RB, (u8 *) rng_load,
1577 					RB, (u8 *) buf);
1578 		if (err)
1579 			return err;
1580 
1581 		transfer = min_t(unsigned int, count, RW);
1582 		for (i = 0; i < transfer; i++)
1583 			ar->rng.cache[off + i] = buf[i];
1584 
1585 		off += transfer;
1586 		count -= transfer;
1587 	}
1588 
1589 	ar->rng.cache_idx = 0;
1590 
1591 #undef RW
1592 #undef RB
1593 	return 0;
1594 }
1595 
1596 static int carl9170_rng_read(struct hwrng *rng, u32 *data)
1597 {
1598 	struct ar9170 *ar = (struct ar9170 *)rng->priv;
1599 	int ret = -EIO;
1600 
1601 	mutex_lock(&ar->mutex);
1602 	if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
1603 		ret = carl9170_rng_get(ar);
1604 		if (ret) {
1605 			mutex_unlock(&ar->mutex);
1606 			return ret;
1607 		}
1608 	}
1609 
1610 	*data = ar->rng.cache[ar->rng.cache_idx++];
1611 	mutex_unlock(&ar->mutex);
1612 
1613 	return sizeof(u16);
1614 }
1615 
1616 static void carl9170_unregister_hwrng(struct ar9170 *ar)
1617 {
1618 	if (ar->rng.initialized) {
1619 		hwrng_unregister(&ar->rng.rng);
1620 		ar->rng.initialized = false;
1621 	}
1622 }
1623 
1624 static int carl9170_register_hwrng(struct ar9170 *ar)
1625 {
1626 	int err;
1627 
1628 	snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
1629 		 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
1630 	ar->rng.rng.name = ar->rng.name;
1631 	ar->rng.rng.data_read = carl9170_rng_read;
1632 	ar->rng.rng.priv = (unsigned long)ar;
1633 
1634 	if (WARN_ON(ar->rng.initialized))
1635 		return -EALREADY;
1636 
1637 	err = hwrng_register(&ar->rng.rng);
1638 	if (err) {
1639 		dev_err(&ar->udev->dev, "Failed to register the random "
1640 			"number generator (%d)\n", err);
1641 		return err;
1642 	}
1643 
1644 	ar->rng.initialized = true;
1645 
1646 	err = carl9170_rng_get(ar);
1647 	if (err) {
1648 		carl9170_unregister_hwrng(ar);
1649 		return err;
1650 	}
1651 
1652 	return 0;
1653 }
1654 #endif /* CONFIG_CARL9170_HWRNG */
1655 
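/* Return the survey data for channel index idx: refresh the counters
 * if idx is the currently tuned channel, look up the matching channel
 * in the registered bands and report the noise floor (plus the
 * channel-time counters when the firmware keeps hardware tallies).
 */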
1656 static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1657 				struct survey_info *survey)
1658 {
1659 	struct ar9170 *ar = hw->priv;
1660 	struct ieee80211_channel *chan;
1661 	struct ieee80211_supported_band *band;
1662 	int err, b, i;
1663 
1664 	chan = ar->channel;
1665 	if (!chan)
1666 		return -ENODEV;
1667 
1668 	if (idx == chan->hw_value) {
1669 		mutex_lock(&ar->mutex);
1670 		err = carl9170_update_survey(ar, false, true);
1671 		mutex_unlock(&ar->mutex);
1672 		if (err)
1673 			return err;
1674 	}
1675 
1676 	for (b = 0; b < IEEE80211_NUM_BANDS; b++) {
1677 		band = ar->hw->wiphy->bands[b];
1678 
1679 		if (!band)
1680 			continue;
1681 
1682 		for (i = 0; i < band->n_channels; i++) {
1683 			if (band->channels[i].hw_value == idx) {
1684 				chan = &band->channels[i];
1685 				goto found;
1686 			}
1687 		}
1688 	}
1689 	return -ENOENT;
1690 
1691 found:
1692 	memcpy(survey, &ar->survey[idx], sizeof(*survey));
1693 
1694 	survey->channel = chan;
1695 	survey->filled = SURVEY_INFO_NOISE_DBM;
1696 
1697 	if (ar->channel == chan)
1698 		survey->filled |= SURVEY_INFO_IN_USE;
1699 
1700 	if (ar->fw.hw_counters) {
1701 		survey->filled |= SURVEY_INFO_CHANNEL_TIME |
1702 				  SURVEY_INFO_CHANNEL_TIME_BUSY |
1703 				  SURVEY_INFO_CHANNEL_TIME_TX;
1704 	}
1705 
1706 	return 0;
1707 }
1708 
1709 static void carl9170_op_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
1710 {
1711 	struct ar9170 *ar = hw->priv;
1712 	unsigned int vid;
1713 
1714 	mutex_lock(&ar->mutex);
1715 	for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1716 		carl9170_flush_cab(ar, vid);
1717 
1718 	carl9170_flush(ar, drop);
1719 	mutex_unlock(&ar->mutex);
1720 }
1721 
1722 static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1723 				 struct ieee80211_low_level_stats *stats)
1724 {
1725 	struct ar9170 *ar = hw->priv;
1726 
1727 	memset(stats, 0, sizeof(*stats));
1728 	stats->dot11ACKFailureCount = ar->tx_ack_failures;
1729 	stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1730 	return 0;
1731 }
1732 
1733 static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1734 				   struct ieee80211_vif *vif,
1735 				   enum sta_notify_cmd cmd,
1736 				   struct ieee80211_sta *sta)
1737 {
1738 	struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1739 
1740 	switch (cmd) {
1741 	case STA_NOTIFY_SLEEP:
1742 		sta_info->sleeping = true;
1743 		if (atomic_read(&sta_info->pending_frames))
1744 			ieee80211_sta_block_awake(hw, sta, true);
1745 		break;
1746 
1747 	case STA_NOTIFY_AWAKE:
1748 		sta_info->sleeping = false;
1749 		break;
1750 	}
1751 }
1752 
1753 static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
1754 {
1755 	struct ar9170 *ar = hw->priv;
1756 
1757 	return !!atomic_read(&ar->tx_total_queued);
1758 }
1759 
1760 static const struct ieee80211_ops carl9170_ops = {
1761 	.start			= carl9170_op_start,
1762 	.stop			= carl9170_op_stop,
1763 	.tx			= carl9170_op_tx,
1764 	.flush			= carl9170_op_flush,
1765 	.add_interface		= carl9170_op_add_interface,
1766 	.remove_interface	= carl9170_op_remove_interface,
1767 	.config			= carl9170_op_config,
1768 	.prepare_multicast	= carl9170_op_prepare_multicast,
1769 	.configure_filter	= carl9170_op_configure_filter,
1770 	.conf_tx		= carl9170_op_conf_tx,
1771 	.bss_info_changed	= carl9170_op_bss_info_changed,
1772 	.get_tsf		= carl9170_op_get_tsf,
1773 	.set_key		= carl9170_op_set_key,
1774 	.sta_add		= carl9170_op_sta_add,
1775 	.sta_remove		= carl9170_op_sta_remove,
1776 	.sta_notify		= carl9170_op_sta_notify,
1777 	.get_survey		= carl9170_op_get_survey,
1778 	.get_stats		= carl9170_op_get_stats,
1779 	.ampdu_action		= carl9170_op_ampdu_action,
1780 	.tx_frames_pending	= carl9170_tx_frames_pending,
1781 };
1782 
1783 void *carl9170_alloc(size_t priv_size)
1784 {
1785 	struct ieee80211_hw *hw;
1786 	struct ar9170 *ar;
1787 	struct sk_buff *skb;
1788 	int i;
1789 
1790 	/*
1791 	 * This buffer is used for rx stream reconstruction.
1792 	 * Under heavy load this device (or the transport layer?)
1793 	 * tends to split the streams into separate rx descriptors.
1794 	 */
1795 
1796 	skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1797 	if (!skb)
1798 		goto err_nomem;
1799 
1800 	hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1801 	if (!hw)
1802 		goto err_nomem;
1803 
1804 	ar = hw->priv;
1805 	ar->hw = hw;
1806 	ar->rx_failover = skb;
1807 
1808 	memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1809 	ar->rx_has_plcp = false;
1810 
1811 	/*
1812 	 * Here's a hidden pitfall!
1813 	 *
1814 	 * All 4 AC queues work perfectly well under _legacy_ operation.
1815 	 * However as soon as aggregation is enabled, the traffic flow
1816 	 * gets very bumpy. Therefore we have to _switch_ to a
1817 	 * software AC with a single HW queue.
1818 	 */
1819 	hw->queues = __AR9170_NUM_TXQ;
1820 
1821 	mutex_init(&ar->mutex);
1822 	spin_lock_init(&ar->beacon_lock);
1823 	spin_lock_init(&ar->cmd_lock);
1824 	spin_lock_init(&ar->tx_stats_lock);
1825 	spin_lock_init(&ar->tx_ampdu_list_lock);
1826 	spin_lock_init(&ar->mem_lock);
1827 	spin_lock_init(&ar->state_lock);
1828 	atomic_set(&ar->pending_restarts, 0);
1829 	ar->vifs = 0;
1830 	for (i = 0; i < ar->hw->queues; i++) {
1831 		skb_queue_head_init(&ar->tx_status[i]);
1832 		skb_queue_head_init(&ar->tx_pending[i]);
1833 
1834 		INIT_LIST_HEAD(&ar->bar_list[i]);
1835 		spin_lock_init(&ar->bar_list_lock[i]);
1836 	}
1837 	INIT_WORK(&ar->ps_work, carl9170_ps_work);
1838 	INIT_WORK(&ar->ping_work, carl9170_ping_work);
1839 	INIT_WORK(&ar->restart_work, carl9170_restart_work);
1840 	INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1841 	INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
1842 	INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
1843 	INIT_LIST_HEAD(&ar->tx_ampdu_list);
1844 	rcu_assign_pointer(ar->tx_ampdu_iter,
1845 			   (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1846 
1847 	bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1848 	INIT_LIST_HEAD(&ar->vif_list);
1849 	init_completion(&ar->tx_flush);
1850 
1851 	/* firmware decides which modes we support */
1852 	hw->wiphy->interface_modes = 0;
1853 
1854 	hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS |
1855 		     IEEE80211_HW_MFP_CAPABLE |
1856 		     IEEE80211_HW_REPORTS_TX_ACK_STATUS |
1857 		     IEEE80211_HW_SUPPORTS_PS |
1858 		     IEEE80211_HW_PS_NULLFUNC_STACK |
1859 		     IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC |
1860 		     IEEE80211_HW_SIGNAL_DBM;
1861 
1862 	if (!modparam_noht) {
1863 		/*
1864 		 * See the comment above for why we allow the user
1865 		 * to disable HT with a module parameter.
1866 		 */
1867 		hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
1868 	}
1869 
1870 	hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1871 	hw->sta_data_size = sizeof(struct carl9170_sta_info);
1872 	hw->vif_data_size = sizeof(struct carl9170_vif_info);
1873 
1874 	hw->max_rates = CARL9170_TX_MAX_RATES;
1875 	hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
1876 
1877 	for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1878 		ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1879 
1880 	return ar;
1881 
1882 err_nomem:
1883 	kfree_skb(skb);
1884 	return ERR_PTR(-ENOMEM);
1885 }
1886 
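/* Copy the calibration EEPROM into ar->eeprom by issuing register read
 * commands for eight consecutive 32-bit words at a time, starting at
 * AR9170_EEPROM_START.
 */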
1887 static int carl9170_read_eeprom(struct ar9170 *ar)
1888 {
1889 #define RW	8	/* number of words to read at once */
1890 #define RB	(sizeof(u32) * RW)
1891 	u8 *eeprom = (void *)&ar->eeprom;
1892 	__le32 offsets[RW];
1893 	int i, j, err;
1894 
1895 	BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1896 
1897 	BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
1898 #ifndef __CHECKER__
1899 	/* don't want to handle trailing remains */
1900 	BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1901 #endif
1902 
1903 	for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
1904 		for (j = 0; j < RW; j++)
1905 			offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1906 						 RB * i + 4 * j);
1907 
1908 		err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1909 					RB, (u8 *) &offsets,
1910 					RB, eeprom + RB * i);
1911 		if (err)
1912 			return err;
1913 	}
1914 
1915 #undef RW
1916 #undef RB
1917 	return 0;
1918 }
1919 
1920 static int carl9170_parse_eeprom(struct ar9170 *ar)
1921 {
1922 	struct ath_regulatory *regulatory = &ar->common.regulatory;
1923 	unsigned int rx_streams, tx_streams, tx_params = 0;
1924 	int bands = 0;
1925 	int chans = 0;
1926 
1927 	if (ar->eeprom.length == cpu_to_le16(0xffff))
1928 		return -ENODATA;
1929 
1930 	rx_streams = hweight8(ar->eeprom.rx_mask);
1931 	tx_streams = hweight8(ar->eeprom.tx_mask);
1932 
1933 	if (rx_streams != tx_streams) {
1934 		tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1935 
1936 		WARN_ON(!(tx_streams >= 1 && tx_streams <=
1937 			IEEE80211_HT_MCS_TX_MAX_STREAMS));
1938 
1939 		tx_params = (tx_streams - 1) <<
1940 			    IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1941 
1942 		carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1943 		carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1944 	}
1945 
1946 	if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1947 		ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1948 			&carl9170_band_2GHz;
1949 		chans += carl9170_band_2GHz.n_channels;
1950 		bands++;
1951 	}
1952 	if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1953 		ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1954 			&carl9170_band_5GHz;
1955 		chans += carl9170_band_5GHz.n_channels;
1956 		bands++;
1957 	}
1958 
1959 	if (!bands)
1960 		return -EINVAL;
1961 
1962 	ar->survey = kzalloc(sizeof(struct survey_info) * chans, GFP_KERNEL);
1963 	if (!ar->survey)
1964 		return -ENOMEM;
1965 	ar->num_channels = chans;
1966 
1967 	/*
1968 	 * I measured this: a band switch takes roughly
1969 	 * 135 ms and a frequency switch about 80 ms.
1970 	 *
1971 	 * FIXME: measure these values again once EEPROM settings
1972 	 *	  are used, that will influence them!
1973 	 */
1974 	if (bands == 2)
1975 		ar->hw->channel_change_time = 135 * 1000;
1976 	else
1977 		ar->hw->channel_change_time = 80 * 1000;
1978 
1979 	regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1980 
1981 	/* second part of wiphy init */
1982 	SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1983 
1984 	return 0;
1985 }
1986 
1987 static void carl9170_reg_notifier(struct wiphy *wiphy,
1988 				  struct regulatory_request *request)
1989 {
1990 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1991 	struct ar9170 *ar = hw->priv;
1992 
1993 	ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1994 }
1995 
1996 int carl9170_register(struct ar9170 *ar)
1997 {
1998 	struct ath_regulatory *regulatory = &ar->common.regulatory;
1999 	int err = 0, i;
2000 
2001 	if (WARN_ON(ar->mem_bitmap))
2002 		return -EINVAL;
2003 
2004 	ar->mem_bitmap = kzalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG) *
2005 				 sizeof(unsigned long), GFP_KERNEL);
2006 
2007 	if (!ar->mem_bitmap)
2008 		return -ENOMEM;
2009 
2010 	/* try to read EEPROM, init MAC addr */
2011 	err = carl9170_read_eeprom(ar);
2012 	if (err)
2013 		return err;
2014 
2015 	err = carl9170_parse_eeprom(ar);
2016 	if (err)
2017 		return err;
2018 
2019 	err = ath_regd_init(regulatory, ar->hw->wiphy,
2020 			    carl9170_reg_notifier);
2021 	if (err)
2022 		return err;
2023 
2024 	if (modparam_noht) {
2025 		carl9170_band_2GHz.ht_cap.ht_supported = false;
2026 		carl9170_band_5GHz.ht_cap.ht_supported = false;
2027 	}
2028 
2029 	for (i = 0; i < ar->fw.vif_num; i++) {
2030 		ar->vif_priv[i].id = i;
2031 		ar->vif_priv[i].vif = NULL;
2032 	}
2033 
2034 	err = ieee80211_register_hw(ar->hw);
2035 	if (err)
2036 		return err;
2037 
2038 	/* mac80211 interface is now registered */
2039 	ar->registered = true;
2040 
2041 	if (!ath_is_world_regd(regulatory))
2042 		regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
2043 
2044 #ifdef CONFIG_CARL9170_DEBUGFS
2045 	carl9170_debugfs_register(ar);
2046 #endif /* CONFIG_CARL9170_DEBUGFS */
2047 
2048 	err = carl9170_led_init(ar);
2049 	if (err)
2050 		goto err_unreg;
2051 
2052 #ifdef CONFIG_CARL9170_LEDS
2053 	err = carl9170_led_register(ar);
2054 	if (err)
2055 		goto err_unreg;
2056 #endif /* CONFIG_CARL9170_LEDS */
2057 
2058 #ifdef CONFIG_CARL9170_WPC
2059 	err = carl9170_register_wps_button(ar);
2060 	if (err)
2061 		goto err_unreg;
2062 #endif /* CONFIG_CARL9170_WPC */
2063 
2064 #ifdef CONFIG_CARL9170_HWRNG
2065 	err = carl9170_register_hwrng(ar);
2066 	if (err)
2067 		goto err_unreg;
2068 #endif /* CONFIG_CARL9170_HWRNG */
2069 
2070 	dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
2071 		 wiphy_name(ar->hw->wiphy));
2072 
2073 	return 0;
2074 
2075 err_unreg:
2076 	carl9170_unregister(ar);
2077 	return err;
2078 }
2079 
2080 void carl9170_unregister(struct ar9170 *ar)
2081 {
2082 	if (!ar->registered)
2083 		return;
2084 
2085 	ar->registered = false;
2086 
2087 #ifdef CONFIG_CARL9170_LEDS
2088 	carl9170_led_unregister(ar);
2089 #endif /* CONFIG_CARL9170_LEDS */
2090 
2091 #ifdef CONFIG_CARL9170_DEBUGFS
2092 	carl9170_debugfs_unregister(ar);
2093 #endif /* CONFIG_CARL9170_DEBUGFS */
2094 
2095 #ifdef CONFIG_CARL9170_WPC
2096 	if (ar->wps.pbc) {
2097 		input_unregister_device(ar->wps.pbc);
2098 		ar->wps.pbc = NULL;
2099 	}
2100 #endif /* CONFIG_CARL9170_WPC */
2101 
2102 #ifdef CONFIG_CARL9170_HWRNG
2103 	carl9170_unregister_hwrng(ar);
2104 #endif /* CONFIG_CARL9170_HWRNG */
2105 
2106 	carl9170_cancel_worker(ar);
2107 	cancel_work_sync(&ar->restart_work);
2108 
2109 	ieee80211_unregister_hw(ar->hw);
2110 }
2111 
2112 void carl9170_free(struct ar9170 *ar)
2113 {
2114 	WARN_ON(ar->registered);
2115 	WARN_ON(IS_INITIALIZED(ar));
2116 
2117 	kfree_skb(ar->rx_failover);
2118 	ar->rx_failover = NULL;
2119 
2120 	kfree(ar->mem_bitmap);
2121 	ar->mem_bitmap = NULL;
2122 
2123 	kfree(ar->survey);
2124 	ar->survey = NULL;
2125 
2126 	mutex_destroy(&ar->mutex);
2127 
2128 	ieee80211_free_hw(ar->hw);
2129 }
2130