1 /*
2 * Atheros CARL9170 driver
3 *
4 * mac80211 interaction code
5 *
6 * Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
7 * Copyright 2009, 2010, Christian Lamparter <chunkeey@googlemail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; see the file COPYING. If not, see
21 * http://www.gnu.org/licenses/.
22 *
23 * This file incorporates work covered by the following copyright and
24 * permission notice:
25 * Copyright (c) 2007-2008 Atheros Communications, Inc.
26 *
27 * Permission to use, copy, modify, and/or distribute this software for any
28 * purpose with or without fee is hereby granted, provided that the above
29 * copyright notice and this permission notice appear in all copies.
30 *
31 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
32 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
33 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
34 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
35 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
36 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
37 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
38 */
39
40 #include <linux/slab.h>
41 #include <linux/module.h>
42 #include <linux/etherdevice.h>
43 #include <linux/random.h>
44 #include <net/mac80211.h>
45 #include <net/cfg80211.h>
46 #include "hw.h"
47 #include "carl9170.h"
48 #include "cmd.h"
49
50 static bool modparam_nohwcrypt;
51 module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444);
52 MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
53
54 int modparam_noht;
55 module_param_named(noht, modparam_noht, int, 0444);
56 MODULE_PARM_DESC(noht, "Disable MPDU aggregation.");
57
58 #define RATE(_bitrate, _hw_rate, _txpidx, _flags) { \
59 .bitrate = (_bitrate), \
60 .flags = (_flags), \
61 .hw_value = (_hw_rate) | (_txpidx) << 4, \
62 }
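/*
 * Note: hw_value packs the hardware rate code into the low nibble and the
 * _txpidx value into bits 4 and up. For example, the 36 Mbit/s entry below,
 * RATE(360, 0xd, 1, 0), ends up with hw_value = 0xd | (1 << 4) = 0x1d.
 * (_txpidx presumably selects a per-rate tx power entry, as its name suggests.)
 */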
63
64 struct ieee80211_rate __carl9170_ratetable[] = {
65 RATE(10, 0, 0, 0),
66 RATE(20, 1, 1, IEEE80211_RATE_SHORT_PREAMBLE),
67 RATE(55, 2, 2, IEEE80211_RATE_SHORT_PREAMBLE),
68 RATE(110, 3, 3, IEEE80211_RATE_SHORT_PREAMBLE),
69 RATE(60, 0xb, 0, 0),
70 RATE(90, 0xf, 0, 0),
71 RATE(120, 0xa, 0, 0),
72 RATE(180, 0xe, 0, 0),
73 RATE(240, 0x9, 0, 0),
74 RATE(360, 0xd, 1, 0),
75 RATE(480, 0x8, 2, 0),
76 RATE(540, 0xc, 3, 0),
77 };
78 #undef RATE
79
80 #define carl9170_g_ratetable (__carl9170_ratetable + 0)
81 #define carl9170_g_ratetable_size 12
82 #define carl9170_a_ratetable (__carl9170_ratetable + 4)
83 #define carl9170_a_ratetable_size 8
84
85 /*
86 * NB: The hw_value is used as an index into the carl9170_phy_freq_params
87 * array in phy.c so that we don't have to do frequency lookups!
88 */
89 #define CHAN(_freq, _idx) { \
90 .center_freq = (_freq), \
91 .hw_value = (_idx), \
92 .max_power = 18, /* XXX */ \
93 }
94
95 static struct ieee80211_channel carl9170_2ghz_chantable[] = {
96 CHAN(2412, 0),
97 CHAN(2417, 1),
98 CHAN(2422, 2),
99 CHAN(2427, 3),
100 CHAN(2432, 4),
101 CHAN(2437, 5),
102 CHAN(2442, 6),
103 CHAN(2447, 7),
104 CHAN(2452, 8),
105 CHAN(2457, 9),
106 CHAN(2462, 10),
107 CHAN(2467, 11),
108 CHAN(2472, 12),
109 CHAN(2484, 13),
110 };
111
112 static struct ieee80211_channel carl9170_5ghz_chantable[] = {
113 CHAN(4920, 14),
114 CHAN(4940, 15),
115 CHAN(4960, 16),
116 CHAN(4980, 17),
117 CHAN(5040, 18),
118 CHAN(5060, 19),
119 CHAN(5080, 20),
120 CHAN(5180, 21),
121 CHAN(5200, 22),
122 CHAN(5220, 23),
123 CHAN(5240, 24),
124 CHAN(5260, 25),
125 CHAN(5280, 26),
126 CHAN(5300, 27),
127 CHAN(5320, 28),
128 CHAN(5500, 29),
129 CHAN(5520, 30),
130 CHAN(5540, 31),
131 CHAN(5560, 32),
132 CHAN(5580, 33),
133 CHAN(5600, 34),
134 CHAN(5620, 35),
135 CHAN(5640, 36),
136 CHAN(5660, 37),
137 CHAN(5680, 38),
138 CHAN(5700, 39),
139 CHAN(5745, 40),
140 CHAN(5765, 41),
141 CHAN(5785, 42),
142 CHAN(5805, 43),
143 CHAN(5825, 44),
144 CHAN(5170, 45),
145 CHAN(5190, 46),
146 CHAN(5210, 47),
147 CHAN(5230, 48),
148 };
149 #undef CHAN
150
151 #define CARL9170_HT_CAP \
152 { \
153 .ht_supported = true, \
154 .cap = IEEE80211_HT_CAP_MAX_AMSDU | \
155 IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
156 IEEE80211_HT_CAP_SGI_40 | \
157 IEEE80211_HT_CAP_DSSSCCK40 | \
158 IEEE80211_HT_CAP_SM_PS, \
159 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K, \
160 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
161 .mcs = { \
162 .rx_mask = { 0xff, 0xff, 0, 0, 0x1, 0, 0, 0, 0, 0, }, \
163 .rx_highest = cpu_to_le16(300), \
164 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
165 }, \
166 }
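/*
 * The rx_mask above advertises MCS 0-15 (two spatial streams) plus MCS 32
 * (bit 0 of the fifth octet, the 40 MHz duplicate rate); rx_highest of
 * 300 Mbit/s corresponds to MCS 15 at 40 MHz with a short guard interval.
 */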
167
168 static struct ieee80211_supported_band carl9170_band_2GHz = {
169 .channels = carl9170_2ghz_chantable,
170 .n_channels = ARRAY_SIZE(carl9170_2ghz_chantable),
171 .bitrates = carl9170_g_ratetable,
172 .n_bitrates = carl9170_g_ratetable_size,
173 .ht_cap = CARL9170_HT_CAP,
174 };
175
176 static struct ieee80211_supported_band carl9170_band_5GHz = {
177 .channels = carl9170_5ghz_chantable,
178 .n_channels = ARRAY_SIZE(carl9170_5ghz_chantable),
179 .bitrates = carl9170_a_ratetable,
180 .n_bitrates = carl9170_a_ratetable_size,
181 .ht_cap = CARL9170_HT_CAP,
182 };
183
184 static void carl9170_ampdu_gc(struct ar9170 *ar)
185 {
186 struct carl9170_sta_tid *tid_info;
187 LIST_HEAD(tid_gc);
188
189 rcu_read_lock();
190 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
191 spin_lock_bh(&ar->tx_ampdu_list_lock);
192 if (tid_info->state == CARL9170_TID_STATE_SHUTDOWN) {
193 tid_info->state = CARL9170_TID_STATE_KILLED;
194 list_del_rcu(&tid_info->list);
195 ar->tx_ampdu_list_len--;
196 list_add_tail(&tid_info->tmp_list, &tid_gc);
197 }
198 spin_unlock_bh(&ar->tx_ampdu_list_lock);
199
200 }
201 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
202 rcu_read_unlock();
203
204 synchronize_rcu();
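/*
 * After the grace period above, no RCU reader can still hold a reference
 * to the tid_info entries unlinked from tx_ampdu_list, so their queues
 * can be drained and the entries freed safely.
 */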
205
206 while (!list_empty(&tid_gc)) {
207 struct sk_buff *skb;
208 tid_info = list_first_entry(&tid_gc, struct carl9170_sta_tid,
209 tmp_list);
210
211 while ((skb = __skb_dequeue(&tid_info->queue)))
212 carl9170_tx_status(ar, skb, false);
213
214 list_del_init(&tid_info->tmp_list);
215 kfree(tid_info);
216 }
217 }
218
219 static void carl9170_flush(struct ar9170 *ar, bool drop_queued)
220 {
221 if (drop_queued) {
222 int i;
223
224 /*
225 * We can only drop frames which have not been uploaded
226 * to the device yet.
227 */
228
229 for (i = 0; i < ar->hw->queues; i++) {
230 struct sk_buff *skb;
231
232 while ((skb = skb_dequeue(&ar->tx_pending[i]))) {
233 struct ieee80211_tx_info *info;
234
235 info = IEEE80211_SKB_CB(skb);
236 if (info->flags & IEEE80211_TX_CTL_AMPDU)
237 atomic_dec(&ar->tx_ampdu_upload);
238
239 carl9170_tx_status(ar, skb, false);
240 }
241 }
242 }
243
244 /* Wait for all other outstanding frames to timeout. */
245 if (atomic_read(&ar->tx_total_queued))
246 WARN_ON(wait_for_completion_timeout(&ar->tx_flush, HZ) == 0);
247 }
248
249 static void carl9170_flush_ba(struct ar9170 *ar)
250 {
251 struct sk_buff_head free;
252 struct carl9170_sta_tid *tid_info;
253 struct sk_buff *skb;
254
255 __skb_queue_head_init(&free);
256
257 rcu_read_lock();
258 spin_lock_bh(&ar->tx_ampdu_list_lock);
259 list_for_each_entry_rcu(tid_info, &ar->tx_ampdu_list, list) {
260 if (tid_info->state > CARL9170_TID_STATE_SUSPEND) {
261 tid_info->state = CARL9170_TID_STATE_SUSPEND;
262
263 spin_lock(&tid_info->lock);
264 while ((skb = __skb_dequeue(&tid_info->queue)))
265 __skb_queue_tail(&free, skb);
266 spin_unlock(&tid_info->lock);
267 }
268 }
269 spin_unlock_bh(&ar->tx_ampdu_list_lock);
270 rcu_read_unlock();
271
272 while ((skb = __skb_dequeue(&free)))
273 carl9170_tx_status(ar, skb, false);
274 }
275
276 static void carl9170_zap_queues(struct ar9170 *ar)
277 {
278 struct carl9170_vif_info *cvif;
279 unsigned int i;
280
281 carl9170_ampdu_gc(ar);
282
283 carl9170_flush_ba(ar);
284 carl9170_flush(ar, true);
285
286 for (i = 0; i < ar->hw->queues; i++) {
287 spin_lock_bh(&ar->tx_status[i].lock);
288 while (!skb_queue_empty(&ar->tx_status[i])) {
289 struct sk_buff *skb;
290
291 skb = skb_peek(&ar->tx_status[i]);
292 carl9170_tx_get_skb(skb);
293 spin_unlock_bh(&ar->tx_status[i].lock);
294 carl9170_tx_drop(ar, skb);
295 spin_lock_bh(&ar->tx_status[i].lock);
296 carl9170_tx_put_skb(skb);
297 }
298 spin_unlock_bh(&ar->tx_status[i].lock);
299 }
300
301 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_SOFT < 1);
302 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD < CARL9170_NUM_TX_LIMIT_SOFT);
303 BUILD_BUG_ON(CARL9170_NUM_TX_LIMIT_HARD >= CARL9170_BAW_BITS);
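/*
 * Compile-time sanity checks: the soft TX limit must be at least 1, the
 * hard limit must not be lower than the soft limit, and the hard limit
 * has to stay below the block-ack window size (CARL9170_BAW_BITS).
 */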
304
305 /* reinitialize queue statistics */
306 memset(&ar->tx_stats, 0, sizeof(ar->tx_stats));
307 for (i = 0; i < ar->hw->queues; i++)
308 ar->tx_stats[i].limit = CARL9170_NUM_TX_LIMIT_HARD;
309
310 for (i = 0; i < DIV_ROUND_UP(ar->fw.mem_blocks, BITS_PER_LONG); i++)
311 ar->mem_bitmap[i] = 0;
312
313 rcu_read_lock();
314 list_for_each_entry_rcu(cvif, &ar->vif_list, list) {
315 spin_lock_bh(&ar->beacon_lock);
316 dev_kfree_skb_any(cvif->beacon);
317 cvif->beacon = NULL;
318 spin_unlock_bh(&ar->beacon_lock);
319 }
320 rcu_read_unlock();
321
322 atomic_set(&ar->tx_ampdu_upload, 0);
323 atomic_set(&ar->tx_ampdu_scheduler, 0);
324 atomic_set(&ar->tx_total_pending, 0);
325 atomic_set(&ar->tx_total_queued, 0);
326 atomic_set(&ar->mem_free_blocks, ar->fw.mem_blocks);
327 }
328
329 #define CARL9170_FILL_QUEUE(queue, ai_fs, cwmin, cwmax, _txop) \
330 do { \
331 queue.aifs = ai_fs; \
332 queue.cw_min = cwmin; \
333 queue.cw_max = cwmax; \
334 queue.txop = _txop; \
335 } while (0)
336
337 static int carl9170_op_start(struct ieee80211_hw *hw)
338 {
339 struct ar9170 *ar = hw->priv;
340 int err, i;
341
342 mutex_lock(&ar->mutex);
343
344 carl9170_zap_queues(ar);
345
346 /* reset QoS defaults */
347 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VO], 2, 3, 7, 47);
348 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_VI], 2, 7, 15, 94);
349 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BE], 3, 15, 1023, 0);
350 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_BK], 7, 15, 1023, 0);
351 CARL9170_FILL_QUEUE(ar->edcf[AR9170_TXQ_SPECIAL], 2, 3, 7, 0);
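/*
 * These defaults appear to mirror the standard WMM/EDCA parameter set
 * (e.g. AC_VO: AIFSN 2, CWmin 3, CWmax 7; AC_VI: AIFSN 2, CWmin 7,
 * CWmax 15), with TXOP given in the usual 32 us units
 * (47 ~= 1.5 ms, 94 ~= 3 ms).
 */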
352
353 ar->current_factor = ar->current_density = -1;
354 /* "The first key is unique." */
355 ar->usedkeys = 1;
356 ar->filter_state = 0;
357 ar->ps.last_action = jiffies;
358 ar->ps.last_slept = jiffies;
359 ar->erp_mode = CARL9170_ERP_AUTO;
360
361 /* Set "disable hw crypto offload" whenever the module parameter
362 * nohwcrypt is true or if the firmware does not support it.
363 */
364 ar->disable_offload = modparam_nohwcrypt |
365 ar->fw.disable_offload_fw;
366 ar->rx_software_decryption = ar->disable_offload;
367
368 for (i = 0; i < ar->hw->queues; i++) {
369 ar->queue_stop_timeout[i] = jiffies;
370 ar->max_queue_stop_timeout[i] = 0;
371 }
372
373 atomic_set(&ar->mem_allocs, 0);
374
375 err = carl9170_usb_open(ar);
376 if (err)
377 goto out;
378
379 err = carl9170_init_mac(ar);
380 if (err)
381 goto out;
382
383 err = carl9170_set_qos(ar);
384 if (err)
385 goto out;
386
387 if (ar->fw.rx_filter) {
388 err = carl9170_rx_filter(ar, CARL9170_RX_FILTER_OTHER_RA |
389 CARL9170_RX_FILTER_CTL_OTHER | CARL9170_RX_FILTER_BAD);
390 if (err)
391 goto out;
392 }
393
394 err = carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER,
395 AR9170_DMA_TRIGGER_RXQ);
396 if (err)
397 goto out;
398
399 /* Clear key-cache */
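/*
 * Both key halves (index 0 and 1; the second half is used for the TKIP
 * MIC key, see carl9170_op_set_key below) are wiped for every entry.
 * The "+ 4" presumably covers the extra slots reserved for default/group
 * keys beyond the per-station CAM entries.
 */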
400 for (i = 0; i < AR9170_CAM_MAX_USER + 4; i++) {
401 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
402 0, NULL, 0);
403 if (err)
404 goto out;
405
406 err = carl9170_upload_key(ar, i, NULL, AR9170_ENC_ALG_NONE,
407 1, NULL, 0);
408 if (err)
409 goto out;
410
411 if (i < AR9170_CAM_MAX_USER) {
412 err = carl9170_disable_key(ar, i);
413 if (err)
414 goto out;
415 }
416 }
417
418 carl9170_set_state_when(ar, CARL9170_IDLE, CARL9170_STARTED);
419
420 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
421 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
422
423 ieee80211_wake_queues(ar->hw);
424 err = 0;
425
426 out:
427 mutex_unlock(&ar->mutex);
428 return err;
429 }
430
431 static void carl9170_cancel_worker(struct ar9170 *ar)
432 {
433 cancel_delayed_work_sync(&ar->stat_work);
434 cancel_delayed_work_sync(&ar->tx_janitor);
435 #ifdef CONFIG_CARL9170_LEDS
436 cancel_delayed_work_sync(&ar->led_work);
437 #endif /* CONFIG_CARL9170_LEDS */
438 cancel_work_sync(&ar->ps_work);
439 cancel_work_sync(&ar->ping_work);
440 cancel_work_sync(&ar->ampdu_work);
441 }
442
443 static void carl9170_op_stop(struct ieee80211_hw *hw)
444 {
445 struct ar9170 *ar = hw->priv;
446
447 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
448
449 ieee80211_stop_queues(ar->hw);
450
451 mutex_lock(&ar->mutex);
452 if (IS_ACCEPTING_CMD(ar)) {
453 RCU_INIT_POINTER(ar->beacon_iter, NULL);
454
455 carl9170_led_set_state(ar, 0);
456
457 /* stop DMA */
458 carl9170_write_reg(ar, AR9170_MAC_REG_DMA_TRIGGER, 0);
459 carl9170_usb_stop(ar);
460 }
461
462 carl9170_zap_queues(ar);
463 mutex_unlock(&ar->mutex);
464
465 carl9170_cancel_worker(ar);
466 }
467
468 static void carl9170_restart_work(struct work_struct *work)
469 {
470 struct ar9170 *ar = container_of(work, struct ar9170,
471 restart_work);
472 int err = -EIO;
473
474 ar->usedkeys = 0;
475 ar->filter_state = 0;
476 carl9170_cancel_worker(ar);
477
478 mutex_lock(&ar->mutex);
479 if (!ar->force_usb_reset) {
480 err = carl9170_usb_restart(ar);
481 if (net_ratelimit()) {
482 if (err)
483 dev_err(&ar->udev->dev, "Failed to restart device (%d).\n", err);
484 else
485 dev_info(&ar->udev->dev, "device restarted successfully.\n");
486 }
487 }
488 carl9170_zap_queues(ar);
489 mutex_unlock(&ar->mutex);
490
491 if (!err && !ar->force_usb_reset) {
492 ar->restart_counter++;
493 atomic_set(&ar->pending_restarts, 0);
494
495 ieee80211_restart_hw(ar->hw);
496 } else {
497 /*
498 * The reset was unsuccessful and the device seems to
499 * be dead. But there's still one option: a low-level
500 * usb subsystem reset...
501 */
502
503 carl9170_usb_reset(ar);
504 }
505 }
506
507 void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
508 {
509 carl9170_set_state_when(ar, CARL9170_STARTED, CARL9170_IDLE);
510
511 /*
512 * Sometimes, an error can trigger several different reset events.
513 * By ignoring these *surplus* reset events, the device won't be
514 * killed again, right after it has recovered.
515 */
516 if (atomic_inc_return(&ar->pending_restarts) > 1) {
517 dev_dbg(&ar->udev->dev, "ignoring restart (%d)\n", r);
518 return;
519 }
520
521 ieee80211_stop_queues(ar->hw);
522
523 dev_err(&ar->udev->dev, "restart device (%d)\n", r);
524
525 if (!WARN_ON(r == CARL9170_RR_NO_REASON) ||
526 !WARN_ON(r >= __CARL9170_RR_LAST))
527 ar->last_reason = r;
528
529 if (!ar->registered)
530 return;
531
532 if (!IS_ACCEPTING_CMD(ar) || ar->needs_full_reset)
533 ar->force_usb_reset = true;
534
535 ieee80211_queue_work(ar->hw, &ar->restart_work);
536
537 /*
538 * At this point, the device instance might have vanished or been
539 * disabled. So, don't put any code here that accesses the ar9170
540 * struct without proper protection.
541 */
542 }
543
544 static void carl9170_ping_work(struct work_struct *work)
545 {
546 struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
547 int err;
548
549 if (!IS_STARTED(ar))
550 return;
551
552 mutex_lock(&ar->mutex);
553 err = carl9170_echo_test(ar, 0xdeadbeef);
554 if (err)
555 carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
556 mutex_unlock(&ar->mutex);
557 }
558
559 static int carl9170_init_interface(struct ar9170 *ar,
560 struct ieee80211_vif *vif)
561 {
562 struct ath_common *common = &ar->common;
563 int err;
564
565 if (!vif) {
566 WARN_ON_ONCE(IS_STARTED(ar));
567 return 0;
568 }
569
570 memcpy(common->macaddr, vif->addr, ETH_ALEN);
571
572 /* We have to fall back to software crypto whenever
573 * the user chooses to participate in an IBSS. HW
574 * offload for IBSS RSN is not supported by this driver.
575 *
576 * NOTE: If the previous main interface has already
577 * disabled hw crypto offload, we have to keep this
578 * previous disable_offload setting as it was.
579 * Although ideally we should notify mac80211 and tell
580 * it to forget about any HW crypto offload for now.
581 */
582 ar->disable_offload |= ((vif->type != NL80211_IFTYPE_STATION) &&
583 (vif->type != NL80211_IFTYPE_AP));
584
585 /* The driver used to have P2P GO+CLIENT support,
586 * but that support has since been dropped. Since we don't know
587 * if there are any gremlins lurking in the shadows, it is best
588 * to keep HW offload disabled for P2P.
589 */
590 ar->disable_offload |= vif->p2p;
591
592 ar->rx_software_decryption = ar->disable_offload;
593
594 err = carl9170_set_operating_mode(ar);
595 return err;
596 }
597
598 static int carl9170_op_add_interface(struct ieee80211_hw *hw,
599 struct ieee80211_vif *vif)
600 {
601 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
602 struct ieee80211_vif *main_vif, *old_main = NULL;
603 struct ar9170 *ar = hw->priv;
604 int vif_id = -1, err = 0;
605
606 mutex_lock(&ar->mutex);
607 rcu_read_lock();
608 if (vif_priv->active) {
609 /*
610 * Skip the interface structure initialization,
611 * if the vif survived the _restart call.
612 */
613 vif_id = vif_priv->id;
614 vif_priv->enable_beacon = false;
615
616 spin_lock_bh(&ar->beacon_lock);
617 dev_kfree_skb_any(vif_priv->beacon);
618 vif_priv->beacon = NULL;
619 spin_unlock_bh(&ar->beacon_lock);
620
621 goto init;
622 }
623
624 /* The AR9170 HW's MAC doesn't provide full support for multiple,
625 * independent interfaces [of different operation modes], so we
626 * have to select ONE main interface [main mode of HW], but we
627 * can have multiple slaves [AKA: entries in the ACK-table].
628 *
629 * The first (from HEAD/TOP) interface in the ar->vif_list is
630 * always the main intf. All following intfs in this list
631 * are considered to be slave intfs.
632 */
633 main_vif = carl9170_get_main_vif(ar);
634
635 if (main_vif) {
636 switch (main_vif->type) {
637 case NL80211_IFTYPE_STATION:
638 if (vif->type == NL80211_IFTYPE_STATION)
639 break;
640
641 err = -EBUSY;
642 rcu_read_unlock();
643
644 goto unlock;
645
646 case NL80211_IFTYPE_MESH_POINT:
647 case NL80211_IFTYPE_AP:
648 if ((vif->type == NL80211_IFTYPE_STATION) ||
649 (vif->type == NL80211_IFTYPE_AP) ||
650 (vif->type == NL80211_IFTYPE_MESH_POINT))
651 break;
652
653 err = -EBUSY;
654 rcu_read_unlock();
655 goto unlock;
656
657 default:
658 rcu_read_unlock();
659 goto unlock;
660 }
661 }
662
663 vif_id = bitmap_find_free_region(&ar->vif_bitmap, ar->fw.vif_num, 0);
664
665 if (vif_id < 0) {
666 rcu_read_unlock();
667
668 err = -ENOSPC;
669 goto unlock;
670 }
671
672 BUG_ON(ar->vif_priv[vif_id].id != vif_id);
673
674 vif_priv->active = true;
675 vif_priv->id = vif_id;
676 vif_priv->enable_beacon = false;
677 ar->vifs++;
678 if (old_main) {
679 /* We end up in here, if the main interface is being replaced.
680 * Put the new main interface at the HEAD of the list and the
681 * previous interface will automatically become second in line.
682 */
683 list_add_rcu(&vif_priv->list, &ar->vif_list);
684 } else {
685 /* Add the new interface. If the list is empty, it will become the
686 * main interface, otherwise it will be a slave.
687 */
688 list_add_tail_rcu(&vif_priv->list, &ar->vif_list);
689 }
690 rcu_assign_pointer(ar->vif_priv[vif_id].vif, vif);
691
692 init:
693 main_vif = carl9170_get_main_vif(ar);
694
695 if (main_vif == vif) {
696 rcu_assign_pointer(ar->beacon_iter, vif_priv);
697 rcu_read_unlock();
698
699 if (old_main) {
700 struct carl9170_vif_info *old_main_priv =
701 (void *) old_main->drv_priv;
702 /* downgrade old main intf to slave intf.
703 * NOTE: We are no longer under rcu_read_lock.
704 * But we are still holding ar->mutex, so the
705 * vif data [id, addr] is safe.
706 */
707 err = carl9170_mod_virtual_mac(ar, old_main_priv->id,
708 old_main->addr);
709 if (err)
710 goto unlock;
711 }
712
713 err = carl9170_init_interface(ar, vif);
714 if (err)
715 goto unlock;
716 } else {
717 rcu_read_unlock();
718 err = carl9170_mod_virtual_mac(ar, vif_id, vif->addr);
719
720 if (err)
721 goto unlock;
722 }
723
724 if (ar->fw.tx_seq_table) {
725 err = carl9170_write_reg(ar, ar->fw.tx_seq_table + vif_id * 4,
726 0);
727 if (err)
728 goto unlock;
729 }
730
731 unlock:
732 if (err && (vif_id >= 0)) {
733 vif_priv->active = false;
734 bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
735 ar->vifs--;
736 RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
737 list_del_rcu(&vif_priv->list);
738 mutex_unlock(&ar->mutex);
739 synchronize_rcu();
740 } else {
741 if (ar->vifs > 1)
742 ar->ps.off_override |= PS_OFF_VIF;
743
744 mutex_unlock(&ar->mutex);
745 }
746
747 return err;
748 }
749
750 static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
751 struct ieee80211_vif *vif)
752 {
753 struct carl9170_vif_info *vif_priv = (void *) vif->drv_priv;
754 struct ieee80211_vif *main_vif;
755 struct ar9170 *ar = hw->priv;
756 unsigned int id;
757
758 mutex_lock(&ar->mutex);
759
760 if (WARN_ON_ONCE(!vif_priv->active))
761 goto unlock;
762
763 ar->vifs--;
764
765 rcu_read_lock();
766 main_vif = carl9170_get_main_vif(ar);
767
768 id = vif_priv->id;
769
770 vif_priv->active = false;
771 WARN_ON(vif_priv->enable_beacon);
772 vif_priv->enable_beacon = false;
773 list_del_rcu(&vif_priv->list);
774 RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
775
776 if (vif == main_vif) {
777 rcu_read_unlock();
778
779 if (ar->vifs) {
780 WARN_ON(carl9170_init_interface(ar,
781 carl9170_get_main_vif(ar)));
782 } else {
783 carl9170_set_operating_mode(ar);
784 }
785 } else {
786 rcu_read_unlock();
787
788 WARN_ON(carl9170_mod_virtual_mac(ar, id, NULL));
789 }
790
791 carl9170_update_beacon(ar, false);
792 carl9170_flush_cab(ar, id);
793
794 spin_lock_bh(&ar->beacon_lock);
795 dev_kfree_skb_any(vif_priv->beacon);
796 vif_priv->beacon = NULL;
797 spin_unlock_bh(&ar->beacon_lock);
798
799 bitmap_release_region(&ar->vif_bitmap, id, 0);
800
801 carl9170_set_beacon_timers(ar);
802
803 if (ar->vifs == 1)
804 ar->ps.off_override &= ~PS_OFF_VIF;
805
806 unlock:
807 mutex_unlock(&ar->mutex);
808
809 synchronize_rcu();
810 }
811
812 void carl9170_ps_check(struct ar9170 *ar)
813 {
814 ieee80211_queue_work(ar->hw, &ar->ps_work);
815 }
816
817 /* caller must hold ar->mutex */
818 static int carl9170_ps_update(struct ar9170 *ar)
819 {
820 bool ps = false;
821 int err = 0;
822
823 if (!ar->ps.off_override)
824 ps = (ar->hw->conf.flags & IEEE80211_CONF_PS);
825
826 if (ps != ar->ps.state) {
827 err = carl9170_powersave(ar, ps);
828 if (err)
829 return err;
830
831 if (ar->ps.state && !ps) {
832 ar->ps.sleep_ms = jiffies_to_msecs(jiffies -
833 ar->ps.last_action);
834 }
835
836 if (ps)
837 ar->ps.last_slept = jiffies;
838
839 ar->ps.last_action = jiffies;
840 ar->ps.state = ps;
841 }
842
843 return 0;
844 }
845
846 static void carl9170_ps_work(struct work_struct *work)
847 {
848 struct ar9170 *ar = container_of(work, struct ar9170,
849 ps_work);
850 mutex_lock(&ar->mutex);
851 if (IS_STARTED(ar))
852 WARN_ON_ONCE(carl9170_ps_update(ar) != 0);
853 mutex_unlock(&ar->mutex);
854 }
855
856 static int carl9170_update_survey(struct ar9170 *ar, bool flush, bool noise)
857 {
858 int err;
859
860 if (noise) {
861 err = carl9170_get_noisefloor(ar);
862 if (err)
863 return err;
864 }
865
866 if (ar->fw.hw_counters) {
867 err = carl9170_collect_tally(ar);
868 if (err)
869 return err;
870 }
871
872 if (flush)
873 memset(&ar->tally, 0, sizeof(ar->tally));
874
875 return 0;
876 }
877
878 static void carl9170_stat_work(struct work_struct *work)
879 {
880 struct ar9170 *ar = container_of(work, struct ar9170, stat_work.work);
881 int err;
882
883 mutex_lock(&ar->mutex);
884 err = carl9170_update_survey(ar, false, true);
885 mutex_unlock(&ar->mutex);
886
887 if (err)
888 return;
889
890 ieee80211_queue_delayed_work(ar->hw, &ar->stat_work,
891 round_jiffies(msecs_to_jiffies(CARL9170_STAT_WORK)));
892 }
893
894 static int carl9170_op_config(struct ieee80211_hw *hw, u32 changed)
895 {
896 struct ar9170 *ar = hw->priv;
897 int err = 0;
898
899 mutex_lock(&ar->mutex);
900 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) {
901 /* TODO */
902 err = 0;
903 }
904
905 if (changed & IEEE80211_CONF_CHANGE_PS) {
906 err = carl9170_ps_update(ar);
907 if (err)
908 goto out;
909 }
910
911 if (changed & IEEE80211_CONF_CHANGE_SMPS) {
912 /* TODO */
913 err = 0;
914 }
915
916 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
917 enum nl80211_channel_type channel_type =
918 cfg80211_get_chandef_type(&hw->conf.chandef);
919
920 /* adjust slot time for 5 GHz */
921 err = carl9170_set_slot_time(ar);
922 if (err)
923 goto out;
924
925 err = carl9170_update_survey(ar, true, false);
926 if (err)
927 goto out;
928
929 err = carl9170_set_channel(ar, hw->conf.chandef.chan,
930 channel_type);
931 if (err)
932 goto out;
933
934 err = carl9170_update_survey(ar, false, true);
935 if (err)
936 goto out;
937
938 err = carl9170_set_dyn_sifs_ack(ar);
939 if (err)
940 goto out;
941
942 err = carl9170_set_rts_cts_rate(ar);
943 if (err)
944 goto out;
945 }
946
947 if (changed & IEEE80211_CONF_CHANGE_POWER) {
948 err = carl9170_set_mac_tpc(ar, ar->hw->conf.chandef.chan);
949 if (err)
950 goto out;
951 }
952
953 out:
954 mutex_unlock(&ar->mutex);
955 return err;
956 }
957
958 static u64 carl9170_op_prepare_multicast(struct ieee80211_hw *hw,
959 struct netdev_hw_addr_list *mc_list)
960 {
961 struct netdev_hw_addr *ha;
962 u64 mchash;
963
964 /* always get broadcast frames */
965 mchash = 1ULL << (0xff >> 2);
966
967 netdev_hw_addr_list_for_each(ha, mc_list)
968 mchash |= 1ULL << (ha->addr[5] >> 2);
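/*
 * Each address is mapped to one bit of the 64-bit hash by the top six
 * bits of its last octet. The broadcast address ff:ff:ff:ff:ff:ff thus
 * lands on bit 0xff >> 2 = 63, which is pre-set above.
 */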
969
970 return mchash;
971 }
972
973 static void carl9170_op_configure_filter(struct ieee80211_hw *hw,
974 unsigned int changed_flags,
975 unsigned int *new_flags,
976 u64 multicast)
977 {
978 struct ar9170 *ar = hw->priv;
979
980 /* mask supported flags */
981 *new_flags &= FIF_ALLMULTI | ar->rx_filter_caps;
982
983 if (!IS_ACCEPTING_CMD(ar))
984 return;
985
986 mutex_lock(&ar->mutex);
987
988 ar->filter_state = *new_flags;
989 /*
990 * We can support more by setting the sniffer bit and
991 * then checking the error flags, later.
992 */
993
994 if (*new_flags & FIF_ALLMULTI)
995 multicast = ~0ULL;
996
997 if (multicast != ar->cur_mc_hash)
998 WARN_ON(carl9170_update_multicast(ar, multicast));
999
1000 if (changed_flags & FIF_OTHER_BSS) {
1001 ar->sniffer_enabled = !!(*new_flags & FIF_OTHER_BSS);
1002
1003 WARN_ON(carl9170_set_operating_mode(ar));
1004 }
1005
1006 if (ar->fw.rx_filter && changed_flags & ar->rx_filter_caps) {
1007 u32 rx_filter = 0;
1008
1009 if (!ar->fw.ba_filter)
1010 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1011
1012 if (!(*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL)))
1013 rx_filter |= CARL9170_RX_FILTER_BAD;
1014
1015 if (!(*new_flags & FIF_CONTROL))
1016 rx_filter |= CARL9170_RX_FILTER_CTL_OTHER;
1017
1018 if (!(*new_flags & FIF_PSPOLL))
1019 rx_filter |= CARL9170_RX_FILTER_CTL_PSPOLL;
1020
1021 if (!(*new_flags & FIF_OTHER_BSS)) {
1022 rx_filter |= CARL9170_RX_FILTER_OTHER_RA;
1023 rx_filter |= CARL9170_RX_FILTER_DECRY_FAIL;
1024 }
1025
1026 WARN_ON(carl9170_rx_filter(ar, rx_filter));
1027 }
1028
1029 mutex_unlock(&ar->mutex);
1030 }
1031
1032
1033 static void carl9170_op_bss_info_changed(struct ieee80211_hw *hw,
1034 struct ieee80211_vif *vif,
1035 struct ieee80211_bss_conf *bss_conf,
1036 u32 changed)
1037 {
1038 struct ar9170 *ar = hw->priv;
1039 struct ath_common *common = &ar->common;
1040 int err = 0;
1041 struct carl9170_vif_info *vif_priv;
1042 struct ieee80211_vif *main_vif;
1043
1044 mutex_lock(&ar->mutex);
1045 vif_priv = (void *) vif->drv_priv;
1046 main_vif = carl9170_get_main_vif(ar);
1047 if (WARN_ON(!main_vif))
1048 goto out;
1049
1050 if (changed & BSS_CHANGED_BEACON_ENABLED) {
1051 struct carl9170_vif_info *iter;
1052 int i = 0;
1053
1054 vif_priv->enable_beacon = bss_conf->enable_beacon;
1055 rcu_read_lock();
1056 list_for_each_entry_rcu(iter, &ar->vif_list, list) {
1057 if (iter->active && iter->enable_beacon)
1058 i++;
1059
1060 }
1061 rcu_read_unlock();
1062
1063 ar->beacon_enabled = i;
1064 }
1065
1066 if (changed & BSS_CHANGED_BEACON) {
1067 err = carl9170_update_beacon(ar, false);
1068 if (err)
1069 goto out;
1070 }
1071
1072 if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON |
1073 BSS_CHANGED_BEACON_INT)) {
1074
1075 if (main_vif != vif) {
1076 bss_conf->beacon_int = main_vif->bss_conf.beacon_int;
1077 bss_conf->dtim_period = main_vif->bss_conf.dtim_period;
1078 }
1079
1080 /*
1081 * Therefore a hard limit for the broadcast traffic should
1082 * prevent false alarms.
1083 */
1084 if (vif->type != NL80211_IFTYPE_STATION &&
1085 (bss_conf->beacon_int * bss_conf->dtim_period >=
1086 (CARL9170_QUEUE_STUCK_TIMEOUT / 2))) {
1087 err = -EINVAL;
1088 goto out;
1089 }
1090
1091 err = carl9170_set_beacon_timers(ar);
1092 if (err)
1093 goto out;
1094 }
1095
1096 if (changed & BSS_CHANGED_HT) {
1097 /* TODO */
1098 err = 0;
1099 if (err)
1100 goto out;
1101 }
1102
1103 if (main_vif != vif)
1104 goto out;
1105
1106 /*
1107 * The following settings can only be changed by the
1108 * master interface.
1109 */
1110
1111 if (changed & BSS_CHANGED_BSSID) {
1112 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1113 err = carl9170_set_operating_mode(ar);
1114 if (err)
1115 goto out;
1116 }
1117
1118 if (changed & BSS_CHANGED_ASSOC) {
1119 ar->common.curaid = bss_conf->aid;
1120 err = carl9170_set_beacon_timers(ar);
1121 if (err)
1122 goto out;
1123 }
1124
1125 if (changed & BSS_CHANGED_ERP_SLOT) {
1126 err = carl9170_set_slot_time(ar);
1127 if (err)
1128 goto out;
1129 }
1130
1131 if (changed & BSS_CHANGED_BASIC_RATES) {
1132 err = carl9170_set_mac_rates(ar);
1133 if (err)
1134 goto out;
1135 }
1136
1137 out:
1138 WARN_ON_ONCE(err && IS_STARTED(ar));
1139 mutex_unlock(&ar->mutex);
1140 }
1141
1142 static u64 carl9170_op_get_tsf(struct ieee80211_hw *hw,
1143 struct ieee80211_vif *vif)
1144 {
1145 struct ar9170 *ar = hw->priv;
1146 struct carl9170_tsf_rsp tsf;
1147 int err;
1148
1149 mutex_lock(&ar->mutex);
1150 err = carl9170_exec_cmd(ar, CARL9170_CMD_READ_TSF,
1151 0, NULL, sizeof(tsf), &tsf);
1152 mutex_unlock(&ar->mutex);
1153 if (WARN_ON(err))
1154 return 0;
1155
1156 return le64_to_cpu(tsf.tsf_64);
1157 }
1158
1159 static int carl9170_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1160 struct ieee80211_vif *vif,
1161 struct ieee80211_sta *sta,
1162 struct ieee80211_key_conf *key)
1163 {
1164 struct ar9170 *ar = hw->priv;
1165 int err = 0, i;
1166 u8 ktype;
1167
1168 if (ar->disable_offload || !vif)
1169 return -EOPNOTSUPP;
1170
1171 /* Fall back to software encryption whenever the driver is connected
1172 * to more than one network.
1173 *
1174 * This is very unfortunate, because some machines cannot handle
1175 * the high throughput of 802.11n networks.
1176 */
1177
1178 if (!is_main_vif(ar, vif)) {
1179 mutex_lock(&ar->mutex);
1180 goto err_softw;
1181 }
1182
1183 /*
1184 * While the hardware supports a *catch-all* key for offloading
1185 * group-key en-/de-cryption, the way the hardware decides
1186 * which keyId maps to which key remains a mystery...
1187 */
1188 if ((vif->type != NL80211_IFTYPE_STATION &&
1189 vif->type != NL80211_IFTYPE_ADHOC) &&
1190 !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1191 return -EOPNOTSUPP;
1192
1193 switch (key->cipher) {
1194 case WLAN_CIPHER_SUITE_WEP40:
1195 ktype = AR9170_ENC_ALG_WEP64;
1196 break;
1197 case WLAN_CIPHER_SUITE_WEP104:
1198 ktype = AR9170_ENC_ALG_WEP128;
1199 break;
1200 case WLAN_CIPHER_SUITE_TKIP:
1201 ktype = AR9170_ENC_ALG_TKIP;
1202 break;
1203 case WLAN_CIPHER_SUITE_CCMP:
1204 ktype = AR9170_ENC_ALG_AESCCMP;
1205 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
1206 break;
1207 default:
1208 return -EOPNOTSUPP;
1209 }
1210
1211 mutex_lock(&ar->mutex);
1212 if (cmd == SET_KEY) {
1213 if (!IS_STARTED(ar)) {
1214 err = -EOPNOTSUPP;
1215 goto out;
1216 }
1217
1218 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
1219 sta = NULL;
1220
1221 i = 64 + key->keyidx;
1222 } else {
1223 for (i = 0; i < 64; i++)
1224 if (!(ar->usedkeys & BIT(i)))
1225 break;
1226 if (i == 64)
1227 goto err_softw;
1228 }
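/* Key-cache slot selection: pairwise keys take the first free slot out
 * of 0..63 (tracked in ar->usedkeys); group keys use slot 64 + keyidx.
 */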
1229
1230 key->hw_key_idx = i;
1231
1232 err = carl9170_upload_key(ar, i, sta ? sta->addr : NULL,
1233 ktype, 0, key->key,
1234 min_t(u8, 16, key->keylen));
1235 if (err)
1236 goto out;
1237
1238 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1239 err = carl9170_upload_key(ar, i, sta ? sta->addr :
1240 NULL, ktype, 1,
1241 key->key + 16, 16);
1242 if (err)
1243 goto out;
1244
1245 /*
1246 * the hardware is not capable of generating the MMIC
1247 * for fragmented frames!
1248 */
1249 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1250 }
1251
1252 if (i < 64)
1253 ar->usedkeys |= BIT(i);
1254
1255 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1256 } else {
1257 if (!IS_STARTED(ar)) {
1258 /* The device is gone... together with the key ;-) */
1259 err = 0;
1260 goto out;
1261 }
1262
1263 if (key->hw_key_idx < 64) {
1264 ar->usedkeys &= ~BIT(key->hw_key_idx);
1265 } else {
1266 err = carl9170_upload_key(ar, key->hw_key_idx, NULL,
1267 AR9170_ENC_ALG_NONE, 0,
1268 NULL, 0);
1269 if (err)
1270 goto out;
1271
1272 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
1273 err = carl9170_upload_key(ar, key->hw_key_idx,
1274 NULL,
1275 AR9170_ENC_ALG_NONE,
1276 1, NULL, 0);
1277 if (err)
1278 goto out;
1279 }
1280
1281 }
1282
1283 err = carl9170_disable_key(ar, key->hw_key_idx);
1284 if (err)
1285 goto out;
1286 }
1287
1288 out:
1289 mutex_unlock(&ar->mutex);
1290 return err;
1291
1292 err_softw:
1293 if (!ar->rx_software_decryption) {
1294 ar->rx_software_decryption = true;
1295 carl9170_set_operating_mode(ar);
1296 }
1297 mutex_unlock(&ar->mutex);
1298 return -ENOSPC;
1299 }
1300
1301 static int carl9170_op_sta_add(struct ieee80211_hw *hw,
1302 struct ieee80211_vif *vif,
1303 struct ieee80211_sta *sta)
1304 {
1305 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1306 unsigned int i;
1307
1308 atomic_set(&sta_info->pending_frames, 0);
1309
1310 if (sta->deflink.ht_cap.ht_supported) {
1311 if (sta->deflink.ht_cap.ampdu_density > 6) {
1312 /*
1313 * The HW does not support a 16 us AMPDU density.
1314 * No HT-Xmit for this station.
1315 */
1316
1317 return 0;
1318 }
1319
1320 for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++)
1321 RCU_INIT_POINTER(sta_info->agg[i], NULL);
1322
1323 sta_info->ampdu_max_len = 1 << (3 + sta->deflink.ht_cap.ampdu_factor);
1324 sta_info->ht_sta = true;
1325 }
1326
1327 return 0;
1328 }
1329
1330 static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
1331 struct ieee80211_vif *vif,
1332 struct ieee80211_sta *sta)
1333 {
1334 struct ar9170 *ar = hw->priv;
1335 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1336 unsigned int i;
1337 bool cleanup = false;
1338
1339 if (sta->deflink.ht_cap.ht_supported) {
1340
1341 sta_info->ht_sta = false;
1342
1343 rcu_read_lock();
1344 for (i = 0; i < ARRAY_SIZE(sta_info->agg); i++) {
1345 struct carl9170_sta_tid *tid_info;
1346
1347 tid_info = rcu_dereference(sta_info->agg[i]);
1348 RCU_INIT_POINTER(sta_info->agg[i], NULL);
1349
1350 if (!tid_info)
1351 continue;
1352
1353 spin_lock_bh(&ar->tx_ampdu_list_lock);
1354 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1355 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1356 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1357 cleanup = true;
1358 }
1359 rcu_read_unlock();
1360
1361 if (cleanup)
1362 carl9170_ampdu_gc(ar);
1363 }
1364
1365 return 0;
1366 }
1367
1368 static int carl9170_op_conf_tx(struct ieee80211_hw *hw,
1369 struct ieee80211_vif *vif, u16 queue,
1370 const struct ieee80211_tx_queue_params *param)
1371 {
1372 struct ar9170 *ar = hw->priv;
1373 int ret;
1374
1375 mutex_lock(&ar->mutex);
1376 memcpy(&ar->edcf[ar9170_qmap(queue)], param, sizeof(*param));
1377 ret = carl9170_set_qos(ar);
1378 mutex_unlock(&ar->mutex);
1379 return ret;
1380 }
1381
1382 static void carl9170_ampdu_work(struct work_struct *work)
1383 {
1384 struct ar9170 *ar = container_of(work, struct ar9170,
1385 ampdu_work);
1386
1387 if (!IS_STARTED(ar))
1388 return;
1389
1390 mutex_lock(&ar->mutex);
1391 carl9170_ampdu_gc(ar);
1392 mutex_unlock(&ar->mutex);
1393 }
1394
1395 static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
1396 struct ieee80211_vif *vif,
1397 struct ieee80211_ampdu_params *params)
1398 {
1399 struct ieee80211_sta *sta = params->sta;
1400 enum ieee80211_ampdu_mlme_action action = params->action;
1401 u16 tid = params->tid;
1402 u16 *ssn = &params->ssn;
1403 struct ar9170 *ar = hw->priv;
1404 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1405 struct carl9170_sta_tid *tid_info;
1406
1407 if (modparam_noht)
1408 return -EOPNOTSUPP;
1409
1410 switch (action) {
1411 case IEEE80211_AMPDU_TX_START:
1412 if (!sta_info->ht_sta)
1413 return -EOPNOTSUPP;
1414
1415 tid_info = kzalloc(sizeof(struct carl9170_sta_tid),
1416 GFP_ATOMIC);
1417 if (!tid_info)
1418 return -ENOMEM;
1419
1420 tid_info->hsn = tid_info->bsn = tid_info->snx = (*ssn);
1421 tid_info->state = CARL9170_TID_STATE_PROGRESS;
1422 tid_info->tid = tid;
1423 tid_info->max = sta_info->ampdu_max_len;
1424 tid_info->sta = sta;
1425 tid_info->vif = vif;
1426
1427 INIT_LIST_HEAD(&tid_info->list);
1428 INIT_LIST_HEAD(&tid_info->tmp_list);
1429 skb_queue_head_init(&tid_info->queue);
1430 spin_lock_init(&tid_info->lock);
1431
1432 spin_lock_bh(&ar->tx_ampdu_list_lock);
1433 ar->tx_ampdu_list_len++;
1434 list_add_tail_rcu(&tid_info->list, &ar->tx_ampdu_list);
1435 rcu_assign_pointer(sta_info->agg[tid], tid_info);
1436 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1437
1438 return IEEE80211_AMPDU_TX_START_IMMEDIATE;
1439
1440 case IEEE80211_AMPDU_TX_STOP_CONT:
1441 case IEEE80211_AMPDU_TX_STOP_FLUSH:
1442 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
1443 rcu_read_lock();
1444 tid_info = rcu_dereference(sta_info->agg[tid]);
1445 if (tid_info) {
1446 spin_lock_bh(&ar->tx_ampdu_list_lock);
1447 if (tid_info->state > CARL9170_TID_STATE_SHUTDOWN)
1448 tid_info->state = CARL9170_TID_STATE_SHUTDOWN;
1449 spin_unlock_bh(&ar->tx_ampdu_list_lock);
1450 }
1451
1452 RCU_INIT_POINTER(sta_info->agg[tid], NULL);
1453 rcu_read_unlock();
1454
1455 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1456 ieee80211_queue_work(ar->hw, &ar->ampdu_work);
1457 break;
1458
1459 case IEEE80211_AMPDU_TX_OPERATIONAL:
1460 rcu_read_lock();
1461 tid_info = rcu_dereference(sta_info->agg[tid]);
1462
1463 sta_info->stats[tid].clear = true;
1464 sta_info->stats[tid].req = false;
1465
1466 if (tid_info) {
1467 bitmap_zero(tid_info->bitmap, CARL9170_BAW_SIZE);
1468 tid_info->state = CARL9170_TID_STATE_IDLE;
1469 }
1470 rcu_read_unlock();
1471
1472 if (WARN_ON_ONCE(!tid_info))
1473 return -EFAULT;
1474
1475 break;
1476
1477 case IEEE80211_AMPDU_RX_START:
1478 case IEEE80211_AMPDU_RX_STOP:
1479 /* Handled by hardware */
1480 break;
1481
1482 default:
1483 return -EOPNOTSUPP;
1484 }
1485
1486 return 0;
1487 }
1488
1489 #ifdef CONFIG_CARL9170_WPC
1490 static int carl9170_register_wps_button(struct ar9170 *ar)
1491 {
1492 struct input_dev *input;
1493 int err;
1494
1495 if (!(ar->features & CARL9170_WPS_BUTTON))
1496 return 0;
1497
1498 input = input_allocate_device();
1499 if (!input)
1500 return -ENOMEM;
1501
1502 snprintf(ar->wps.name, sizeof(ar->wps.name), "%s WPS Button",
1503 wiphy_name(ar->hw->wiphy));
1504
1505 snprintf(ar->wps.phys, sizeof(ar->wps.phys),
1506 "ieee80211/%s/input0", wiphy_name(ar->hw->wiphy));
1507
1508 input->name = ar->wps.name;
1509 input->phys = ar->wps.phys;
1510 input->id.bustype = BUS_USB;
1511 input->dev.parent = &ar->hw->wiphy->dev;
1512
1513 input_set_capability(input, EV_KEY, KEY_WPS_BUTTON);
1514
1515 err = input_register_device(input);
1516 if (err) {
1517 input_free_device(input);
1518 return err;
1519 }
1520
1521 ar->wps.pbc = input;
1522 return 0;
1523 }
1524 #endif /* CONFIG_CARL9170_WPC */
1525
1526 #ifdef CONFIG_CARL9170_HWRNG
1527 static int carl9170_rng_get(struct ar9170 *ar)
1528 {
1529
1530 #define RW (CARL9170_MAX_CMD_PAYLOAD_LEN / sizeof(u32))
1531 #define RB (CARL9170_MAX_CMD_PAYLOAD_LEN)
1532
1533 static const __le32 rng_load[RW] = {
1534 [0 ... (RW - 1)] = cpu_to_le32(AR9170_RAND_REG_NUM)};
1535
1536 u32 buf[RW];
1537
1538 unsigned int i, off = 0, transfer, count;
1539 int err;
1540
1541 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_PAYLOAD_LEN);
1542
1543 if (!IS_ACCEPTING_CMD(ar) || !ar->rng.initialized)
1544 return -EAGAIN;
1545
1546 count = ARRAY_SIZE(ar->rng.cache);
1547 while (count) {
1548 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1549 RB, (u8 *) rng_load,
1550 RB, (u8 *) buf);
1551 if (err)
1552 return err;
1553
1554 transfer = min_t(unsigned int, count, RW);
1555 for (i = 0; i < transfer; i++)
1556 ar->rng.cache[off + i] = buf[i];
1557
1558 off += transfer;
1559 count -= transfer;
1560 }
1561
1562 ar->rng.cache_idx = 0;
1563
1564 #undef RW
1565 #undef RB
1566 return 0;
1567 }
1568
1569 static int carl9170_rng_read(struct hwrng *rng, u32 *data)
1570 {
1571 struct ar9170 *ar = (struct ar9170 *)rng->priv;
1572 int ret = -EIO;
1573
1574 mutex_lock(&ar->mutex);
1575 if (ar->rng.cache_idx >= ARRAY_SIZE(ar->rng.cache)) {
1576 ret = carl9170_rng_get(ar);
1577 if (ret) {
1578 mutex_unlock(&ar->mutex);
1579 return ret;
1580 }
1581 }
1582
1583 *data = ar->rng.cache[ar->rng.cache_idx++];
1584 mutex_unlock(&ar->mutex);
1585
1586 return sizeof(u16);
1587 }
1588
1589 static void carl9170_unregister_hwrng(struct ar9170 *ar)
1590 {
1591 if (ar->rng.initialized) {
1592 hwrng_unregister(&ar->rng.rng);
1593 ar->rng.initialized = false;
1594 }
1595 }
1596
1597 static int carl9170_register_hwrng(struct ar9170 *ar)
1598 {
1599 int err;
1600
1601 snprintf(ar->rng.name, ARRAY_SIZE(ar->rng.name),
1602 "%s_%s", KBUILD_MODNAME, wiphy_name(ar->hw->wiphy));
1603 ar->rng.rng.name = ar->rng.name;
1604 ar->rng.rng.data_read = carl9170_rng_read;
1605 ar->rng.rng.priv = (unsigned long)ar;
1606
1607 if (WARN_ON(ar->rng.initialized))
1608 return -EALREADY;
1609
1610 err = hwrng_register(&ar->rng.rng);
1611 if (err) {
1612 dev_err(&ar->udev->dev, "Failed to register the random "
1613 "number generator (%d)\n", err);
1614 return err;
1615 }
1616
1617 ar->rng.initialized = true;
1618
1619 err = carl9170_rng_get(ar);
1620 if (err) {
1621 carl9170_unregister_hwrng(ar);
1622 return err;
1623 }
1624
1625 return 0;
1626 }
1627 #endif /* CONFIG_CARL9170_HWRNG */
1628
1629 static int carl9170_op_get_survey(struct ieee80211_hw *hw, int idx,
1630 struct survey_info *survey)
1631 {
1632 struct ar9170 *ar = hw->priv;
1633 struct ieee80211_channel *chan;
1634 struct ieee80211_supported_band *band;
1635 int err, b, i;
1636
1637 chan = ar->channel;
1638 if (!chan)
1639 return -ENODEV;
1640
1641 if (idx == chan->hw_value) {
1642 mutex_lock(&ar->mutex);
1643 err = carl9170_update_survey(ar, false, true);
1644 mutex_unlock(&ar->mutex);
1645 if (err)
1646 return err;
1647 }
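/*
 * The survey index idx is a channel hw_value; it is used both to look up
 * the matching channel below and as the index into ar->survey[].
 */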
1648
1649 for (b = 0; b < NUM_NL80211_BANDS; b++) {
1650 band = ar->hw->wiphy->bands[b];
1651
1652 if (!band)
1653 continue;
1654
1655 for (i = 0; i < band->n_channels; i++) {
1656 if (band->channels[i].hw_value == idx) {
1657 chan = &band->channels[i];
1658 goto found;
1659 }
1660 }
1661 }
1662 return -ENOENT;
1663
1664 found:
1665 memcpy(survey, &ar->survey[idx], sizeof(*survey));
1666
1667 survey->channel = chan;
1668 survey->filled = SURVEY_INFO_NOISE_DBM;
1669
1670 if (ar->channel == chan)
1671 survey->filled |= SURVEY_INFO_IN_USE;
1672
1673 if (ar->fw.hw_counters) {
1674 survey->filled |= SURVEY_INFO_TIME |
1675 SURVEY_INFO_TIME_BUSY |
1676 SURVEY_INFO_TIME_TX;
1677 }
1678
1679 return 0;
1680 }
1681
1682 static void carl9170_op_flush(struct ieee80211_hw *hw,
1683 struct ieee80211_vif *vif,
1684 u32 queues, bool drop)
1685 {
1686 struct ar9170 *ar = hw->priv;
1687 unsigned int vid;
1688
1689 mutex_lock(&ar->mutex);
1690 for_each_set_bit(vid, &ar->vif_bitmap, ar->fw.vif_num)
1691 carl9170_flush_cab(ar, vid);
1692
1693 carl9170_flush(ar, drop);
1694 mutex_unlock(&ar->mutex);
1695 }
1696
1697 static int carl9170_op_get_stats(struct ieee80211_hw *hw,
1698 struct ieee80211_low_level_stats *stats)
1699 {
1700 struct ar9170 *ar = hw->priv;
1701
1702 memset(stats, 0, sizeof(*stats));
1703 stats->dot11ACKFailureCount = ar->tx_ack_failures;
1704 stats->dot11FCSErrorCount = ar->tx_fcs_errors;
1705 return 0;
1706 }
1707
1708 static void carl9170_op_sta_notify(struct ieee80211_hw *hw,
1709 struct ieee80211_vif *vif,
1710 enum sta_notify_cmd cmd,
1711 struct ieee80211_sta *sta)
1712 {
1713 struct carl9170_sta_info *sta_info = (void *) sta->drv_priv;
1714
1715 switch (cmd) {
1716 case STA_NOTIFY_SLEEP:
1717 sta_info->sleeping = true;
1718 if (atomic_read(&sta_info->pending_frames))
1719 ieee80211_sta_block_awake(hw, sta, true);
1720 break;
1721
1722 case STA_NOTIFY_AWAKE:
1723 sta_info->sleeping = false;
1724 break;
1725 }
1726 }
1727
1728 static bool carl9170_tx_frames_pending(struct ieee80211_hw *hw)
1729 {
1730 struct ar9170 *ar = hw->priv;
1731
1732 return !!atomic_read(&ar->tx_total_queued);
1733 }
1734
1735 static const struct ieee80211_ops carl9170_ops = {
1736 .start = carl9170_op_start,
1737 .stop = carl9170_op_stop,
1738 .tx = carl9170_op_tx,
1739 .flush = carl9170_op_flush,
1740 .add_interface = carl9170_op_add_interface,
1741 .remove_interface = carl9170_op_remove_interface,
1742 .config = carl9170_op_config,
1743 .prepare_multicast = carl9170_op_prepare_multicast,
1744 .configure_filter = carl9170_op_configure_filter,
1745 .conf_tx = carl9170_op_conf_tx,
1746 .bss_info_changed = carl9170_op_bss_info_changed,
1747 .get_tsf = carl9170_op_get_tsf,
1748 .set_key = carl9170_op_set_key,
1749 .sta_add = carl9170_op_sta_add,
1750 .sta_remove = carl9170_op_sta_remove,
1751 .sta_notify = carl9170_op_sta_notify,
1752 .get_survey = carl9170_op_get_survey,
1753 .get_stats = carl9170_op_get_stats,
1754 .ampdu_action = carl9170_op_ampdu_action,
1755 .tx_frames_pending = carl9170_tx_frames_pending,
1756 };
1757
1758 void *carl9170_alloc(size_t priv_size)
1759 {
1760 struct ieee80211_hw *hw;
1761 struct ar9170 *ar;
1762 struct sk_buff *skb;
1763 int i;
1764
1765 /*
1766 * this buffer is used for rx stream reconstruction.
1767 * Under heavy load this device (or the transport layer?)
1768 * tends to split the streams into separate rx descriptors.
1769 */
1770
1771 skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
1772 if (!skb)
1773 goto err_nomem;
1774
1775 hw = ieee80211_alloc_hw(priv_size, &carl9170_ops);
1776 if (!hw)
1777 goto err_nomem;
1778
1779 ar = hw->priv;
1780 ar->hw = hw;
1781 ar->rx_failover = skb;
1782
1783 memset(&ar->rx_plcp, 0, sizeof(struct ar9170_rx_head));
1784 ar->rx_has_plcp = false;
1785
1786 /*
1787 * Here's a hidden pitfall!
1788 *
1789 * All 4 AC queues work perfectly well under _legacy_ operation.
1790 * However as soon as aggregation is enabled, the traffic flow
1791 * gets very bumpy. Therefore we have to _switch_ to a
1792 * software AC with a single HW queue.
1793 */
1794 hw->queues = __AR9170_NUM_TXQ;
1795
1796 mutex_init(&ar->mutex);
1797 spin_lock_init(&ar->beacon_lock);
1798 spin_lock_init(&ar->cmd_lock);
1799 spin_lock_init(&ar->tx_stats_lock);
1800 spin_lock_init(&ar->tx_ampdu_list_lock);
1801 spin_lock_init(&ar->mem_lock);
1802 spin_lock_init(&ar->state_lock);
1803 atomic_set(&ar->pending_restarts, 0);
1804 ar->vifs = 0;
1805 for (i = 0; i < ar->hw->queues; i++) {
1806 skb_queue_head_init(&ar->tx_status[i]);
1807 skb_queue_head_init(&ar->tx_pending[i]);
1808
1809 INIT_LIST_HEAD(&ar->bar_list[i]);
1810 spin_lock_init(&ar->bar_list_lock[i]);
1811 }
1812 INIT_WORK(&ar->ps_work, carl9170_ps_work);
1813 INIT_WORK(&ar->ping_work, carl9170_ping_work);
1814 INIT_WORK(&ar->restart_work, carl9170_restart_work);
1815 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1816 INIT_DELAYED_WORK(&ar->stat_work, carl9170_stat_work);
1817 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
1818 INIT_LIST_HEAD(&ar->tx_ampdu_list);
1819 rcu_assign_pointer(ar->tx_ampdu_iter,
1820 (struct carl9170_sta_tid *) &ar->tx_ampdu_list);
1821
1822 bitmap_zero(&ar->vif_bitmap, ar->fw.vif_num);
1823 INIT_LIST_HEAD(&ar->vif_list);
1824 init_completion(&ar->tx_flush);
1825
1826 /* firmware decides which modes we support */
1827 hw->wiphy->interface_modes = 0;
1828
1829 ieee80211_hw_set(hw, RX_INCLUDES_FCS);
1830 ieee80211_hw_set(hw, MFP_CAPABLE);
1831 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
1832 ieee80211_hw_set(hw, SUPPORTS_PS);
1833 ieee80211_hw_set(hw, PS_NULLFUNC_STACK);
1834 ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC);
1835 ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
1836 ieee80211_hw_set(hw, SIGNAL_DBM);
1837 ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES);
1838
1839 if (!modparam_noht) {
1840 /*
1841 * see the comment above for why we allow the user
1842 * to disable HT via a module parameter.
1843 */
1844 ieee80211_hw_set(hw, AMPDU_AGGREGATION);
1845 }
1846
1847 hw->extra_tx_headroom = sizeof(struct _carl9170_tx_superframe);
1848 hw->sta_data_size = sizeof(struct carl9170_sta_info);
1849 hw->vif_data_size = sizeof(struct carl9170_vif_info);
1850
1851 hw->max_rates = CARL9170_TX_MAX_RATES;
1852 hw->max_rate_tries = CARL9170_TX_USER_RATE_TRIES;
1853
1854 for (i = 0; i < ARRAY_SIZE(ar->noise); i++)
1855 ar->noise[i] = -95; /* ATH_DEFAULT_NOISE_FLOOR */
1856
1857 wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
1858
1859 return ar;
1860
1861 err_nomem:
1862 kfree_skb(skb);
1863 return ERR_PTR(-ENOMEM);
1864 }
1865
1866 static int carl9170_read_eeprom(struct ar9170 *ar)
1867 {
1868 #define RW 8 /* number of words to read at once */
1869 #define RB (sizeof(u32) * RW)
1870 u8 *eeprom = (void *)&ar->eeprom;
1871 __le32 offsets[RW];
1872 int i, j, err;
1873
1874 BUILD_BUG_ON(sizeof(ar->eeprom) & 3);
1875
1876 BUILD_BUG_ON(RB > CARL9170_MAX_CMD_LEN - 4);
1877 #ifndef __CHECKER__
1878 /* don't want to handle trailing remains */
1879 BUILD_BUG_ON(sizeof(ar->eeprom) % RB);
1880 #endif
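/*
 * The EEPROM image is fetched in chunks: each CARL9170_CMD_RREG command
 * below carries RW (= 8) little-endian register offsets and returns the
 * corresponding eight 32-bit words, starting at AR9170_EEPROM_START + RB * i.
 */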
1881
1882 for (i = 0; i < sizeof(ar->eeprom) / RB; i++) {
1883 for (j = 0; j < RW; j++)
1884 offsets[j] = cpu_to_le32(AR9170_EEPROM_START +
1885 RB * i + 4 * j);
1886
1887 err = carl9170_exec_cmd(ar, CARL9170_CMD_RREG,
1888 RB, (u8 *) &offsets,
1889 RB, eeprom + RB * i);
1890 if (err)
1891 return err;
1892 }
1893
1894 #undef RW
1895 #undef RB
1896 return 0;
1897 }
1898
1899 static int carl9170_parse_eeprom(struct ar9170 *ar)
1900 {
1901 struct ath_regulatory *regulatory = &ar->common.regulatory;
1902 unsigned int rx_streams, tx_streams, tx_params = 0;
1903 int bands = 0;
1904 int chans = 0;
1905
1906 if (ar->eeprom.length == cpu_to_le16(0xffff))
1907 return -ENODATA;
1908
1909 rx_streams = hweight8(ar->eeprom.rx_mask);
1910 tx_streams = hweight8(ar->eeprom.tx_mask);
1911
1912 if (rx_streams != tx_streams) {
1913 tx_params = IEEE80211_HT_MCS_TX_RX_DIFF;
1914
1915 WARN_ON(!(tx_streams >= 1 && tx_streams <=
1916 IEEE80211_HT_MCS_TX_MAX_STREAMS));
1917
1918 tx_params |= (tx_streams - 1) <<
1919 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1920
1921 carl9170_band_2GHz.ht_cap.mcs.tx_params |= tx_params;
1922 carl9170_band_5GHz.ht_cap.mcs.tx_params |= tx_params;
1923 }
1924
1925 if (ar->eeprom.operating_flags & AR9170_OPFLAG_2GHZ) {
1926 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] =
1927 &carl9170_band_2GHz;
1928 chans += carl9170_band_2GHz.n_channels;
1929 bands++;
1930 }
1931 if (ar->eeprom.operating_flags & AR9170_OPFLAG_5GHZ) {
1932 ar->hw->wiphy->bands[NL80211_BAND_5GHZ] =
1933 &carl9170_band_5GHz;
1934 chans += carl9170_band_5GHz.n_channels;
1935 bands++;
1936 }
1937
1938 if (!bands)
1939 return -EINVAL;
1940
1941 ar->survey = kcalloc(chans, sizeof(struct survey_info), GFP_KERNEL);
1942 if (!ar->survey)
1943 return -ENOMEM;
1944 ar->num_channels = chans;
1945
1946 regulatory->current_rd = le16_to_cpu(ar->eeprom.reg_domain[0]);
1947
1948 /* second part of wiphy init */
1949 SET_IEEE80211_PERM_ADDR(ar->hw, ar->eeprom.mac_address);
1950
1951 return 0;
1952 }
1953
1954 static void carl9170_reg_notifier(struct wiphy *wiphy,
1955 struct regulatory_request *request)
1956 {
1957 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1958 struct ar9170 *ar = hw->priv;
1959
1960 ath_reg_notifier_apply(wiphy, request, &ar->common.regulatory);
1961 }
1962
1963 int carl9170_register(struct ar9170 *ar)
1964 {
1965 struct ath_regulatory *regulatory = &ar->common.regulatory;
1966 int err = 0, i;
1967
1968 if (WARN_ON(ar->mem_bitmap))
1969 return -EINVAL;
1970
1971 ar->mem_bitmap = kcalloc(roundup(ar->fw.mem_blocks, BITS_PER_LONG),
1972 sizeof(unsigned long),
1973 GFP_KERNEL);
1974
1975 if (!ar->mem_bitmap)
1976 return -ENOMEM;
1977
1978 /* try to read EEPROM, init MAC addr */
1979 err = carl9170_read_eeprom(ar);
1980 if (err)
1981 return err;
1982
1983 err = carl9170_parse_eeprom(ar);
1984 if (err)
1985 return err;
1986
1987 err = ath_regd_init(regulatory, ar->hw->wiphy,
1988 carl9170_reg_notifier);
1989 if (err)
1990 return err;
1991
1992 if (modparam_noht) {
1993 carl9170_band_2GHz.ht_cap.ht_supported = false;
1994 carl9170_band_5GHz.ht_cap.ht_supported = false;
1995 }
1996
1997 for (i = 0; i < ar->fw.vif_num; i++) {
1998 ar->vif_priv[i].id = i;
1999 ar->vif_priv[i].vif = NULL;
2000 }
2001
2002 err = ieee80211_register_hw(ar->hw);
2003 if (err)
2004 return err;
2005
2006 /* mac80211 interface is now registered */
2007 ar->registered = true;
2008
2009 if (!ath_is_world_regd(regulatory))
2010 regulatory_hint(ar->hw->wiphy, regulatory->alpha2);
2011
2012 #ifdef CONFIG_CARL9170_DEBUGFS
2013 carl9170_debugfs_register(ar);
2014 #endif /* CONFIG_CARL9170_DEBUGFS */
2015
2016 err = carl9170_led_init(ar);
2017 if (err)
2018 goto err_unreg;
2019
2020 #ifdef CONFIG_CARL9170_LEDS
2021 err = carl9170_led_register(ar);
2022 if (err)
2023 goto err_unreg;
2024 #endif /* CONFIG_CARL9170_LEDS */
2025
2026 #ifdef CONFIG_CARL9170_WPC
2027 err = carl9170_register_wps_button(ar);
2028 if (err)
2029 goto err_unreg;
2030 #endif /* CONFIG_CARL9170_WPC */
2031
2032 #ifdef CONFIG_CARL9170_HWRNG
2033 err = carl9170_register_hwrng(ar);
2034 if (err)
2035 goto err_unreg;
2036 #endif /* CONFIG_CARL9170_HWRNG */
2037
2038 dev_info(&ar->udev->dev, "Atheros AR9170 is registered as '%s'\n",
2039 wiphy_name(ar->hw->wiphy));
2040
2041 return 0;
2042
2043 err_unreg:
2044 carl9170_unregister(ar);
2045 return err;
2046 }
2047
2048 void carl9170_unregister(struct ar9170 *ar)
2049 {
2050 if (!ar->registered)
2051 return;
2052
2053 ar->registered = false;
2054
2055 #ifdef CONFIG_CARL9170_LEDS
2056 carl9170_led_unregister(ar);
2057 #endif /* CONFIG_CARL9170_LEDS */
2058
2059 #ifdef CONFIG_CARL9170_DEBUGFS
2060 carl9170_debugfs_unregister(ar);
2061 #endif /* CONFIG_CARL9170_DEBUGFS */
2062
2063 #ifdef CONFIG_CARL9170_WPC
2064 if (ar->wps.pbc) {
2065 input_unregister_device(ar->wps.pbc);
2066 ar->wps.pbc = NULL;
2067 }
2068 #endif /* CONFIG_CARL9170_WPC */
2069
2070 #ifdef CONFIG_CARL9170_HWRNG
2071 carl9170_unregister_hwrng(ar);
2072 #endif /* CONFIG_CARL9170_HWRNG */
2073
2074 carl9170_cancel_worker(ar);
2075 cancel_work_sync(&ar->restart_work);
2076
2077 ieee80211_unregister_hw(ar->hw);
2078 }
2079
2080 void carl9170_free(struct ar9170 *ar)
2081 {
2082 WARN_ON(ar->registered);
2083 WARN_ON(IS_INITIALIZED(ar));
2084
2085 kfree_skb(ar->rx_failover);
2086 ar->rx_failover = NULL;
2087
2088 kfree(ar->mem_bitmap);
2089 ar->mem_bitmap = NULL;
2090
2091 kfree(ar->survey);
2092 ar->survey = NULL;
2093
2094 mutex_destroy(&ar->mutex);
2095
2096 ieee80211_free_hw(ar->hw);
2097 }
2098