1 /*
2 * This file is part of wlcore
3 *
4 * Copyright (C) 2008-2010 Nokia Corporation
5 * Copyright (C) 2011-2013 Texas Instruments Inc.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * version 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19 * 02110-1301 USA
20 *
21 */
22
23 #include <linux/module.h>
24 #include <linux/firmware.h>
25 #include <linux/etherdevice.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
29
30 #include "wlcore.h"
31 #include "debug.h"
32 #include "wl12xx_80211.h"
33 #include "io.h"
34 #include "tx.h"
35 #include "ps.h"
36 #include "init.h"
37 #include "debugfs.h"
38 #include "testmode.h"
39 #include "vendor_cmd.h"
40 #include "scan.h"
41 #include "hw_ops.h"
42 #include "sysfs.h"
43
44 #define WL1271_BOOT_RETRIES 3
45
46 static char *fwlog_param;
47 static int fwlog_mem_blocks = -1;
48 static int bug_on_recovery = -1;
49 static int no_recovery = -1;
50
51 static void __wl1271_op_remove_interface(struct wl1271 *wl,
52 struct ieee80211_vif *vif,
53 bool reset_tx_queues);
54 static void wlcore_op_stop_locked(struct wl1271 *wl);
55 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
56
57 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
58 {
59 int ret;
60
61 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
62 return -EINVAL;
63
64 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
65 return 0;
66
67 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
68 return 0;
69
70 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
71 if (ret < 0)
72 return ret;
73
74 wl1271_info("Association completed.");
75 return 0;
76 }
77
78 static void wl1271_reg_notify(struct wiphy *wiphy,
79 struct regulatory_request *request)
80 {
81 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
82 struct wl1271 *wl = hw->priv;
83
84 /* copy the current dfs region */
85 if (request)
86 wl->dfs_region = request->dfs_region;
87
88 wlcore_regdomain_config(wl);
89 }
90
91 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
92 bool enable)
93 {
94 int ret = 0;
95
96 /* we should hold wl->mutex */
97 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
98 if (ret < 0)
99 goto out;
100
101 if (enable)
102 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
103 else
104 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
105 out:
106 return ret;
107 }
108
109 /*
110 * this function is called when the rx_streaming interval
111 * has been changed or rx_streaming should be disabled
112 */
113 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
114 {
115 int ret = 0;
116 int period = wl->conf.rx_streaming.interval;
117
118 /* don't reconfigure if rx_streaming is disabled */
119 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
120 goto out;
121
122 /* reconfigure/disable according to new streaming_period */
123 if (period &&
124 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
125 (wl->conf.rx_streaming.always ||
126 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
127 ret = wl1271_set_rx_streaming(wl, wlvif, true);
128 else {
129 ret = wl1271_set_rx_streaming(wl, wlvif, false);
130 /* don't cancel_work_sync since we might deadlock */
131 del_timer_sync(&wlvif->rx_streaming_timer);
132 }
133 out:
134 return ret;
135 }
136
137 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
138 {
139 int ret;
140 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
141 rx_streaming_enable_work);
142 struct wl1271 *wl = wlvif->wl;
143
144 mutex_lock(&wl->mutex);
145
146 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
147 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
148 (!wl->conf.rx_streaming.always &&
149 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
150 goto out;
151
152 if (!wl->conf.rx_streaming.interval)
153 goto out;
154
155 ret = wl1271_ps_elp_wakeup(wl);
156 if (ret < 0)
157 goto out;
158
159 ret = wl1271_set_rx_streaming(wl, wlvif, true);
160 if (ret < 0)
161 goto out_sleep;
162
163 /* stop it after some time of inactivity */
164 mod_timer(&wlvif->rx_streaming_timer,
165 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
166
167 out_sleep:
168 wl1271_ps_elp_sleep(wl);
169 out:
170 mutex_unlock(&wl->mutex);
171 }
172
173 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
174 {
175 int ret;
176 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
177 rx_streaming_disable_work);
178 struct wl1271 *wl = wlvif->wl;
179
180 mutex_lock(&wl->mutex);
181
182 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
183 goto out;
184
185 ret = wl1271_ps_elp_wakeup(wl);
186 if (ret < 0)
187 goto out;
188
189 ret = wl1271_set_rx_streaming(wl, wlvif, false);
190 if (ret)
191 goto out_sleep;
192
193 out_sleep:
194 wl1271_ps_elp_sleep(wl);
195 out:
196 mutex_unlock(&wl->mutex);
197 }
198
199 static void wl1271_rx_streaming_timer(unsigned long data)
200 {
201 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
202 struct wl1271 *wl = wlvif->wl;
203 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
204 }
205
206 /* wl->mutex must be taken */
207 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
208 {
209 /* if the watchdog is not armed, don't do anything */
210 if (wl->tx_allocated_blocks == 0)
211 return;
212
213 cancel_delayed_work(&wl->tx_watchdog_work);
214 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
215 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
216 }
217
218 static void wlcore_rc_update_work(struct work_struct *work)
219 {
220 int ret;
221 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
222 rc_update_work);
223 struct wl1271 *wl = wlvif->wl;
224 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
225
226 mutex_lock(&wl->mutex);
227
228 if (unlikely(wl->state != WLCORE_STATE_ON))
229 goto out;
230
231 ret = wl1271_ps_elp_wakeup(wl);
232 if (ret < 0)
233 goto out;
234
235 if (ieee80211_vif_is_mesh(vif)) {
236 ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
237 true, wlvif->sta.hlid);
238 if (ret < 0)
239 goto out_sleep;
240 } else {
241 wlcore_hw_sta_rc_update(wl, wlvif);
242 }
243
244 out_sleep:
245 wl1271_ps_elp_sleep(wl);
246 out:
247 mutex_unlock(&wl->mutex);
248 }
249
250 static void wl12xx_tx_watchdog_work(struct work_struct *work)
251 {
252 struct delayed_work *dwork;
253 struct wl1271 *wl;
254
255 dwork = to_delayed_work(work);
256 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
257
258 mutex_lock(&wl->mutex);
259
260 if (unlikely(wl->state != WLCORE_STATE_ON))
261 goto out;
262
263 /* Tx went out in the meantime - everything is ok */
264 if (unlikely(wl->tx_allocated_blocks == 0))
265 goto out;
266
267 /*
268 * if a ROC is in progress, we might not have any Tx for a long
269 * time (e.g. pending Tx on the non-ROC channels)
270 */
271 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
272 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
273 wl->conf.tx.tx_watchdog_timeout);
274 wl12xx_rearm_tx_watchdog_locked(wl);
275 goto out;
276 }
277
278 /*
279 * if a scan is in progress, we might not have any Tx for a long
280 * time
281 */
282 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
283 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
284 wl->conf.tx.tx_watchdog_timeout);
285 wl12xx_rearm_tx_watchdog_locked(wl);
286 goto out;
287 }
288
289 /*
290 * AP might cache a frame for a long time for a sleeping station,
291 * so rearm the timer if there's an AP interface with stations. If
292 * Tx is genuinely stuck, we will hopefully discover it when all
293 * stations are removed due to inactivity.
294 */
295 if (wl->active_sta_count) {
296 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
297 " %d stations",
298 wl->conf.tx.tx_watchdog_timeout,
299 wl->active_sta_count);
300 wl12xx_rearm_tx_watchdog_locked(wl);
301 goto out;
302 }
303
304 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
305 wl->conf.tx.tx_watchdog_timeout);
306 wl12xx_queue_recovery_work(wl);
307
308 out:
309 mutex_unlock(&wl->mutex);
310 }
311
312 static void wlcore_adjust_conf(struct wl1271 *wl)
313 {
314
315 if (fwlog_param) {
316 if (!strcmp(fwlog_param, "continuous")) {
317 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
318 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
319 } else if (!strcmp(fwlog_param, "dbgpins")) {
320 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
321 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
322 } else if (!strcmp(fwlog_param, "disable")) {
323 wl->conf.fwlog.mem_blocks = 0;
324 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
325 } else {
326 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
327 }
328 }
329
330 if (bug_on_recovery != -1)
331 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
332
333 if (no_recovery != -1)
334 wl->conf.recovery.no_recovery = (u8) no_recovery;
335 }
336
337 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
338 struct wl12xx_vif *wlvif,
339 u8 hlid, u8 tx_pkts)
340 {
341 bool fw_ps;
342
343 fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
344
345 /*
346 * Wake up from high-level PS if the STA is asleep with too few
347 * packets in FW or if the STA is awake.
348 */
349 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
350 wl12xx_ps_link_end(wl, wlvif, hlid);
351
352 /*
353 * Start high-level PS if the STA is asleep with enough blocks in FW.
354 * Make an exception if this is the only connected link. In this
355 * case FW-memory congestion is less of a problem.
356 * Note that a single connected STA means 2*ap_count + 1 active links,
357 * since we must account for the global and broadcast AP links
358 * for each AP. The "fw_ps" check assures us the other link is a STA
359 * connected to the AP. Otherwise the FW would not set the PSM bit.
360 */
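/*
 * Illustrative example: with a single AP role (ap_count = 1), one
 * connected STA means 3 active links - the AP's global link, its
 * broadcast link and the STA link itself.
 */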
361 else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
362 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
363 wl12xx_ps_link_start(wl, wlvif, hlid, true);
364 }
365
366 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
367 struct wl12xx_vif *wlvif,
368 struct wl_fw_status *status)
369 {
370 unsigned long cur_fw_ps_map;
371 u8 hlid;
372
373 cur_fw_ps_map = status->link_ps_bitmap;
374 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
375 wl1271_debug(DEBUG_PSM,
376 "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
377 wl->ap_fw_ps_map, cur_fw_ps_map,
378 wl->ap_fw_ps_map ^ cur_fw_ps_map);
379
380 wl->ap_fw_ps_map = cur_fw_ps_map;
381 }
382
383 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
384 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
385 wl->links[hlid].allocated_pkts);
386 }
387
388 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
389 {
390 struct wl12xx_vif *wlvif;
391 struct timespec ts;
392 u32 old_tx_blk_count = wl->tx_blocks_available;
393 int avail, freed_blocks;
394 int i;
395 int ret;
396 struct wl1271_link *lnk;
397
398 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
399 wl->raw_fw_status,
400 wl->fw_status_len, false);
401 if (ret < 0)
402 return ret;
403
404 wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
405
406 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
407 "drv_rx_counter = %d, tx_results_counter = %d)",
408 status->intr,
409 status->fw_rx_counter,
410 status->drv_rx_counter,
411 status->tx_results_counter);
412
413 for (i = 0; i < NUM_TX_QUEUES; i++) {
414 /* prevent wrap-around in freed-packets counter */
415 wl->tx_allocated_pkts[i] -=
416 (status->counters.tx_released_pkts[i] -
417 wl->tx_pkts_freed[i]) & 0xff;
418
419 wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
420 }
421
422
423 for_each_set_bit(i, wl->links_map, wl->num_links) {
424 u8 diff;
425 lnk = &wl->links[i];
426
427 /* prevent wrap-around in freed-packets counter */
428 diff = (status->counters.tx_lnk_free_pkts[i] -
429 lnk->prev_freed_pkts) & 0xff;
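/*
 * Example of the 8-bit wrap handling above: if prev_freed_pkts was 0xf8
 * and the FW now reports 0x02, then (0x02 - 0xf8) & 0xff == 0x0a packets
 * were freed.
 */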
430
431 if (diff == 0)
432 continue;
433
434 lnk->allocated_pkts -= diff;
435 lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
436
437 /* accumulate the prev_freed_pkts counter */
438 lnk->total_freed_pkts += diff;
439 }
440
441 /* prevent wrap-around in total blocks counter */
442 if (likely(wl->tx_blocks_freed <= status->total_released_blks))
443 freed_blocks = status->total_released_blks -
444 wl->tx_blocks_freed;
445 else
446 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
447 status->total_released_blks;
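/*
 * Example: if tx_blocks_freed was 0xfffffff0 and the FW now reports
 * total_released_blks == 0x10, the 32-bit counter wrapped and
 * freed_blocks == 0x100000000 - 0xfffffff0 + 0x10 == 0x20.
 */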
448
449 wl->tx_blocks_freed = status->total_released_blks;
450
451 wl->tx_allocated_blocks -= freed_blocks;
452
453 /*
454 * If the FW freed some blocks:
455 * If we still have allocated blocks - re-arm the timer, Tx is
456 * not stuck. Otherwise, cancel the timer (no Tx currently).
457 */
458 if (freed_blocks) {
459 if (wl->tx_allocated_blocks)
460 wl12xx_rearm_tx_watchdog_locked(wl);
461 else
462 cancel_delayed_work(&wl->tx_watchdog_work);
463 }
464
465 avail = status->tx_total - wl->tx_allocated_blocks;
466
467 /*
468 * The FW might change the total number of TX memblocks before
469 * we get a notification about blocks being released. Thus, the
470 * available blocks calculation might yield a temporary result
471 * which is lower than the actual available blocks. Keeping in
472 * mind that only blocks that were allocated can be moved from
473 * TX to RX, tx_blocks_available should never decrease here.
474 */
475 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
476 avail);
477
478 /* if more blocks are available now, tx work can be scheduled */
479 if (wl->tx_blocks_available > old_tx_blk_count)
480 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
481
482 /* for AP update num of allocated TX blocks per link and ps status */
483 wl12xx_for_each_wlvif_ap(wl, wlvif) {
484 wl12xx_irq_update_links_status(wl, wlvif, status);
485 }
486
487 /* update the host-chipset time offset */
488 getnstimeofday(&ts);
489 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
490 (s64)(status->fw_localtime);
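/*
 * Note: the ">> 10" is a cheap approximation of a nanoseconds-to-
 * microseconds conversion (dividing by 1024 instead of 1000), which is
 * presumably close enough for matching against fw_localtime.
 */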
491
492 wl->fw_fast_lnk_map = status->link_fast_bitmap;
493
494 return 0;
495 }
496
497 static void wl1271_flush_deferred_work(struct wl1271 *wl)
498 {
499 struct sk_buff *skb;
500
501 /* Pass all received frames to the network stack */
502 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
503 ieee80211_rx_ni(wl->hw, skb);
504
505 /* Return sent skbs to the network stack */
506 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
507 ieee80211_tx_status_ni(wl->hw, skb);
508 }
509
510 static void wl1271_netstack_work(struct work_struct *work)
511 {
512 struct wl1271 *wl =
513 container_of(work, struct wl1271, netstack_work);
514
515 do {
516 wl1271_flush_deferred_work(wl);
517 } while (skb_queue_len(&wl->deferred_rx_queue));
518 }
519
520 #define WL1271_IRQ_MAX_LOOPS 256
521
522 static int wlcore_irq_locked(struct wl1271 *wl)
523 {
524 int ret = 0;
525 u32 intr;
526 int loopcount = WL1271_IRQ_MAX_LOOPS;
527 bool done = false;
528 unsigned int defer_count;
529 unsigned long flags;
530
531 /*
532 * If an edge-triggered interrupt must be used, we cannot iterate
533 * more than once without introducing race conditions with the hardirq.
534 */
535 if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
536 loopcount = 1;
537
538 wl1271_debug(DEBUG_IRQ, "IRQ work");
539
540 if (unlikely(wl->state != WLCORE_STATE_ON))
541 goto out;
542
543 ret = wl1271_ps_elp_wakeup(wl);
544 if (ret < 0)
545 goto out;
546
547 while (!done && loopcount--) {
548 /*
549 * In order to avoid a race with the hardirq, clear the flag
550 * before acknowledging the chip. Since the mutex is held,
551 * wl1271_ps_elp_wakeup cannot be called concurrently.
552 */
553 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
554 smp_mb__after_atomic();
555
556 ret = wlcore_fw_status(wl, wl->fw_status);
557 if (ret < 0)
558 goto out;
559
560 wlcore_hw_tx_immediate_compl(wl);
561
562 intr = wl->fw_status->intr;
563 intr &= WLCORE_ALL_INTR_MASK;
564 if (!intr) {
565 done = true;
566 continue;
567 }
568
569 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
570 wl1271_error("HW watchdog interrupt received! starting recovery.");
571 wl->watchdog_recovery = true;
572 ret = -EIO;
573
574 /* restarting the chip. ignore any other interrupt. */
575 goto out;
576 }
577
578 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
579 wl1271_error("SW watchdog interrupt received! "
580 "starting recovery.");
581 wl->watchdog_recovery = true;
582 ret = -EIO;
583
584 /* restarting the chip. ignore any other interrupt. */
585 goto out;
586 }
587
588 if (likely(intr & WL1271_ACX_INTR_DATA)) {
589 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
590
591 ret = wlcore_rx(wl, wl->fw_status);
592 if (ret < 0)
593 goto out;
594
595 /* Check if any tx blocks were freed */
596 spin_lock_irqsave(&wl->wl_lock, flags);
597 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
598 wl1271_tx_total_queue_count(wl) > 0) {
599 spin_unlock_irqrestore(&wl->wl_lock, flags);
600 /*
601 * In order to avoid starvation of the TX path,
602 * call the work function directly.
603 */
604 ret = wlcore_tx_work_locked(wl);
605 if (ret < 0)
606 goto out;
607 } else {
608 spin_unlock_irqrestore(&wl->wl_lock, flags);
609 }
610
611 /* check for tx results */
612 ret = wlcore_hw_tx_delayed_compl(wl);
613 if (ret < 0)
614 goto out;
615
616 /* Make sure the deferred queues don't get too long */
617 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
618 skb_queue_len(&wl->deferred_rx_queue);
619 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
620 wl1271_flush_deferred_work(wl);
621 }
622
623 if (intr & WL1271_ACX_INTR_EVENT_A) {
624 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
625 ret = wl1271_event_handle(wl, 0);
626 if (ret < 0)
627 goto out;
628 }
629
630 if (intr & WL1271_ACX_INTR_EVENT_B) {
631 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
632 ret = wl1271_event_handle(wl, 1);
633 if (ret < 0)
634 goto out;
635 }
636
637 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
638 wl1271_debug(DEBUG_IRQ,
639 "WL1271_ACX_INTR_INIT_COMPLETE");
640
641 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
642 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
643 }
644
645 wl1271_ps_elp_sleep(wl);
646
647 out:
648 return ret;
649 }
650
651 static irqreturn_t wlcore_irq(int irq, void *cookie)
652 {
653 int ret;
654 unsigned long flags;
655 struct wl1271 *wl = cookie;
656
657 /* signal the ELP wakeup completion, if one is pending */
658 spin_lock_irqsave(&wl->wl_lock, flags);
659 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
660 if (wl->elp_compl) {
661 complete(wl->elp_compl);
662 wl->elp_compl = NULL;
663 }
664
665 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
666 /* don't enqueue a work right now. mark it as pending */
667 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
668 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
669 disable_irq_nosync(wl->irq);
670 pm_wakeup_event(wl->dev, 0);
671 spin_unlock_irqrestore(&wl->wl_lock, flags);
672 return IRQ_HANDLED;
673 }
674 spin_unlock_irqrestore(&wl->wl_lock, flags);
675
676 /* TX might be handled here, avoid redundant work */
677 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
678 cancel_work_sync(&wl->tx_work);
679
680 mutex_lock(&wl->mutex);
681
682 ret = wlcore_irq_locked(wl);
683 if (ret)
684 wl12xx_queue_recovery_work(wl);
685
686 spin_lock_irqsave(&wl->wl_lock, flags);
687 /* In case TX was not handled here, queue TX work */
688 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
689 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
690 wl1271_tx_total_queue_count(wl) > 0)
691 ieee80211_queue_work(wl->hw, &wl->tx_work);
692 spin_unlock_irqrestore(&wl->wl_lock, flags);
693
694 mutex_unlock(&wl->mutex);
695
696 return IRQ_HANDLED;
697 }
698
699 struct vif_counter_data {
700 u8 counter;
701
702 struct ieee80211_vif *cur_vif;
703 bool cur_vif_running;
704 };
705
706 static void wl12xx_vif_count_iter(void *data, u8 *mac,
707 struct ieee80211_vif *vif)
708 {
709 struct vif_counter_data *counter = data;
710
711 counter->counter++;
712 if (counter->cur_vif == vif)
713 counter->cur_vif_running = true;
714 }
715
716 /* caller must not hold wl->mutex, as it might deadlock */
717 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
718 struct ieee80211_vif *cur_vif,
719 struct vif_counter_data *data)
720 {
721 memset(data, 0, sizeof(*data));
722 data->cur_vif = cur_vif;
723
724 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
725 wl12xx_vif_count_iter, data);
726 }
727
728 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
729 {
730 const struct firmware *fw;
731 const char *fw_name;
732 enum wl12xx_fw_type fw_type;
733 int ret;
734
735 if (plt) {
736 fw_type = WL12XX_FW_TYPE_PLT;
737 fw_name = wl->plt_fw_name;
738 } else {
739 /*
740 * we can't call wl12xx_get_vif_count() here because
741 * wl->mutex is taken, so use the cached last_vif_count value
742 */
743 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
744 fw_type = WL12XX_FW_TYPE_MULTI;
745 fw_name = wl->mr_fw_name;
746 } else {
747 fw_type = WL12XX_FW_TYPE_NORMAL;
748 fw_name = wl->sr_fw_name;
749 }
750 }
751
752 if (wl->fw_type == fw_type)
753 return 0;
754
755 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
756
757 ret = request_firmware(&fw, fw_name, wl->dev);
758
759 if (ret < 0) {
760 wl1271_error("could not get firmware %s: %d", fw_name, ret);
761 return ret;
762 }
763
764 if (fw->size % 4) {
765 wl1271_error("firmware size is not multiple of 32 bits: %zu",
766 fw->size);
767 ret = -EILSEQ;
768 goto out;
769 }
770
771 vfree(wl->fw);
772 wl->fw_type = WL12XX_FW_TYPE_NONE;
773 wl->fw_len = fw->size;
774 wl->fw = vmalloc(wl->fw_len);
775
776 if (!wl->fw) {
777 wl1271_error("could not allocate memory for the firmware");
778 ret = -ENOMEM;
779 goto out;
780 }
781
782 memcpy(wl->fw, fw->data, wl->fw_len);
783 ret = 0;
784 wl->fw_type = fw_type;
785 out:
786 release_firmware(fw);
787
788 return ret;
789 }
790
791 void wl12xx_queue_recovery_work(struct wl1271 *wl)
792 {
793 /* Avoid a recursive recovery */
794 if (wl->state == WLCORE_STATE_ON) {
795 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
796 &wl->flags));
797
798 wl->state = WLCORE_STATE_RESTARTING;
799 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
800 wl1271_ps_elp_wakeup(wl);
801 wlcore_disable_interrupts_nosync(wl);
802 ieee80211_queue_work(wl->hw, &wl->recovery_work);
803 }
804 }
805
806 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
807 {
808 size_t len;
809
810 /* Make sure we have enough room */
811 len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
812
813 /* Fill the FW log file, consumed by the sysfs fwlog entry */
814 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
815 wl->fwlog_size += len;
816
817 return len;
818 }
819
820 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
821 {
822 u32 end_of_log = 0;
823
824 if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
825 return;
826
827 wl1271_info("Reading FW panic log");
828
829 /*
830 * Make sure the chip is awake and the logger isn't active.
831 * Do not send a stop fwlog command if the fw is hung or if
832 * dbgpins are used (due to some fw bug).
833 */
834 if (wl1271_ps_elp_wakeup(wl))
835 return;
836 if (!wl->watchdog_recovery &&
837 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
838 wl12xx_cmd_stop_fwlog(wl);
839
840 /* Traverse the memory blocks linked list */
841 do {
842 end_of_log = wlcore_event_fw_logger(wl);
843 if (end_of_log == 0) {
844 msleep(100);
845 end_of_log = wlcore_event_fw_logger(wl);
846 }
847 } while (end_of_log != 0);
848 }
849
850 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
851 u8 hlid, struct ieee80211_sta *sta)
852 {
853 struct wl1271_station *wl_sta;
854 u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
855
856 wl_sta = (void *)sta->drv_priv;
857 wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
858
859 /*
860 * increment the initial seq number on recovery to account for
861 * transmitted packets that we haven't yet got in the FW status
862 */
863 if (wlvif->encryption_type == KEY_GEM)
864 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
865
866 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
867 wl_sta->total_freed_pkts += sqn_recovery_padding;
868 }
869
870 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
871 struct wl12xx_vif *wlvif,
872 u8 hlid, const u8 *addr)
873 {
874 struct ieee80211_sta *sta;
875 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
876
877 if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
878 is_zero_ether_addr(addr)))
879 return;
880
881 rcu_read_lock();
882 sta = ieee80211_find_sta(vif, addr);
883 if (sta)
884 wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
885 rcu_read_unlock();
886 }
887
888 static void wlcore_print_recovery(struct wl1271 *wl)
889 {
890 u32 pc = 0;
891 u32 hint_sts = 0;
892 int ret;
893
894 wl1271_info("Hardware recovery in progress. FW ver: %s",
895 wl->chip.fw_ver_str);
896
897 /* change partitions momentarily so we can read the FW pc */
898 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
899 if (ret < 0)
900 return;
901
902 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
903 if (ret < 0)
904 return;
905
906 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
907 if (ret < 0)
908 return;
909
910 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
911 pc, hint_sts, ++wl->recovery_count);
912
913 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
914 }
915
916
917 static void wl1271_recovery_work(struct work_struct *work)
918 {
919 struct wl1271 *wl =
920 container_of(work, struct wl1271, recovery_work);
921 struct wl12xx_vif *wlvif;
922 struct ieee80211_vif *vif;
923
924 mutex_lock(&wl->mutex);
925
926 if (wl->state == WLCORE_STATE_OFF || wl->plt)
927 goto out_unlock;
928
929 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
930 if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
931 wl12xx_read_fwlog_panic(wl);
932 wlcore_print_recovery(wl);
933 }
934
935 BUG_ON(wl->conf.recovery.bug_on_recovery &&
936 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
937
938 if (wl->conf.recovery.no_recovery) {
939 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
940 goto out_unlock;
941 }
942
943 /* Prevent spurious TX during FW restart */
944 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
945
946 /* reboot the chipset */
947 while (!list_empty(&wl->wlvif_list)) {
948 wlvif = list_first_entry(&wl->wlvif_list,
949 struct wl12xx_vif, list);
950 vif = wl12xx_wlvif_to_vif(wlvif);
951
952 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
953 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
954 wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
955 vif->bss_conf.bssid);
956 }
957
958 __wl1271_op_remove_interface(wl, vif, false);
959 }
960
961 wlcore_op_stop_locked(wl);
962
963 ieee80211_restart_hw(wl->hw);
964
965 /*
966 * It's safe to enable TX now - the queues are stopped after a request
967 * to restart the HW.
968 */
969 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
970
971 out_unlock:
972 wl->watchdog_recovery = false;
973 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
974 mutex_unlock(&wl->mutex);
975 }
976
977 static int wlcore_fw_wakeup(struct wl1271 *wl)
978 {
979 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
980 }
981
982 static int wl1271_setup(struct wl1271 *wl)
983 {
984 wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
985 if (!wl->raw_fw_status)
986 goto err;
987
988 wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
989 if (!wl->fw_status)
990 goto err;
991
992 wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
993 if (!wl->tx_res_if)
994 goto err;
995
996 return 0;
997 err:
998 kfree(wl->fw_status);
999 kfree(wl->raw_fw_status);
1000 return -ENOMEM;
1001 }
1002
1003 static int wl12xx_set_power_on(struct wl1271 *wl)
1004 {
1005 int ret;
1006
1007 msleep(WL1271_PRE_POWER_ON_SLEEP);
1008 ret = wl1271_power_on(wl);
1009 if (ret < 0)
1010 goto out;
1011 msleep(WL1271_POWER_ON_SLEEP);
1012 wl1271_io_reset(wl);
1013 wl1271_io_init(wl);
1014
1015 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1016 if (ret < 0)
1017 goto fail;
1018
1019 /* ELP module wake up */
1020 ret = wlcore_fw_wakeup(wl);
1021 if (ret < 0)
1022 goto fail;
1023
1024 out:
1025 return ret;
1026
1027 fail:
1028 wl1271_power_off(wl);
1029 return ret;
1030 }
1031
1032 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1033 {
1034 int ret = 0;
1035
1036 ret = wl12xx_set_power_on(wl);
1037 if (ret < 0)
1038 goto out;
1039
1040 /*
1041 * For wl127x based devices we could use the default block
1042 * size (512 bytes), but due to a bug in the sdio driver, we
1043 * need to set it explicitly after the chip is powered on. To
1044 * simplify the code and since the performance impact is
1045 * negligible, we use the same block size for all different
1046 * chip types.
1047 *
1048 * Check if the bus supports blocksize alignment and, if it
1049 * doesn't, make sure we don't have the quirk.
1050 */
1051 if (!wl1271_set_block_size(wl))
1052 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1053
1054 /* TODO: make sure the lower driver has set things up correctly */
1055
1056 ret = wl1271_setup(wl);
1057 if (ret < 0)
1058 goto out;
1059
1060 ret = wl12xx_fetch_firmware(wl, plt);
1061 if (ret < 0)
1062 goto out;
1063
1064 out:
1065 return ret;
1066 }
1067
1068 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1069 {
1070 int retries = WL1271_BOOT_RETRIES;
1071 struct wiphy *wiphy = wl->hw->wiphy;
1072
1073 static const char* const PLT_MODE[] = {
1074 "PLT_OFF",
1075 "PLT_ON",
1076 "PLT_FEM_DETECT",
1077 "PLT_CHIP_AWAKE"
1078 };
1079
1080 int ret;
1081
1082 mutex_lock(&wl->mutex);
1083
1084 wl1271_notice("power up");
1085
1086 if (wl->state != WLCORE_STATE_OFF) {
1087 wl1271_error("cannot go into PLT state because not "
1088 "in off state: %d", wl->state);
1089 ret = -EBUSY;
1090 goto out;
1091 }
1092
1093 /* Indicate to lower levels that we are now in PLT mode */
1094 wl->plt = true;
1095 wl->plt_mode = plt_mode;
1096
1097 while (retries) {
1098 retries--;
1099 ret = wl12xx_chip_wakeup(wl, true);
1100 if (ret < 0)
1101 goto power_off;
1102
1103 if (plt_mode != PLT_CHIP_AWAKE) {
1104 ret = wl->ops->plt_init(wl);
1105 if (ret < 0)
1106 goto power_off;
1107 }
1108
1109 wl->state = WLCORE_STATE_ON;
1110 wl1271_notice("firmware booted in PLT mode %s (%s)",
1111 PLT_MODE[plt_mode],
1112 wl->chip.fw_ver_str);
1113
1114 /* update hw/fw version info in wiphy struct */
1115 wiphy->hw_version = wl->chip.id;
1116 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1117 sizeof(wiphy->fw_version));
1118
1119 goto out;
1120
1121 power_off:
1122 wl1271_power_off(wl);
1123 }
1124
1125 wl->plt = false;
1126 wl->plt_mode = PLT_OFF;
1127
1128 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1129 WL1271_BOOT_RETRIES);
1130 out:
1131 mutex_unlock(&wl->mutex);
1132
1133 return ret;
1134 }
1135
1136 int wl1271_plt_stop(struct wl1271 *wl)
1137 {
1138 int ret = 0;
1139
1140 wl1271_notice("power down");
1141
1142 /*
1143 * Interrupts must be disabled before setting the state to OFF.
1144 * Otherwise, the interrupt handler might be called and exit without
1145 * reading the interrupt status.
1146 */
1147 wlcore_disable_interrupts(wl);
1148 mutex_lock(&wl->mutex);
1149 if (!wl->plt) {
1150 mutex_unlock(&wl->mutex);
1151
1152 /*
1153 * This will not necessarily enable interrupts as interrupts
1154 * may have been disabled when op_stop was called. It will,
1155 * however, balance the above call to disable_interrupts().
1156 */
1157 wlcore_enable_interrupts(wl);
1158
1159 wl1271_error("cannot power down because not in PLT "
1160 "state: %d", wl->state);
1161 ret = -EBUSY;
1162 goto out;
1163 }
1164
1165 mutex_unlock(&wl->mutex);
1166
1167 wl1271_flush_deferred_work(wl);
1168 cancel_work_sync(&wl->netstack_work);
1169 cancel_work_sync(&wl->recovery_work);
1170 cancel_delayed_work_sync(&wl->elp_work);
1171 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1172
1173 mutex_lock(&wl->mutex);
1174 wl1271_power_off(wl);
1175 wl->flags = 0;
1176 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1177 wl->state = WLCORE_STATE_OFF;
1178 wl->plt = false;
1179 wl->plt_mode = PLT_OFF;
1180 wl->rx_counter = 0;
1181 mutex_unlock(&wl->mutex);
1182
1183 out:
1184 return ret;
1185 }
1186
1187 static void wl1271_op_tx(struct ieee80211_hw *hw,
1188 struct ieee80211_tx_control *control,
1189 struct sk_buff *skb)
1190 {
1191 struct wl1271 *wl = hw->priv;
1192 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1193 struct ieee80211_vif *vif = info->control.vif;
1194 struct wl12xx_vif *wlvif = NULL;
1195 unsigned long flags;
1196 int q, mapping;
1197 u8 hlid;
1198
1199 if (!vif) {
1200 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1201 ieee80211_free_txskb(hw, skb);
1202 return;
1203 }
1204
1205 wlvif = wl12xx_vif_to_data(vif);
1206 mapping = skb_get_queue_mapping(skb);
1207 q = wl1271_tx_get_queue(mapping);
1208
1209 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1210
1211 spin_lock_irqsave(&wl->wl_lock, flags);
1212
1213 /*
1214 * drop the packet if the link is invalid or the queue is stopped
1215 * for any reason but watermark. Watermark is a "soft"-stop so we
1216 * allow these packets through.
1217 */
1218 if (hlid == WL12XX_INVALID_LINK_ID ||
1219 (!test_bit(hlid, wlvif->links_map)) ||
1220 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1221 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1222 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1223 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1224 ieee80211_free_txskb(hw, skb);
1225 goto out;
1226 }
1227
1228 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1229 hlid, q, skb->len);
1230 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1231
1232 wl->tx_queue_count[q]++;
1233 wlvif->tx_queue_count[q]++;
1234
1235 /*
1236 * The workqueue is slow to process the tx_queue and we need to stop
1237 * the queue here, otherwise the queue will get too long.
1238 */
1239 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1240 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1241 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1242 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1243 wlcore_stop_queue_locked(wl, wlvif, q,
1244 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1245 }
1246
1247 /*
1248 * The chip specific setup must run before the first TX packet -
1249 * before that, the tx_work will not be initialized!
1250 */
1251
1252 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1253 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1254 ieee80211_queue_work(wl->hw, &wl->tx_work);
1255
1256 out:
1257 spin_unlock_irqrestore(&wl->wl_lock, flags);
1258 }
1259
1260 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1261 {
1262 unsigned long flags;
1263 int q;
1264
1265 /* no need to queue a new dummy packet if one is already pending */
1266 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1267 return 0;
1268
1269 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1270
1271 spin_lock_irqsave(&wl->wl_lock, flags);
1272 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1273 wl->tx_queue_count[q]++;
1274 spin_unlock_irqrestore(&wl->wl_lock, flags);
1275
1276 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1277 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1278 return wlcore_tx_work_locked(wl);
1279
1280 /*
1281 * If the FW TX is busy, TX work will be scheduled by the threaded
1282 * interrupt handler function
1283 */
1284 return 0;
1285 }
1286
1287 /*
1288 * The size of the dummy packet should be at least 1400 bytes. However, in
1289 * order to minimize the number of bus transactions, aligning it to
1290 * 512-byte boundaries could be beneficial, performance-wise.
1291 */
1292 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
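/* For example, ALIGN(1400, 512) rounds 1400 up to 1536 (3 * 512) bytes. */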
1293
1294 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1295 {
1296 struct sk_buff *skb;
1297 struct ieee80211_hdr_3addr *hdr;
1298 unsigned int dummy_packet_size;
1299
1300 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1301 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1302
1303 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1304 if (!skb) {
1305 wl1271_warning("Failed to allocate a dummy packet skb");
1306 return NULL;
1307 }
1308
1309 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1310
1311 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1312 memset(hdr, 0, sizeof(*hdr));
1313 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1314 IEEE80211_STYPE_NULLFUNC |
1315 IEEE80211_FCTL_TODS);
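/*
 * The resulting frame is a Null-function data frame with the To-DS bit
 * set, i.e. a data-type frame that carries no payload of its own.
 */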
1316
1317 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1318
1319 /* Dummy packets require the TID to be management */
1320 skb->priority = WL1271_TID_MGMT;
1321
1322 /* Initialize all fields that might be used */
1323 skb_set_queue_mapping(skb, 0);
1324 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1325
1326 return skb;
1327 }
1328
1329
1330 #ifdef CONFIG_PM
1331 static int
1332 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1333 {
1334 int num_fields = 0, in_field = 0, fields_size = 0;
1335 int i, pattern_len = 0;
1336
1337 if (!p->mask) {
1338 wl1271_warning("No mask in WoWLAN pattern");
1339 return -EINVAL;
1340 }
1341
1342 /*
1343 * The pattern is broken up into segments of bytes at different offsets
1344 * that need to be checked by the FW filter. Each segment is called
1345 * a field in the FW API. We verify that the total number of fields
1346 * required for this pattern won't exceed the FW limit (8) and that
1347 * the total fields buffer won't exceed the FW limit either.
1348 * Note that if there's a pattern which crosses Ethernet/IP header
1349 * boundary a new field is required.
1350 */
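/*
 * Illustrative example: a mask selecting pattern bytes 2-4 and 20-23
 * yields two fields, while a contiguous run of set bits that crosses the
 * WL1271_RX_FILTER_ETH_HEADER_SIZE boundary is split into two fields.
 */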
1351 for (i = 0; i < p->pattern_len; i++) {
1352 if (test_bit(i, (unsigned long *)p->mask)) {
1353 if (!in_field) {
1354 in_field = 1;
1355 pattern_len = 1;
1356 } else {
1357 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1358 num_fields++;
1359 fields_size += pattern_len +
1360 RX_FILTER_FIELD_OVERHEAD;
1361 pattern_len = 1;
1362 } else
1363 pattern_len++;
1364 }
1365 } else {
1366 if (in_field) {
1367 in_field = 0;
1368 fields_size += pattern_len +
1369 RX_FILTER_FIELD_OVERHEAD;
1370 num_fields++;
1371 }
1372 }
1373 }
1374
1375 if (in_field) {
1376 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1377 num_fields++;
1378 }
1379
1380 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1381 wl1271_warning("RX Filter too complex. Too many segments");
1382 return -EINVAL;
1383 }
1384
1385 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1386 wl1271_warning("RX filter pattern is too big");
1387 return -E2BIG;
1388 }
1389
1390 return 0;
1391 }
1392
1393 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1394 {
1395 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1396 }
1397
1398 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1399 {
1400 int i;
1401
1402 if (filter == NULL)
1403 return;
1404
1405 for (i = 0; i < filter->num_fields; i++)
1406 kfree(filter->fields[i].pattern);
1407
1408 kfree(filter);
1409 }
1410
1411 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1412 u16 offset, u8 flags,
1413 const u8 *pattern, u8 len)
1414 {
1415 struct wl12xx_rx_filter_field *field;
1416
1417 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1418 wl1271_warning("Max fields per RX filter. can't alloc another");
1419 return -EINVAL;
1420 }
1421
1422 field = &filter->fields[filter->num_fields];
1423
1424 field->pattern = kzalloc(len, GFP_KERNEL);
1425 if (!field->pattern) {
1426 wl1271_warning("Failed to allocate RX filter pattern");
1427 return -ENOMEM;
1428 }
1429
1430 filter->num_fields++;
1431
1432 field->offset = cpu_to_le16(offset);
1433 field->flags = flags;
1434 field->len = len;
1435 memcpy(field->pattern, pattern, len);
1436
1437 return 0;
1438 }
1439
1440 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1441 {
1442 int i, fields_size = 0;
1443
1444 for (i = 0; i < filter->num_fields; i++)
1445 fields_size += filter->fields[i].len +
1446 sizeof(struct wl12xx_rx_filter_field) -
1447 sizeof(u8 *);
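/*
 * Each field is serialized as its wl12xx_rx_filter_field header minus the
 * trailing pattern pointer, immediately followed by the pattern bytes (see
 * wl1271_rx_filter_flatten_fields() below).
 */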
1448
1449 return fields_size;
1450 }
1451
1452 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1453 u8 *buf)
1454 {
1455 int i;
1456 struct wl12xx_rx_filter_field *field;
1457
1458 for (i = 0; i < filter->num_fields; i++) {
1459 field = (struct wl12xx_rx_filter_field *)buf;
1460
1461 field->offset = filter->fields[i].offset;
1462 field->flags = filter->fields[i].flags;
1463 field->len = filter->fields[i].len;
1464
1465 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1466 buf += sizeof(struct wl12xx_rx_filter_field) -
1467 sizeof(u8 *) + field->len;
1468 }
1469 }
1470
1471 /*
1472 * Allocates an RX filter returned through f
1473 * which needs to be freed using rx_filter_free()
1474 */
1475 static int
1476 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1477 struct wl12xx_rx_filter **f)
1478 {
1479 int i, j, ret = 0;
1480 struct wl12xx_rx_filter *filter;
1481 u16 offset;
1482 u8 flags, len;
1483
1484 filter = wl1271_rx_filter_alloc();
1485 if (!filter) {
1486 wl1271_warning("Failed to alloc rx filter");
1487 ret = -ENOMEM;
1488 goto err;
1489 }
1490
1491 i = 0;
1492 while (i < p->pattern_len) {
1493 if (!test_bit(i, (unsigned long *)p->mask)) {
1494 i++;
1495 continue;
1496 }
1497
1498 for (j = i; j < p->pattern_len; j++) {
1499 if (!test_bit(j, (unsigned long *)p->mask))
1500 break;
1501
1502 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1503 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1504 break;
1505 }
1506
1507 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1508 offset = i;
1509 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1510 } else {
1511 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1512 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1513 }
1514
1515 len = j - i;
1516
1517 ret = wl1271_rx_filter_alloc_field(filter,
1518 offset,
1519 flags,
1520 &p->pattern[i], len);
1521 if (ret)
1522 goto err;
1523
1524 i = j;
1525 }
1526
1527 filter->action = FILTER_SIGNAL;
1528
1529 *f = filter;
1530 return 0;
1531
1532 err:
1533 wl1271_rx_filter_free(filter);
1534 *f = NULL;
1535
1536 return ret;
1537 }
1538
1539 static int wl1271_configure_wowlan(struct wl1271 *wl,
1540 struct cfg80211_wowlan *wow)
1541 {
1542 int i, ret;
1543
1544 if (!wow || wow->any || !wow->n_patterns) {
1545 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1546 FILTER_SIGNAL);
1547 if (ret)
1548 goto out;
1549
1550 ret = wl1271_rx_filter_clear_all(wl);
1551 if (ret)
1552 goto out;
1553
1554 return 0;
1555 }
1556
1557 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1558 return -EINVAL;
1559
1560 /* Validate all incoming patterns before clearing current FW state */
1561 for (i = 0; i < wow->n_patterns; i++) {
1562 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1563 if (ret) {
1564 wl1271_warning("Bad wowlan pattern %d", i);
1565 return ret;
1566 }
1567 }
1568
1569 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1570 if (ret)
1571 goto out;
1572
1573 ret = wl1271_rx_filter_clear_all(wl);
1574 if (ret)
1575 goto out;
1576
1577 /* Translate WoWLAN patterns into filters */
1578 for (i = 0; i < wow->n_patterns; i++) {
1579 struct cfg80211_pkt_pattern *p;
1580 struct wl12xx_rx_filter *filter = NULL;
1581
1582 p = &wow->patterns[i];
1583
1584 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1585 if (ret) {
1586 wl1271_warning("Failed to create an RX filter from "
1587 "wowlan pattern %d", i);
1588 goto out;
1589 }
1590
1591 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1592
1593 wl1271_rx_filter_free(filter);
1594 if (ret)
1595 goto out;
1596 }
1597
1598 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1599
1600 out:
1601 return ret;
1602 }
1603
1604 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1605 struct wl12xx_vif *wlvif,
1606 struct cfg80211_wowlan *wow)
1607 {
1608 int ret = 0;
1609
1610 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1611 goto out;
1612
1613 ret = wl1271_configure_wowlan(wl, wow);
1614 if (ret < 0)
1615 goto out;
1616
1617 if ((wl->conf.conn.suspend_wake_up_event ==
1618 wl->conf.conn.wake_up_event) &&
1619 (wl->conf.conn.suspend_listen_interval ==
1620 wl->conf.conn.listen_interval))
1621 goto out;
1622
1623 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1624 wl->conf.conn.suspend_wake_up_event,
1625 wl->conf.conn.suspend_listen_interval);
1626
1627 if (ret < 0)
1628 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1629 out:
1630 return ret;
1631
1632 }
1633
1634 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1635 struct wl12xx_vif *wlvif,
1636 struct cfg80211_wowlan *wow)
1637 {
1638 int ret = 0;
1639
1640 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1641 goto out;
1642
1643 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1644 if (ret < 0)
1645 goto out;
1646
1647 ret = wl1271_configure_wowlan(wl, wow);
1648 if (ret < 0)
1649 goto out;
1650
1651 out:
1652 return ret;
1653
1654 }
1655
1656 static int wl1271_configure_suspend(struct wl1271 *wl,
1657 struct wl12xx_vif *wlvif,
1658 struct cfg80211_wowlan *wow)
1659 {
1660 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1661 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1662 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1663 return wl1271_configure_suspend_ap(wl, wlvif, wow);
1664 return 0;
1665 }
1666
1667 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1668 {
1669 int ret = 0;
1670 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1671 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1672
1673 if ((!is_ap) && (!is_sta))
1674 return;
1675
1676 if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1677 (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1678 return;
1679
1680 wl1271_configure_wowlan(wl, NULL);
1681
1682 if (is_sta) {
1683 if ((wl->conf.conn.suspend_wake_up_event ==
1684 wl->conf.conn.wake_up_event) &&
1685 (wl->conf.conn.suspend_listen_interval ==
1686 wl->conf.conn.listen_interval))
1687 return;
1688
1689 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1690 wl->conf.conn.wake_up_event,
1691 wl->conf.conn.listen_interval);
1692
1693 if (ret < 0)
1694 wl1271_error("resume: wake up conditions failed: %d",
1695 ret);
1696
1697 } else if (is_ap) {
1698 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1699 }
1700 }
1701
1702 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1703 struct cfg80211_wowlan *wow)
1704 {
1705 struct wl1271 *wl = hw->priv;
1706 struct wl12xx_vif *wlvif;
1707 int ret;
1708
1709 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1710 WARN_ON(!wow);
1711
1712 /* we want to perform the recovery before suspending */
1713 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1714 wl1271_warning("postponing suspend to perform recovery");
1715 return -EBUSY;
1716 }
1717
1718 wl1271_tx_flush(wl);
1719
1720 mutex_lock(&wl->mutex);
1721
1722 ret = wl1271_ps_elp_wakeup(wl);
1723 if (ret < 0) {
1724 mutex_unlock(&wl->mutex);
1725 return ret;
1726 }
1727
1728 wl->wow_enabled = true;
1729 wl12xx_for_each_wlvif(wl, wlvif) {
1730 if (wlcore_is_p2p_mgmt(wlvif))
1731 continue;
1732
1733 ret = wl1271_configure_suspend(wl, wlvif, wow);
1734 if (ret < 0) {
1735 mutex_unlock(&wl->mutex);
1736 wl1271_warning("couldn't prepare device to suspend");
1737 return ret;
1738 }
1739 }
1740
1741 /* disable fast link flow control notifications from FW */
1742 ret = wlcore_hw_interrupt_notify(wl, false);
1743 if (ret < 0)
1744 goto out_sleep;
1745
1746 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1747 ret = wlcore_hw_rx_ba_filter(wl,
1748 !!wl->conf.conn.suspend_rx_ba_activity);
1749 if (ret < 0)
1750 goto out_sleep;
1751
1752 out_sleep:
1753 wl1271_ps_elp_sleep(wl);
1754 mutex_unlock(&wl->mutex);
1755
1756 if (ret < 0) {
1757 wl1271_warning("couldn't prepare device to suspend");
1758 return ret;
1759 }
1760
1761 /* flush any remaining work */
1762 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1763
1764 /*
1765 * disable and re-enable interrupts in order to flush
1766 * the threaded_irq
1767 */
1768 wlcore_disable_interrupts(wl);
1769
1770 /*
1771 * set suspended flag to avoid triggering a new threaded_irq
1772 * work. no need for spinlock as interrupts are disabled.
1773 */
1774 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1775
1776 wlcore_enable_interrupts(wl);
1777 flush_work(&wl->tx_work);
1778 flush_delayed_work(&wl->elp_work);
1779
1780 /*
1781 * Cancel the watchdog even if above tx_flush failed. We will detect
1782 * it on resume anyway.
1783 */
1784 cancel_delayed_work(&wl->tx_watchdog_work);
1785
1786 return 0;
1787 }
1788
1789 static int wl1271_op_resume(struct ieee80211_hw *hw)
1790 {
1791 struct wl1271 *wl = hw->priv;
1792 struct wl12xx_vif *wlvif;
1793 unsigned long flags;
1794 bool run_irq_work = false, pending_recovery;
1795 int ret;
1796
1797 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1798 wl->wow_enabled);
1799 WARN_ON(!wl->wow_enabled);
1800
1801 /*
1802 * re-enable irq_work enqueuing, and call irq_work directly if
1803 * there is a pending work.
1804 */
1805 spin_lock_irqsave(&wl->wl_lock, flags);
1806 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1807 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1808 run_irq_work = true;
1809 spin_unlock_irqrestore(&wl->wl_lock, flags);
1810
1811 mutex_lock(&wl->mutex);
1812
1813 /* test the recovery flag before calling any SDIO functions */
1814 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1815 &wl->flags);
1816
1817 if (run_irq_work) {
1818 wl1271_debug(DEBUG_MAC80211,
1819 "run postponed irq_work directly");
1820
1821 /* don't talk to the HW if recovery is pending */
1822 if (!pending_recovery) {
1823 ret = wlcore_irq_locked(wl);
1824 if (ret)
1825 wl12xx_queue_recovery_work(wl);
1826 }
1827
1828 wlcore_enable_interrupts(wl);
1829 }
1830
1831 if (pending_recovery) {
1832 wl1271_warning("queuing forgotten recovery on resume");
1833 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1834 goto out_sleep;
1835 }
1836
1837 ret = wl1271_ps_elp_wakeup(wl);
1838 if (ret < 0)
1839 goto out;
1840
1841 wl12xx_for_each_wlvif(wl, wlvif) {
1842 if (wlcore_is_p2p_mgmt(wlvif))
1843 continue;
1844
1845 wl1271_configure_resume(wl, wlvif);
1846 }
1847
1848 ret = wlcore_hw_interrupt_notify(wl, true);
1849 if (ret < 0)
1850 goto out_sleep;
1851
1852 /* if filtering is enabled, configure the FW to drop all RX BA frames */
1853 ret = wlcore_hw_rx_ba_filter(wl, false);
1854 if (ret < 0)
1855 goto out_sleep;
1856
1857 out_sleep:
1858 wl1271_ps_elp_sleep(wl);
1859
1860 out:
1861 wl->wow_enabled = false;
1862
1863 /*
1864 * Set a flag to re-init the watchdog on the first Tx after resume.
1865 * That way we avoid possible conditions where Tx-complete interrupts
1866 * fail to arrive and we perform a spurious recovery.
1867 */
1868 set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1869 mutex_unlock(&wl->mutex);
1870
1871 return 0;
1872 }
1873 #endif
1874
1875 static int wl1271_op_start(struct ieee80211_hw *hw)
1876 {
1877 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1878
1879 /*
1880 * We have to delay the booting of the hardware because
1881 * we need to know the local MAC address before downloading and
1882 * initializing the firmware. The MAC address cannot be changed
1883 * after boot, and without the proper MAC address, the firmware
1884 * will not function properly.
1885 *
1886 * The MAC address is first known when the corresponding interface
1887 * is added. That is where we will initialize the hardware.
1888 */
1889
1890 return 0;
1891 }
1892
1893 static void wlcore_op_stop_locked(struct wl1271 *wl)
1894 {
1895 int i;
1896
1897 if (wl->state == WLCORE_STATE_OFF) {
1898 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1899 &wl->flags))
1900 wlcore_enable_interrupts(wl);
1901
1902 return;
1903 }
1904
1905 /*
1906 * this must be before the cancel_work calls below, so that the work
1907 * functions don't perform further work.
1908 */
1909 wl->state = WLCORE_STATE_OFF;
1910
1911 /*
1912 * Use the nosync variant to disable interrupts, so the mutex could be
1913 * held while doing so without deadlocking.
1914 */
1915 wlcore_disable_interrupts_nosync(wl);
1916
1917 mutex_unlock(&wl->mutex);
1918
1919 wlcore_synchronize_interrupts(wl);
1920 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1921 cancel_work_sync(&wl->recovery_work);
1922 wl1271_flush_deferred_work(wl);
1923 cancel_delayed_work_sync(&wl->scan_complete_work);
1924 cancel_work_sync(&wl->netstack_work);
1925 cancel_work_sync(&wl->tx_work);
1926 cancel_delayed_work_sync(&wl->elp_work);
1927 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1928
1929 /* let's notify MAC80211 about the remaining pending TX frames */
1930 mutex_lock(&wl->mutex);
1931 wl12xx_tx_reset(wl);
1932
1933 wl1271_power_off(wl);
1934 /*
1935 * In case a recovery was scheduled, interrupts were disabled to avoid
1936 * an interrupt storm. Now that the power is down, it is safe to
1937 * re-enable interrupts to balance the disable depth
1938 */
1939 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1940 wlcore_enable_interrupts(wl);
1941
1942 wl->band = NL80211_BAND_2GHZ;
1943
1944 wl->rx_counter = 0;
1945 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1946 wl->channel_type = NL80211_CHAN_NO_HT;
1947 wl->tx_blocks_available = 0;
1948 wl->tx_allocated_blocks = 0;
1949 wl->tx_results_count = 0;
1950 wl->tx_packets_count = 0;
1951 wl->time_offset = 0;
1952 wl->ap_fw_ps_map = 0;
1953 wl->ap_ps_map = 0;
1954 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1955 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1956 memset(wl->links_map, 0, sizeof(wl->links_map));
1957 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1958 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1959 memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1960 wl->active_sta_count = 0;
1961 wl->active_link_count = 0;
1962
1963 /* The system link is always allocated */
1964 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1965 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1966 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1967
1968 /*
1969 * this is performed after the cancel_work calls and the associated
1970 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1971 * get executed before all these vars have been reset.
1972 */
1973 wl->flags = 0;
1974
1975 wl->tx_blocks_freed = 0;
1976
1977 for (i = 0; i < NUM_TX_QUEUES; i++) {
1978 wl->tx_pkts_freed[i] = 0;
1979 wl->tx_allocated_pkts[i] = 0;
1980 }
1981
1982 wl1271_debugfs_reset(wl);
1983
1984 kfree(wl->raw_fw_status);
1985 wl->raw_fw_status = NULL;
1986 kfree(wl->fw_status);
1987 wl->fw_status = NULL;
1988 kfree(wl->tx_res_if);
1989 wl->tx_res_if = NULL;
1990 kfree(wl->target_mem_map);
1991 wl->target_mem_map = NULL;
1992
1993 /*
1994 * FW channels must be re-calibrated after recovery;
1995 * save the current Reg-Domain channel configuration and clear it.
1996 */
1997 memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
1998 sizeof(wl->reg_ch_conf_pending));
1999 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2000 }
2001
2002 static void wlcore_op_stop(struct ieee80211_hw *hw)
2003 {
2004 struct wl1271 *wl = hw->priv;
2005
2006 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2007
2008 mutex_lock(&wl->mutex);
2009
2010 wlcore_op_stop_locked(wl);
2011
2012 mutex_unlock(&wl->mutex);
2013 }
2014
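/*
 * Delayed work that fires when a channel switch did not complete in
 * time: report the failure to mac80211 and ask the firmware to stop
 * the switch.
 */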
2015 static void wlcore_channel_switch_work(struct work_struct *work)
2016 {
2017 struct delayed_work *dwork;
2018 struct wl1271 *wl;
2019 struct ieee80211_vif *vif;
2020 struct wl12xx_vif *wlvif;
2021 int ret;
2022
2023 dwork = to_delayed_work(work);
2024 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2025 wl = wlvif->wl;
2026
2027 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2028
2029 mutex_lock(&wl->mutex);
2030
2031 if (unlikely(wl->state != WLCORE_STATE_ON))
2032 goto out;
2033
2034 /* check the channel switch is still ongoing */
2035 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2036 goto out;
2037
2038 vif = wl12xx_wlvif_to_vif(wlvif);
2039 ieee80211_chswitch_done(vif, false);
2040
2041 ret = wl1271_ps_elp_wakeup(wl);
2042 if (ret < 0)
2043 goto out;
2044
2045 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2046
2047 wl1271_ps_elp_sleep(wl);
2048 out:
2049 mutex_unlock(&wl->mutex);
2050 }
2051
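/*
 * Delayed work that reports a connection loss to mac80211, provided
 * the vif is still marked as associated when it runs.
 */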
2052 static void wlcore_connection_loss_work(struct work_struct *work)
2053 {
2054 struct delayed_work *dwork;
2055 struct wl1271 *wl;
2056 struct ieee80211_vif *vif;
2057 struct wl12xx_vif *wlvif;
2058
2059 dwork = to_delayed_work(work);
2060 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2061 wl = wlvif->wl;
2062
2063 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2064
2065 mutex_lock(&wl->mutex);
2066
2067 if (unlikely(wl->state != WLCORE_STATE_ON))
2068 goto out;
2069
2070 /* Call mac80211 connection loss */
2071 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2072 goto out;
2073
2074 vif = wl12xx_wlvif_to_vif(wlvif);
2075 ieee80211_connection_loss(vif);
2076 out:
2077 mutex_unlock(&wl->mutex);
2078 }
2079
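/*
 * Delayed work for a pending authentication: once the ROC timeout has
 * really elapsed since the last auth reply, wake the chip and cancel
 * the ROC kept for the connecting station.
 */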
2080 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2081 {
2082 struct delayed_work *dwork;
2083 struct wl1271 *wl;
2084 struct wl12xx_vif *wlvif;
2085 unsigned long time_spare;
2086 int ret;
2087
2088 dwork = to_delayed_work(work);
2089 wlvif = container_of(dwork, struct wl12xx_vif,
2090 pending_auth_complete_work);
2091 wl = wlvif->wl;
2092
2093 mutex_lock(&wl->mutex);
2094
2095 if (unlikely(wl->state != WLCORE_STATE_ON))
2096 goto out;
2097
2098 /*
2099 * Make sure a second really passed since the last auth reply. Maybe
2100 * a second auth reply arrived while we were stuck on the mutex.
2101 * Check for a little less than the timeout to protect from scheduler
2102 * irregularities.
2103 */
2104 time_spare = jiffies +
2105 msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2106 if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2107 goto out;
2108
2109 ret = wl1271_ps_elp_wakeup(wl);
2110 if (ret < 0)
2111 goto out;
2112
2113 /* cancel the ROC if active */
2114 wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2115
2116 wl1271_ps_elp_sleep(wl);
2117 out:
2118 mutex_unlock(&wl->mutex);
2119 }
2120
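/*
 * Bitmap allocators for firmware rate-policy slots: hand out a free
 * index from rate_policies_map and return it when the vif is torn
 * down.
 */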
2121 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2122 {
2123 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2124 WL12XX_MAX_RATE_POLICIES);
2125 if (policy >= WL12XX_MAX_RATE_POLICIES)
2126 return -EBUSY;
2127
2128 __set_bit(policy, wl->rate_policies_map);
2129 *idx = policy;
2130 return 0;
2131 }
2132
2133 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2134 {
2135 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2136 return;
2137
2138 __clear_bit(*idx, wl->rate_policies_map);
2139 *idx = WL12XX_MAX_RATE_POLICIES;
2140 }
2141
2142 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2143 {
2144 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2145 WLCORE_MAX_KLV_TEMPLATES);
2146 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2147 return -EBUSY;
2148
2149 __set_bit(policy, wl->klv_templates_map);
2150 *idx = policy;
2151 return 0;
2152 }
2153
2154 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2155 {
2156 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2157 return;
2158
2159 __clear_bit(*idx, wl->klv_templates_map);
2160 *idx = WLCORE_MAX_KLV_TEMPLATES;
2161 }
2162
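/* Map the vif's bss_type (and p2p/mesh flavour) to a firmware role type. */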
2163 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2164 {
2165 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2166
2167 switch (wlvif->bss_type) {
2168 case BSS_TYPE_AP_BSS:
2169 if (wlvif->p2p)
2170 return WL1271_ROLE_P2P_GO;
2171 else if (ieee80211_vif_is_mesh(vif))
2172 return WL1271_ROLE_MESH_POINT;
2173 else
2174 return WL1271_ROLE_AP;
2175
2176 case BSS_TYPE_STA_BSS:
2177 if (wlvif->p2p)
2178 return WL1271_ROLE_P2P_CL;
2179 else
2180 return WL1271_ROLE_STA;
2181
2182 case BSS_TYPE_IBSS:
2183 return WL1271_ROLE_IBSS;
2184
2185 default:
2186 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2187 }
2188 return WL12XX_INVALID_ROLE_TYPE;
2189 }
2190
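/*
 * Reset the per-vif driver state for a newly added interface: derive
 * the bss_type from the mac80211 interface type, allocate rate
 * policies and a keep-alive template, copy the global settings from wl
 * and initialize the vif's work items.
 */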
2191 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2192 {
2193 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2194 int i;
2195
2196 /* clear everything but the persistent data */
2197 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2198
2199 switch (ieee80211_vif_type_p2p(vif)) {
2200 case NL80211_IFTYPE_P2P_CLIENT:
2201 wlvif->p2p = 1;
2202 /* fall-through */
2203 case NL80211_IFTYPE_STATION:
2204 case NL80211_IFTYPE_P2P_DEVICE:
2205 wlvif->bss_type = BSS_TYPE_STA_BSS;
2206 break;
2207 case NL80211_IFTYPE_ADHOC:
2208 wlvif->bss_type = BSS_TYPE_IBSS;
2209 break;
2210 case NL80211_IFTYPE_P2P_GO:
2211 wlvif->p2p = 1;
2212 /* fall-through */
2213 case NL80211_IFTYPE_AP:
2214 case NL80211_IFTYPE_MESH_POINT:
2215 wlvif->bss_type = BSS_TYPE_AP_BSS;
2216 break;
2217 default:
2218 wlvif->bss_type = MAX_BSS_TYPE;
2219 return -EOPNOTSUPP;
2220 }
2221
2222 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2223 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2224 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2225
2226 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2227 wlvif->bss_type == BSS_TYPE_IBSS) {
2228 /* init sta/ibss data */
2229 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2230 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2231 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2232 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2233 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2234 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2235 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2236 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2237 } else {
2238 /* init ap data */
2239 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2240 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2241 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2242 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2243 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2244 wl12xx_allocate_rate_policy(wl,
2245 &wlvif->ap.ucast_rate_idx[i]);
2246 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2247 /*
2248 * TODO: check if basic_rate shouldn't be
2249 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2250 * instead (the same thing for STA above).
2251 */
2252 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2253 /* TODO: this seems to be used only for STA, check it */
2254 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2255 }
2256
2257 wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2258 wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2259 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2260
2261 /*
2262 * mac80211 configures some values globally, while we treat them
2263 * per-interface. thus, on init, we have to copy them from wl
2264 */
2265 wlvif->band = wl->band;
2266 wlvif->channel = wl->channel;
2267 wlvif->power_level = wl->power_level;
2268 wlvif->channel_type = wl->channel_type;
2269
2270 INIT_WORK(&wlvif->rx_streaming_enable_work,
2271 wl1271_rx_streaming_enable_work);
2272 INIT_WORK(&wlvif->rx_streaming_disable_work,
2273 wl1271_rx_streaming_disable_work);
2274 INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2275 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2276 wlcore_channel_switch_work);
2277 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2278 wlcore_connection_loss_work);
2279 INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2280 wlcore_pending_auth_complete_work);
2281 INIT_LIST_HEAD(&wlvif->list);
2282
2283 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2284 (unsigned long) wlvif);
2285 return 0;
2286 }
2287
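/*
 * Power up the chip and boot the firmware, retrying up to
 * WL1271_BOOT_RETRIES times, then publish the firmware version and
 * move the core to WLCORE_STATE_ON.
 */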
2288 static int wl12xx_init_fw(struct wl1271 *wl)
2289 {
2290 int retries = WL1271_BOOT_RETRIES;
2291 bool booted = false;
2292 struct wiphy *wiphy = wl->hw->wiphy;
2293 int ret;
2294
2295 while (retries) {
2296 retries--;
2297 ret = wl12xx_chip_wakeup(wl, false);
2298 if (ret < 0)
2299 goto power_off;
2300
2301 ret = wl->ops->boot(wl);
2302 if (ret < 0)
2303 goto power_off;
2304
2305 ret = wl1271_hw_init(wl);
2306 if (ret < 0)
2307 goto irq_disable;
2308
2309 booted = true;
2310 break;
2311
2312 irq_disable:
2313 mutex_unlock(&wl->mutex);
2314 /* Unlocking the mutex in the middle of handling is
2315 inherently unsafe. In this case we deem it safe to do so,
2316 because we need to let any possibly pending IRQ out of
2317 the system (and while we are WLCORE_STATE_OFF the IRQ
2318 work function will not do anything). Also, any other
2319 possible concurrent operations will fail due to the
2320 current state, hence the wl1271 struct should be safe. */
2321 wlcore_disable_interrupts(wl);
2322 wl1271_flush_deferred_work(wl);
2323 cancel_work_sync(&wl->netstack_work);
2324 mutex_lock(&wl->mutex);
2325 power_off:
2326 wl1271_power_off(wl);
2327 }
2328
2329 if (!booted) {
2330 wl1271_error("firmware boot failed despite %d retries",
2331 WL1271_BOOT_RETRIES);
2332 goto out;
2333 }
2334
2335 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2336
2337 /* update hw/fw version info in wiphy struct */
2338 wiphy->hw_version = wl->chip.id;
2339 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2340 sizeof(wiphy->fw_version));
2341
2342 /*
2343 * Now we know if 11a is supported (info from the NVS), so disable
2344 * 11a channels if not supported
2345 */
2346 if (!wl->enable_11a)
2347 wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2348
2349 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2350 wl->enable_11a ? "" : "not ");
2351
2352 wl->state = WLCORE_STATE_ON;
2353 out:
2354 return ret;
2355 }
2356
2357 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2358 {
2359 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2360 }
2361
2362 /*
2363 * Check whether a fw switch (i.e. moving from one loaded
2364 * fw to another) is needed. This function is also responsible
2365 * for updating wl->last_vif_count, so it must be called before
2366 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2367 * will be used).
2368 */
2369 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2370 struct vif_counter_data vif_counter_data,
2371 bool add)
2372 {
2373 enum wl12xx_fw_type current_fw = wl->fw_type;
2374 u8 vif_count = vif_counter_data.counter;
2375
2376 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2377 return false;
2378
2379 /* increase the vif count if this is a new vif */
2380 if (add && !vif_counter_data.cur_vif_running)
2381 vif_count++;
2382
2383 wl->last_vif_count = vif_count;
2384
2385 /* no need for fw change if the device is OFF */
2386 if (wl->state == WLCORE_STATE_OFF)
2387 return false;
2388
2389 /* no need for fw change if a single fw is used */
2390 if (!wl->mr_fw_name)
2391 return false;
2392
2393 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2394 return true;
2395 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2396 return true;
2397
2398 return false;
2399 }
2400
2401 /*
2402 * Enter "forced psm". Make sure the sta is in psm against the ap,
2403 * so the connection is more likely to survive the fw switch.
2404 */
2405 static void wl12xx_force_active_psm(struct wl1271 *wl)
2406 {
2407 struct wl12xx_vif *wlvif;
2408
2409 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2410 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2411 }
2412 }
2413
2414 struct wlcore_hw_queue_iter_data {
2415 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2416 /* current vif */
2417 struct ieee80211_vif *vif;
2418 /* is the current vif among those iterated */
2419 bool cur_running;
2420 };
2421
2422 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2423 struct ieee80211_vif *vif)
2424 {
2425 struct wlcore_hw_queue_iter_data *iter_data = data;
2426
2427 if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2428 WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2429 return;
2430
2431 if (iter_data->cur_running || vif == iter_data->vif) {
2432 iter_data->cur_running = true;
2433 return;
2434 }
2435
2436 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2437 }
2438
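/*
 * Pick a free block of mac80211 hw queues for this vif: iterate the
 * active interfaces to mark the queue bases already in use, or reuse
 * the previously assigned base on resume/recovery.
 */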
2439 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2440 struct wl12xx_vif *wlvif)
2441 {
2442 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2443 struct wlcore_hw_queue_iter_data iter_data = {};
2444 int i, q_base;
2445
2446 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2447 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2448 return 0;
2449 }
2450
2451 iter_data.vif = vif;
2452
2453 /* mark all bits taken by active interfaces */
2454 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2455 IEEE80211_IFACE_ITER_RESUME_ALL,
2456 wlcore_hw_queue_iter, &iter_data);
2457
2458 /* the current vif is already running in mac80211 (resume/recovery) */
2459 if (iter_data.cur_running) {
2460 wlvif->hw_queue_base = vif->hw_queue[0];
2461 wl1271_debug(DEBUG_MAC80211,
2462 "using pre-allocated hw queue base %d",
2463 wlvif->hw_queue_base);
2464
2465 /* the interface might have changed type */
2466 goto adjust_cab_queue;
2467 }
2468
2469 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2470 WLCORE_NUM_MAC_ADDRESSES);
2471 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2472 return -EBUSY;
2473
2474 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2475 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2476 wlvif->hw_queue_base);
2477
2478 for (i = 0; i < NUM_TX_QUEUES; i++) {
2479 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2480 /* register hw queues in mac80211 */
2481 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2482 }
2483
2484 adjust_cab_queue:
2485 /* the last places are reserved for cab queues per interface */
2486 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2487 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2488 wlvif->hw_queue_base / NUM_TX_QUEUES;
2489 else
2490 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2491
2492 return 0;
2493 }
2494
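/*
 * mac80211 add_interface callback: initialize the per-vif data,
 * allocate a hw queue base, boot the firmware on first use, enable the
 * matching firmware role and account the new AP/STA interface. A
 * needed firmware switch (single-role <-> multi-role) is handled by
 * forcing a recovery instead.
 */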
2495 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2496 struct ieee80211_vif *vif)
2497 {
2498 struct wl1271 *wl = hw->priv;
2499 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2500 struct vif_counter_data vif_count;
2501 int ret = 0;
2502 u8 role_type;
2503
2504 if (wl->plt) {
2505 wl1271_error("Adding Interface not allowed while in PLT mode");
2506 return -EBUSY;
2507 }
2508
2509 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2510 IEEE80211_VIF_SUPPORTS_UAPSD |
2511 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2512
2513 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2514 ieee80211_vif_type_p2p(vif), vif->addr);
2515
2516 wl12xx_get_vif_count(hw, vif, &vif_count);
2517
2518 mutex_lock(&wl->mutex);
2519 ret = wl1271_ps_elp_wakeup(wl);
2520 if (ret < 0)
2521 goto out_unlock;
2522
2523 /*
2524 * in some rare corner-case HW recovery scenarios it's possible to
2525 * get here before __wl1271_op_remove_interface has completed, so
2526 * opt out if that is the case.
2527 */
2528 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2529 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2530 ret = -EBUSY;
2531 goto out;
2532 }
2533
2534
2535 ret = wl12xx_init_vif_data(wl, vif);
2536 if (ret < 0)
2537 goto out;
2538
2539 wlvif->wl = wl;
2540 role_type = wl12xx_get_role_type(wl, wlvif);
2541 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2542 ret = -EINVAL;
2543 goto out;
2544 }
2545
2546 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2547 if (ret < 0)
2548 goto out;
2549
2550 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2551 wl12xx_force_active_psm(wl);
2552 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2553 mutex_unlock(&wl->mutex);
2554 wl1271_recovery_work(&wl->recovery_work);
2555 return 0;
2556 }
2557
2558 /*
2559 * TODO: once the nvs issue is solved, move this block
2560 * to start(), and make sure here the driver is ON.
2561 */
2562 if (wl->state == WLCORE_STATE_OFF) {
2563 /*
2564 * we still need this in order to configure the fw
2565 * while uploading the nvs
2566 */
2567 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2568
2569 ret = wl12xx_init_fw(wl);
2570 if (ret < 0)
2571 goto out;
2572 }
2573
2574 if (!wlcore_is_p2p_mgmt(wlvif)) {
2575 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2576 role_type, &wlvif->role_id);
2577 if (ret < 0)
2578 goto out;
2579
2580 ret = wl1271_init_vif_specific(wl, vif);
2581 if (ret < 0)
2582 goto out;
2583
2584 } else {
2585 ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2586 &wlvif->dev_role_id);
2587 if (ret < 0)
2588 goto out;
2589
2590 /* needed mainly for configuring rate policies */
2591 ret = wl1271_sta_hw_init(wl, wlvif);
2592 if (ret < 0)
2593 goto out;
2594 }
2595
2596 list_add(&wlvif->list, &wl->wlvif_list);
2597 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2598
2599 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2600 wl->ap_count++;
2601 else
2602 wl->sta_count++;
2603 out:
2604 wl1271_ps_elp_sleep(wl);
2605 out_unlock:
2606 mutex_unlock(&wl->mutex);
2607
2608 return ret;
2609 }
2610
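/*
 * Tear down a vif: abort any scan or ROC it owns, disable its firmware
 * roles, reset its TX state, free its rate policies, keys and
 * templates, cancel its works and update the AP/STA counters and the
 * sleep authorization.
 */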
2611 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2612 struct ieee80211_vif *vif,
2613 bool reset_tx_queues)
2614 {
2615 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2616 int i, ret;
2617 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2618
2619 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2620
2621 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2622 return;
2623
2624 /* because of hardware recovery, we may get here twice */
2625 if (wl->state == WLCORE_STATE_OFF)
2626 return;
2627
2628 wl1271_info("down");
2629
2630 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2631 wl->scan_wlvif == wlvif) {
2632 struct cfg80211_scan_info info = {
2633 .aborted = true,
2634 };
2635
2636 /*
2637 * Rearm the tx watchdog just before idling scan. This
2638 * prevents just-finished scans from triggering the watchdog
2639 */
2640 wl12xx_rearm_tx_watchdog_locked(wl);
2641
2642 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2643 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2644 wl->scan_wlvif = NULL;
2645 wl->scan.req = NULL;
2646 ieee80211_scan_completed(wl->hw, &info);
2647 }
2648
2649 if (wl->sched_vif == wlvif)
2650 wl->sched_vif = NULL;
2651
2652 if (wl->roc_vif == vif) {
2653 wl->roc_vif = NULL;
2654 ieee80211_remain_on_channel_expired(wl->hw);
2655 }
2656
2657 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2658 /* disable active roles */
2659 ret = wl1271_ps_elp_wakeup(wl);
2660 if (ret < 0)
2661 goto deinit;
2662
2663 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2664 wlvif->bss_type == BSS_TYPE_IBSS) {
2665 if (wl12xx_dev_role_started(wlvif))
2666 wl12xx_stop_dev(wl, wlvif);
2667 }
2668
2669 if (!wlcore_is_p2p_mgmt(wlvif)) {
2670 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2671 if (ret < 0)
2672 goto deinit;
2673 } else {
2674 ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2675 if (ret < 0)
2676 goto deinit;
2677 }
2678
2679 wl1271_ps_elp_sleep(wl);
2680 }
2681 deinit:
2682 wl12xx_tx_reset_wlvif(wl, wlvif);
2683
2684 /* clear all hlids (except system_hlid) */
2685 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2686
2687 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2688 wlvif->bss_type == BSS_TYPE_IBSS) {
2689 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2690 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2691 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2692 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2693 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2694 } else {
2695 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2696 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2697 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2698 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2699 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2700 wl12xx_free_rate_policy(wl,
2701 &wlvif->ap.ucast_rate_idx[i]);
2702 wl1271_free_ap_keys(wl, wlvif);
2703 }
2704
2705 dev_kfree_skb(wlvif->probereq);
2706 wlvif->probereq = NULL;
2707 if (wl->last_wlvif == wlvif)
2708 wl->last_wlvif = NULL;
2709 list_del(&wlvif->list);
2710 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2711 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2712 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2713
2714 if (is_ap)
2715 wl->ap_count--;
2716 else
2717 wl->sta_count--;
2718
2719 /*
2720 * Last AP is gone but stations remain: configure sleep auth according to STA.
2721 * Don't do this on unintended recovery.
2722 */
2723 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2724 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2725 goto unlock;
2726
2727 if (wl->ap_count == 0 && is_ap) {
2728 /* mask ap events */
2729 wl->event_mask &= ~wl->ap_event_mask;
2730 wl1271_event_unmask(wl);
2731 }
2732
2733 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2734 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2735 /* Configure for power according to debugfs */
2736 if (sta_auth != WL1271_PSM_ILLEGAL)
2737 wl1271_acx_sleep_auth(wl, sta_auth);
2738 /* Configure for ELP power saving */
2739 else
2740 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2741 }
2742
2743 unlock:
2744 mutex_unlock(&wl->mutex);
2745
2746 del_timer_sync(&wlvif->rx_streaming_timer);
2747 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2748 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2749 cancel_work_sync(&wlvif->rc_update_work);
2750 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2751 cancel_delayed_work_sync(&wlvif->channel_switch_work);
2752 cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2753
2754 mutex_lock(&wl->mutex);
2755 }
2756
2757 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2758 struct ieee80211_vif *vif)
2759 {
2760 struct wl1271 *wl = hw->priv;
2761 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2762 struct wl12xx_vif *iter;
2763 struct vif_counter_data vif_count;
2764
2765 wl12xx_get_vif_count(hw, vif, &vif_count);
2766 mutex_lock(&wl->mutex);
2767
2768 if (wl->state == WLCORE_STATE_OFF ||
2769 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2770 goto out;
2771
2772 /*
2773 * wl->vif can be null here if someone shuts down the interface
2774 * just when hardware recovery has been started.
2775 */
2776 wl12xx_for_each_wlvif(wl, iter) {
2777 if (iter != wlvif)
2778 continue;
2779
2780 __wl1271_op_remove_interface(wl, vif, true);
2781 break;
2782 }
2783 WARN_ON(iter != wlvif);
2784 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2785 wl12xx_force_active_psm(wl);
2786 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2787 wl12xx_queue_recovery_work(wl);
2788 }
2789 out:
2790 mutex_unlock(&wl->mutex);
2791 }
2792
2793 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2794 struct ieee80211_vif *vif,
2795 enum nl80211_iftype new_type, bool p2p)
2796 {
2797 struct wl1271 *wl = hw->priv;
2798 int ret;
2799
2800 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2801 wl1271_op_remove_interface(hw, vif);
2802
2803 vif->type = new_type;
2804 vif->p2p = p2p;
2805 ret = wl1271_op_add_interface(hw, vif);
2806
2807 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2808 return ret;
2809 }
2810
2811 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2812 {
2813 int ret;
2814 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2815
2816 /*
2817 * One of the side effects of the JOIN command is that it clears
2818 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2819 * to a WPA/WPA2 access point will therefore kill the data-path.
2820 * Currently the only valid scenario for JOIN during association
2821 * is on roaming, in which case we will also be given new keys.
2822 * Keep the below message for now, unless it starts bothering
2823 * users who really like to roam a lot :)
2824 */
2825 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2826 wl1271_info("JOIN while associated.");
2827
2828 /* clear encryption type */
2829 wlvif->encryption_type = KEY_NONE;
2830
2831 if (is_ibss)
2832 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2833 else {
2834 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2835 /*
2836 * TODO: this is an ugly workaround for wl12xx fw
2837 * bug - we are not able to tx/rx after the first
2838 * start_sta, so make dummy start+stop calls,
2839 * and then call start_sta again.
2840 * this should be fixed in the fw.
2841 */
2842 wl12xx_cmd_role_start_sta(wl, wlvif);
2843 wl12xx_cmd_role_stop_sta(wl, wlvif);
2844 }
2845
2846 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2847 }
2848
2849 return ret;
2850 }
2851
2852 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2853 int offset)
2854 {
2855 u8 ssid_len;
2856 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2857 skb->len - offset);
2858
2859 if (!ptr) {
2860 wl1271_error("No SSID in IEs!");
2861 return -ENOENT;
2862 }
2863
2864 ssid_len = ptr[1];
2865 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2866 wl1271_error("SSID is too long!");
2867 return -EINVAL;
2868 }
2869
2870 wlvif->ssid_len = ssid_len;
2871 memcpy(wlvif->ssid, ptr+2, ssid_len);
2872 return 0;
2873 }
2874
2875 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2876 {
2877 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2878 struct sk_buff *skb;
2879 int ieoffset;
2880
2881 /* we currently only support setting the ssid from the ap probe req */
2882 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2883 return -EINVAL;
2884
2885 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2886 if (!skb)
2887 return -EINVAL;
2888
2889 ieoffset = offsetof(struct ieee80211_mgmt,
2890 u.probe_req.variable);
2891 wl1271_ssid_set(wlvif, skb, ieoffset);
2892 dev_kfree_skb(skb);
2893
2894 return 0;
2895 }
2896
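/*
 * Apply the association state reported by mac80211: cache the AID,
 * beacon interval and channel type, build the ps-poll and AP probe
 * request templates, enable connection monitoring and keep-alive, and
 * program the station rate policy.
 */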
2897 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2898 struct ieee80211_bss_conf *bss_conf,
2899 u32 sta_rate_set)
2900 {
2901 int ieoffset;
2902 int ret;
2903
2904 wlvif->aid = bss_conf->aid;
2905 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2906 wlvif->beacon_int = bss_conf->beacon_int;
2907 wlvif->wmm_enabled = bss_conf->qos;
2908
2909 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2910
2911 /*
2912 * with wl1271, we don't need to update the
2913 * beacon_int and dtim_period, because the firmware
2914 * updates them by itself when the first beacon is
2915 * received after a join.
2916 */
2917 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2918 if (ret < 0)
2919 return ret;
2920
2921 /*
2922 * Get a template for hardware connection maintenance
2923 */
2924 dev_kfree_skb(wlvif->probereq);
2925 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2926 wlvif,
2927 NULL);
2928 ieoffset = offsetof(struct ieee80211_mgmt,
2929 u.probe_req.variable);
2930 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2931
2932 /* enable the connection monitoring feature */
2933 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2934 if (ret < 0)
2935 return ret;
2936
2937 /*
2938 * The join command disables the keep-alive mode, shuts down its process,
2939 * and also clears the template config, so we need to reset it all after
2940 * the join. The acx_aid starts the keep-alive process, and the order
2941 * of the commands below is relevant.
2942 */
2943 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2944 if (ret < 0)
2945 return ret;
2946
2947 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2948 if (ret < 0)
2949 return ret;
2950
2951 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2952 if (ret < 0)
2953 return ret;
2954
2955 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2956 wlvif->sta.klv_template_id,
2957 ACX_KEEP_ALIVE_TPL_VALID);
2958 if (ret < 0)
2959 return ret;
2960
2961 /*
2962 * The default fw psm configuration is AUTO, while mac80211 default
2963 * setting is off (ACTIVE), so sync the fw with the correct value.
2964 */
2965 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2966 if (ret < 0)
2967 return ret;
2968
2969 if (sta_rate_set) {
2970 wlvif->rate_set =
2971 wl1271_tx_enabled_rates_get(wl,
2972 sta_rate_set,
2973 wlvif->band);
2974 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2975 if (ret < 0)
2976 return ret;
2977 }
2978
2979 return ret;
2980 }
2981
2982 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2983 {
2984 int ret;
2985 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2986
2987 /* make sure we are associated (sta) */
2988 if (sta &&
2989 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2990 return false;
2991
2992 /* make sure we are joined (ibss) */
2993 if (!sta &&
2994 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2995 return false;
2996
2997 if (sta) {
2998 /* use defaults when not associated */
2999 wlvif->aid = 0;
3000
3001 /* free probe-request template */
3002 dev_kfree_skb(wlvif->probereq);
3003 wlvif->probereq = NULL;
3004
3005 /* disable connection monitor features */
3006 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3007 if (ret < 0)
3008 return ret;
3009
3010 /* Disable the keep-alive feature */
3011 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3012 if (ret < 0)
3013 return ret;
3014
3015 /* disable beacon filtering */
3016 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3017 if (ret < 0)
3018 return ret;
3019 }
3020
3021 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3022 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3023
3024 wl12xx_cmd_stop_channel_switch(wl, wlvif);
3025 ieee80211_chswitch_done(vif, false);
3026 cancel_delayed_work(&wlvif->channel_switch_work);
3027 }
3028
3029 /* invalidate keep-alive template */
3030 wl1271_acx_keep_alive_config(wl, wlvif,
3031 wlvif->sta.klv_template_id,
3032 ACX_KEEP_ALIVE_TPL_INVALID);
3033
3034 return 0;
3035 }
3036
3037 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3038 {
3039 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3040 wlvif->rate_set = wlvif->basic_rate_set;
3041 }
3042
3043 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3044 bool idle)
3045 {
3046 bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3047
3048 if (idle == cur_idle)
3049 return;
3050
3051 if (idle) {
3052 clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3053 } else {
3054 /* The current firmware only supports sched_scan in idle */
3055 if (wl->sched_vif == wlvif)
3056 wl->ops->sched_scan_stop(wl, wlvif);
3057
3058 set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3059 }
3060 }
3061
3062 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3063 struct ieee80211_conf *conf, u32 changed)
3064 {
3065 int ret;
3066
3067 if (wlcore_is_p2p_mgmt(wlvif))
3068 return 0;
3069
3070 if (conf->power_level != wlvif->power_level) {
3071 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3072 if (ret < 0)
3073 return ret;
3074
3075 wlvif->power_level = conf->power_level;
3076 }
3077
3078 return 0;
3079 }
3080
3081 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3082 {
3083 struct wl1271 *wl = hw->priv;
3084 struct wl12xx_vif *wlvif;
3085 struct ieee80211_conf *conf = &hw->conf;
3086 int ret = 0;
3087
3088 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3089 " changed 0x%x",
3090 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3091 conf->power_level,
3092 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3093 changed);
3094
3095 mutex_lock(&wl->mutex);
3096
3097 if (changed & IEEE80211_CONF_CHANGE_POWER)
3098 wl->power_level = conf->power_level;
3099
3100 if (unlikely(wl->state != WLCORE_STATE_ON))
3101 goto out;
3102
3103 ret = wl1271_ps_elp_wakeup(wl);
3104 if (ret < 0)
3105 goto out;
3106
3107 /* configure each interface */
3108 wl12xx_for_each_wlvif(wl, wlvif) {
3109 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3110 if (ret < 0)
3111 goto out_sleep;
3112 }
3113
3114 out_sleep:
3115 wl1271_ps_elp_sleep(wl);
3116
3117 out:
3118 mutex_unlock(&wl->mutex);
3119
3120 return ret;
3121 }
3122
3123 struct wl1271_filter_params {
3124 bool enabled;
3125 int mc_list_length;
3126 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3127 };
3128
3129 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3130 struct netdev_hw_addr_list *mc_list)
3131 {
3132 struct wl1271_filter_params *fp;
3133 struct netdev_hw_addr *ha;
3134
3135 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3136 if (!fp) {
3137 wl1271_error("Out of memory setting filters.");
3138 return 0;
3139 }
3140
3141 /* update multicast filtering parameters */
3142 fp->mc_list_length = 0;
3143 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3144 fp->enabled = false;
3145 } else {
3146 fp->enabled = true;
3147 netdev_hw_addr_list_for_each(ha, mc_list) {
3148 memcpy(fp->mc_list[fp->mc_list_length],
3149 ha->addr, ETH_ALEN);
3150 fp->mc_list_length++;
3151 }
3152 }
3153
3154 return (u64)(unsigned long)fp;
3155 }
3156
3157 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3158 FIF_FCSFAIL | \
3159 FIF_BCN_PRBRESP_PROMISC | \
3160 FIF_CONTROL | \
3161 FIF_OTHER_BSS)
3162
3163 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3164 unsigned int changed,
3165 unsigned int *total, u64 multicast)
3166 {
3167 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3168 struct wl1271 *wl = hw->priv;
3169 struct wl12xx_vif *wlvif;
3170
3171 int ret;
3172
3173 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3174 " total %x", changed, *total);
3175
3176 mutex_lock(&wl->mutex);
3177
3178 *total &= WL1271_SUPPORTED_FILTERS;
3179 changed &= WL1271_SUPPORTED_FILTERS;
3180
3181 if (unlikely(wl->state != WLCORE_STATE_ON))
3182 goto out;
3183
3184 ret = wl1271_ps_elp_wakeup(wl);
3185 if (ret < 0)
3186 goto out;
3187
3188 wl12xx_for_each_wlvif(wl, wlvif) {
3189 if (wlcore_is_p2p_mgmt(wlvif))
3190 continue;
3191
3192 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3193 if (*total & FIF_ALLMULTI)
3194 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3195 false,
3196 NULL, 0);
3197 else if (fp)
3198 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3199 fp->enabled,
3200 fp->mc_list,
3201 fp->mc_list_length);
3202 if (ret < 0)
3203 goto out_sleep;
3204 }
3205 }
3206
3207 /*
3208 * the fw doesn't provide an api to configure the filters. instead,
3209 * the filters configuration is based on the active roles / ROC
3210 * state.
3211 */
3212
3213 out_sleep:
3214 wl1271_ps_elp_sleep(wl);
3215
3216 out:
3217 mutex_unlock(&wl->mutex);
3218 kfree(fp);
3219 }
3220
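/*
 * AP keys configured before the AP role has started cannot be
 * programmed into the firmware yet, so record them here and let
 * wl1271_ap_init_hwenc() program them later.
 */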
3221 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3222 u8 id, u8 key_type, u8 key_size,
3223 const u8 *key, u8 hlid, u32 tx_seq_32,
3224 u16 tx_seq_16)
3225 {
3226 struct wl1271_ap_key *ap_key;
3227 int i;
3228
3229 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3230
3231 if (key_size > MAX_KEY_SIZE)
3232 return -EINVAL;
3233
3234 /*
3235 * Find next free entry in ap_keys. Also check we are not replacing
3236 * an existing key.
3237 */
3238 for (i = 0; i < MAX_NUM_KEYS; i++) {
3239 if (wlvif->ap.recorded_keys[i] == NULL)
3240 break;
3241
3242 if (wlvif->ap.recorded_keys[i]->id == id) {
3243 wl1271_warning("trying to record key replacement");
3244 return -EINVAL;
3245 }
3246 }
3247
3248 if (i == MAX_NUM_KEYS)
3249 return -EBUSY;
3250
3251 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3252 if (!ap_key)
3253 return -ENOMEM;
3254
3255 ap_key->id = id;
3256 ap_key->key_type = key_type;
3257 ap_key->key_size = key_size;
3258 memcpy(ap_key->key, key, key_size);
3259 ap_key->hlid = hlid;
3260 ap_key->tx_seq_32 = tx_seq_32;
3261 ap_key->tx_seq_16 = tx_seq_16;
3262
3263 wlvif->ap.recorded_keys[i] = ap_key;
3264 return 0;
3265 }
3266
3267 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3268 {
3269 int i;
3270
3271 for (i = 0; i < MAX_NUM_KEYS; i++) {
3272 kfree(wlvif->ap.recorded_keys[i]);
3273 wlvif->ap.recorded_keys[i] = NULL;
3274 }
3275 }
3276
3277 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3278 {
3279 int i, ret = 0;
3280 struct wl1271_ap_key *key;
3281 bool wep_key_added = false;
3282
3283 for (i = 0; i < MAX_NUM_KEYS; i++) {
3284 u8 hlid;
3285 if (wlvif->ap.recorded_keys[i] == NULL)
3286 break;
3287
3288 key = wlvif->ap.recorded_keys[i];
3289 hlid = key->hlid;
3290 if (hlid == WL12XX_INVALID_LINK_ID)
3291 hlid = wlvif->ap.bcast_hlid;
3292
3293 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3294 key->id, key->key_type,
3295 key->key_size, key->key,
3296 hlid, key->tx_seq_32,
3297 key->tx_seq_16);
3298 if (ret < 0)
3299 goto out;
3300
3301 if (key->key_type == KEY_WEP)
3302 wep_key_added = true;
3303 }
3304
3305 if (wep_key_added) {
3306 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3307 wlvif->ap.bcast_hlid);
3308 if (ret < 0)
3309 goto out;
3310 }
3311
3312 out:
3313 wl1271_free_ap_keys(wl, wlvif);
3314 return ret;
3315 }
3316
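/*
 * Program a single key into the firmware, distinguishing the AP case
 * (per-station or broadcast hlid, possibly recorded for later) from
 * the STA case (unicast/broadcast address, with some removals silently
 * ignored).
 */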
3317 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3318 u16 action, u8 id, u8 key_type,
3319 u8 key_size, const u8 *key, u32 tx_seq_32,
3320 u16 tx_seq_16, struct ieee80211_sta *sta)
3321 {
3322 int ret;
3323 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3324
3325 if (is_ap) {
3326 struct wl1271_station *wl_sta;
3327 u8 hlid;
3328
3329 if (sta) {
3330 wl_sta = (struct wl1271_station *)sta->drv_priv;
3331 hlid = wl_sta->hlid;
3332 } else {
3333 hlid = wlvif->ap.bcast_hlid;
3334 }
3335
3336 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3337 /*
3338 * We do not support removing keys after AP shutdown.
3339 * Pretend we do to make mac80211 happy.
3340 */
3341 if (action != KEY_ADD_OR_REPLACE)
3342 return 0;
3343
3344 ret = wl1271_record_ap_key(wl, wlvif, id,
3345 key_type, key_size,
3346 key, hlid, tx_seq_32,
3347 tx_seq_16);
3348 } else {
3349 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3350 id, key_type, key_size,
3351 key, hlid, tx_seq_32,
3352 tx_seq_16);
3353 }
3354
3355 if (ret < 0)
3356 return ret;
3357 } else {
3358 const u8 *addr;
3359 static const u8 bcast_addr[ETH_ALEN] = {
3360 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3361 };
3362
3363 addr = sta ? sta->addr : bcast_addr;
3364
3365 if (is_zero_ether_addr(addr)) {
3366 /* We don't support TX-only encryption */
3367 return -EOPNOTSUPP;
3368 }
3369
3370 /* The wl1271 does not allow removing unicast keys - they
3371 will be cleared automatically on the next CMD_JOIN. Ignore the
3372 request silently, as we don't want mac80211 to emit
3373 an error message. */
3374 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3375 return 0;
3376
3377 /* don't remove key if hlid was already deleted */
3378 if (action == KEY_REMOVE &&
3379 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3380 return 0;
3381
3382 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3383 id, key_type, key_size,
3384 key, addr, tx_seq_32,
3385 tx_seq_16);
3386 if (ret < 0)
3387 return ret;
3388
3389 }
3390
3391 return 0;
3392 }
3393
3394 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3395 struct ieee80211_vif *vif,
3396 struct ieee80211_sta *sta,
3397 struct ieee80211_key_conf *key_conf)
3398 {
3399 struct wl1271 *wl = hw->priv;
3400 int ret;
3401 bool might_change_spare =
3402 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3403 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3404
3405 if (might_change_spare) {
3406 /*
3407 * stop the queues and flush to ensure the next packets are
3408 * in sync with FW spare block accounting
3409 */
3410 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3411 wl1271_tx_flush(wl);
3412 }
3413
3414 mutex_lock(&wl->mutex);
3415
3416 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3417 ret = -EAGAIN;
3418 goto out_wake_queues;
3419 }
3420
3421 ret = wl1271_ps_elp_wakeup(wl);
3422 if (ret < 0)
3423 goto out_wake_queues;
3424
3425 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3426
3427 wl1271_ps_elp_sleep(wl);
3428
3429 out_wake_queues:
3430 if (might_change_spare)
3431 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3432
3433 mutex_unlock(&wl->mutex);
3434
3435 return ret;
3436 }
3437
3438 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3439 struct ieee80211_vif *vif,
3440 struct ieee80211_sta *sta,
3441 struct ieee80211_key_conf *key_conf)
3442 {
3443 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3444 int ret;
3445 u32 tx_seq_32 = 0;
3446 u16 tx_seq_16 = 0;
3447 u8 key_type;
3448 u8 hlid;
3449
3450 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3451
3452 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3453 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3454 key_conf->cipher, key_conf->keyidx,
3455 key_conf->keylen, key_conf->flags);
3456 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3457
3458 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3459 if (sta) {
3460 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3461 hlid = wl_sta->hlid;
3462 } else {
3463 hlid = wlvif->ap.bcast_hlid;
3464 }
3465 else
3466 hlid = wlvif->sta.hlid;
3467
3468 if (hlid != WL12XX_INVALID_LINK_ID) {
3469 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3470 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3471 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3472 }
3473
3474 switch (key_conf->cipher) {
3475 case WLAN_CIPHER_SUITE_WEP40:
3476 case WLAN_CIPHER_SUITE_WEP104:
3477 key_type = KEY_WEP;
3478
3479 key_conf->hw_key_idx = key_conf->keyidx;
3480 break;
3481 case WLAN_CIPHER_SUITE_TKIP:
3482 key_type = KEY_TKIP;
3483 key_conf->hw_key_idx = key_conf->keyidx;
3484 break;
3485 case WLAN_CIPHER_SUITE_CCMP:
3486 key_type = KEY_AES;
3487 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3488 break;
3489 case WL1271_CIPHER_SUITE_GEM:
3490 key_type = KEY_GEM;
3491 break;
3492 default:
3493 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3494
3495 return -EOPNOTSUPP;
3496 }
3497
3498 switch (cmd) {
3499 case SET_KEY:
3500 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3501 key_conf->keyidx, key_type,
3502 key_conf->keylen, key_conf->key,
3503 tx_seq_32, tx_seq_16, sta);
3504 if (ret < 0) {
3505 wl1271_error("Could not add or replace key");
3506 return ret;
3507 }
3508
3509 /*
3510 * reconfigure the arp response if the unicast (or common)
3511 * encryption key type has changed
3512 */
3513 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3514 (sta || key_type == KEY_WEP) &&
3515 wlvif->encryption_type != key_type) {
3516 wlvif->encryption_type = key_type;
3517 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3518 if (ret < 0) {
3519 wl1271_warning("build arp rsp failed: %d", ret);
3520 return ret;
3521 }
3522 }
3523 break;
3524
3525 case DISABLE_KEY:
3526 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3527 key_conf->keyidx, key_type,
3528 key_conf->keylen, key_conf->key,
3529 0, 0, sta);
3530 if (ret < 0) {
3531 wl1271_error("Could not remove key");
3532 return ret;
3533 }
3534 break;
3535
3536 default:
3537 wl1271_error("Unsupported key cmd 0x%x", cmd);
3538 return -EOPNOTSUPP;
3539 }
3540
3541 return ret;
3542 }
3543 EXPORT_SYMBOL_GPL(wlcore_set_key);
3544
3545 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3546 struct ieee80211_vif *vif,
3547 int key_idx)
3548 {
3549 struct wl1271 *wl = hw->priv;
3550 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3551 int ret;
3552
3553 wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3554 key_idx);
3555
3556 /* we don't handle unsetting of default key */
3557 if (key_idx == -1)
3558 return;
3559
3560 mutex_lock(&wl->mutex);
3561
3562 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3563 ret = -EAGAIN;
3564 goto out_unlock;
3565 }
3566
3567 ret = wl1271_ps_elp_wakeup(wl);
3568 if (ret < 0)
3569 goto out_unlock;
3570
3571 wlvif->default_key = key_idx;
3572
3573 /* the default WEP key needs to be configured at least once */
3574 if (wlvif->encryption_type == KEY_WEP) {
3575 ret = wl12xx_cmd_set_default_wep_key(wl,
3576 key_idx,
3577 wlvif->sta.hlid);
3578 if (ret < 0)
3579 goto out_sleep;
3580 }
3581
3582 out_sleep:
3583 wl1271_ps_elp_sleep(wl);
3584
3585 out_unlock:
3586 mutex_unlock(&wl->mutex);
3587 }
3588
3589 void wlcore_regdomain_config(struct wl1271 *wl)
3590 {
3591 int ret;
3592
3593 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3594 return;
3595
3596 mutex_lock(&wl->mutex);
3597
3598 if (unlikely(wl->state != WLCORE_STATE_ON))
3599 goto out;
3600
3601 ret = wl1271_ps_elp_wakeup(wl);
3602 if (ret < 0)
3603 goto out;
3604
3605 ret = wlcore_cmd_regdomain_config_locked(wl);
3606 if (ret < 0) {
3607 wl12xx_queue_recovery_work(wl);
3608 goto out;
3609 }
3610
3611 wl1271_ps_elp_sleep(wl);
3612 out:
3613 mutex_unlock(&wl->mutex);
3614 }
3615
3616 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3617 struct ieee80211_vif *vif,
3618 struct ieee80211_scan_request *hw_req)
3619 {
3620 struct cfg80211_scan_request *req = &hw_req->req;
3621 struct wl1271 *wl = hw->priv;
3622 int ret;
3623 u8 *ssid = NULL;
3624 size_t len = 0;
3625
3626 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3627
3628 if (req->n_ssids) {
3629 ssid = req->ssids[0].ssid;
3630 len = req->ssids[0].ssid_len;
3631 }
3632
3633 mutex_lock(&wl->mutex);
3634
3635 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3636 /*
3637 * We cannot return -EBUSY here because cfg80211 would then expect
3638 * a call to ieee80211_scan_completed, which in this case
3639 * will never come.
3640 */
3641 ret = -EAGAIN;
3642 goto out;
3643 }
3644
3645 ret = wl1271_ps_elp_wakeup(wl);
3646 if (ret < 0)
3647 goto out;
3648
3649 /* fail if there is any role in ROC */
3650 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3651 /* don't allow scanning right now */
3652 ret = -EBUSY;
3653 goto out_sleep;
3654 }
3655
3656 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3657 out_sleep:
3658 wl1271_ps_elp_sleep(wl);
3659 out:
3660 mutex_unlock(&wl->mutex);
3661
3662 return ret;
3663 }
3664
3665 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3666 struct ieee80211_vif *vif)
3667 {
3668 struct wl1271 *wl = hw->priv;
3669 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3670 struct cfg80211_scan_info info = {
3671 .aborted = true,
3672 };
3673 int ret;
3674
3675 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3676
3677 mutex_lock(&wl->mutex);
3678
3679 if (unlikely(wl->state != WLCORE_STATE_ON))
3680 goto out;
3681
3682 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3683 goto out;
3684
3685 ret = wl1271_ps_elp_wakeup(wl);
3686 if (ret < 0)
3687 goto out;
3688
3689 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3690 ret = wl->ops->scan_stop(wl, wlvif);
3691 if (ret < 0)
3692 goto out_sleep;
3693 }
3694
3695 /*
3696 * Rearm the tx watchdog just before idling scan. This
3697 * prevents just-finished scans from triggering the watchdog
3698 */
3699 wl12xx_rearm_tx_watchdog_locked(wl);
3700
3701 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3702 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3703 wl->scan_wlvif = NULL;
3704 wl->scan.req = NULL;
3705 ieee80211_scan_completed(wl->hw, &info);
3706
3707 out_sleep:
3708 wl1271_ps_elp_sleep(wl);
3709 out:
3710 mutex_unlock(&wl->mutex);
3711
3712 cancel_delayed_work_sync(&wl->scan_complete_work);
3713 }
3714
3715 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3716 struct ieee80211_vif *vif,
3717 struct cfg80211_sched_scan_request *req,
3718 struct ieee80211_scan_ies *ies)
3719 {
3720 struct wl1271 *wl = hw->priv;
3721 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3722 int ret;
3723
3724 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3725
3726 mutex_lock(&wl->mutex);
3727
3728 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3729 ret = -EAGAIN;
3730 goto out;
3731 }
3732
3733 ret = wl1271_ps_elp_wakeup(wl);
3734 if (ret < 0)
3735 goto out;
3736
3737 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3738 if (ret < 0)
3739 goto out_sleep;
3740
3741 wl->sched_vif = wlvif;
3742
3743 out_sleep:
3744 wl1271_ps_elp_sleep(wl);
3745 out:
3746 mutex_unlock(&wl->mutex);
3747 return ret;
3748 }
3749
3750 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3751 struct ieee80211_vif *vif)
3752 {
3753 struct wl1271 *wl = hw->priv;
3754 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3755 int ret;
3756
3757 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3758
3759 mutex_lock(&wl->mutex);
3760
3761 if (unlikely(wl->state != WLCORE_STATE_ON))
3762 goto out;
3763
3764 ret = wl1271_ps_elp_wakeup(wl);
3765 if (ret < 0)
3766 goto out;
3767
3768 wl->ops->sched_scan_stop(wl, wlvif);
3769
3770 wl1271_ps_elp_sleep(wl);
3771 out:
3772 mutex_unlock(&wl->mutex);
3773
3774 return 0;
3775 }
3776
3777 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3778 {
3779 struct wl1271 *wl = hw->priv;
3780 int ret = 0;
3781
3782 mutex_lock(&wl->mutex);
3783
3784 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3785 ret = -EAGAIN;
3786 goto out;
3787 }
3788
3789 ret = wl1271_ps_elp_wakeup(wl);
3790 if (ret < 0)
3791 goto out;
3792
3793 ret = wl1271_acx_frag_threshold(wl, value);
3794 if (ret < 0)
3795 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3796
3797 wl1271_ps_elp_sleep(wl);
3798
3799 out:
3800 mutex_unlock(&wl->mutex);
3801
3802 return ret;
3803 }
3804
3805 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3806 {
3807 struct wl1271 *wl = hw->priv;
3808 struct wl12xx_vif *wlvif;
3809 int ret = 0;
3810
3811 mutex_lock(&wl->mutex);
3812
3813 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3814 ret = -EAGAIN;
3815 goto out;
3816 }
3817
3818 ret = wl1271_ps_elp_wakeup(wl);
3819 if (ret < 0)
3820 goto out;
3821
3822 wl12xx_for_each_wlvif(wl, wlvif) {
3823 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3824 if (ret < 0)
3825 wl1271_warning("set rts threshold failed: %d", ret);
3826 }
3827 wl1271_ps_elp_sleep(wl);
3828
3829 out:
3830 mutex_unlock(&wl->mutex);
3831
3832 return ret;
3833 }
3834
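/*
 * Strip an information element (by EID, or by vendor OUI below) out of
 * a template skb by moving the remaining data down and trimming the
 * skb.
 */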
3835 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3836 {
3837 int len;
3838 const u8 *next, *end = skb->data + skb->len;
3839 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3840 skb->len - ieoffset);
3841 if (!ie)
3842 return;
3843 len = ie[1] + 2;
3844 next = ie + len;
3845 memmove(ie, next, end - next);
3846 skb_trim(skb, skb->len - len);
3847 }
3848
3849 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3850 unsigned int oui, u8 oui_type,
3851 int ieoffset)
3852 {
3853 int len;
3854 const u8 *next, *end = skb->data + skb->len;
3855 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3856 skb->data + ieoffset,
3857 skb->len - ieoffset);
3858 if (!ie)
3859 return;
3860 len = ie[1] + 2;
3861 next = ie + len;
3862 memmove(ie, next, end - next);
3863 skb_trim(skb, skb->len - len);
3864 }
3865
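/*
 * Download the probe response prepared by mac80211 as the AP probe
 * response template.
 */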
3866 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3867 struct ieee80211_vif *vif)
3868 {
3869 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3870 struct sk_buff *skb;
3871 int ret;
3872
3873 skb = ieee80211_proberesp_get(wl->hw, vif);
3874 if (!skb)
3875 return -EOPNOTSUPP;
3876
3877 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3878 CMD_TEMPL_AP_PROBE_RESPONSE,
3879 skb->data,
3880 skb->len, 0,
3881 rates);
3882 dev_kfree_skb(skb);
3883
3884 if (ret < 0)
3885 goto out;
3886
3887 wl1271_debug(DEBUG_AP, "probe response updated");
3888 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3889
3890 out:
3891 return ret;
3892 }
3893
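/*
 * Legacy probe response template helper: if the vif has no SSID set
 * (hidden SSID), rebuild the template with the SSID taken from bss_conf
 * before downloading it; otherwise download the given data as-is.
 */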
3894 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3895 struct ieee80211_vif *vif,
3896 u8 *probe_rsp_data,
3897 size_t probe_rsp_len,
3898 u32 rates)
3899 {
3900 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3901 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3902 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3903 int ssid_ie_offset, ie_offset, templ_len;
3904 const u8 *ptr;
3905
3906 /* no need to change probe response if the SSID is set correctly */
3907 if (wlvif->ssid_len > 0)
3908 return wl1271_cmd_template_set(wl, wlvif->role_id,
3909 CMD_TEMPL_AP_PROBE_RESPONSE,
3910 probe_rsp_data,
3911 probe_rsp_len, 0,
3912 rates);
3913
3914 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3915 wl1271_error("probe_rsp template too big");
3916 return -EINVAL;
3917 }
3918
3919 /* start searching from IE offset */
3920 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3921
3922 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3923 probe_rsp_len - ie_offset);
3924 if (!ptr) {
3925 wl1271_error("No SSID in beacon!");
3926 return -EINVAL;
3927 }
3928
3929 ssid_ie_offset = ptr - probe_rsp_data;
3930 ptr += (ptr[1] + 2);
3931
3932 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3933
3934 /* insert SSID from bss_conf */
3935 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3936 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3937 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3938 bss_conf->ssid, bss_conf->ssid_len);
3939 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3940
3941 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3942 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3943 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3944
3945 return wl1271_cmd_template_set(wl, wlvif->role_id,
3946 CMD_TEMPL_AP_PROBE_RESPONSE,
3947 probe_rsp_templ,
3948 templ_len, 0,
3949 rates);
3950 }
3951
3952 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3953 struct ieee80211_vif *vif,
3954 struct ieee80211_bss_conf *bss_conf,
3955 u32 changed)
3956 {
3957 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3958 int ret = 0;
3959
3960 if (changed & BSS_CHANGED_ERP_SLOT) {
3961 if (bss_conf->use_short_slot)
3962 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3963 else
3964 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3965 if (ret < 0) {
3966 wl1271_warning("Set slot time failed %d", ret);
3967 goto out;
3968 }
3969 }
3970
3971 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3972 if (bss_conf->use_short_preamble)
3973 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3974 else
3975 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3976 }
3977
3978 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3979 if (bss_conf->use_cts_prot)
3980 ret = wl1271_acx_cts_protect(wl, wlvif,
3981 CTSPROTECT_ENABLE);
3982 else
3983 ret = wl1271_acx_cts_protect(wl, wlvif,
3984 CTSPROTECT_DISABLE);
3985 if (ret < 0) {
3986 wl1271_warning("Set ctsprotect failed %d", ret);
3987 goto out;
3988 }
3989 }
3990
3991 out:
3992 return ret;
3993 }
3994
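/*
 * Download the current beacon as a firmware template. Unless userspace has
 * set an explicit probe response, also derive a probe response template
 * from the same beacon, with the TIM and P2P IEs stripped.
 */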
3995 static int wlcore_set_beacon_template(struct wl1271 *wl,
3996 struct ieee80211_vif *vif,
3997 bool is_ap)
3998 {
3999 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4000 struct ieee80211_hdr *hdr;
4001 u32 min_rate;
4002 int ret;
4003 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4004 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4005 u16 tmpl_id;
4006
4007 if (!beacon) {
4008 ret = -EINVAL;
4009 goto out;
4010 }
4011
4012 wl1271_debug(DEBUG_MASTER, "beacon updated");
4013
4014 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4015 if (ret < 0) {
4016 dev_kfree_skb(beacon);
4017 goto out;
4018 }
4019 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4020 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4021 CMD_TEMPL_BEACON;
4022 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4023 beacon->data,
4024 beacon->len, 0,
4025 min_rate);
4026 if (ret < 0) {
4027 dev_kfree_skb(beacon);
4028 goto out;
4029 }
4030
4031 wlvif->wmm_enabled =
4032 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4033 WLAN_OUI_TYPE_MICROSOFT_WMM,
4034 beacon->data + ieoffset,
4035 beacon->len - ieoffset);
4036
4037 /*
4038 * In case userspace has already set an explicit probe response
4039 * template, don't derive one from the beacon data.
4040 */
4041 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4042 goto end_bcn;
4043
4044 /* remove TIM ie from probe response */
4045 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4046
4047 /*
4048 * remove the p2p ie from the probe response.
4049 * the fw responds to probe requests that don't include
4050 * the p2p ie. probe requests with a p2p ie are passed up
4051 * and answered by the supplicant (the spec forbids
4052 * including the p2p ie when responding to probe requests
4053 * that didn't include it).
4054 */
4055 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4056 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4057
4058 hdr = (struct ieee80211_hdr *) beacon->data;
4059 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4060 IEEE80211_STYPE_PROBE_RESP);
4061 if (is_ap)
4062 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4063 beacon->data,
4064 beacon->len,
4065 min_rate);
4066 else
4067 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4068 CMD_TEMPL_PROBE_RESPONSE,
4069 beacon->data,
4070 beacon->len, 0,
4071 min_rate);
4072 end_bcn:
4073 dev_kfree_skb(beacon);
4074 if (ret < 0)
4075 goto out;
4076
4077 out:
4078 return ret;
4079 }
4080
4081 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4082 struct ieee80211_vif *vif,
4083 struct ieee80211_bss_conf *bss_conf,
4084 u32 changed)
4085 {
4086 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4087 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4088 int ret = 0;
4089
4090 if (changed & BSS_CHANGED_BEACON_INT) {
4091 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4092 bss_conf->beacon_int);
4093
4094 wlvif->beacon_int = bss_conf->beacon_int;
4095 }
4096
4097 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4098 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4099
4100 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4101 }
4102
4103 if (changed & BSS_CHANGED_BEACON) {
4104 ret = wlcore_set_beacon_template(wl, vif, is_ap);
4105 if (ret < 0)
4106 goto out;
4107
4108 if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4109 &wlvif->flags)) {
4110 ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4111 if (ret < 0)
4112 goto out;
4113 }
4114 }
4115 out:
4116 if (ret != 0)
4117 wl1271_error("beacon info change failed: %d", ret);
4118 return ret;
4119 }
4120
4121 /* AP mode changes */
4122 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4123 struct ieee80211_vif *vif,
4124 struct ieee80211_bss_conf *bss_conf,
4125 u32 changed)
4126 {
4127 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4128 int ret = 0;
4129
4130 if (changed & BSS_CHANGED_BASIC_RATES) {
4131 u32 rates = bss_conf->basic_rates;
4132
4133 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4134 wlvif->band);
4135 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4136 wlvif->basic_rate_set);
4137
4138 ret = wl1271_init_ap_rates(wl, wlvif);
4139 if (ret < 0) {
4140 wl1271_error("AP rate policy change failed %d", ret);
4141 goto out;
4142 }
4143
4144 ret = wl1271_ap_init_templates(wl, vif);
4145 if (ret < 0)
4146 goto out;
4147
4148 /* No need to set probe resp template for mesh */
4149 if (!ieee80211_vif_is_mesh(vif)) {
4150 ret = wl1271_ap_set_probe_resp_tmpl(wl,
4151 wlvif->basic_rate,
4152 vif);
4153 if (ret < 0)
4154 goto out;
4155 }
4156
4157 ret = wlcore_set_beacon_template(wl, vif, true);
4158 if (ret < 0)
4159 goto out;
4160 }
4161
4162 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4163 if (ret < 0)
4164 goto out;
4165
4166 if (changed & BSS_CHANGED_BEACON_ENABLED) {
4167 if (bss_conf->enable_beacon) {
4168 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4169 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4170 if (ret < 0)
4171 goto out;
4172
4173 ret = wl1271_ap_init_hwenc(wl, wlvif);
4174 if (ret < 0)
4175 goto out;
4176
4177 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4178 wl1271_debug(DEBUG_AP, "started AP");
4179 }
4180 } else {
4181 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4182 /*
4183 * The AP might be in ROC if we have just
4184 * sent an auth reply. Handle that case.
4185 */
4186 if (test_bit(wlvif->role_id, wl->roc_map))
4187 wl12xx_croc(wl, wlvif->role_id);
4188
4189 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4190 if (ret < 0)
4191 goto out;
4192
4193 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4194 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4195 &wlvif->flags);
4196 wl1271_debug(DEBUG_AP, "stopped AP");
4197 }
4198 }
4199 }
4200
4201 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4202 if (ret < 0)
4203 goto out;
4204
4205 /* Handle HT information change */
4206 if ((changed & BSS_CHANGED_HT) &&
4207 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4208 ret = wl1271_acx_set_ht_information(wl, wlvif,
4209 bss_conf->ht_operation_mode);
4210 if (ret < 0) {
4211 wl1271_warning("Set ht information failed %d", ret);
4212 goto out;
4213 }
4214 }
4215
4216 out:
4217 return;
4218 }
4219
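/*
 * Apply a new BSSID on a STA role: update the beacon interval and rate
 * sets, stop any scheduled scan owned by this vif, reprogram the rate
 * policies and null-data templates and mark the role as in use.
 */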
4220 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4221 struct ieee80211_bss_conf *bss_conf,
4222 u32 sta_rate_set)
4223 {
4224 u32 rates;
4225 int ret;
4226
4227 wl1271_debug(DEBUG_MAC80211,
4228 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4229 bss_conf->bssid, bss_conf->aid,
4230 bss_conf->beacon_int,
4231 bss_conf->basic_rates, sta_rate_set);
4232
4233 wlvif->beacon_int = bss_conf->beacon_int;
4234 rates = bss_conf->basic_rates;
4235 wlvif->basic_rate_set =
4236 wl1271_tx_enabled_rates_get(wl, rates,
4237 wlvif->band);
4238 wlvif->basic_rate =
4239 wl1271_tx_min_rate_get(wl,
4240 wlvif->basic_rate_set);
4241
4242 if (sta_rate_set)
4243 wlvif->rate_set =
4244 wl1271_tx_enabled_rates_get(wl,
4245 sta_rate_set,
4246 wlvif->band);
4247
4248 /* we only support sched_scan while not connected */
4249 if (wl->sched_vif == wlvif)
4250 wl->ops->sched_scan_stop(wl, wlvif);
4251
4252 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4253 if (ret < 0)
4254 return ret;
4255
4256 ret = wl12xx_cmd_build_null_data(wl, wlvif);
4257 if (ret < 0)
4258 return ret;
4259
4260 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4261 if (ret < 0)
4262 return ret;
4263
4264 wlcore_set_ssid(wl, wlvif);
4265
4266 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4267
4268 return 0;
4269 }
4270
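/*
 * Undo wlcore_set_bssid(): fall back to the band's minimum rates, stop the
 * STA role if it was started and clear the in-use flag.
 */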
4271 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4272 {
4273 int ret;
4274
4275 /* revert to the minimum rates for the current band */
4276 wl1271_set_band_rate(wl, wlvif);
4277 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4278
4279 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4280 if (ret < 0)
4281 return ret;
4282
4283 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4284 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4285 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4286 if (ret < 0)
4287 return ret;
4288 }
4289
4290 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4291 return 0;
4292 }
4293 /* STA/IBSS mode changes */
4294 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4295 struct ieee80211_vif *vif,
4296 struct ieee80211_bss_conf *bss_conf,
4297 u32 changed)
4298 {
4299 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4300 bool do_join = false;
4301 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4302 bool ibss_joined = false;
4303 u32 sta_rate_set = 0;
4304 int ret;
4305 struct ieee80211_sta *sta;
4306 bool sta_exists = false;
4307 struct ieee80211_sta_ht_cap sta_ht_cap;
4308
4309 if (is_ibss) {
4310 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4311 changed);
4312 if (ret < 0)
4313 goto out;
4314 }
4315
4316 if (changed & BSS_CHANGED_IBSS) {
4317 if (bss_conf->ibss_joined) {
4318 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4319 ibss_joined = true;
4320 } else {
4321 wlcore_unset_assoc(wl, wlvif);
4322 wl12xx_cmd_role_stop_sta(wl, wlvif);
4323 }
4324 }
4325
4326 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4327 do_join = true;
4328
4329 /* Need to update the SSID (for filtering etc) */
4330 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4331 do_join = true;
4332
4333 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4334 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4335 bss_conf->enable_beacon ? "enabled" : "disabled");
4336
4337 do_join = true;
4338 }
4339
4340 if (changed & BSS_CHANGED_IDLE && !is_ibss)
4341 wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4342
4343 if (changed & BSS_CHANGED_CQM) {
4344 bool enable = false;
4345 if (bss_conf->cqm_rssi_thold)
4346 enable = true;
4347 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4348 bss_conf->cqm_rssi_thold,
4349 bss_conf->cqm_rssi_hyst);
4350 if (ret < 0)
4351 goto out;
4352 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4353 }
4354
4355 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4356 BSS_CHANGED_ASSOC)) {
4357 rcu_read_lock();
4358 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4359 if (sta) {
4360 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4361
4362 /* save the supp_rates of the ap */
4363 sta_rate_set = sta->supp_rates[wlvif->band];
4364 if (sta->ht_cap.ht_supported)
4365 sta_rate_set |=
4366 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4367 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4368 sta_ht_cap = sta->ht_cap;
4369 sta_exists = true;
4370 }
4371
4372 rcu_read_unlock();
4373 }
4374
4375 if (changed & BSS_CHANGED_BSSID) {
4376 if (!is_zero_ether_addr(bss_conf->bssid)) {
4377 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4378 sta_rate_set);
4379 if (ret < 0)
4380 goto out;
4381
4382 /* Need to update the BSSID (for filtering etc) */
4383 do_join = true;
4384 } else {
4385 ret = wlcore_clear_bssid(wl, wlvif);
4386 if (ret < 0)
4387 goto out;
4388 }
4389 }
4390
4391 if (changed & BSS_CHANGED_IBSS) {
4392 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4393 bss_conf->ibss_joined);
4394
4395 if (bss_conf->ibss_joined) {
4396 u32 rates = bss_conf->basic_rates;
4397 wlvif->basic_rate_set =
4398 wl1271_tx_enabled_rates_get(wl, rates,
4399 wlvif->band);
4400 wlvif->basic_rate =
4401 wl1271_tx_min_rate_get(wl,
4402 wlvif->basic_rate_set);
4403
4404 /* by default, use 11b + OFDM rates */
4405 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4406 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4407 if (ret < 0)
4408 goto out;
4409 }
4410 }
4411
4412 if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4413 /* enable beacon filtering */
4414 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4415 if (ret < 0)
4416 goto out;
4417 }
4418
4419 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4420 if (ret < 0)
4421 goto out;
4422
4423 if (do_join) {
4424 ret = wlcore_join(wl, wlvif);
4425 if (ret < 0) {
4426 wl1271_warning("cmd join failed %d", ret);
4427 goto out;
4428 }
4429 }
4430
4431 if (changed & BSS_CHANGED_ASSOC) {
4432 if (bss_conf->assoc) {
4433 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4434 sta_rate_set);
4435 if (ret < 0)
4436 goto out;
4437
4438 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4439 wl12xx_set_authorized(wl, wlvif);
4440 } else {
4441 wlcore_unset_assoc(wl, wlvif);
4442 }
4443 }
4444
4445 if (changed & BSS_CHANGED_PS) {
4446 if ((bss_conf->ps) &&
4447 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4448 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4449 int ps_mode;
4450 char *ps_mode_str;
4451
4452 if (wl->conf.conn.forced_ps) {
4453 ps_mode = STATION_POWER_SAVE_MODE;
4454 ps_mode_str = "forced";
4455 } else {
4456 ps_mode = STATION_AUTO_PS_MODE;
4457 ps_mode_str = "auto";
4458 }
4459
4460 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4461
4462 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4463 if (ret < 0)
4464 wl1271_warning("enter %s ps failed %d",
4465 ps_mode_str, ret);
4466 } else if (!bss_conf->ps &&
4467 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4468 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4469
4470 ret = wl1271_ps_set_mode(wl, wlvif,
4471 STATION_ACTIVE_MODE);
4472 if (ret < 0)
4473 wl1271_warning("exit auto ps failed %d", ret);
4474 }
4475 }
4476
4477 /* Handle new association with HT. Do this after join. */
4478 if (sta_exists) {
4479 bool enabled =
4480 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4481
4482 ret = wlcore_hw_set_peer_cap(wl,
4483 &sta_ht_cap,
4484 enabled,
4485 wlvif->rate_set,
4486 wlvif->sta.hlid);
4487 if (ret < 0) {
4488 wl1271_warning("Set ht cap failed %d", ret);
4489 goto out;
4490
4491 }
4492
4493 if (enabled) {
4494 ret = wl1271_acx_set_ht_information(wl, wlvif,
4495 bss_conf->ht_operation_mode);
4496 if (ret < 0) {
4497 wl1271_warning("Set ht information failed %d",
4498 ret);
4499 goto out;
4500 }
4501 }
4502 }
4503
4504 /* Handle arp filtering. Done after join. */
4505 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4506 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4507 __be32 addr = bss_conf->arp_addr_list[0];
4508 wlvif->sta.qos = bss_conf->qos;
4509 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4510
4511 if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4512 wlvif->ip_addr = addr;
4513 /*
4514 * The template should only need to be configured upon
4515 * association. However, it seems the correct IP isn't
4516 * always set when sending, so we have to reconfigure
4517 * the template on every IP change.
4518 */
4519 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4520 if (ret < 0) {
4521 wl1271_warning("build arp rsp failed: %d", ret);
4522 goto out;
4523 }
4524
4525 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4526 (ACX_ARP_FILTER_ARP_FILTERING |
4527 ACX_ARP_FILTER_AUTO_ARP),
4528 addr);
4529 } else {
4530 wlvif->ip_addr = 0;
4531 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4532 }
4533
4534 if (ret < 0)
4535 goto out;
4536 }
4537
4538 out:
4539 return;
4540 }
4541
4542 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4543 struct ieee80211_vif *vif,
4544 struct ieee80211_bss_conf *bss_conf,
4545 u32 changed)
4546 {
4547 struct wl1271 *wl = hw->priv;
4548 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4549 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4550 int ret;
4551
4552 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4553 wlvif->role_id, (int)changed);
4554
4555 /*
4556 * make sure to cancel pending disconnections if our association
4557 * state changed
4558 */
4559 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4560 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4561
4562 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4563 !bss_conf->enable_beacon)
4564 wl1271_tx_flush(wl);
4565
4566 mutex_lock(&wl->mutex);
4567
4568 if (unlikely(wl->state != WLCORE_STATE_ON))
4569 goto out;
4570
4571 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4572 goto out;
4573
4574 ret = wl1271_ps_elp_wakeup(wl);
4575 if (ret < 0)
4576 goto out;
4577
4578 if ((changed & BSS_CHANGED_TXPOWER) &&
4579 bss_conf->txpower != wlvif->power_level) {
4580
4581 ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4582 if (ret < 0)
4583 goto out;
4584
4585 wlvif->power_level = bss_conf->txpower;
4586 }
4587
4588 if (is_ap)
4589 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4590 else
4591 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4592
4593 wl1271_ps_elp_sleep(wl);
4594
4595 out:
4596 mutex_unlock(&wl->mutex);
4597 }
4598
4599 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4600 struct ieee80211_chanctx_conf *ctx)
4601 {
4602 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4603 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4604 cfg80211_get_chandef_type(&ctx->def));
4605 return 0;
4606 }
4607
4608 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4609 struct ieee80211_chanctx_conf *ctx)
4610 {
4611 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4612 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4613 cfg80211_get_chandef_type(&ctx->def));
4614 }
4615
4616 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4617 struct ieee80211_chanctx_conf *ctx,
4618 u32 changed)
4619 {
4620 struct wl1271 *wl = hw->priv;
4621 struct wl12xx_vif *wlvif;
4622 int ret;
4623 int channel = ieee80211_frequency_to_channel(
4624 ctx->def.chan->center_freq);
4625
4626 wl1271_debug(DEBUG_MAC80211,
4627 "mac80211 change chanctx %d (type %d) changed 0x%x",
4628 channel, cfg80211_get_chandef_type(&ctx->def), changed);
4629
4630 mutex_lock(&wl->mutex);
4631
4632 ret = wl1271_ps_elp_wakeup(wl);
4633 if (ret < 0)
4634 goto out;
4635
4636 wl12xx_for_each_wlvif(wl, wlvif) {
4637 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4638
4639 rcu_read_lock();
4640 if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4641 rcu_read_unlock();
4642 continue;
4643 }
4644 rcu_read_unlock();
4645
4646 /* start radar if needed */
4647 if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4648 wlvif->bss_type == BSS_TYPE_AP_BSS &&
4649 ctx->radar_enabled && !wlvif->radar_enabled &&
4650 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4651 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4652 wlcore_hw_set_cac(wl, wlvif, true);
4653 wlvif->radar_enabled = true;
4654 }
4655 }
4656
4657 wl1271_ps_elp_sleep(wl);
4658 out:
4659 mutex_unlock(&wl->mutex);
4660 }
4661
4662 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4663 struct ieee80211_vif *vif,
4664 struct ieee80211_chanctx_conf *ctx)
4665 {
4666 struct wl1271 *wl = hw->priv;
4667 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4668 int channel = ieee80211_frequency_to_channel(
4669 ctx->def.chan->center_freq);
4670 int ret = -EINVAL;
4671
4672 wl1271_debug(DEBUG_MAC80211,
4673 "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4674 wlvif->role_id, channel,
4675 cfg80211_get_chandef_type(&ctx->def),
4676 ctx->radar_enabled, ctx->def.chan->dfs_state);
4677
4678 mutex_lock(&wl->mutex);
4679
4680 if (unlikely(wl->state != WLCORE_STATE_ON))
4681 goto out;
4682
4683 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4684 goto out;
4685
4686 ret = wl1271_ps_elp_wakeup(wl);
4687 if (ret < 0)
4688 goto out;
4689
4690 wlvif->band = ctx->def.chan->band;
4691 wlvif->channel = channel;
4692 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4693
4694 /* update default rates according to the band */
4695 wl1271_set_band_rate(wl, wlvif);
4696
4697 if (ctx->radar_enabled &&
4698 ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4699 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4700 wlcore_hw_set_cac(wl, wlvif, true);
4701 wlvif->radar_enabled = true;
4702 }
4703
4704 wl1271_ps_elp_sleep(wl);
4705 out:
4706 mutex_unlock(&wl->mutex);
4707
4708 return 0;
4709 }
4710
4711 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4712 struct ieee80211_vif *vif,
4713 struct ieee80211_chanctx_conf *ctx)
4714 {
4715 struct wl1271 *wl = hw->priv;
4716 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4717 int ret;
4718
4719 wl1271_debug(DEBUG_MAC80211,
4720 "mac80211 unassign chanctx (role %d) %d (type %d)",
4721 wlvif->role_id,
4722 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4723 cfg80211_get_chandef_type(&ctx->def));
4724
4725 wl1271_tx_flush(wl);
4726
4727 mutex_lock(&wl->mutex);
4728
4729 if (unlikely(wl->state != WLCORE_STATE_ON))
4730 goto out;
4731
4732 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4733 goto out;
4734
4735 ret = wl1271_ps_elp_wakeup(wl);
4736 if (ret < 0)
4737 goto out;
4738
4739 if (wlvif->radar_enabled) {
4740 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4741 wlcore_hw_set_cac(wl, wlvif, false);
4742 wlvif->radar_enabled = false;
4743 }
4744
4745 wl1271_ps_elp_sleep(wl);
4746 out:
4747 mutex_unlock(&wl->mutex);
4748 }
4749
4750 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4751 struct wl12xx_vif *wlvif,
4752 struct ieee80211_chanctx_conf *new_ctx)
4753 {
4754 int channel = ieee80211_frequency_to_channel(
4755 new_ctx->def.chan->center_freq);
4756
4757 wl1271_debug(DEBUG_MAC80211,
4758 "switch vif (role %d) %d -> %d chan_type: %d",
4759 wlvif->role_id, wlvif->channel, channel,
4760 cfg80211_get_chandef_type(&new_ctx->def));
4761
4762 if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4763 return 0;
4764
4765 WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4766
4767 if (wlvif->radar_enabled) {
4768 wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4769 wlcore_hw_set_cac(wl, wlvif, false);
4770 wlvif->radar_enabled = false;
4771 }
4772
4773 wlvif->band = new_ctx->def.chan->band;
4774 wlvif->channel = channel;
4775 wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4776
4777 /* start radar if needed */
4778 if (new_ctx->radar_enabled) {
4779 wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4780 wlcore_hw_set_cac(wl, wlvif, true);
4781 wlvif->radar_enabled = true;
4782 }
4783
4784 return 0;
4785 }
4786
4787 static int
4788 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4789 struct ieee80211_vif_chanctx_switch *vifs,
4790 int n_vifs,
4791 enum ieee80211_chanctx_switch_mode mode)
4792 {
4793 struct wl1271 *wl = hw->priv;
4794 int i, ret;
4795
4796 wl1271_debug(DEBUG_MAC80211,
4797 "mac80211 switch chanctx n_vifs %d mode %d",
4798 n_vifs, mode);
4799
4800 mutex_lock(&wl->mutex);
4801
4802 ret = wl1271_ps_elp_wakeup(wl);
4803 if (ret < 0)
4804 goto out;
4805
4806 for (i = 0; i < n_vifs; i++) {
4807 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4808
4809 ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4810 if (ret)
4811 goto out_sleep;
4812 }
4813 out_sleep:
4814 wl1271_ps_elp_sleep(wl);
4815 out:
4816 mutex_unlock(&wl->mutex);
4817
4818 return 0;
4819 }
4820
4821 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4822 struct ieee80211_vif *vif, u16 queue,
4823 const struct ieee80211_tx_queue_params *params)
4824 {
4825 struct wl1271 *wl = hw->priv;
4826 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4827 u8 ps_scheme;
4828 int ret = 0;
4829
4830 if (wlcore_is_p2p_mgmt(wlvif))
4831 return 0;
4832
4833 mutex_lock(&wl->mutex);
4834
4835 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4836
4837 if (params->uapsd)
4838 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4839 else
4840 ps_scheme = CONF_PS_SCHEME_LEGACY;
4841
4842 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4843 goto out;
4844
4845 ret = wl1271_ps_elp_wakeup(wl);
4846 if (ret < 0)
4847 goto out;
4848
4849 /*
4850 * mac80211 passes the txop in units of 32us; the firmware
4851 * expects microseconds, hence the << 5 below
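 * e.g. a txop of 94 (in 32us units) becomes 94 << 5 = 3008 us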
4852 */
4853 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4854 params->cw_min, params->cw_max,
4855 params->aifs, params->txop << 5);
4856 if (ret < 0)
4857 goto out_sleep;
4858
4859 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4860 CONF_CHANNEL_TYPE_EDCF,
4861 wl1271_tx_get_queue(queue),
4862 ps_scheme, CONF_ACK_POLICY_LEGACY,
4863 0, 0);
4864
4865 out_sleep:
4866 wl1271_ps_elp_sleep(wl);
4867
4868 out:
4869 mutex_unlock(&wl->mutex);
4870
4871 return ret;
4872 }
4873
4874 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4875 struct ieee80211_vif *vif)
4876 {
4877
4878 struct wl1271 *wl = hw->priv;
4879 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4880 u64 mactime = ULLONG_MAX;
4881 int ret;
4882
4883 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4884
4885 mutex_lock(&wl->mutex);
4886
4887 if (unlikely(wl->state != WLCORE_STATE_ON))
4888 goto out;
4889
4890 ret = wl1271_ps_elp_wakeup(wl);
4891 if (ret < 0)
4892 goto out;
4893
4894 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4895 if (ret < 0)
4896 goto out_sleep;
4897
4898 out_sleep:
4899 wl1271_ps_elp_sleep(wl);
4900
4901 out:
4902 mutex_unlock(&wl->mutex);
4903 return mactime;
4904 }
4905
4906 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4907 struct survey_info *survey)
4908 {
4909 struct ieee80211_conf *conf = &hw->conf;
4910
4911 if (idx != 0)
4912 return -ENOENT;
4913
4914 survey->channel = conf->chandef.chan;
4915 survey->filled = 0;
4916 return 0;
4917 }
4918
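/*
 * Allocate a firmware link (HLID) for a station added in AP mode and
 * account for it; the station's saved security sequence number is restored
 * in case this is a recovery/resume.
 */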
4919 static int wl1271_allocate_sta(struct wl1271 *wl,
4920 struct wl12xx_vif *wlvif,
4921 struct ieee80211_sta *sta)
4922 {
4923 struct wl1271_station *wl_sta;
4924 int ret;
4925
4926
4927 if (wl->active_sta_count >= wl->max_ap_stations) {
4928 wl1271_warning("could not allocate HLID - too much stations");
4929 return -EBUSY;
4930 }
4931
4932 wl_sta = (struct wl1271_station *)sta->drv_priv;
4933 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4934 if (ret < 0) {
4935 wl1271_warning("could not allocate HLID - too many links");
4936 return -EBUSY;
4937 }
4938
4939 /* use the previous security seq, if this is a recovery/resume */
4940 wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4941
4942 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4943 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4944 wl->active_sta_count++;
4945 return 0;
4946 }
4947
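/*
 * Release a station's HLID and its PS bookkeeping. The last used sequence
 * number is saved for recovery/resume, and the TX watchdog is rearmed once
 * the last station is freed.
 */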
4948 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4949 {
4950 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4951 return;
4952
4953 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4954 __clear_bit(hlid, &wl->ap_ps_map);
4955 __clear_bit(hlid, &wl->ap_fw_ps_map);
4956
4957 /*
4958 * save the last used PN in the private part of ieee80211_sta,
4959 * in case of recovery/suspend
4960 */
4961 wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4962
4963 wl12xx_free_link(wl, wlvif, &hlid);
4964 wl->active_sta_count--;
4965
4966 /*
4967 * rearm the tx watchdog when the last STA is freed - give the FW a
4968 * chance to return STA-buffered packets before complaining.
4969 */
4970 if (wl->active_sta_count == 0)
4971 wl12xx_rearm_tx_watchdog_locked(wl);
4972 }
4973
4974 static int wl12xx_sta_add(struct wl1271 *wl,
4975 struct wl12xx_vif *wlvif,
4976 struct ieee80211_sta *sta)
4977 {
4978 struct wl1271_station *wl_sta;
4979 int ret = 0;
4980 u8 hlid;
4981
4982 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4983
4984 ret = wl1271_allocate_sta(wl, wlvif, sta);
4985 if (ret < 0)
4986 return ret;
4987
4988 wl_sta = (struct wl1271_station *)sta->drv_priv;
4989 hlid = wl_sta->hlid;
4990
4991 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4992 if (ret < 0)
4993 wl1271_free_sta(wl, wlvif, hlid);
4994
4995 return ret;
4996 }
4997
4998 static int wl12xx_sta_remove(struct wl1271 *wl,
4999 struct wl12xx_vif *wlvif,
5000 struct ieee80211_sta *sta)
5001 {
5002 struct wl1271_station *wl_sta;
5003 int ret = 0, id;
5004
5005 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5006
5007 wl_sta = (struct wl1271_station *)sta->drv_priv;
5008 id = wl_sta->hlid;
5009 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5010 return -EINVAL;
5011
5012 ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5013 if (ret < 0)
5014 return ret;
5015
5016 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5017 return ret;
5018 }
5019
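/*
 * Start a ROC on this vif's own role, but only if no role currently holds
 * a ROC and the role id is valid.
 */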
5020 static void wlcore_roc_if_possible(struct wl1271 *wl,
5021 struct wl12xx_vif *wlvif)
5022 {
5023 if (find_first_bit(wl->roc_map,
5024 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5025 return;
5026
5027 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5028 return;
5029
5030 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5031 }
5032
5033 /*
5034 * when wl_sta is NULL, we treat this call as if coming from a
5035 * pending auth reply.
5036 * wl->mutex must be taken and the FW must be awake when the call
5037 * takes place.
5038 */
5039 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5040 struct wl1271_station *wl_sta, bool in_conn)
5041 {
5042 if (in_conn) {
5043 if (WARN_ON(wl_sta && wl_sta->in_connection))
5044 return;
5045
5046 if (!wlvif->ap_pending_auth_reply &&
5047 !wlvif->inconn_count)
5048 wlcore_roc_if_possible(wl, wlvif);
5049
5050 if (wl_sta) {
5051 wl_sta->in_connection = true;
5052 wlvif->inconn_count++;
5053 } else {
5054 wlvif->ap_pending_auth_reply = true;
5055 }
5056 } else {
5057 if (wl_sta && !wl_sta->in_connection)
5058 return;
5059
5060 if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5061 return;
5062
5063 if (WARN_ON(wl_sta && !wlvif->inconn_count))
5064 return;
5065
5066 if (wl_sta) {
5067 wl_sta->in_connection = false;
5068 wlvif->inconn_count--;
5069 } else {
5070 wlvif->ap_pending_auth_reply = false;
5071 }
5072
5073 if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5074 test_bit(wlvif->role_id, wl->roc_map))
5075 wl12xx_croc(wl, wlvif->role_id);
5076 }
5077 }
5078
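/*
 * Translate mac80211 station state transitions into firmware commands:
 * add/remove/authorize peers in AP mode, send the authorized state and
 * manage ROCs in STA mode, and save/restore security sequence numbers
 * across disassoc/assoc.
 */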
5079 static int wl12xx_update_sta_state(struct wl1271 *wl,
5080 struct wl12xx_vif *wlvif,
5081 struct ieee80211_sta *sta,
5082 enum ieee80211_sta_state old_state,
5083 enum ieee80211_sta_state new_state)
5084 {
5085 struct wl1271_station *wl_sta;
5086 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5087 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5088 int ret;
5089
5090 wl_sta = (struct wl1271_station *)sta->drv_priv;
5091
5092 /* Add station (AP mode) */
5093 if (is_ap &&
5094 old_state == IEEE80211_STA_NOTEXIST &&
5095 new_state == IEEE80211_STA_NONE) {
5096 ret = wl12xx_sta_add(wl, wlvif, sta);
5097 if (ret)
5098 return ret;
5099
5100 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5101 }
5102
5103 /* Remove station (AP mode) */
5104 if (is_ap &&
5105 old_state == IEEE80211_STA_NONE &&
5106 new_state == IEEE80211_STA_NOTEXIST) {
5107 /* must not fail */
5108 wl12xx_sta_remove(wl, wlvif, sta);
5109
5110 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5111 }
5112
5113 /* Authorize station (AP mode) */
5114 if (is_ap &&
5115 new_state == IEEE80211_STA_AUTHORIZED) {
5116 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5117 if (ret < 0)
5118 return ret;
5119
5120 /* reconfigure rates */
5121 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5122 if (ret < 0)
5123 return ret;
5124
5125 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5126 wl_sta->hlid);
5127 if (ret)
5128 return ret;
5129
5130 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5131 }
5132
5133 /* Authorize station */
5134 if (is_sta &&
5135 new_state == IEEE80211_STA_AUTHORIZED) {
5136 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5137 ret = wl12xx_set_authorized(wl, wlvif);
5138 if (ret)
5139 return ret;
5140 }
5141
5142 if (is_sta &&
5143 old_state == IEEE80211_STA_AUTHORIZED &&
5144 new_state == IEEE80211_STA_ASSOC) {
5145 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5146 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5147 }
5148
5149 /* save seq number on disassoc (suspend) */
5150 if (is_sta &&
5151 old_state == IEEE80211_STA_ASSOC &&
5152 new_state == IEEE80211_STA_AUTH) {
5153 wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5154 wlvif->total_freed_pkts = 0;
5155 }
5156
5157 /* restore seq number on assoc (resume) */
5158 if (is_sta &&
5159 old_state == IEEE80211_STA_AUTH &&
5160 new_state == IEEE80211_STA_ASSOC) {
5161 wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5162 }
5163
5164 /* clear ROCs on failure or authorization */
5165 if (is_sta &&
5166 (new_state == IEEE80211_STA_AUTHORIZED ||
5167 new_state == IEEE80211_STA_NOTEXIST)) {
5168 if (test_bit(wlvif->role_id, wl->roc_map))
5169 wl12xx_croc(wl, wlvif->role_id);
5170 }
5171
5172 if (is_sta &&
5173 old_state == IEEE80211_STA_NOTEXIST &&
5174 new_state == IEEE80211_STA_NONE) {
5175 if (find_first_bit(wl->roc_map,
5176 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5177 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5178 wl12xx_roc(wl, wlvif, wlvif->role_id,
5179 wlvif->band, wlvif->channel);
5180 }
5181 }
5182 return 0;
5183 }
5184
5185 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5186 struct ieee80211_vif *vif,
5187 struct ieee80211_sta *sta,
5188 enum ieee80211_sta_state old_state,
5189 enum ieee80211_sta_state new_state)
5190 {
5191 struct wl1271 *wl = hw->priv;
5192 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5193 int ret;
5194
5195 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5196 sta->aid, old_state, new_state);
5197
5198 mutex_lock(&wl->mutex);
5199
5200 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5201 ret = -EBUSY;
5202 goto out;
5203 }
5204
5205 ret = wl1271_ps_elp_wakeup(wl);
5206 if (ret < 0)
5207 goto out;
5208
5209 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5210
5211 wl1271_ps_elp_sleep(wl);
5212 out:
5213 mutex_unlock(&wl->mutex);
5214 if (new_state < old_state)
5215 return 0;
5216 return ret;
5217 }
5218
5219 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5220 struct ieee80211_vif *vif,
5221 struct ieee80211_ampdu_params *params)
5222 {
5223 struct wl1271 *wl = hw->priv;
5224 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5225 int ret;
5226 u8 hlid, *ba_bitmap;
5227 struct ieee80211_sta *sta = params->sta;
5228 enum ieee80211_ampdu_mlme_action action = params->action;
5229 u16 tid = params->tid;
5230 u16 *ssn = &params->ssn;
5231
5232 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5233 tid);
5234
5235 /* sanity check - the fields in FW are only 8 bits wide */
5236 if (WARN_ON(tid > 0xFF))
5237 return -ENOTSUPP;
5238
5239 mutex_lock(&wl->mutex);
5240
5241 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5242 ret = -EAGAIN;
5243 goto out;
5244 }
5245
5246 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5247 hlid = wlvif->sta.hlid;
5248 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5249 struct wl1271_station *wl_sta;
5250
5251 wl_sta = (struct wl1271_station *)sta->drv_priv;
5252 hlid = wl_sta->hlid;
5253 } else {
5254 ret = -EINVAL;
5255 goto out;
5256 }
5257
5258 ba_bitmap = &wl->links[hlid].ba_bitmap;
5259
5260 ret = wl1271_ps_elp_wakeup(wl);
5261 if (ret < 0)
5262 goto out;
5263
5264 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5265 tid, action);
5266
5267 switch (action) {
5268 case IEEE80211_AMPDU_RX_START:
5269 if (!wlvif->ba_support || !wlvif->ba_allowed) {
5270 ret = -ENOTSUPP;
5271 break;
5272 }
5273
5274 if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5275 ret = -EBUSY;
5276 wl1271_error("exceeded max RX BA sessions");
5277 break;
5278 }
5279
5280 if (*ba_bitmap & BIT(tid)) {
5281 ret = -EINVAL;
5282 wl1271_error("cannot enable RX BA session on active "
5283 "tid: %d", tid);
5284 break;
5285 }
5286
5287 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5288 hlid,
5289 params->buf_size);
5290
5291 if (!ret) {
5292 *ba_bitmap |= BIT(tid);
5293 wl->ba_rx_session_count++;
5294 }
5295 break;
5296
5297 case IEEE80211_AMPDU_RX_STOP:
5298 if (!(*ba_bitmap & BIT(tid))) {
5299 /*
5300 * this happens on reconfig - so only output a debug
5301 * message for now, and don't fail the function.
5302 */
5303 wl1271_debug(DEBUG_MAC80211,
5304 "no active RX BA session on tid: %d",
5305 tid);
5306 ret = 0;
5307 break;
5308 }
5309
5310 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5311 hlid, 0);
5312 if (!ret) {
5313 *ba_bitmap &= ~BIT(tid);
5314 wl->ba_rx_session_count--;
5315 }
5316 break;
5317
5318 /*
5319 * BA initiator sessions are managed by the FW independently.
5320 * All TX AMPDU actions intentionally fall through to the same break.
5321 */
5322 case IEEE80211_AMPDU_TX_START:
5323 case IEEE80211_AMPDU_TX_STOP_CONT:
5324 case IEEE80211_AMPDU_TX_STOP_FLUSH:
5325 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5326 case IEEE80211_AMPDU_TX_OPERATIONAL:
5327 ret = -EINVAL;
5328 break;
5329
5330 default:
5331 wl1271_error("Incorrect ampdu action id=%x\n", action);
5332 ret = -EINVAL;
5333 }
5334
5335 wl1271_ps_elp_sleep(wl);
5336
5337 out:
5338 mutex_unlock(&wl->mutex);
5339
5340 return ret;
5341 }
5342
5343 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5344 struct ieee80211_vif *vif,
5345 const struct cfg80211_bitrate_mask *mask)
5346 {
5347 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5348 struct wl1271 *wl = hw->priv;
5349 int i, ret = 0;
5350
5351 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5352 mask->control[NL80211_BAND_2GHZ].legacy,
5353 mask->control[NL80211_BAND_5GHZ].legacy);
5354
5355 mutex_lock(&wl->mutex);
5356
5357 for (i = 0; i < WLCORE_NUM_BANDS; i++)
5358 wlvif->bitrate_masks[i] =
5359 wl1271_tx_enabled_rates_get(wl,
5360 mask->control[i].legacy,
5361 i);
5362
5363 if (unlikely(wl->state != WLCORE_STATE_ON))
5364 goto out;
5365
5366 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5367 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5368
5369 ret = wl1271_ps_elp_wakeup(wl);
5370 if (ret < 0)
5371 goto out;
5372
5373 wl1271_set_band_rate(wl, wlvif);
5374 wlvif->basic_rate =
5375 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5376 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5377
5378 wl1271_ps_elp_sleep(wl);
5379 }
5380 out:
5381 mutex_unlock(&wl->mutex);
5382
5383 return ret;
5384 }
5385
5386 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5387 struct ieee80211_vif *vif,
5388 struct ieee80211_channel_switch *ch_switch)
5389 {
5390 struct wl1271 *wl = hw->priv;
5391 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5392 int ret;
5393
5394 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5395
5396 wl1271_tx_flush(wl);
5397
5398 mutex_lock(&wl->mutex);
5399
5400 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5401 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5402 ieee80211_chswitch_done(vif, false);
5403 goto out;
5404 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5405 goto out;
5406 }
5407
5408 ret = wl1271_ps_elp_wakeup(wl);
5409 if (ret < 0)
5410 goto out;
5411
5412 /* TODO: change mac80211 to pass vif as param */
5413
5414 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5415 unsigned long delay_usec;
5416
5417 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5418 if (ret)
5419 goto out_sleep;
5420
5421 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5422
5423 /* indicate failure 5 seconds after channel switch time */
5424 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5425 ch_switch->count;
5426 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5427 usecs_to_jiffies(delay_usec) +
5428 msecs_to_jiffies(5000));
5429 }
5430
5431 out_sleep:
5432 wl1271_ps_elp_sleep(wl);
5433
5434 out:
5435 mutex_unlock(&wl->mutex);
5436 }
5437
5438 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5439 struct wl12xx_vif *wlvif,
5440 u8 eid)
5441 {
5442 int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5443 struct sk_buff *beacon =
5444 ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5445
5446 if (!beacon)
5447 return NULL;
5448
5449 return cfg80211_find_ie(eid,
5450 beacon->data + ieoffset,
5451 beacon->len - ieoffset);
5452 }
5453
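/*
 * Read the current CSA countdown value from the Channel Switch
 * Announcement IE of the beacon mac80211 is about to transmit.
 */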
5454 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5455 u8 *csa_count)
5456 {
5457 const u8 *ie;
5458 const struct ieee80211_channel_sw_ie *ie_csa;
5459
5460 ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5461 if (!ie)
5462 return -EINVAL;
5463
5464 ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5465 *csa_count = ie_csa->count;
5466
5467 return 0;
5468 }
5469
5470 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5471 struct ieee80211_vif *vif,
5472 struct cfg80211_chan_def *chandef)
5473 {
5474 struct wl1271 *wl = hw->priv;
5475 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5476 struct ieee80211_channel_switch ch_switch = {
5477 .block_tx = true,
5478 .chandef = *chandef,
5479 };
5480 int ret;
5481
5482 wl1271_debug(DEBUG_MAC80211,
5483 "mac80211 channel switch beacon (role %d)",
5484 wlvif->role_id);
5485
5486 ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5487 if (ret < 0) {
5488 wl1271_error("error getting beacon (for CSA counter)");
5489 return;
5490 }
5491
5492 mutex_lock(&wl->mutex);
5493
5494 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5495 ret = -EBUSY;
5496 goto out;
5497 }
5498
5499 ret = wl1271_ps_elp_wakeup(wl);
5500 if (ret < 0)
5501 goto out;
5502
5503 ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5504 if (ret)
5505 goto out_sleep;
5506
5507 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5508
5509 out_sleep:
5510 wl1271_ps_elp_sleep(wl);
5511 out:
5512 mutex_unlock(&wl->mutex);
5513 }
5514
5515 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5516 u32 queues, bool drop)
5517 {
5518 struct wl1271 *wl = hw->priv;
5519
5520 wl1271_tx_flush(wl);
5521 }
5522
5523 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5524 struct ieee80211_vif *vif,
5525 struct ieee80211_channel *chan,
5526 int duration,
5527 enum ieee80211_roc_type type)
5528 {
5529 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5530 struct wl1271 *wl = hw->priv;
5531 int channel, active_roc, ret = 0;
5532
5533 channel = ieee80211_frequency_to_channel(chan->center_freq);
5534
5535 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5536 channel, wlvif->role_id);
5537
5538 mutex_lock(&wl->mutex);
5539
5540 if (unlikely(wl->state != WLCORE_STATE_ON))
5541 goto out;
5542
5543 /* return EBUSY if we can't ROC right now */
5544 active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5545 if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5546 wl1271_warning("active roc on role %d", active_roc);
5547 ret = -EBUSY;
5548 goto out;
5549 }
5550
5551 ret = wl1271_ps_elp_wakeup(wl);
5552 if (ret < 0)
5553 goto out;
5554
5555 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5556 if (ret < 0)
5557 goto out_sleep;
5558
5559 wl->roc_vif = vif;
5560 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5561 msecs_to_jiffies(duration));
5562 out_sleep:
5563 wl1271_ps_elp_sleep(wl);
5564 out:
5565 mutex_unlock(&wl->mutex);
5566 return ret;
5567 }
5568
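/*
 * Stop the device role used for the ROC and clear wl->roc_vif. Returns 0
 * if the ROC was already completed and -EBUSY if the vif is not
 * initialized.
 */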
5569 static int __wlcore_roc_completed(struct wl1271 *wl)
5570 {
5571 struct wl12xx_vif *wlvif;
5572 int ret;
5573
5574 /* already completed */
5575 if (unlikely(!wl->roc_vif))
5576 return 0;
5577
5578 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5579
5580 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5581 return -EBUSY;
5582
5583 ret = wl12xx_stop_dev(wl, wlvif);
5584 if (ret < 0)
5585 return ret;
5586
5587 wl->roc_vif = NULL;
5588
5589 return 0;
5590 }
5591
5592 static int wlcore_roc_completed(struct wl1271 *wl)
5593 {
5594 int ret;
5595
5596 wl1271_debug(DEBUG_MAC80211, "roc complete");
5597
5598 mutex_lock(&wl->mutex);
5599
5600 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5601 ret = -EBUSY;
5602 goto out;
5603 }
5604
5605 ret = wl1271_ps_elp_wakeup(wl);
5606 if (ret < 0)
5607 goto out;
5608
5609 ret = __wlcore_roc_completed(wl);
5610
5611 wl1271_ps_elp_sleep(wl);
5612 out:
5613 mutex_unlock(&wl->mutex);
5614
5615 return ret;
5616 }
5617
5618 static void wlcore_roc_complete_work(struct work_struct *work)
5619 {
5620 struct delayed_work *dwork;
5621 struct wl1271 *wl;
5622 int ret;
5623
5624 dwork = to_delayed_work(work);
5625 wl = container_of(dwork, struct wl1271, roc_complete_work);
5626
5627 ret = wlcore_roc_completed(wl);
5628 if (!ret)
5629 ieee80211_remain_on_channel_expired(wl->hw);
5630 }
5631
5632 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5633 {
5634 struct wl1271 *wl = hw->priv;
5635
5636 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5637
5638 /* TODO: per-vif */
5639 wl1271_tx_flush(wl);
5640
5641 /*
5642 * we can't just flush_work here, because it might deadlock
5643 * (as we might get called from the same workqueue)
5644 */
5645 cancel_delayed_work_sync(&wl->roc_complete_work);
5646 wlcore_roc_completed(wl);
5647
5648 return 0;
5649 }
5650
5651 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5652 struct ieee80211_vif *vif,
5653 struct ieee80211_sta *sta,
5654 u32 changed)
5655 {
5656 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5657
5658 wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5659
5660 if (!(changed & IEEE80211_RC_BW_CHANGED))
5661 return;
5662
5663 /* this callback is atomic, so schedule a new work */
5664 wlvif->rc_update_bw = sta->bandwidth;
5665 memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5666 ieee80211_queue_work(hw, &wlvif->rc_update_work);
5667 }
5668
5669 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5670 struct ieee80211_vif *vif,
5671 struct ieee80211_sta *sta,
5672 struct station_info *sinfo)
5673 {
5674 struct wl1271 *wl = hw->priv;
5675 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5676 s8 rssi_dbm;
5677 int ret;
5678
5679 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5680
5681 mutex_lock(&wl->mutex);
5682
5683 if (unlikely(wl->state != WLCORE_STATE_ON))
5684 goto out;
5685
5686 ret = wl1271_ps_elp_wakeup(wl);
5687 if (ret < 0)
5688 goto out_sleep;
5689
5690 ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5691 if (ret < 0)
5692 goto out_sleep;
5693
5694 sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5695 sinfo->signal = rssi_dbm;
5696
5697 out_sleep:
5698 wl1271_ps_elp_sleep(wl);
5699
5700 out:
5701 mutex_unlock(&wl->mutex);
5702 }
5703
5704 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5705 struct ieee80211_sta *sta)
5706 {
5707 struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5708 struct wl1271 *wl = hw->priv;
5709 u8 hlid = wl_sta->hlid;
5710
5711 /* return in units of Kbps */
5712 return (wl->links[hlid].fw_rate_mbps * 1000);
5713 }
5714
5715 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5716 {
5717 struct wl1271 *wl = hw->priv;
5718 bool ret = false;
5719
5720 mutex_lock(&wl->mutex);
5721
5722 if (unlikely(wl->state != WLCORE_STATE_ON))
5723 goto out;
5724
5725 /* packets are considered pending if in the TX queue or the FW */
5726 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5727 out:
5728 mutex_unlock(&wl->mutex);
5729
5730 return ret;
5731 }
5732
5733 /* can't be const, mac80211 writes to this */
static struct ieee80211_rate wl1271_rates[] = {
        { .bitrate = 10,
          .hw_value = CONF_HW_BIT_RATE_1MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
        { .bitrate = 20,
          .hw_value = CONF_HW_BIT_RATE_2MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
          .flags = IEEE80211_RATE_SHORT_PREAMBLE },
        { .bitrate = 55,
          .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
          .flags = IEEE80211_RATE_SHORT_PREAMBLE },
        { .bitrate = 110,
          .hw_value = CONF_HW_BIT_RATE_11MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
          .flags = IEEE80211_RATE_SHORT_PREAMBLE },
        { .bitrate = 60,
          .hw_value = CONF_HW_BIT_RATE_6MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
        { .bitrate = 90,
          .hw_value = CONF_HW_BIT_RATE_9MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
        { .bitrate = 120,
          .hw_value = CONF_HW_BIT_RATE_12MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
        { .bitrate = 180,
          .hw_value = CONF_HW_BIT_RATE_18MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
        { .bitrate = 240,
          .hw_value = CONF_HW_BIT_RATE_24MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
        { .bitrate = 360,
          .hw_value = CONF_HW_BIT_RATE_36MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
        { .bitrate = 480,
          .hw_value = CONF_HW_BIT_RATE_48MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
        { .bitrate = 540,
          .hw_value = CONF_HW_BIT_RATE_54MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};

/* can't be const, mac80211 writes to this */
static struct ieee80211_channel wl1271_channels[] = {
        { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
};

/* can't be const, mac80211 writes to this */
static struct ieee80211_supported_band wl1271_band_2ghz = {
        .channels = wl1271_channels,
        .n_channels = ARRAY_SIZE(wl1271_channels),
        .bitrates = wl1271_rates,
        .n_bitrates = ARRAY_SIZE(wl1271_rates),
};

/* 5 GHz data rates for WL1273 */
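/* OFDM rates only; the CCK rates are not used on the 5 GHz band */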
static struct ieee80211_rate wl1271_rates_5ghz[] = {
        { .bitrate = 60,
          .hw_value = CONF_HW_BIT_RATE_6MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
        { .bitrate = 90,
          .hw_value = CONF_HW_BIT_RATE_9MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
        { .bitrate = 120,
          .hw_value = CONF_HW_BIT_RATE_12MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
        { .bitrate = 180,
          .hw_value = CONF_HW_BIT_RATE_18MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
        { .bitrate = 240,
          .hw_value = CONF_HW_BIT_RATE_24MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
        { .bitrate = 360,
          .hw_value = CONF_HW_BIT_RATE_36MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
        { .bitrate = 480,
          .hw_value = CONF_HW_BIT_RATE_48MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
        { .bitrate = 540,
          .hw_value = CONF_HW_BIT_RATE_54MBPS,
          .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};

/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
        { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
        { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
};

static struct ieee80211_supported_band wl1271_band_5ghz = {
        .channels = wl1271_channels_5ghz,
        .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
        .bitrates = wl1271_rates_5ghz,
        .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};

static const struct ieee80211_ops wl1271_ops = {
        .start = wl1271_op_start,
        .stop = wlcore_op_stop,
        .add_interface = wl1271_op_add_interface,
        .remove_interface = wl1271_op_remove_interface,
        .change_interface = wl12xx_op_change_interface,
#ifdef CONFIG_PM
        .suspend = wl1271_op_suspend,
        .resume = wl1271_op_resume,
#endif
        .config = wl1271_op_config,
        .prepare_multicast = wl1271_op_prepare_multicast,
        .configure_filter = wl1271_op_configure_filter,
        .tx = wl1271_op_tx,
        .set_key = wlcore_op_set_key,
        .hw_scan = wl1271_op_hw_scan,
        .cancel_hw_scan = wl1271_op_cancel_hw_scan,
        .sched_scan_start = wl1271_op_sched_scan_start,
        .sched_scan_stop = wl1271_op_sched_scan_stop,
        .bss_info_changed = wl1271_op_bss_info_changed,
        .set_frag_threshold = wl1271_op_set_frag_threshold,
        .set_rts_threshold = wl1271_op_set_rts_threshold,
        .conf_tx = wl1271_op_conf_tx,
        .get_tsf = wl1271_op_get_tsf,
        .get_survey = wl1271_op_get_survey,
        .sta_state = wl12xx_op_sta_state,
        .ampdu_action = wl1271_op_ampdu_action,
        .tx_frames_pending = wl1271_tx_frames_pending,
        .set_bitrate_mask = wl12xx_set_bitrate_mask,
        .set_default_unicast_key = wl1271_op_set_default_key_idx,
        .channel_switch = wl12xx_op_channel_switch,
        .channel_switch_beacon = wlcore_op_channel_switch_beacon,
        .flush = wlcore_op_flush,
        .remain_on_channel = wlcore_op_remain_on_channel,
        .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
        .add_chanctx = wlcore_op_add_chanctx,
        .remove_chanctx = wlcore_op_remove_chanctx,
        .change_chanctx = wlcore_op_change_chanctx,
        .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
        .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
        .switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
        .sta_rc_update = wlcore_op_sta_rc_update,
        .sta_statistics = wlcore_op_sta_statistics,
        .get_expected_throughput = wlcore_op_get_expected_throughput,
        CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};


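/*
 * Translate a firmware rate index into the index of the corresponding
 * entry in the per-band rate table registered with mac80211.
 */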
u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
{
        u8 idx;

        BUG_ON(band >= 2);

        if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
                wl1271_error("Illegal RX rate from HW: %d", rate);
                return 0;
        }

        idx = wl->band_rate_to_idx[band][rate];
        if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
                wl1271_error("Unsupported RX rate from HW: %d", rate);
                return 0;
        }

        return idx;
}

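/*
 * Populate wl->addresses with wl->num_mac_addr consecutive MAC addresses
 * derived from the base OUI/NIC pair. If fewer addresses than
 * WLCORE_NUM_MAC_ADDRESSES are available, the last slot reuses the first
 * address with the locally administered bit set.
 */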
static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
{
        int i;

        wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
                     oui, nic);

        if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
                wl1271_warning("NIC part of the MAC address wraps around!");

        for (i = 0; i < wl->num_mac_addr; i++) {
                wl->addresses[i].addr[0] = (u8)(oui >> 16);
                wl->addresses[i].addr[1] = (u8)(oui >> 8);
                wl->addresses[i].addr[2] = (u8) oui;
                wl->addresses[i].addr[3] = (u8)(nic >> 16);
                wl->addresses[i].addr[4] = (u8)(nic >> 8);
                wl->addresses[i].addr[5] = (u8) nic;
                nic++;
        }

        /* we may be at most one address short */
        WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);

        /*
         * turn on the LAA bit in the first address and use it as
         * the last address.
         */
        if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
                int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
                memcpy(&wl->addresses[idx], &wl->addresses[0],
                       sizeof(wl->addresses[0]));
                /* LAA bit */
                wl->addresses[idx].addr[0] |= BIT(1);
        }

        wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
        wl->hw->wiphy->addresses = wl->addresses;
}

static int wl12xx_get_hw_info(struct wl1271 *wl)
{
        int ret;

        ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
        if (ret < 0)
                goto out;

        wl->fuse_oui_addr = 0;
        wl->fuse_nic_addr = 0;

        ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
        if (ret < 0)
                goto out;

        if (wl->ops->get_mac)
                ret = wl->ops->get_mac(wl);

out:
        return ret;
}

static int wl1271_register_hw(struct wl1271 *wl)
{
        int ret;
        u32 oui_addr = 0, nic_addr = 0;

        if (wl->mac80211_registered)
                return 0;

        if (wl->nvs_len >= 12) {
                /* NOTE: the wl->nvs->nvs element must be first; to
                 * simplify the casting we assume it is at the
                 * beginning of the wl->nvs structure.
                 */
                u8 *nvs_ptr = (u8 *)wl->nvs;

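                /*
                 * the OUI and NIC parts of the base MAC address live at
                 * fixed byte offsets in the NVS image (bytes 3-6 and 10-11)
                 */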
                oui_addr =
                        (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
                nic_addr =
                        (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
        }

        /* if the MAC address is zeroed in the NVS, derive it from fuse */
        if (oui_addr == 0 && nic_addr == 0) {
                oui_addr = wl->fuse_oui_addr;
                /* fuse has the BD_ADDR, the WLAN addresses are the next two */
                nic_addr = wl->fuse_nic_addr + 1;
        }

        wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);

        ret = ieee80211_register_hw(wl->hw);
        if (ret < 0) {
                wl1271_error("unable to register mac80211 hw: %d", ret);
                goto out;
        }

        wl->mac80211_registered = true;

        wl1271_debugfs_init(wl);

        wl1271_notice("loaded");

out:
        return ret;
}

static void wl1271_unregister_hw(struct wl1271 *wl)
{
        if (wl->plt)
                wl1271_plt_stop(wl);

        ieee80211_unregister_hw(wl->hw);
        wl->mac80211_registered = false;
}

static int wl1271_init_ieee80211(struct wl1271 *wl)
{
        int i;
        static const u32 cipher_suites[] = {
                WLAN_CIPHER_SUITE_WEP40,
                WLAN_CIPHER_SUITE_WEP104,
                WLAN_CIPHER_SUITE_TKIP,
                WLAN_CIPHER_SUITE_CCMP,
                WL1271_CIPHER_SUITE_GEM,
        };

        /* The tx descriptor buffer */
        wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);

        if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
                wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;

        /* unit us */
        /* FIXME: find a proper value */
        wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;

        ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
        ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
        ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
        ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
        ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
        ieee80211_hw_set(wl->hw, AP_LINK_PS);
        ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
        ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
        ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
        ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
        ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
        ieee80211_hw_set(wl->hw, SIGNAL_DBM);
        ieee80211_hw_set(wl->hw, SUPPORTS_PS);

        wl->hw->wiphy->cipher_suites = cipher_suites;
        wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

        wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
                                         BIT(NL80211_IFTYPE_AP) |
                                         BIT(NL80211_IFTYPE_P2P_DEVICE) |
                                         BIT(NL80211_IFTYPE_P2P_CLIENT) |
#ifdef CONFIG_MAC80211_MESH
                                         BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
                                         BIT(NL80211_IFTYPE_P2P_GO);

        wl->hw->wiphy->max_scan_ssids = 1;
        wl->hw->wiphy->max_sched_scan_ssids = 16;
        wl->hw->wiphy->max_match_sets = 16;
        /*
         * Maximum length of elements in scanning probe request templates
         * should be the maximum length possible for a template, without
         * the IEEE80211 header of the template
         */
        wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
                        sizeof(struct ieee80211_header);

        wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
                        sizeof(struct ieee80211_header);

        wl->hw->wiphy->max_remain_on_channel_duration = 30000;

        wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
                                WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
                                WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
                                WIPHY_FLAG_HAS_CHANNEL_SWITCH;

        /* make sure all our channels fit in the scanned_ch bitmask */
        BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
                     ARRAY_SIZE(wl1271_channels_5ghz) >
                     WL1271_MAX_CHANNELS);
        /*
         * clear channel flags from the previous usage
         * and restore max_power & max_antenna_gain values.
         */
        for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
                wl1271_band_2ghz.channels[i].flags = 0;
                wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
                wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
        }

        for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
                wl1271_band_5ghz.channels[i].flags = 0;
                wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
                wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
        }

        /*
         * We keep local copies of the band structs because we need to
         * modify them on a per-device basis.
         */
        memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
               sizeof(wl1271_band_2ghz));
        memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
               &wl->ht_cap[NL80211_BAND_2GHZ],
               sizeof(*wl->ht_cap));
        memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
               sizeof(wl1271_band_5ghz));
        memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
               &wl->ht_cap[NL80211_BAND_5GHZ],
               sizeof(*wl->ht_cap));

        wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
                &wl->bands[NL80211_BAND_2GHZ];
        wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
                &wl->bands[NL80211_BAND_5GHZ];

        /*
         * allow 4 queues per mac address we support +
         * 1 cab queue per mac + one global offchannel Tx queue
         */
        wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;

        /* the last queue is the offchannel queue */
        wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
        wl->hw->max_rates = 1;

        wl->hw->wiphy->reg_notifier = wl1271_reg_notify;

        /* the FW answers probe-requests in AP-mode */
        wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
        wl->hw->wiphy->probe_resp_offload =
                NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
                NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
                NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;

        /* allowed interface combinations */
        wl->hw->wiphy->iface_combinations = wl->iface_combinations;
        wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;

        /* register vendor commands */
        wlcore_set_vendor_commands(wl->hw->wiphy);

        SET_IEEE80211_DEV(wl->hw, wl->dev);

        wl->hw->sta_data_size = sizeof(struct wl1271_station);
        wl->hw->vif_data_size = sizeof(struct wl12xx_vif);

        wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;

        return 0;
}

struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
                                     u32 mbox_size)
{
        struct ieee80211_hw *hw;
        struct wl1271 *wl;
        int i, j, ret;
        unsigned int order;

        hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
        if (!hw) {
                wl1271_error("could not alloc ieee80211_hw");
                ret = -ENOMEM;
                goto err_hw_alloc;
        }

        wl = hw->priv;
        memset(wl, 0, sizeof(*wl));

        wl->priv = kzalloc(priv_size, GFP_KERNEL);
        if (!wl->priv) {
                wl1271_error("could not alloc wl priv");
                ret = -ENOMEM;
                goto err_priv_alloc;
        }

        INIT_LIST_HEAD(&wl->wlvif_list);

        wl->hw = hw;

        /*
         * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
         * we don't allocate any additional resource here, so that's fine.
         */
        for (i = 0; i < NUM_TX_QUEUES; i++)
                for (j = 0; j < WLCORE_MAX_LINKS; j++)
                        skb_queue_head_init(&wl->links[j].tx_queue[i]);

        skb_queue_head_init(&wl->deferred_rx_queue);
        skb_queue_head_init(&wl->deferred_tx_queue);

        INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
        INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
        INIT_WORK(&wl->tx_work, wl1271_tx_work);
        INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
        INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
        INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
        INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);

        wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
        if (!wl->freezable_wq) {
                ret = -ENOMEM;
                goto err_hw;
        }

        wl->channel = 0;
        wl->rx_counter = 0;
        wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
        wl->band = NL80211_BAND_2GHZ;
        wl->channel_type = NL80211_CHAN_NO_HT;
        wl->flags = 0;
        wl->sg_enabled = true;
        wl->sleep_auth = WL1271_PSM_ILLEGAL;
        wl->recovery_count = 0;
        wl->hw_pg_ver = -1;
        wl->ap_ps_map = 0;
        wl->ap_fw_ps_map = 0;
        wl->quirks = 0;
        wl->system_hlid = WL12XX_SYSTEM_HLID;
        wl->active_sta_count = 0;
        wl->active_link_count = 0;
        wl->fwlog_size = 0;

        /* The system link is always allocated */
        __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

        memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
        for (i = 0; i < wl->num_tx_desc; i++)
                wl->tx_frames[i] = NULL;

        spin_lock_init(&wl->wl_lock);

        wl->state = WLCORE_STATE_OFF;
        wl->fw_type = WL12XX_FW_TYPE_NONE;
        mutex_init(&wl->mutex);
        mutex_init(&wl->flush_mutex);
        init_completion(&wl->nvs_loading_complete);

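        /* allocate the aggregation buffer as whole pages so it stays physically contiguous */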
        order = get_order(aggr_buf_size);
        wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
        if (!wl->aggr_buf) {
                ret = -ENOMEM;
                goto err_wq;
        }
        wl->aggr_buf_size = aggr_buf_size;

        wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
        if (!wl->dummy_packet) {
                ret = -ENOMEM;
                goto err_aggr;
        }

        /* Allocate one page for the FW log */
        wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
        if (!wl->fwlog) {
                ret = -ENOMEM;
                goto err_dummy_packet;
        }

        wl->mbox_size = mbox_size;
        wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
        if (!wl->mbox) {
                ret = -ENOMEM;
                goto err_fwlog;
        }

        wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
        if (!wl->buffer_32) {
                ret = -ENOMEM;
                goto err_mbox;
        }

        return hw;

err_mbox:
        kfree(wl->mbox);

err_fwlog:
        free_page((unsigned long)wl->fwlog);

err_dummy_packet:
        dev_kfree_skb(wl->dummy_packet);

err_aggr:
        free_pages((unsigned long)wl->aggr_buf, order);

err_wq:
        destroy_workqueue(wl->freezable_wq);

err_hw:
        wl1271_debugfs_exit(wl);
        kfree(wl->priv);

err_priv_alloc:
        ieee80211_free_hw(hw);

err_hw_alloc:

        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(wlcore_alloc_hw);

int wlcore_free_hw(struct wl1271 *wl)
{
        /* Unblock any fwlog readers */
        mutex_lock(&wl->mutex);
        wl->fwlog_size = -1;
        mutex_unlock(&wl->mutex);

        wlcore_sysfs_free(wl);

        kfree(wl->buffer_32);
        kfree(wl->mbox);
        free_page((unsigned long)wl->fwlog);
        dev_kfree_skb(wl->dummy_packet);
        free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));

        wl1271_debugfs_exit(wl);

        vfree(wl->fw);
        wl->fw = NULL;
        wl->fw_type = WL12XX_FW_TYPE_NONE;
        kfree(wl->nvs);
        wl->nvs = NULL;

        kfree(wl->raw_fw_status);
        kfree(wl->fw_status);
        kfree(wl->tx_res_if);
        destroy_workqueue(wl->freezable_wq);

        kfree(wl->priv);
        ieee80211_free_hw(wl->hw);

        return 0;
}
EXPORT_SYMBOL_GPL(wlcore_free_hw);

#ifdef CONFIG_PM
static const struct wiphy_wowlan_support wlcore_wowlan_support = {
        .flags = WIPHY_WOWLAN_ANY,
        .n_patterns = WL1271_MAX_RX_FILTERS,
        .pattern_min_len = 1,
        .pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
};
#endif

static irqreturn_t wlcore_hardirq(int irq, void *cookie)
{
        return IRQ_WAKE_THREAD;
}

static void wlcore_nvs_cb(const struct firmware *fw, void *context)
{
        struct wl1271 *wl = context;
        struct platform_device *pdev = wl->pdev;
        struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
        struct resource *res;

        int ret;
        irq_handler_t hardirq_fn = NULL;

        if (fw) {
                wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
                if (!wl->nvs) {
                        wl1271_error("Could not allocate nvs data");
                        goto out;
                }
                wl->nvs_len = fw->size;
        } else if (pdev_data->family->nvs_name) {
                wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
                             pdev_data->family->nvs_name);
                wl->nvs = NULL;
                wl->nvs_len = 0;
        } else {
                wl->nvs = NULL;
                wl->nvs_len = 0;
        }

        ret = wl->ops->setup(wl);
        if (ret < 0)
                goto out_free_nvs;

        BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);

        /* adjust some runtime configuration parameters */
        wlcore_adjust_conf(wl);

        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res) {
                wl1271_error("Could not get IRQ resource");
                goto out_free_nvs;
        }

        wl->irq = res->start;
        wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
        wl->if_ops = pdev_data->if_ops;

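        /*
         * An edge-triggered interrupt needs a primary handler that just wakes
         * the IRQ thread; otherwise IRQF_ONESHOT keeps the line masked until
         * the threaded handler has run.
         */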
        if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
                hardirq_fn = wlcore_hardirq;
        else
                wl->irq_flags |= IRQF_ONESHOT;

        ret = wl12xx_set_power_on(wl);
        if (ret < 0)
                goto out_free_nvs;

        ret = wl12xx_get_hw_info(wl);
        if (ret < 0) {
                wl1271_error("couldn't get hw info");
                wl1271_power_off(wl);
                goto out_free_nvs;
        }

        ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
                                   wl->irq_flags, pdev->name, wl);
        if (ret < 0) {
                wl1271_error("interrupt configuration failed");
                wl1271_power_off(wl);
                goto out_free_nvs;
        }

#ifdef CONFIG_PM
        ret = enable_irq_wake(wl->irq);
        if (!ret) {
                wl->irq_wake_enabled = true;
                device_init_wakeup(wl->dev, 1);
                if (pdev_data->pwr_in_suspend)
                        wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
        }
#endif
        disable_irq(wl->irq);
        wl1271_power_off(wl);

        ret = wl->ops->identify_chip(wl);
        if (ret < 0)
                goto out_irq;

        ret = wl1271_init_ieee80211(wl);
        if (ret)
                goto out_irq;

        ret = wl1271_register_hw(wl);
        if (ret)
                goto out_irq;

        ret = wlcore_sysfs_init(wl);
        if (ret)
                goto out_unreg;

        wl->initialized = true;
        goto out;

out_unreg:
        wl1271_unregister_hw(wl);

out_irq:
        free_irq(wl->irq, wl);

out_free_nvs:
        kfree(wl->nvs);

out:
        release_firmware(fw);
        complete_all(&wl->nvs_loading_complete);
}

int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
{
        struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
        const char *nvs_name;
        int ret = 0;

        if (!wl->ops || !wl->ptable || !pdev_data)
                return -EINVAL;

        wl->dev = &pdev->dev;
        wl->pdev = pdev;
        platform_set_drvdata(pdev, wl);

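        /*
         * NVS loading is asynchronous: the remainder of the probe sequence
         * runs from wlcore_nvs_cb(), either when the firmware request
         * completes or directly below when no NVS file is expected.
         */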
        if (pdev_data->family && pdev_data->family->nvs_name) {
                nvs_name = pdev_data->family->nvs_name;
                ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
                                              nvs_name, &pdev->dev, GFP_KERNEL,
                                              wl, wlcore_nvs_cb);
                if (ret < 0) {
                        wl1271_error("request_firmware_nowait failed for %s: %d",
                                     nvs_name, ret);
                        complete_all(&wl->nvs_loading_complete);
                }
        } else {
                wlcore_nvs_cb(NULL, wl);
        }

        return ret;
}
EXPORT_SYMBOL_GPL(wlcore_probe);

int wlcore_remove(struct platform_device *pdev)
{
        struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
        struct wl1271 *wl = platform_get_drvdata(pdev);

        if (pdev_data->family && pdev_data->family->nvs_name)
                wait_for_completion(&wl->nvs_loading_complete);
        if (!wl->initialized)
                return 0;

        if (wl->irq_wake_enabled) {
                device_init_wakeup(wl->dev, 0);
                disable_irq_wake(wl->irq);
        }
        wl1271_unregister_hw(wl);
        free_irq(wl->irq, wl);
        wlcore_free_hw(wl);

        return 0;
}
EXPORT_SYMBOL_GPL(wlcore_remove);

u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
                 "FW logger options: continuous, dbgpins or disable");

module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");

module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");