1 /*
2  * This file is part of wlcore
3  *
4  * Copyright (C) 2008-2010 Nokia Corporation
5  * Copyright (C) 2011-2013 Texas Instruments Inc.
6  *
7  * This program is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU General Public License
9  * version 2 as published by the Free Software Foundation.
10  *
11  * This program is distributed in the hope that it will be useful, but
12  * WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * General Public License for more details.
15  *
16  * You should have received a copy of the GNU General Public License
17  * along with this program; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
19  * 02110-1301 USA
20  *
21  */
22 
23 #include <linux/module.h>
24 #include <linux/firmware.h>
25 #include <linux/etherdevice.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/irq.h>
29 #include <linux/pm_runtime.h>
30 
31 #include "wlcore.h"
32 #include "debug.h"
33 #include "wl12xx_80211.h"
34 #include "io.h"
35 #include "tx.h"
36 #include "ps.h"
37 #include "init.h"
38 #include "debugfs.h"
39 #include "testmode.h"
40 #include "vendor_cmd.h"
41 #include "scan.h"
42 #include "hw_ops.h"
43 #include "sysfs.h"
44 
45 #define WL1271_BOOT_RETRIES 3
46 #define WL1271_SUSPEND_SLEEP 100
47 #define WL1271_WAKEUP_TIMEOUT 500
48 
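/*
 * Module parameters (registered with module_param() further down in this
 * file). A value of -1 (or NULL for fwlog_param) means "keep the default
 * from the platform conf"; see wlcore_adjust_conf() below.
 */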
49 static char *fwlog_param;
50 static int fwlog_mem_blocks = -1;
51 static int bug_on_recovery = -1;
52 static int no_recovery     = -1;
53 
54 static void __wl1271_op_remove_interface(struct wl1271 *wl,
55 					 struct ieee80211_vif *vif,
56 					 bool reset_tx_queues);
57 static void wlcore_op_stop_locked(struct wl1271 *wl);
58 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
59 
60 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
61 {
62 	int ret;
63 
64 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
65 		return -EINVAL;
66 
67 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
68 		return 0;
69 
70 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
71 		return 0;
72 
73 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
74 	if (ret < 0)
75 		return ret;
76 
77 	wl1271_info("Association completed.");
78 	return 0;
79 }
80 
81 static void wl1271_reg_notify(struct wiphy *wiphy,
82 			      struct regulatory_request *request)
83 {
84 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
85 	struct wl1271 *wl = hw->priv;
86 
87 	/* copy the current dfs region */
88 	if (request)
89 		wl->dfs_region = request->dfs_region;
90 
91 	wlcore_regdomain_config(wl);
92 }
93 
94 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
95 				   bool enable)
96 {
97 	int ret = 0;
98 
99 	/* we should hold wl->mutex */
100 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
101 	if (ret < 0)
102 		goto out;
103 
104 	if (enable)
105 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
106 	else
107 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
108 out:
109 	return ret;
110 }
111 
112 /*
113  * this function is being called when the rx_streaming interval
114  * has been changed or rx_streaming should be disabled
115  */
116 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
117 {
118 	int ret = 0;
119 	int period = wl->conf.rx_streaming.interval;
120 
121 	/* don't reconfigure if rx_streaming is disabled */
122 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
123 		goto out;
124 
125 	/* reconfigure/disable according to new streaming_period */
126 	if (period &&
127 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
128 	    (wl->conf.rx_streaming.always ||
129 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
130 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
131 	else {
132 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
133 		/* don't cancel_work_sync since we might deadlock */
134 		del_timer_sync(&wlvif->rx_streaming_timer);
135 	}
136 out:
137 	return ret;
138 }
139 
140 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
141 {
142 	int ret;
143 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
144 						rx_streaming_enable_work);
145 	struct wl1271 *wl = wlvif->wl;
146 
147 	mutex_lock(&wl->mutex);
148 
149 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
150 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
151 	    (!wl->conf.rx_streaming.always &&
152 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
153 		goto out;
154 
155 	if (!wl->conf.rx_streaming.interval)
156 		goto out;
157 
158 	ret = pm_runtime_get_sync(wl->dev);
159 	if (ret < 0) {
160 		pm_runtime_put_noidle(wl->dev);
161 		goto out;
162 	}
163 
164 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
165 	if (ret < 0)
166 		goto out_sleep;
167 
168 	/* stop it after some time of inactivity */
169 	mod_timer(&wlvif->rx_streaming_timer,
170 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
171 
172 out_sleep:
173 	pm_runtime_mark_last_busy(wl->dev);
174 	pm_runtime_put_autosuspend(wl->dev);
175 out:
176 	mutex_unlock(&wl->mutex);
177 }
178 
179 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
180 {
181 	int ret;
182 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
183 						rx_streaming_disable_work);
184 	struct wl1271 *wl = wlvif->wl;
185 
186 	mutex_lock(&wl->mutex);
187 
188 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
189 		goto out;
190 
191 	ret = pm_runtime_get_sync(wl->dev);
192 	if (ret < 0) {
193 		pm_runtime_put_noidle(wl->dev);
194 		goto out;
195 	}
196 
197 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
198 	if (ret)
199 		goto out_sleep;
200 
201 out_sleep:
202 	pm_runtime_mark_last_busy(wl->dev);
203 	pm_runtime_put_autosuspend(wl->dev);
204 out:
205 	mutex_unlock(&wl->mutex);
206 }
207 
208 static void wl1271_rx_streaming_timer(struct timer_list *t)
209 {
210 	struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
211 	struct wl1271 *wl = wlvif->wl;
212 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
213 }
214 
215 /* wl->mutex must be taken */
216 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
217 {
218 	/* if the watchdog is not armed, don't do anything */
219 	if (wl->tx_allocated_blocks == 0)
220 		return;
221 
222 	cancel_delayed_work(&wl->tx_watchdog_work);
223 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
224 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
225 }
226 
227 static void wlcore_rc_update_work(struct work_struct *work)
228 {
229 	int ret;
230 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
231 						rc_update_work);
232 	struct wl1271 *wl = wlvif->wl;
233 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
234 
235 	mutex_lock(&wl->mutex);
236 
237 	if (unlikely(wl->state != WLCORE_STATE_ON))
238 		goto out;
239 
240 	ret = pm_runtime_get_sync(wl->dev);
241 	if (ret < 0) {
242 		pm_runtime_put_noidle(wl->dev);
243 		goto out;
244 	}
245 
246 	if (ieee80211_vif_is_mesh(vif)) {
247 		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
248 						     true, wlvif->sta.hlid);
249 		if (ret < 0)
250 			goto out_sleep;
251 	} else {
252 		wlcore_hw_sta_rc_update(wl, wlvif);
253 	}
254 
255 out_sleep:
256 	pm_runtime_mark_last_busy(wl->dev);
257 	pm_runtime_put_autosuspend(wl->dev);
258 out:
259 	mutex_unlock(&wl->mutex);
260 }
261 
262 static void wl12xx_tx_watchdog_work(struct work_struct *work)
263 {
264 	struct delayed_work *dwork;
265 	struct wl1271 *wl;
266 
267 	dwork = to_delayed_work(work);
268 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
269 
270 	mutex_lock(&wl->mutex);
271 
272 	if (unlikely(wl->state != WLCORE_STATE_ON))
273 		goto out;
274 
275 	/* Tx went out in the meantime - everything is ok */
276 	if (unlikely(wl->tx_allocated_blocks == 0))
277 		goto out;
278 
279 	/*
280 	 * if a ROC is in progress, we might not have any Tx for a long
281 	 * time (e.g. pending Tx on the non-ROC channels)
282 	 */
283 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
284 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
285 			     wl->conf.tx.tx_watchdog_timeout);
286 		wl12xx_rearm_tx_watchdog_locked(wl);
287 		goto out;
288 	}
289 
290 	/*
291 	 * if a scan is in progress, we might not have any Tx for a long
292 	 * time
293 	 */
294 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
295 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
296 			     wl->conf.tx.tx_watchdog_timeout);
297 		wl12xx_rearm_tx_watchdog_locked(wl);
298 		goto out;
299 	}
300 
301 	/*
302 	* AP might cache a frame for a long time for a sleeping station,
303 	* so rearm the timer if there's an AP interface with stations. If
304 	* Tx is genuinely stuck we will hopefully discover it when all
305 	* stations are removed due to inactivity.
306 	*/
307 	if (wl->active_sta_count) {
308 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
309 			     "%d stations",
310 			      wl->conf.tx.tx_watchdog_timeout,
311 			      wl->active_sta_count);
312 		wl12xx_rearm_tx_watchdog_locked(wl);
313 		goto out;
314 	}
315 
316 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
317 		     wl->conf.tx.tx_watchdog_timeout);
318 	wl12xx_queue_recovery_work(wl);
319 
320 out:
321 	mutex_unlock(&wl->mutex);
322 }
323 
324 static void wlcore_adjust_conf(struct wl1271 *wl)
325 {
326 
327 	if (fwlog_param) {
328 		if (!strcmp(fwlog_param, "continuous")) {
329 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
330 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
331 		} else if (!strcmp(fwlog_param, "dbgpins")) {
332 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
333 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
334 		} else if (!strcmp(fwlog_param, "disable")) {
335 			wl->conf.fwlog.mem_blocks = 0;
336 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
337 		} else {
338 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
339 		}
340 	}
341 
342 	if (bug_on_recovery != -1)
343 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
344 
345 	if (no_recovery != -1)
346 		wl->conf.recovery.no_recovery = (u8) no_recovery;
347 }
348 
349 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
350 					struct wl12xx_vif *wlvif,
351 					u8 hlid, u8 tx_pkts)
352 {
353 	bool fw_ps;
354 
355 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
356 
357 	/*
358 	 * Wake up from high-level PS if the STA is asleep with too few
359 	 * packets in FW or if the STA is awake.
360 	 */
361 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
362 		wl12xx_ps_link_end(wl, wlvif, hlid);
363 
364 	/*
365 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
366 	 * Make an exception if this is the only connected link. In this
367 	 * case FW-memory congestion is less of a problem.
368 	 * Note that a single connected STA means 2*ap_count + 1 active links,
369 	 * since we must account for the global and broadcast AP links
370 	 * for each AP. The "fw_ps" check assures us the other link is a STA
371 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
372 	 */
373 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
374 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
375 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
376 }
377 
378 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
379 					   struct wl12xx_vif *wlvif,
380 					   struct wl_fw_status *status)
381 {
382 	unsigned long cur_fw_ps_map;
383 	u8 hlid;
384 
385 	cur_fw_ps_map = status->link_ps_bitmap;
386 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
387 		wl1271_debug(DEBUG_PSM,
388 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
389 			     wl->ap_fw_ps_map, cur_fw_ps_map,
390 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
391 
392 		wl->ap_fw_ps_map = cur_fw_ps_map;
393 	}
394 
395 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
396 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
397 					    wl->links[hlid].allocated_pkts);
398 }
399 
400 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
401 {
402 	struct wl12xx_vif *wlvif;
403 	u32 old_tx_blk_count = wl->tx_blocks_available;
404 	int avail, freed_blocks;
405 	int i;
406 	int ret;
407 	struct wl1271_link *lnk;
408 
409 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
410 				   wl->raw_fw_status,
411 				   wl->fw_status_len, false);
412 	if (ret < 0)
413 		return ret;
414 
415 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
416 
417 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
418 		     "drv_rx_counter = %d, tx_results_counter = %d)",
419 		     status->intr,
420 		     status->fw_rx_counter,
421 		     status->drv_rx_counter,
422 		     status->tx_results_counter);
423 
424 	for (i = 0; i < NUM_TX_QUEUES; i++) {
425 		/* prevent wrap-around in freed-packets counter */
426 		wl->tx_allocated_pkts[i] -=
427 				(status->counters.tx_released_pkts[i] -
428 				wl->tx_pkts_freed[i]) & 0xff;
429 
430 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
431 	}
432 
433 
434 	for_each_set_bit(i, wl->links_map, wl->num_links) {
435 		u8 diff;
436 		lnk = &wl->links[i];
437 
438 		/* prevent wrap-around in freed-packets counter */
439 		diff = (status->counters.tx_lnk_free_pkts[i] -
440 		       lnk->prev_freed_pkts) & 0xff;
441 
442 		if (diff == 0)
443 			continue;
444 
445 		lnk->allocated_pkts -= diff;
446 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
447 
448 		/* accumulate the prev_freed_pkts counter */
449 		lnk->total_freed_pkts += diff;
450 	}
451 
452 	/* prevent wrap-around in total blocks counter */
453 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
454 		freed_blocks = status->total_released_blks -
455 			       wl->tx_blocks_freed;
456 	else
457 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
458 			       status->total_released_blks;
459 
460 	wl->tx_blocks_freed = status->total_released_blks;
461 
462 	wl->tx_allocated_blocks -= freed_blocks;
463 
464 	/*
465 	 * If the FW freed some blocks:
466 	 * If we still have allocated blocks - re-arm the timer, Tx is
467 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
468 	 */
469 	if (freed_blocks) {
470 		if (wl->tx_allocated_blocks)
471 			wl12xx_rearm_tx_watchdog_locked(wl);
472 		else
473 			cancel_delayed_work(&wl->tx_watchdog_work);
474 	}
475 
476 	avail = status->tx_total - wl->tx_allocated_blocks;
477 
478 	/*
479 	 * The FW might change the total number of TX memblocks before
480 	 * we get a notification about blocks being released. Thus, the
481 	 * available blocks calculation might yield a temporary result
482 	 * which is lower than the actual available blocks. Keeping in
483 	 * mind that only blocks that were allocated can be moved from
484 	 * TX to RX, tx_blocks_available should never decrease here.
485 	 */
486 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
487 				      avail);
488 
489 	/* if more blocks are available now, tx work can be scheduled */
490 	if (wl->tx_blocks_available > old_tx_blk_count)
491 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
492 
493 	/* for AP update num of allocated TX blocks per link and ps status */
494 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
495 		wl12xx_irq_update_links_status(wl, wlvif, status);
496 	}
497 
498 	/* update the host-chipset time offset */
499 	wl->time_offset = (ktime_get_boot_ns() >> 10) -
500 		(s64)(status->fw_localtime);
501 
502 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
503 
504 	return 0;
505 }
506 
507 static void wl1271_flush_deferred_work(struct wl1271 *wl)
508 {
509 	struct sk_buff *skb;
510 
511 	/* Pass all received frames to the network stack */
512 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
513 		ieee80211_rx_ni(wl->hw, skb);
514 
515 	/* Return sent skbs to the network stack */
516 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
517 		ieee80211_tx_status_ni(wl->hw, skb);
518 }
519 
520 static void wl1271_netstack_work(struct work_struct *work)
521 {
522 	struct wl1271 *wl =
523 		container_of(work, struct wl1271, netstack_work);
524 
525 	do {
526 		wl1271_flush_deferred_work(wl);
527 	} while (skb_queue_len(&wl->deferred_rx_queue));
528 }
529 
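/*
 * Upper bound on the number of FW-status/handling iterations performed per
 * invocation of the threaded IRQ handler; see the loop in wlcore_irq_locked().
 */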
530 #define WL1271_IRQ_MAX_LOOPS 256
531 
532 static int wlcore_irq_locked(struct wl1271 *wl)
533 {
534 	int ret = 0;
535 	u32 intr;
536 	int loopcount = WL1271_IRQ_MAX_LOOPS;
537 	bool done = false;
538 	unsigned int defer_count;
539 	unsigned long flags;
540 
541 	/*
542 	 * In case an edge-triggered interrupt must be used, we cannot iterate
543 	 * more than once without introducing race conditions with the hardirq.
544 	 */
545 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
546 		loopcount = 1;
547 
548 	wl1271_debug(DEBUG_IRQ, "IRQ work");
549 
550 	if (unlikely(wl->state != WLCORE_STATE_ON))
551 		goto out;
552 
553 	ret = pm_runtime_get_sync(wl->dev);
554 	if (ret < 0) {
555 		pm_runtime_put_noidle(wl->dev);
556 		goto out;
557 	}
558 
559 	while (!done && loopcount--) {
560 		/*
561 		 * In order to avoid a race with the hardirq, clear the flag
562 		 * before acknowledging the chip.
563 		 */
564 		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
565 		smp_mb__after_atomic();
566 
567 		ret = wlcore_fw_status(wl, wl->fw_status);
568 		if (ret < 0)
569 			goto out;
570 
571 		wlcore_hw_tx_immediate_compl(wl);
572 
573 		intr = wl->fw_status->intr;
574 		intr &= WLCORE_ALL_INTR_MASK;
575 		if (!intr) {
576 			done = true;
577 			continue;
578 		}
579 
580 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
581 			wl1271_error("HW watchdog interrupt received! starting recovery.");
582 			wl->watchdog_recovery = true;
583 			ret = -EIO;
584 
585 			/* restarting the chip. ignore any other interrupt. */
586 			goto out;
587 		}
588 
589 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
590 			wl1271_error("SW watchdog interrupt received! "
591 				     "starting recovery.");
592 			wl->watchdog_recovery = true;
593 			ret = -EIO;
594 
595 			/* restarting the chip. ignore any other interrupt. */
596 			goto out;
597 		}
598 
599 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
600 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
601 
602 			ret = wlcore_rx(wl, wl->fw_status);
603 			if (ret < 0)
604 				goto out;
605 
606 			/* Check if any tx blocks were freed */
607 			spin_lock_irqsave(&wl->wl_lock, flags);
608 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
609 			    wl1271_tx_total_queue_count(wl) > 0) {
610 				spin_unlock_irqrestore(&wl->wl_lock, flags);
611 				/*
612 				 * In order to avoid starvation of the TX path,
613 				 * call the work function directly.
614 				 */
615 				ret = wlcore_tx_work_locked(wl);
616 				if (ret < 0)
617 					goto out;
618 			} else {
619 				spin_unlock_irqrestore(&wl->wl_lock, flags);
620 			}
621 
622 			/* check for tx results */
623 			ret = wlcore_hw_tx_delayed_compl(wl);
624 			if (ret < 0)
625 				goto out;
626 
627 			/* Make sure the deferred queues don't get too long */
628 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
629 				      skb_queue_len(&wl->deferred_rx_queue);
630 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
631 				wl1271_flush_deferred_work(wl);
632 		}
633 
634 		if (intr & WL1271_ACX_INTR_EVENT_A) {
635 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
636 			ret = wl1271_event_handle(wl, 0);
637 			if (ret < 0)
638 				goto out;
639 		}
640 
641 		if (intr & WL1271_ACX_INTR_EVENT_B) {
642 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
643 			ret = wl1271_event_handle(wl, 1);
644 			if (ret < 0)
645 				goto out;
646 		}
647 
648 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
649 			wl1271_debug(DEBUG_IRQ,
650 				     "WL1271_ACX_INTR_INIT_COMPLETE");
651 
652 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
653 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
654 	}
655 
656 	pm_runtime_mark_last_busy(wl->dev);
657 	pm_runtime_put_autosuspend(wl->dev);
658 
659 out:
660 	return ret;
661 }
662 
663 static irqreturn_t wlcore_irq(int irq, void *cookie)
664 {
665 	int ret;
666 	unsigned long flags;
667 	struct wl1271 *wl = cookie;
668 
669 	/* complete the ELP completion */
670 	spin_lock_irqsave(&wl->wl_lock, flags);
671 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
672 	if (wl->elp_compl) {
673 		complete(wl->elp_compl);
674 		wl->elp_compl = NULL;
675 	}
676 
677 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
678 		/* don't enqueue a work right now. mark it as pending */
679 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
680 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
681 		disable_irq_nosync(wl->irq);
682 		pm_wakeup_event(wl->dev, 0);
683 		spin_unlock_irqrestore(&wl->wl_lock, flags);
684 		return IRQ_HANDLED;
685 	}
686 	spin_unlock_irqrestore(&wl->wl_lock, flags);
687 
688 	/* TX might be handled here, avoid redundant work */
689 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
690 	cancel_work_sync(&wl->tx_work);
691 
692 	mutex_lock(&wl->mutex);
693 
694 	ret = wlcore_irq_locked(wl);
695 	if (ret)
696 		wl12xx_queue_recovery_work(wl);
697 
698 	spin_lock_irqsave(&wl->wl_lock, flags);
699 	/* In case TX was not handled here, queue TX work */
700 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
701 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
702 	    wl1271_tx_total_queue_count(wl) > 0)
703 		ieee80211_queue_work(wl->hw, &wl->tx_work);
704 	spin_unlock_irqrestore(&wl->wl_lock, flags);
705 
706 	mutex_unlock(&wl->mutex);
707 
708 	return IRQ_HANDLED;
709 }
710 
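/*
 * Helper used with ieee80211_iterate_active_interfaces() to count the active
 * interfaces and to check whether a given vif (cur_vif) is among them; see
 * wl12xx_vif_count_iter() and wl12xx_get_vif_count() below.
 */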
711 struct vif_counter_data {
712 	u8 counter;
713 
714 	struct ieee80211_vif *cur_vif;
715 	bool cur_vif_running;
716 };
717 
718 static void wl12xx_vif_count_iter(void *data, u8 *mac,
719 				  struct ieee80211_vif *vif)
720 {
721 	struct vif_counter_data *counter = data;
722 
723 	counter->counter++;
724 	if (counter->cur_vif == vif)
725 		counter->cur_vif_running = true;
726 }
727 
728 /* caller must not hold wl->mutex, as it might deadlock */
729 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
730 			       struct ieee80211_vif *cur_vif,
731 			       struct vif_counter_data *data)
732 {
733 	memset(data, 0, sizeof(*data));
734 	data->cur_vif = cur_vif;
735 
736 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
737 					    wl12xx_vif_count_iter, data);
738 }
739 
740 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
741 {
742 	const struct firmware *fw;
743 	const char *fw_name;
744 	enum wl12xx_fw_type fw_type;
745 	int ret;
746 
747 	if (plt) {
748 		fw_type = WL12XX_FW_TYPE_PLT;
749 		fw_name = wl->plt_fw_name;
750 	} else {
751 		/*
752 		 * we can't call wl12xx_get_vif_count() here because
753 		 * wl->mutex is taken, so use the cached last_vif_count value
754 		 */
755 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
756 			fw_type = WL12XX_FW_TYPE_MULTI;
757 			fw_name = wl->mr_fw_name;
758 		} else {
759 			fw_type = WL12XX_FW_TYPE_NORMAL;
760 			fw_name = wl->sr_fw_name;
761 		}
762 	}
763 
764 	if (wl->fw_type == fw_type)
765 		return 0;
766 
767 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
768 
769 	ret = request_firmware(&fw, fw_name, wl->dev);
770 
771 	if (ret < 0) {
772 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
773 		return ret;
774 	}
775 
776 	if (fw->size % 4) {
777 		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
778 			     fw->size);
779 		ret = -EILSEQ;
780 		goto out;
781 	}
782 
783 	vfree(wl->fw);
784 	wl->fw_type = WL12XX_FW_TYPE_NONE;
785 	wl->fw_len = fw->size;
786 	wl->fw = vmalloc(wl->fw_len);
787 
788 	if (!wl->fw) {
789 		wl1271_error("could not allocate memory for the firmware");
790 		ret = -ENOMEM;
791 		goto out;
792 	}
793 
794 	memcpy(wl->fw, fw->data, wl->fw_len);
795 	ret = 0;
796 	wl->fw_type = fw_type;
797 out:
798 	release_firmware(fw);
799 
800 	return ret;
801 }
802 
803 void wl12xx_queue_recovery_work(struct wl1271 *wl)
804 {
805 	/* Avoid a recursive recovery */
806 	if (wl->state == WLCORE_STATE_ON) {
807 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
808 				  &wl->flags));
809 
810 		wl->state = WLCORE_STATE_RESTARTING;
811 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
812 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
813 	}
814 }
815 
816 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
817 {
818 	size_t len;
819 
820 	/* Make sure we have enough room */
821 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
822 
823 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
824 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
825 	wl->fwlog_size += len;
826 
827 	return len;
828 }
829 
830 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
831 {
832 	u32 end_of_log = 0;
833 	int error;
834 
835 	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
836 		return;
837 
838 	wl1271_info("Reading FW panic log");
839 
840 	/*
841 	 * Make sure the chip is awake and the logger isn't active.
842 	 * Do not send a stop fwlog command if the fw is hanged or if
843 	 * dbgpins are used (due to some fw bug).
844 	 */
845 	error = pm_runtime_get_sync(wl->dev);
846 	if (error < 0) {
847 		pm_runtime_put_noidle(wl->dev);
848 		return;
849 	}
850 	if (!wl->watchdog_recovery &&
851 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
852 		wl12xx_cmd_stop_fwlog(wl);
853 
854 	/* Traverse the memory blocks linked list */
855 	do {
856 		end_of_log = wlcore_event_fw_logger(wl);
857 		if (end_of_log == 0) {
858 			msleep(100);
859 			end_of_log = wlcore_event_fw_logger(wl);
860 		}
861 	} while (end_of_log != 0);
862 }
863 
864 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
865 				   u8 hlid, struct ieee80211_sta *sta)
866 {
867 	struct wl1271_station *wl_sta;
868 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
869 
870 	wl_sta = (void *)sta->drv_priv;
871 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
872 
873 	/*
874 	 * increment the initial seq number on recovery to account for
875 	 * transmitted packets that we haven't yet got in the FW status
876 	 */
877 	if (wlvif->encryption_type == KEY_GEM)
878 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
879 
880 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
881 		wl_sta->total_freed_pkts += sqn_recovery_padding;
882 }
883 
884 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
885 					struct wl12xx_vif *wlvif,
886 					u8 hlid, const u8 *addr)
887 {
888 	struct ieee80211_sta *sta;
889 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
890 
891 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
892 		    is_zero_ether_addr(addr)))
893 		return;
894 
895 	rcu_read_lock();
896 	sta = ieee80211_find_sta(vif, addr);
897 	if (sta)
898 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
899 	rcu_read_unlock();
900 }
901 
902 static void wlcore_print_recovery(struct wl1271 *wl)
903 {
904 	u32 pc = 0;
905 	u32 hint_sts = 0;
906 	int ret;
907 
908 	wl1271_info("Hardware recovery in progress. FW ver: %s",
909 		    wl->chip.fw_ver_str);
910 
911 	/* change partitions momentarily so we can read the FW pc */
912 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
913 	if (ret < 0)
914 		return;
915 
916 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
917 	if (ret < 0)
918 		return;
919 
920 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
921 	if (ret < 0)
922 		return;
923 
924 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
925 				pc, hint_sts, ++wl->recovery_count);
926 
927 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
928 }
929 
930 
931 static void wl1271_recovery_work(struct work_struct *work)
932 {
933 	struct wl1271 *wl =
934 		container_of(work, struct wl1271, recovery_work);
935 	struct wl12xx_vif *wlvif;
936 	struct ieee80211_vif *vif;
937 	int error;
938 
939 	mutex_lock(&wl->mutex);
940 
941 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
942 		goto out_unlock;
943 
944 	error = pm_runtime_get_sync(wl->dev);
945 	if (error < 0) {
946 		wl1271_warning("Enable for recovery failed");
947 		pm_runtime_put_noidle(wl->dev);
948 	}
949 	wlcore_disable_interrupts_nosync(wl);
950 
951 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
952 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
953 			wl12xx_read_fwlog_panic(wl);
954 		wlcore_print_recovery(wl);
955 	}
956 
957 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
958 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
959 
960 	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
961 
962 	if (wl->conf.recovery.no_recovery) {
963 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
964 		goto out_unlock;
965 	}
966 
967 	/* Prevent spurious TX during FW restart */
968 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
969 
970 	/* reboot the chipset */
971 	while (!list_empty(&wl->wlvif_list)) {
972 		wlvif = list_first_entry(&wl->wlvif_list,
973 				       struct wl12xx_vif, list);
974 		vif = wl12xx_wlvif_to_vif(wlvif);
975 
976 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
977 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
978 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
979 						    vif->bss_conf.bssid);
980 		}
981 
982 		__wl1271_op_remove_interface(wl, vif, false);
983 	}
984 
985 	wlcore_op_stop_locked(wl);
986 	pm_runtime_mark_last_busy(wl->dev);
987 	pm_runtime_put_autosuspend(wl->dev);
988 
989 	ieee80211_restart_hw(wl->hw);
990 
991 	/*
992 	 * It's safe to enable TX now - the queues are stopped after a request
993 	 * to restart the HW.
994 	 */
995 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
996 
997 out_unlock:
998 	wl->watchdog_recovery = false;
999 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1000 	mutex_unlock(&wl->mutex);
1001 }
1002 
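/* Ask the chip to exit its ELP low-power state by writing the ELP control register */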
1003 static int wlcore_fw_wakeup(struct wl1271 *wl)
1004 {
1005 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1006 }
1007 
1008 static int wl1271_setup(struct wl1271 *wl)
1009 {
1010 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1011 	if (!wl->raw_fw_status)
1012 		goto err;
1013 
1014 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1015 	if (!wl->fw_status)
1016 		goto err;
1017 
1018 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1019 	if (!wl->tx_res_if)
1020 		goto err;
1021 
1022 	return 0;
1023 err:
1024 	kfree(wl->fw_status);
1025 	kfree(wl->raw_fw_status);
1026 	return -ENOMEM;
1027 }
1028 
1029 static int wl12xx_set_power_on(struct wl1271 *wl)
1030 {
1031 	int ret;
1032 
1033 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1034 	ret = wl1271_power_on(wl);
1035 	if (ret < 0)
1036 		goto out;
1037 	msleep(WL1271_POWER_ON_SLEEP);
1038 	wl1271_io_reset(wl);
1039 	wl1271_io_init(wl);
1040 
1041 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1042 	if (ret < 0)
1043 		goto fail;
1044 
1045 	/* ELP module wake up */
1046 	ret = wlcore_fw_wakeup(wl);
1047 	if (ret < 0)
1048 		goto fail;
1049 
1050 out:
1051 	return ret;
1052 
1053 fail:
1054 	wl1271_power_off(wl);
1055 	return ret;
1056 }
1057 
1058 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1059 {
1060 	int ret = 0;
1061 
1062 	ret = wl12xx_set_power_on(wl);
1063 	if (ret < 0)
1064 		goto out;
1065 
1066 	/*
1067 	 * For wl127x based devices we could use the default block
1068 	 * size (512 bytes), but due to a bug in the sdio driver, we
1069 	 * need to set it explicitly after the chip is powered on.  To
1070 	 * simplify the code and since the performance impact is
1071 	 * negligible, we use the same block size for all different
1072 	 * chip types.
1073 	 *
1074 	 * Check if the bus supports blocksize alignment and, if it
1075 	 * doesn't, make sure we don't have the quirk.
1076 	 */
1077 	if (!wl1271_set_block_size(wl))
1078 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1079 
1080 	/* TODO: make sure the lower driver has set things up correctly */
1081 
1082 	ret = wl1271_setup(wl);
1083 	if (ret < 0)
1084 		goto out;
1085 
1086 	ret = wl12xx_fetch_firmware(wl, plt);
1087 	if (ret < 0) {
1088 		kfree(wl->fw_status);
1089 		kfree(wl->raw_fw_status);
1090 		kfree(wl->tx_res_if);
1091 	}
1092 
1093 out:
1094 	return ret;
1095 }
1096 
1097 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1098 {
1099 	int retries = WL1271_BOOT_RETRIES;
1100 	struct wiphy *wiphy = wl->hw->wiphy;
1101 
1102 	static const char* const PLT_MODE[] = {
1103 		"PLT_OFF",
1104 		"PLT_ON",
1105 		"PLT_FEM_DETECT",
1106 		"PLT_CHIP_AWAKE"
1107 	};
1108 
1109 	int ret;
1110 
1111 	mutex_lock(&wl->mutex);
1112 
1113 	wl1271_notice("power up");
1114 
1115 	if (wl->state != WLCORE_STATE_OFF) {
1116 		wl1271_error("cannot go into PLT state because not "
1117 			     "in off state: %d", wl->state);
1118 		ret = -EBUSY;
1119 		goto out;
1120 	}
1121 
1122 	/* Indicate to lower levels that we are now in PLT mode */
1123 	wl->plt = true;
1124 	wl->plt_mode = plt_mode;
1125 
1126 	while (retries) {
1127 		retries--;
1128 		ret = wl12xx_chip_wakeup(wl, true);
1129 		if (ret < 0)
1130 			goto power_off;
1131 
1132 		if (plt_mode != PLT_CHIP_AWAKE) {
1133 			ret = wl->ops->plt_init(wl);
1134 			if (ret < 0)
1135 				goto power_off;
1136 		}
1137 
1138 		wl->state = WLCORE_STATE_ON;
1139 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1140 			      PLT_MODE[plt_mode],
1141 			      wl->chip.fw_ver_str);
1142 
1143 		/* update hw/fw version info in wiphy struct */
1144 		wiphy->hw_version = wl->chip.id;
1145 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1146 			sizeof(wiphy->fw_version));
1147 
1148 		goto out;
1149 
1150 power_off:
1151 		wl1271_power_off(wl);
1152 	}
1153 
1154 	wl->plt = false;
1155 	wl->plt_mode = PLT_OFF;
1156 
1157 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1158 		     WL1271_BOOT_RETRIES);
1159 out:
1160 	mutex_unlock(&wl->mutex);
1161 
1162 	return ret;
1163 }
1164 
1165 int wl1271_plt_stop(struct wl1271 *wl)
1166 {
1167 	int ret = 0;
1168 
1169 	wl1271_notice("power down");
1170 
1171 	/*
1172 	 * Interrupts must be disabled before setting the state to OFF.
1173 	 * Otherwise, the interrupt handler might be called and exit without
1174 	 * reading the interrupt status.
1175 	 */
1176 	wlcore_disable_interrupts(wl);
1177 	mutex_lock(&wl->mutex);
1178 	if (!wl->plt) {
1179 		mutex_unlock(&wl->mutex);
1180 
1181 		/*
1182 		 * This will not necessarily enable interrupts as interrupts
1183 		 * may have been disabled when op_stop was called. It will,
1184 		 * however, balance the above call to disable_interrupts().
1185 		 */
1186 		wlcore_enable_interrupts(wl);
1187 
1188 		wl1271_error("cannot power down because not in PLT "
1189 			     "state: %d", wl->state);
1190 		ret = -EBUSY;
1191 		goto out;
1192 	}
1193 
1194 	mutex_unlock(&wl->mutex);
1195 
1196 	wl1271_flush_deferred_work(wl);
1197 	cancel_work_sync(&wl->netstack_work);
1198 	cancel_work_sync(&wl->recovery_work);
1199 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1200 
1201 	mutex_lock(&wl->mutex);
1202 	wl1271_power_off(wl);
1203 	wl->flags = 0;
1204 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1205 	wl->state = WLCORE_STATE_OFF;
1206 	wl->plt = false;
1207 	wl->plt_mode = PLT_OFF;
1208 	wl->rx_counter = 0;
1209 	mutex_unlock(&wl->mutex);
1210 
1211 out:
1212 	return ret;
1213 }
1214 
1215 static void wl1271_op_tx(struct ieee80211_hw *hw,
1216 			 struct ieee80211_tx_control *control,
1217 			 struct sk_buff *skb)
1218 {
1219 	struct wl1271 *wl = hw->priv;
1220 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1221 	struct ieee80211_vif *vif = info->control.vif;
1222 	struct wl12xx_vif *wlvif = NULL;
1223 	unsigned long flags;
1224 	int q, mapping;
1225 	u8 hlid;
1226 
1227 	if (!vif) {
1228 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1229 		ieee80211_free_txskb(hw, skb);
1230 		return;
1231 	}
1232 
1233 	wlvif = wl12xx_vif_to_data(vif);
1234 	mapping = skb_get_queue_mapping(skb);
1235 	q = wl1271_tx_get_queue(mapping);
1236 
1237 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1238 
1239 	spin_lock_irqsave(&wl->wl_lock, flags);
1240 
1241 	/*
1242 	 * drop the packet if the link is invalid or the queue is stopped
1243 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1244 	 * allow these packets through.
1245 	 */
1246 	if (hlid == WL12XX_INVALID_LINK_ID ||
1247 	    (!test_bit(hlid, wlvif->links_map)) ||
1248 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1249 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1250 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1251 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1252 		ieee80211_free_txskb(hw, skb);
1253 		goto out;
1254 	}
1255 
1256 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1257 		     hlid, q, skb->len);
1258 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1259 
1260 	wl->tx_queue_count[q]++;
1261 	wlvif->tx_queue_count[q]++;
1262 
1263 	/*
1264 	 * The workqueue is slow to process the tx_queue and we need to stop
1265 	 * the queue here, otherwise the queue will get too long.
1266 	 */
1267 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1268 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1269 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1270 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1271 		wlcore_stop_queue_locked(wl, wlvif, q,
1272 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1273 	}
1274 
1275 	/*
1276 	 * The chip specific setup must run before the first TX packet -
1277 	 * before that, the tx_work will not be initialized!
1278 	 */
1279 
1280 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1281 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1282 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1283 
1284 out:
1285 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1286 }
1287 
1288 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1289 {
1290 	unsigned long flags;
1291 	int q;
1292 
1293 	/* no need to queue a new dummy packet if one is already pending */
1294 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1295 		return 0;
1296 
1297 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1298 
1299 	spin_lock_irqsave(&wl->wl_lock, flags);
1300 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1301 	wl->tx_queue_count[q]++;
1302 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1303 
1304 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1305 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1306 		return wlcore_tx_work_locked(wl);
1307 
1308 	/*
1309 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1310 	 * interrupt handler function
1311 	 */
1312 	return 0;
1313 }
1314 
1315 /*
1316  * The size of the dummy packet should be at least 1400 bytes. However, in
1317  * order to minimize the number of bus transactions, aligning it to 512-byte
1318  * boundaries could be beneficial, performance-wise
1319  */
1320 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1321 
1322 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1323 {
1324 	struct sk_buff *skb;
1325 	struct ieee80211_hdr_3addr *hdr;
1326 	unsigned int dummy_packet_size;
1327 
1328 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1329 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1330 
1331 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1332 	if (!skb) {
1333 		wl1271_warning("Failed to allocate a dummy packet skb");
1334 		return NULL;
1335 	}
1336 
1337 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1338 
1339 	hdr = skb_put_zero(skb, sizeof(*hdr));
1340 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1341 					 IEEE80211_STYPE_NULLFUNC |
1342 					 IEEE80211_FCTL_TODS);
1343 
1344 	skb_put_zero(skb, dummy_packet_size);
1345 
1346 	/* Dummy packets require the TID to be management */
1347 	skb->priority = WL1271_TID_MGMT;
1348 
1349 	/* Initialize all fields that might be used */
1350 	skb_set_queue_mapping(skb, 0);
1351 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1352 
1353 	return skb;
1354 }
1355 
1356 
1357 static int
1358 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1359 {
1360 	int num_fields = 0, in_field = 0, fields_size = 0;
1361 	int i, pattern_len = 0;
1362 
1363 	if (!p->mask) {
1364 		wl1271_warning("No mask in WoWLAN pattern");
1365 		return -EINVAL;
1366 	}
1367 
1368 	/*
1369 	 * The pattern is broken up into segments of bytes at different offsets
1370 	 * that need to be checked by the FW filter. Each segment is called
1371 	 * a field in the FW API. We verify that the total number of fields
1372 	 * required for this pattern won't exceed FW limits (8)
1373 	 * as well as the total fields buffer won't exceed the FW limit.
1374 	 * Note that if there's a pattern which crosses Ethernet/IP header
1375 	 * boundary a new field is required.
1376 	 */
1377 	for (i = 0; i < p->pattern_len; i++) {
1378 		if (test_bit(i, (unsigned long *)p->mask)) {
1379 			if (!in_field) {
1380 				in_field = 1;
1381 				pattern_len = 1;
1382 			} else {
1383 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1384 					num_fields++;
1385 					fields_size += pattern_len +
1386 						RX_FILTER_FIELD_OVERHEAD;
1387 					pattern_len = 1;
1388 				} else
1389 					pattern_len++;
1390 			}
1391 		} else {
1392 			if (in_field) {
1393 				in_field = 0;
1394 				fields_size += pattern_len +
1395 					RX_FILTER_FIELD_OVERHEAD;
1396 				num_fields++;
1397 			}
1398 		}
1399 	}
1400 
1401 	if (in_field) {
1402 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1403 		num_fields++;
1404 	}
1405 
1406 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1407 		wl1271_warning("RX Filter too complex. Too many segments");
1408 		return -EINVAL;
1409 	}
1410 
1411 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1412 		wl1271_warning("RX filter pattern is too big");
1413 		return -E2BIG;
1414 	}
1415 
1416 	return 0;
1417 }
1418 
1419 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1420 {
1421 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1422 }
1423 
1424 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1425 {
1426 	int i;
1427 
1428 	if (filter == NULL)
1429 		return;
1430 
1431 	for (i = 0; i < filter->num_fields; i++)
1432 		kfree(filter->fields[i].pattern);
1433 
1434 	kfree(filter);
1435 }
1436 
1437 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1438 				 u16 offset, u8 flags,
1439 				 const u8 *pattern, u8 len)
1440 {
1441 	struct wl12xx_rx_filter_field *field;
1442 
1443 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1444 		wl1271_warning("Max fields per RX filter. can't alloc another");
1445 		return -EINVAL;
1446 	}
1447 
1448 	field = &filter->fields[filter->num_fields];
1449 
1450 	field->pattern = kzalloc(len, GFP_KERNEL);
1451 	if (!field->pattern) {
1452 		wl1271_warning("Failed to allocate RX filter pattern");
1453 		return -ENOMEM;
1454 	}
1455 
1456 	filter->num_fields++;
1457 
1458 	field->offset = cpu_to_le16(offset);
1459 	field->flags = flags;
1460 	field->len = len;
1461 	memcpy(field->pattern, pattern, len);
1462 
1463 	return 0;
1464 }
1465 
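/*
 * Size of the flattened fields buffer: for each field, count the field header
 * without the pattern pointer plus the pattern bytes themselves, matching the
 * inline layout produced by wl1271_rx_filter_flatten_fields() below.
 */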
1466 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1467 {
1468 	int i, fields_size = 0;
1469 
1470 	for (i = 0; i < filter->num_fields; i++)
1471 		fields_size += filter->fields[i].len +
1472 			sizeof(struct wl12xx_rx_filter_field) -
1473 			sizeof(u8 *);
1474 
1475 	return fields_size;
1476 }
1477 
1478 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1479 				    u8 *buf)
1480 {
1481 	int i;
1482 	struct wl12xx_rx_filter_field *field;
1483 
1484 	for (i = 0; i < filter->num_fields; i++) {
1485 		field = (struct wl12xx_rx_filter_field *)buf;
1486 
1487 		field->offset = filter->fields[i].offset;
1488 		field->flags = filter->fields[i].flags;
1489 		field->len = filter->fields[i].len;
1490 
1491 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1492 		buf += sizeof(struct wl12xx_rx_filter_field) -
1493 			sizeof(u8 *) + field->len;
1494 	}
1495 }
1496 
1497 /*
1498  * Allocates an RX filter returned through f
1499  * which needs to be freed using rx_filter_free()
1500  */
1501 static int
1502 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1503 					   struct wl12xx_rx_filter **f)
1504 {
1505 	int i, j, ret = 0;
1506 	struct wl12xx_rx_filter *filter;
1507 	u16 offset;
1508 	u8 flags, len;
1509 
1510 	filter = wl1271_rx_filter_alloc();
1511 	if (!filter) {
1512 		wl1271_warning("Failed to alloc rx filter");
1513 		ret = -ENOMEM;
1514 		goto err;
1515 	}
1516 
1517 	i = 0;
1518 	while (i < p->pattern_len) {
1519 		if (!test_bit(i, (unsigned long *)p->mask)) {
1520 			i++;
1521 			continue;
1522 		}
1523 
1524 		for (j = i; j < p->pattern_len; j++) {
1525 			if (!test_bit(j, (unsigned long *)p->mask))
1526 				break;
1527 
1528 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1529 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1530 				break;
1531 		}
1532 
1533 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1534 			offset = i;
1535 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1536 		} else {
1537 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1538 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1539 		}
1540 
1541 		len = j - i;
1542 
1543 		ret = wl1271_rx_filter_alloc_field(filter,
1544 						   offset,
1545 						   flags,
1546 						   &p->pattern[i], len);
1547 		if (ret)
1548 			goto err;
1549 
1550 		i = j;
1551 	}
1552 
1553 	filter->action = FILTER_SIGNAL;
1554 
1555 	*f = filter;
1556 	return 0;
1557 
1558 err:
1559 	wl1271_rx_filter_free(filter);
1560 	*f = NULL;
1561 
1562 	return ret;
1563 }
1564 
1565 static int wl1271_configure_wowlan(struct wl1271 *wl,
1566 				   struct cfg80211_wowlan *wow)
1567 {
1568 	int i, ret;
1569 
1570 	if (!wow || wow->any || !wow->n_patterns) {
1571 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1572 							  FILTER_SIGNAL);
1573 		if (ret)
1574 			goto out;
1575 
1576 		ret = wl1271_rx_filter_clear_all(wl);
1577 		if (ret)
1578 			goto out;
1579 
1580 		return 0;
1581 	}
1582 
1583 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1584 		return -EINVAL;
1585 
1586 	/* Validate all incoming patterns before clearing current FW state */
1587 	for (i = 0; i < wow->n_patterns; i++) {
1588 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1589 		if (ret) {
1590 			wl1271_warning("Bad wowlan pattern %d", i);
1591 			return ret;
1592 		}
1593 	}
1594 
1595 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1596 	if (ret)
1597 		goto out;
1598 
1599 	ret = wl1271_rx_filter_clear_all(wl);
1600 	if (ret)
1601 		goto out;
1602 
1603 	/* Translate WoWLAN patterns into filters */
1604 	for (i = 0; i < wow->n_patterns; i++) {
1605 		struct cfg80211_pkt_pattern *p;
1606 		struct wl12xx_rx_filter *filter = NULL;
1607 
1608 		p = &wow->patterns[i];
1609 
1610 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1611 		if (ret) {
1612 			wl1271_warning("Failed to create an RX filter from "
1613 				       "wowlan pattern %d", i);
1614 			goto out;
1615 		}
1616 
1617 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1618 
1619 		wl1271_rx_filter_free(filter);
1620 		if (ret)
1621 			goto out;
1622 	}
1623 
1624 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1625 
1626 out:
1627 	return ret;
1628 }
1629 
1630 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1631 					struct wl12xx_vif *wlvif,
1632 					struct cfg80211_wowlan *wow)
1633 {
1634 	int ret = 0;
1635 
1636 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1637 		goto out;
1638 
1639 	ret = wl1271_configure_wowlan(wl, wow);
1640 	if (ret < 0)
1641 		goto out;
1642 
1643 	if ((wl->conf.conn.suspend_wake_up_event ==
1644 	     wl->conf.conn.wake_up_event) &&
1645 	    (wl->conf.conn.suspend_listen_interval ==
1646 	     wl->conf.conn.listen_interval))
1647 		goto out;
1648 
1649 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1650 				    wl->conf.conn.suspend_wake_up_event,
1651 				    wl->conf.conn.suspend_listen_interval);
1652 
1653 	if (ret < 0)
1654 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1655 out:
1656 	return ret;
1657 
1658 }
1659 
1660 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1661 					struct wl12xx_vif *wlvif,
1662 					struct cfg80211_wowlan *wow)
1663 {
1664 	int ret = 0;
1665 
1666 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1667 		goto out;
1668 
1669 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1670 	if (ret < 0)
1671 		goto out;
1672 
1673 	ret = wl1271_configure_wowlan(wl, wow);
1674 	if (ret < 0)
1675 		goto out;
1676 
1677 out:
1678 	return ret;
1679 
1680 }
1681 
1682 static int wl1271_configure_suspend(struct wl1271 *wl,
1683 				    struct wl12xx_vif *wlvif,
1684 				    struct cfg80211_wowlan *wow)
1685 {
1686 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1687 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1688 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1689 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1690 	return 0;
1691 }
1692 
1693 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1694 {
1695 	int ret = 0;
1696 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1697 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1698 
1699 	if ((!is_ap) && (!is_sta))
1700 		return;
1701 
1702 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1703 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1704 		return;
1705 
1706 	wl1271_configure_wowlan(wl, NULL);
1707 
1708 	if (is_sta) {
1709 		if ((wl->conf.conn.suspend_wake_up_event ==
1710 		     wl->conf.conn.wake_up_event) &&
1711 		    (wl->conf.conn.suspend_listen_interval ==
1712 		     wl->conf.conn.listen_interval))
1713 			return;
1714 
1715 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1716 				    wl->conf.conn.wake_up_event,
1717 				    wl->conf.conn.listen_interval);
1718 
1719 		if (ret < 0)
1720 			wl1271_error("resume: wake up conditions failed: %d",
1721 				     ret);
1722 
1723 	} else if (is_ap) {
1724 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1725 	}
1726 }
1727 
1728 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1729 					    struct cfg80211_wowlan *wow)
1730 {
1731 	struct wl1271 *wl = hw->priv;
1732 	struct wl12xx_vif *wlvif;
1733 	unsigned long flags;
1734 	int ret;
1735 
1736 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1737 	WARN_ON(!wow);
1738 
1739 	/* we want to perform the recovery before suspending */
1740 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1741 		wl1271_warning("postponing suspend to perform recovery");
1742 		return -EBUSY;
1743 	}
1744 
1745 	wl1271_tx_flush(wl);
1746 
1747 	mutex_lock(&wl->mutex);
1748 
1749 	ret = pm_runtime_get_sync(wl->dev);
1750 	if (ret < 0) {
1751 		pm_runtime_put_noidle(wl->dev);
1752 		mutex_unlock(&wl->mutex);
1753 		return ret;
1754 	}
1755 
1756 	wl->wow_enabled = true;
1757 	wl12xx_for_each_wlvif(wl, wlvif) {
1758 		if (wlcore_is_p2p_mgmt(wlvif))
1759 			continue;
1760 
1761 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1762 		if (ret < 0) {
1763 			mutex_unlock(&wl->mutex);
1764 			wl1271_warning("couldn't prepare device to suspend");
1765 			return ret;
1766 		}
1767 	}
1768 
1769 	/* disable fast link flow control notifications from FW */
1770 	ret = wlcore_hw_interrupt_notify(wl, false);
1771 	if (ret < 0)
1772 		goto out_sleep;
1773 
1774 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1775 	ret = wlcore_hw_rx_ba_filter(wl,
1776 				     !!wl->conf.conn.suspend_rx_ba_activity);
1777 	if (ret < 0)
1778 		goto out_sleep;
1779 
1780 out_sleep:
1781 	pm_runtime_put_noidle(wl->dev);
1782 	mutex_unlock(&wl->mutex);
1783 
1784 	if (ret < 0) {
1785 		wl1271_warning("couldn't prepare device to suspend");
1786 		return ret;
1787 	}
1788 
1789 	/* flush any remaining work */
1790 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1791 
1792 	flush_work(&wl->tx_work);
1793 
1794 	/*
1795 	 * Cancel the watchdog even if above tx_flush failed. We will detect
1796 	 * it on resume anyway.
1797 	 */
1798 	cancel_delayed_work(&wl->tx_watchdog_work);
1799 
1800 	/*
1801 	 * set suspended flag to avoid triggering a new threaded_irq
1802 	 * work.
1803 	 */
1804 	spin_lock_irqsave(&wl->wl_lock, flags);
1805 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1806 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1807 
1808 	return pm_runtime_force_suspend(wl->dev);
1809 }
1810 
1811 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1812 {
1813 	struct wl1271 *wl = hw->priv;
1814 	struct wl12xx_vif *wlvif;
1815 	unsigned long flags;
1816 	bool run_irq_work = false, pending_recovery;
1817 	int ret;
1818 
1819 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1820 		     wl->wow_enabled);
1821 	WARN_ON(!wl->wow_enabled);
1822 
1823 	ret = pm_runtime_force_resume(wl->dev);
1824 	if (ret < 0) {
1825 		wl1271_error("ELP wakeup failure!");
1826 		goto out_sleep;
1827 	}
1828 
1829 	/*
1830 	 * re-enable irq_work enqueuing, and call irq_work directly if
1831 	 * there is a pending work.
1832 	 */
1833 	spin_lock_irqsave(&wl->wl_lock, flags);
1834 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1835 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1836 		run_irq_work = true;
1837 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1838 
1839 	mutex_lock(&wl->mutex);
1840 
1841 	/* test the recovery flag before calling any SDIO functions */
1842 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1843 				    &wl->flags);
1844 
1845 	if (run_irq_work) {
1846 		wl1271_debug(DEBUG_MAC80211,
1847 			     "run postponed irq_work directly");
1848 
1849 		/* don't talk to the HW if recovery is pending */
1850 		if (!pending_recovery) {
1851 			ret = wlcore_irq_locked(wl);
1852 			if (ret)
1853 				wl12xx_queue_recovery_work(wl);
1854 		}
1855 
1856 		wlcore_enable_interrupts(wl);
1857 	}
1858 
1859 	if (pending_recovery) {
1860 		wl1271_warning("queuing forgotten recovery on resume");
1861 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1862 		goto out_sleep;
1863 	}
1864 
1865 	ret = pm_runtime_get_sync(wl->dev);
1866 	if (ret < 0) {
1867 		pm_runtime_put_noidle(wl->dev);
1868 		goto out;
1869 	}
1870 
1871 	wl12xx_for_each_wlvif(wl, wlvif) {
1872 		if (wlcore_is_p2p_mgmt(wlvif))
1873 			continue;
1874 
1875 		wl1271_configure_resume(wl, wlvif);
1876 	}
1877 
1878 	ret = wlcore_hw_interrupt_notify(wl, true);
1879 	if (ret < 0)
1880 		goto out_sleep;
1881 
1882 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1883 	ret = wlcore_hw_rx_ba_filter(wl, false);
1884 	if (ret < 0)
1885 		goto out_sleep;
1886 
1887 out_sleep:
1888 	pm_runtime_mark_last_busy(wl->dev);
1889 	pm_runtime_put_autosuspend(wl->dev);
1890 
1891 out:
1892 	wl->wow_enabled = false;
1893 
1894 	/*
1895 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1896 	 * That way we avoid possible conditions where Tx-complete interrupts
1897 	 * fail to arrive and we perform a spurious recovery.
1898 	 */
1899 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1900 	mutex_unlock(&wl->mutex);
1901 
1902 	return 0;
1903 }
1904 
wl1271_op_start(struct ieee80211_hw * hw)1905 static int wl1271_op_start(struct ieee80211_hw *hw)
1906 {
1907 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1908 
1909 	/*
1910 	 * We have to delay the booting of the hardware because
1911 	 * we need to know the local MAC address before downloading and
1912 	 * initializing the firmware. The MAC address cannot be changed
1913 	 * after boot, and without the proper MAC address, the firmware
1914 	 * will not function properly.
1915 	 *
1916 	 * The MAC address is first known when the corresponding interface
1917 	 * is added. That is where we will initialize the hardware.
1918 	 */
1919 
1920 	return 0;
1921 }
1922 
wlcore_op_stop_locked(struct wl1271 * wl)1923 static void wlcore_op_stop_locked(struct wl1271 *wl)
1924 {
1925 	int i;
1926 
1927 	if (wl->state == WLCORE_STATE_OFF) {
1928 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1929 					&wl->flags))
1930 			wlcore_enable_interrupts(wl);
1931 
1932 		return;
1933 	}
1934 
1935 	/*
1936 	 * this must be before the cancel_work calls below, so that the work
1937 	 * functions don't perform further work.
1938 	 */
1939 	wl->state = WLCORE_STATE_OFF;
1940 
1941 	/*
1942 	 * Use the nosync variant to disable interrupts, so the mutex could be
1943 	 * held while doing so without deadlocking.
1944 	 */
1945 	wlcore_disable_interrupts_nosync(wl);
1946 
1947 	mutex_unlock(&wl->mutex);
1948 
1949 	wlcore_synchronize_interrupts(wl);
1950 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1951 		cancel_work_sync(&wl->recovery_work);
1952 	wl1271_flush_deferred_work(wl);
1953 	cancel_delayed_work_sync(&wl->scan_complete_work);
1954 	cancel_work_sync(&wl->netstack_work);
1955 	cancel_work_sync(&wl->tx_work);
1956 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1957 
1958 	/* let's notify MAC80211 about the remaining pending TX frames */
1959 	mutex_lock(&wl->mutex);
1960 	wl12xx_tx_reset(wl);
1961 
1962 	wl1271_power_off(wl);
1963 	/*
1964 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1965 	 * an interrupt storm. Now that the power is down, it is safe to
1966 	 * re-enable interrupts to balance the disable depth
1967 	 */
1968 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1969 		wlcore_enable_interrupts(wl);
1970 
1971 	wl->band = NL80211_BAND_2GHZ;
1972 
1973 	wl->rx_counter = 0;
1974 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1975 	wl->channel_type = NL80211_CHAN_NO_HT;
1976 	wl->tx_blocks_available = 0;
1977 	wl->tx_allocated_blocks = 0;
1978 	wl->tx_results_count = 0;
1979 	wl->tx_packets_count = 0;
1980 	wl->time_offset = 0;
1981 	wl->ap_fw_ps_map = 0;
1982 	wl->ap_ps_map = 0;
1983 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1984 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1985 	memset(wl->links_map, 0, sizeof(wl->links_map));
1986 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1987 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1988 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1989 	wl->active_sta_count = 0;
1990 	wl->active_link_count = 0;
1991 
1992 	/* The system link is always allocated */
1993 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1994 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1995 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1996 
1997 	/*
1998 	 * this is performed after the cancel_work calls and the associated
1999 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2000 	 * get executed before all these vars have been reset.
2001 	 */
2002 	wl->flags = 0;
2003 
2004 	wl->tx_blocks_freed = 0;
2005 
2006 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2007 		wl->tx_pkts_freed[i] = 0;
2008 		wl->tx_allocated_pkts[i] = 0;
2009 	}
2010 
2011 	wl1271_debugfs_reset(wl);
2012 
2013 	kfree(wl->raw_fw_status);
2014 	wl->raw_fw_status = NULL;
2015 	kfree(wl->fw_status);
2016 	wl->fw_status = NULL;
2017 	kfree(wl->tx_res_if);
2018 	wl->tx_res_if = NULL;
2019 	kfree(wl->target_mem_map);
2020 	wl->target_mem_map = NULL;
2021 
2022 	/*
2023 	 * FW channels must be re-calibrated after recovery,
2024 	 * save current Reg-Domain channel configuration and clear it.
2025 	 */
2026 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2027 	       sizeof(wl->reg_ch_conf_pending));
2028 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2029 }
2030 
wlcore_op_stop(struct ieee80211_hw * hw)2031 static void wlcore_op_stop(struct ieee80211_hw *hw)
2032 {
2033 	struct wl1271 *wl = hw->priv;
2034 
2035 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2036 
2037 	mutex_lock(&wl->mutex);
2038 
2039 	wlcore_op_stop_locked(wl);
2040 
2041 	mutex_unlock(&wl->mutex);
2042 }
2043 
wlcore_channel_switch_work(struct work_struct * work)2044 static void wlcore_channel_switch_work(struct work_struct *work)
2045 {
2046 	struct delayed_work *dwork;
2047 	struct wl1271 *wl;
2048 	struct ieee80211_vif *vif;
2049 	struct wl12xx_vif *wlvif;
2050 	int ret;
2051 
2052 	dwork = to_delayed_work(work);
2053 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2054 	wl = wlvif->wl;
2055 
2056 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2057 
2058 	mutex_lock(&wl->mutex);
2059 
2060 	if (unlikely(wl->state != WLCORE_STATE_ON))
2061 		goto out;
2062 
2063 	/* check the channel switch is still ongoing */
2064 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2065 		goto out;
2066 
2067 	vif = wl12xx_wlvif_to_vif(wlvif);
2068 	ieee80211_chswitch_done(vif, false);
2069 
2070 	ret = pm_runtime_get_sync(wl->dev);
2071 	if (ret < 0) {
2072 		pm_runtime_put_noidle(wl->dev);
2073 		goto out;
2074 	}
2075 
2076 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2077 
2078 	pm_runtime_mark_last_busy(wl->dev);
2079 	pm_runtime_put_autosuspend(wl->dev);
2080 out:
2081 	mutex_unlock(&wl->mutex);
2082 }
2083 
wlcore_connection_loss_work(struct work_struct * work)2084 static void wlcore_connection_loss_work(struct work_struct *work)
2085 {
2086 	struct delayed_work *dwork;
2087 	struct wl1271 *wl;
2088 	struct ieee80211_vif *vif;
2089 	struct wl12xx_vif *wlvif;
2090 
2091 	dwork = to_delayed_work(work);
2092 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2093 	wl = wlvif->wl;
2094 
2095 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2096 
2097 	mutex_lock(&wl->mutex);
2098 
2099 	if (unlikely(wl->state != WLCORE_STATE_ON))
2100 		goto out;
2101 
2102 	/* Call mac80211 connection loss */
2103 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2104 		goto out;
2105 
2106 	vif = wl12xx_wlvif_to_vif(wlvif);
2107 	ieee80211_connection_loss(vif);
2108 out:
2109 	mutex_unlock(&wl->mutex);
2110 }
2111 
wlcore_pending_auth_complete_work(struct work_struct * work)2112 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2113 {
2114 	struct delayed_work *dwork;
2115 	struct wl1271 *wl;
2116 	struct wl12xx_vif *wlvif;
2117 	unsigned long time_spare;
2118 	int ret;
2119 
2120 	dwork = to_delayed_work(work);
2121 	wlvif = container_of(dwork, struct wl12xx_vif,
2122 			     pending_auth_complete_work);
2123 	wl = wlvif->wl;
2124 
2125 	mutex_lock(&wl->mutex);
2126 
2127 	if (unlikely(wl->state != WLCORE_STATE_ON))
2128 		goto out;
2129 
2130 	/*
2131 	 * Make sure a second really passed since the last auth reply. Maybe
2132 	 * a second auth reply arrived while we were stuck on the mutex.
2133 	 * Check for a little less than the timeout to protect from scheduler
2134 	 * irregularities.
2135 	 */
2136 	time_spare = jiffies +
2137 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2138 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2139 		goto out;
2140 
2141 	ret = pm_runtime_get_sync(wl->dev);
2142 	if (ret < 0) {
2143 		pm_runtime_put_noidle(wl->dev);
2144 		goto out;
2145 	}
2146 
2147 	/* cancel the ROC if active */
2148 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2149 
2150 	pm_runtime_mark_last_busy(wl->dev);
2151 	pm_runtime_put_autosuspend(wl->dev);
2152 out:
2153 	mutex_unlock(&wl->mutex);
2154 }
2155 
wl12xx_allocate_rate_policy(struct wl1271 * wl,u8 * idx)2156 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2157 {
2158 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2159 					WL12XX_MAX_RATE_POLICIES);
2160 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2161 		return -EBUSY;
2162 
2163 	__set_bit(policy, wl->rate_policies_map);
2164 	*idx = policy;
2165 	return 0;
2166 }
2167 
wl12xx_free_rate_policy(struct wl1271 * wl,u8 * idx)2168 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2169 {
2170 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2171 		return;
2172 
2173 	__clear_bit(*idx, wl->rate_policies_map);
2174 	*idx = WL12XX_MAX_RATE_POLICIES;
2175 }
2176 
wlcore_allocate_klv_template(struct wl1271 * wl,u8 * idx)2177 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2178 {
2179 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2180 					WLCORE_MAX_KLV_TEMPLATES);
2181 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2182 		return -EBUSY;
2183 
2184 	__set_bit(policy, wl->klv_templates_map);
2185 	*idx = policy;
2186 	return 0;
2187 }
2188 
wlcore_free_klv_template(struct wl1271 * wl,u8 * idx)2189 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2190 {
2191 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2192 		return;
2193 
2194 	__clear_bit(*idx, wl->klv_templates_map);
2195 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2196 }
2197 
wl12xx_get_role_type(struct wl1271 * wl,struct wl12xx_vif * wlvif)2198 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2199 {
2200 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2201 
2202 	switch (wlvif->bss_type) {
2203 	case BSS_TYPE_AP_BSS:
2204 		if (wlvif->p2p)
2205 			return WL1271_ROLE_P2P_GO;
2206 		else if (ieee80211_vif_is_mesh(vif))
2207 			return WL1271_ROLE_MESH_POINT;
2208 		else
2209 			return WL1271_ROLE_AP;
2210 
2211 	case BSS_TYPE_STA_BSS:
2212 		if (wlvif->p2p)
2213 			return WL1271_ROLE_P2P_CL;
2214 		else
2215 			return WL1271_ROLE_STA;
2216 
2217 	case BSS_TYPE_IBSS:
2218 		return WL1271_ROLE_IBSS;
2219 
2220 	default:
2221 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2222 	}
2223 	return WL12XX_INVALID_ROLE_TYPE;
2224 }
2225 
wl12xx_init_vif_data(struct wl1271 * wl,struct ieee80211_vif * vif)2226 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2227 {
2228 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2229 	int i;
2230 
2231 	/* clear everything but the persistent data */
2232 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2233 
2234 	switch (ieee80211_vif_type_p2p(vif)) {
2235 	case NL80211_IFTYPE_P2P_CLIENT:
2236 		wlvif->p2p = 1;
2237 		/* fall-through */
2238 	case NL80211_IFTYPE_STATION:
2239 	case NL80211_IFTYPE_P2P_DEVICE:
2240 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2241 		break;
2242 	case NL80211_IFTYPE_ADHOC:
2243 		wlvif->bss_type = BSS_TYPE_IBSS;
2244 		break;
2245 	case NL80211_IFTYPE_P2P_GO:
2246 		wlvif->p2p = 1;
2247 		/* fall-through */
2248 	case NL80211_IFTYPE_AP:
2249 	case NL80211_IFTYPE_MESH_POINT:
2250 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2251 		break;
2252 	default:
2253 		wlvif->bss_type = MAX_BSS_TYPE;
2254 		return -EOPNOTSUPP;
2255 	}
2256 
2257 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2258 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2259 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2260 
2261 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2262 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2263 		/* init sta/ibss data */
2264 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2265 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2266 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2267 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2268 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2269 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2270 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2271 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2272 	} else {
2273 		/* init ap data */
2274 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2275 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2276 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2277 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2278 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2279 			wl12xx_allocate_rate_policy(wl,
2280 						&wlvif->ap.ucast_rate_idx[i]);
2281 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2282 		/*
2283 		 * TODO: check if basic_rate shouldn't be
2284 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2285 		 * instead (the same thing for STA above).
2286 		*/
2287 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2288 		/* TODO: this seems to be used only for STA, check it */
2289 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2290 	}
2291 
2292 	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2293 	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2294 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2295 
2296 	/*
2297 	 * mac80211 configures some values globally, while we treat them
2298 	 * per-interface. thus, on init, we have to copy them from wl
2299 	 */
2300 	wlvif->band = wl->band;
2301 	wlvif->channel = wl->channel;
2302 	wlvif->power_level = wl->power_level;
2303 	wlvif->channel_type = wl->channel_type;
2304 
2305 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2306 		  wl1271_rx_streaming_enable_work);
2307 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2308 		  wl1271_rx_streaming_disable_work);
2309 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2310 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2311 			  wlcore_channel_switch_work);
2312 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2313 			  wlcore_connection_loss_work);
2314 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2315 			  wlcore_pending_auth_complete_work);
2316 	INIT_LIST_HEAD(&wlvif->list);
2317 
2318 	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2319 	return 0;
2320 }
2321 
wl12xx_init_fw(struct wl1271 * wl)2322 static int wl12xx_init_fw(struct wl1271 *wl)
2323 {
2324 	int retries = WL1271_BOOT_RETRIES;
2325 	bool booted = false;
2326 	struct wiphy *wiphy = wl->hw->wiphy;
2327 	int ret;
2328 
2329 	while (retries) {
2330 		retries--;
2331 		ret = wl12xx_chip_wakeup(wl, false);
2332 		if (ret < 0)
2333 			goto power_off;
2334 
2335 		ret = wl->ops->boot(wl);
2336 		if (ret < 0)
2337 			goto power_off;
2338 
2339 		ret = wl1271_hw_init(wl);
2340 		if (ret < 0)
2341 			goto irq_disable;
2342 
2343 		booted = true;
2344 		break;
2345 
2346 irq_disable:
2347 		mutex_unlock(&wl->mutex);
2348 		/* Unlocking the mutex in the middle of handling is
2349 		   inherently unsafe. In this case we deem it safe to do,
2350 		   because we need to let any possibly pending IRQ out of
2351 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2352 		   work function will not do anything.) Also, any other
2353 		   possible concurrent operations will fail due to the
2354 		   current state, hence the wl1271 struct should be safe. */
2355 		wlcore_disable_interrupts(wl);
2356 		wl1271_flush_deferred_work(wl);
2357 		cancel_work_sync(&wl->netstack_work);
2358 		mutex_lock(&wl->mutex);
2359 power_off:
2360 		wl1271_power_off(wl);
2361 	}
2362 
2363 	if (!booted) {
2364 		wl1271_error("firmware boot failed despite %d retries",
2365 			     WL1271_BOOT_RETRIES);
2366 		goto out;
2367 	}
2368 
2369 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2370 
2371 	/* update hw/fw version info in wiphy struct */
2372 	wiphy->hw_version = wl->chip.id;
2373 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2374 		sizeof(wiphy->fw_version));
2375 
2376 	/*
2377 	 * Now we know if 11a is supported (info from the NVS), so disable
2378 	 * 11a channels if not supported
2379 	 */
2380 	if (!wl->enable_11a)
2381 		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2382 
2383 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2384 		     wl->enable_11a ? "" : "not ");
2385 
2386 	wl->state = WLCORE_STATE_ON;
2387 out:
2388 	return ret;
2389 }
2390 
wl12xx_dev_role_started(struct wl12xx_vif * wlvif)2391 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2392 {
2393 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2394 }
2395 
2396 /*
2397  * Check whether a fw switch (i.e. moving from one loaded
2398  * fw to another) is needed. This function is also responsible
2399  * for updating wl->last_vif_count, so it must be called before
2400  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2401  * will be used).
2402  */
wl12xx_need_fw_change(struct wl1271 * wl,struct vif_counter_data vif_counter_data,bool add)2403 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2404 				  struct vif_counter_data vif_counter_data,
2405 				  bool add)
2406 {
2407 	enum wl12xx_fw_type current_fw = wl->fw_type;
2408 	u8 vif_count = vif_counter_data.counter;
2409 
2410 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2411 		return false;
2412 
2413 	/* increase the vif count if this is a new vif */
2414 	if (add && !vif_counter_data.cur_vif_running)
2415 		vif_count++;
2416 
2417 	wl->last_vif_count = vif_count;
2418 
2419 	/* no need for fw change if the device is OFF */
2420 	if (wl->state == WLCORE_STATE_OFF)
2421 		return false;
2422 
2423 	/* no need for fw change if a single fw is used */
2424 	if (!wl->mr_fw_name)
2425 		return false;
2426 
2427 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2428 		return true;
2429 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2430 		return true;
2431 
2432 	return false;
2433 }
2434 
2435 /*
2436  * Enter "forced psm". Make sure the sta is in psm against the ap,
2437  * to make the fw switch a bit more disconnection-persistent.
2438  */
wl12xx_force_active_psm(struct wl1271 * wl)2439 static void wl12xx_force_active_psm(struct wl1271 *wl)
2440 {
2441 	struct wl12xx_vif *wlvif;
2442 
2443 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2444 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2445 	}
2446 }
2447 
2448 struct wlcore_hw_queue_iter_data {
2449 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2450 	/* current vif */
2451 	struct ieee80211_vif *vif;
2452 	/* is the current vif among those iterated */
2453 	bool cur_running;
2454 };
2455 
wlcore_hw_queue_iter(void * data,u8 * mac,struct ieee80211_vif * vif)2456 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2457 				 struct ieee80211_vif *vif)
2458 {
2459 	struct wlcore_hw_queue_iter_data *iter_data = data;
2460 
2461 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2462 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2463 		return;
2464 
2465 	if (iter_data->cur_running || vif == iter_data->vif) {
2466 		iter_data->cur_running = true;
2467 		return;
2468 	}
2469 
2470 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2471 }
2472 
wlcore_allocate_hw_queue_base(struct wl1271 * wl,struct wl12xx_vif * wlvif)2473 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2474 					 struct wl12xx_vif *wlvif)
2475 {
2476 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2477 	struct wlcore_hw_queue_iter_data iter_data = {};
2478 	int i, q_base;
2479 
2480 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2481 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2482 		return 0;
2483 	}
2484 
2485 	iter_data.vif = vif;
2486 
2487 	/* mark all bits taken by active interfaces */
2488 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2489 					IEEE80211_IFACE_ITER_RESUME_ALL,
2490 					wlcore_hw_queue_iter, &iter_data);
2491 
2492 	/* the current vif is already running in mac80211 (resume/recovery) */
2493 	if (iter_data.cur_running) {
2494 		wlvif->hw_queue_base = vif->hw_queue[0];
2495 		wl1271_debug(DEBUG_MAC80211,
2496 			     "using pre-allocated hw queue base %d",
2497 			     wlvif->hw_queue_base);
2498 
2499 		/* interface type might have changed type */
2500 		goto adjust_cab_queue;
2501 	}
2502 
2503 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2504 				     WLCORE_NUM_MAC_ADDRESSES);
2505 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2506 		return -EBUSY;
2507 
2508 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2509 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2510 		     wlvif->hw_queue_base);
2511 
2512 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2513 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2514 		/* register hw queues in mac80211 */
2515 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2516 	}
2517 
2518 adjust_cab_queue:
2519 	/* the last places are reserved for cab queues per interface */
2520 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2521 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2522 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2523 	else
2524 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2525 
2526 	return 0;
2527 }
2528 
wl1271_op_add_interface(struct ieee80211_hw * hw,struct ieee80211_vif * vif)2529 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2530 				   struct ieee80211_vif *vif)
2531 {
2532 	struct wl1271 *wl = hw->priv;
2533 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2534 	struct vif_counter_data vif_count;
2535 	int ret = 0;
2536 	u8 role_type;
2537 
2538 	if (wl->plt) {
2539 		wl1271_error("Adding Interface not allowed while in PLT mode");
2540 		return -EBUSY;
2541 	}
2542 
2543 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2544 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2545 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2546 
2547 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2548 		     ieee80211_vif_type_p2p(vif), vif->addr);
2549 
2550 	wl12xx_get_vif_count(hw, vif, &vif_count);
2551 
2552 	mutex_lock(&wl->mutex);
2553 
2554 	/*
2555 	 * in some very corner case HW recovery scenarios its possible to
2556 	 * get here before __wl1271_op_remove_interface is complete, so
2557 	 * opt out if that is the case.
2558 	 */
2559 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2560 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2561 		ret = -EBUSY;
2562 		goto out;
2563 	}
2564 
2565 
2566 	ret = wl12xx_init_vif_data(wl, vif);
2567 	if (ret < 0)
2568 		goto out;
2569 
2570 	wlvif->wl = wl;
2571 	role_type = wl12xx_get_role_type(wl, wlvif);
2572 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2573 		ret = -EINVAL;
2574 		goto out;
2575 	}
2576 
2577 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2578 	if (ret < 0)
2579 		goto out;
2580 
2581 	/*
2582 	 * TODO: after the nvs issue will be solved, move this block
2583 	 * to start(), and make sure here the driver is ON.
2584 	 */
2585 	if (wl->state == WLCORE_STATE_OFF) {
2586 		/*
2587 		 * we still need this in order to configure the fw
2588 		 * while uploading the nvs
2589 		 */
2590 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2591 
2592 		ret = wl12xx_init_fw(wl);
2593 		if (ret < 0)
2594 			goto out;
2595 	}
2596 
2597 	/*
2598 	 * Call runtime PM only after possible wl12xx_init_fw() above
2599 	 * is done. Otherwise we do not have interrupts enabled.
2600 	 */
2601 	ret = pm_runtime_get_sync(wl->dev);
2602 	if (ret < 0) {
2603 		pm_runtime_put_noidle(wl->dev);
2604 		goto out_unlock;
2605 	}
2606 
2607 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2608 		wl12xx_force_active_psm(wl);
2609 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2610 		mutex_unlock(&wl->mutex);
2611 		wl1271_recovery_work(&wl->recovery_work);
2612 		return 0;
2613 	}
2614 
2615 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2616 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2617 					     role_type, &wlvif->role_id);
2618 		if (ret < 0)
2619 			goto out;
2620 
2621 		ret = wl1271_init_vif_specific(wl, vif);
2622 		if (ret < 0)
2623 			goto out;
2624 
2625 	} else {
2626 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2627 					     &wlvif->dev_role_id);
2628 		if (ret < 0)
2629 			goto out;
2630 
2631 		/* needed mainly for configuring rate policies */
2632 		ret = wl1271_sta_hw_init(wl, wlvif);
2633 		if (ret < 0)
2634 			goto out;
2635 	}
2636 
2637 	list_add(&wlvif->list, &wl->wlvif_list);
2638 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2639 
2640 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2641 		wl->ap_count++;
2642 	else
2643 		wl->sta_count++;
2644 out:
2645 	pm_runtime_mark_last_busy(wl->dev);
2646 	pm_runtime_put_autosuspend(wl->dev);
2647 out_unlock:
2648 	mutex_unlock(&wl->mutex);
2649 
2650 	return ret;
2651 }
2652 
__wl1271_op_remove_interface(struct wl1271 * wl,struct ieee80211_vif * vif,bool reset_tx_queues)2653 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2654 					 struct ieee80211_vif *vif,
2655 					 bool reset_tx_queues)
2656 {
2657 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2658 	int i, ret;
2659 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2660 
2661 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2662 
2663 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2664 		return;
2665 
2666 	/* because of hardware recovery, we may get here twice */
2667 	if (wl->state == WLCORE_STATE_OFF)
2668 		return;
2669 
2670 	wl1271_info("down");
2671 
2672 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2673 	    wl->scan_wlvif == wlvif) {
2674 		struct cfg80211_scan_info info = {
2675 			.aborted = true,
2676 		};
2677 
2678 		/*
2679 		 * Rearm the tx watchdog just before idling scan. This
2680 		 * prevents just-finished scans from triggering the watchdog
2681 		 */
2682 		wl12xx_rearm_tx_watchdog_locked(wl);
2683 
2684 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2685 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2686 		wl->scan_wlvif = NULL;
2687 		wl->scan.req = NULL;
2688 		ieee80211_scan_completed(wl->hw, &info);
2689 	}
2690 
2691 	if (wl->sched_vif == wlvif)
2692 		wl->sched_vif = NULL;
2693 
2694 	if (wl->roc_vif == vif) {
2695 		wl->roc_vif = NULL;
2696 		ieee80211_remain_on_channel_expired(wl->hw);
2697 	}
2698 
2699 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2700 		/* disable active roles */
2701 		ret = pm_runtime_get_sync(wl->dev);
2702 		if (ret < 0) {
2703 			pm_runtime_put_noidle(wl->dev);
2704 			goto deinit;
2705 		}
2706 
2707 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2708 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2709 			if (wl12xx_dev_role_started(wlvif))
2710 				wl12xx_stop_dev(wl, wlvif);
2711 		}
2712 
2713 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2714 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2715 			if (ret < 0)
2716 				goto deinit;
2717 		} else {
2718 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2719 			if (ret < 0)
2720 				goto deinit;
2721 		}
2722 
2723 		pm_runtime_mark_last_busy(wl->dev);
2724 		pm_runtime_put_autosuspend(wl->dev);
2725 	}
2726 deinit:
2727 	wl12xx_tx_reset_wlvif(wl, wlvif);
2728 
2729 	/* clear all hlids (except system_hlid) */
2730 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2731 
2732 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2733 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2734 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2735 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2736 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2737 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2738 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2739 	} else {
2740 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2741 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2742 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2743 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2744 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2745 			wl12xx_free_rate_policy(wl,
2746 						&wlvif->ap.ucast_rate_idx[i]);
2747 		wl1271_free_ap_keys(wl, wlvif);
2748 	}
2749 
2750 	dev_kfree_skb(wlvif->probereq);
2751 	wlvif->probereq = NULL;
2752 	if (wl->last_wlvif == wlvif)
2753 		wl->last_wlvif = NULL;
2754 	list_del(&wlvif->list);
2755 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2756 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2757 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2758 
2759 	if (is_ap)
2760 		wl->ap_count--;
2761 	else
2762 		wl->sta_count--;
2763 
2764 	/*
2765 	 * Last AP, have more stations. Configure sleep auth according to STA.
2766 	 * Don't do thin on unintended recovery.
2767 	 */
2768 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2769 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2770 		goto unlock;
2771 
2772 	if (wl->ap_count == 0 && is_ap) {
2773 		/* mask ap events */
2774 		wl->event_mask &= ~wl->ap_event_mask;
2775 		wl1271_event_unmask(wl);
2776 	}
2777 
2778 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2779 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2780 		/* Configure for power according to debugfs */
2781 		if (sta_auth != WL1271_PSM_ILLEGAL)
2782 			wl1271_acx_sleep_auth(wl, sta_auth);
2783 		/* Configure for ELP power saving */
2784 		else
2785 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2786 	}
2787 
2788 unlock:
2789 	mutex_unlock(&wl->mutex);
2790 
2791 	del_timer_sync(&wlvif->rx_streaming_timer);
2792 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2793 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2794 	cancel_work_sync(&wlvif->rc_update_work);
2795 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2796 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2797 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2798 
2799 	mutex_lock(&wl->mutex);
2800 }
2801 
wl1271_op_remove_interface(struct ieee80211_hw * hw,struct ieee80211_vif * vif)2802 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2803 				       struct ieee80211_vif *vif)
2804 {
2805 	struct wl1271 *wl = hw->priv;
2806 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2807 	struct wl12xx_vif *iter;
2808 	struct vif_counter_data vif_count;
2809 
2810 	wl12xx_get_vif_count(hw, vif, &vif_count);
2811 	mutex_lock(&wl->mutex);
2812 
2813 	if (wl->state == WLCORE_STATE_OFF ||
2814 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2815 		goto out;
2816 
2817 	/*
2818 	 * wl->vif can be null here if someone shuts down the interface
2819 	 * just when hardware recovery has been started.
2820 	 */
2821 	wl12xx_for_each_wlvif(wl, iter) {
2822 		if (iter != wlvif)
2823 			continue;
2824 
2825 		__wl1271_op_remove_interface(wl, vif, true);
2826 		break;
2827 	}
2828 	WARN_ON(iter != wlvif);
2829 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2830 		wl12xx_force_active_psm(wl);
2831 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2832 		wl12xx_queue_recovery_work(wl);
2833 	}
2834 out:
2835 	mutex_unlock(&wl->mutex);
2836 }
2837 
wl12xx_op_change_interface(struct ieee80211_hw * hw,struct ieee80211_vif * vif,enum nl80211_iftype new_type,bool p2p)2838 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2839 				      struct ieee80211_vif *vif,
2840 				      enum nl80211_iftype new_type, bool p2p)
2841 {
2842 	struct wl1271 *wl = hw->priv;
2843 	int ret;
2844 
2845 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2846 	wl1271_op_remove_interface(hw, vif);
2847 
2848 	vif->type = new_type;
2849 	vif->p2p = p2p;
2850 	ret = wl1271_op_add_interface(hw, vif);
2851 
2852 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2853 	return ret;
2854 }
2855 
wlcore_join(struct wl1271 * wl,struct wl12xx_vif * wlvif)2856 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2857 {
2858 	int ret;
2859 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2860 
2861 	/*
2862 	 * One of the side effects of the JOIN command is that is clears
2863 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2864 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2865 	 * Currently the only valid scenario for JOIN during association
2866 	 * is on roaming, in which case we will also be given new keys.
2867 	 * Keep the below message for now, unless it starts bothering
2868 	 * users who really like to roam a lot :)
2869 	 */
2870 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2871 		wl1271_info("JOIN while associated.");
2872 
2873 	/* clear encryption type */
2874 	wlvif->encryption_type = KEY_NONE;
2875 
2876 	if (is_ibss)
2877 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2878 	else {
2879 		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2880 			/*
2881 			 * TODO: this is an ugly workaround for wl12xx fw
2882 			 * bug - we are not able to tx/rx after the first
2883 			 * start_sta, so make dummy start+stop calls,
2884 			 * and then call start_sta again.
2885 			 * this should be fixed in the fw.
2886 			 */
2887 			wl12xx_cmd_role_start_sta(wl, wlvif);
2888 			wl12xx_cmd_role_stop_sta(wl, wlvif);
2889 		}
2890 
2891 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2892 	}
2893 
2894 	return ret;
2895 }
2896 
wl1271_ssid_set(struct wl12xx_vif * wlvif,struct sk_buff * skb,int offset)2897 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2898 			    int offset)
2899 {
2900 	u8 ssid_len;
2901 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2902 					 skb->len - offset);
2903 
2904 	if (!ptr) {
2905 		wl1271_error("No SSID in IEs!");
2906 		return -ENOENT;
2907 	}
2908 
2909 	ssid_len = ptr[1];
2910 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2911 		wl1271_error("SSID is too long!");
2912 		return -EINVAL;
2913 	}
2914 
2915 	wlvif->ssid_len = ssid_len;
2916 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2917 	return 0;
2918 }
2919 
wlcore_set_ssid(struct wl1271 * wl,struct wl12xx_vif * wlvif)2920 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2921 {
2922 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2923 	struct sk_buff *skb;
2924 	int ieoffset;
2925 
2926 	/* we currently only support setting the ssid from the ap probe req */
2927 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2928 		return -EINVAL;
2929 
2930 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2931 	if (!skb)
2932 		return -EINVAL;
2933 
2934 	ieoffset = offsetof(struct ieee80211_mgmt,
2935 			    u.probe_req.variable);
2936 	wl1271_ssid_set(wlvif, skb, ieoffset);
2937 	dev_kfree_skb(skb);
2938 
2939 	return 0;
2940 }
2941 
wlcore_set_assoc(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_bss_conf * bss_conf,u32 sta_rate_set)2942 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2943 			    struct ieee80211_bss_conf *bss_conf,
2944 			    u32 sta_rate_set)
2945 {
2946 	int ieoffset;
2947 	int ret;
2948 
2949 	wlvif->aid = bss_conf->aid;
2950 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2951 	wlvif->beacon_int = bss_conf->beacon_int;
2952 	wlvif->wmm_enabled = bss_conf->qos;
2953 
2954 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2955 
2956 	/*
2957 	 * with wl1271, we don't need to update the
2958 	 * beacon_int and dtim_period, because the firmware
2959 	 * updates it by itself when the first beacon is
2960 	 * received after a join.
2961 	 */
2962 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2963 	if (ret < 0)
2964 		return ret;
2965 
2966 	/*
2967 	 * Get a template for hardware connection maintenance
2968 	 */
2969 	dev_kfree_skb(wlvif->probereq);
2970 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2971 							wlvif,
2972 							NULL);
2973 	ieoffset = offsetof(struct ieee80211_mgmt,
2974 			    u.probe_req.variable);
2975 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2976 
2977 	/* enable the connection monitoring feature */
2978 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2979 	if (ret < 0)
2980 		return ret;
2981 
2982 	/*
2983 	 * The join command disable the keep-alive mode, shut down its process,
2984 	 * and also clear the template config, so we need to reset it all after
2985 	 * the join. The acx_aid starts the keep-alive process, and the order
2986 	 * of the commands below is relevant.
2987 	 */
2988 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2989 	if (ret < 0)
2990 		return ret;
2991 
2992 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2993 	if (ret < 0)
2994 		return ret;
2995 
2996 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2997 	if (ret < 0)
2998 		return ret;
2999 
3000 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
3001 					   wlvif->sta.klv_template_id,
3002 					   ACX_KEEP_ALIVE_TPL_VALID);
3003 	if (ret < 0)
3004 		return ret;
3005 
3006 	/*
3007 	 * The default fw psm configuration is AUTO, while mac80211 default
3008 	 * setting is off (ACTIVE), so sync the fw with the correct value.
3009 	 */
3010 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3011 	if (ret < 0)
3012 		return ret;
3013 
3014 	if (sta_rate_set) {
3015 		wlvif->rate_set =
3016 			wl1271_tx_enabled_rates_get(wl,
3017 						    sta_rate_set,
3018 						    wlvif->band);
3019 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3020 		if (ret < 0)
3021 			return ret;
3022 	}
3023 
3024 	return ret;
3025 }
3026 
wlcore_unset_assoc(struct wl1271 * wl,struct wl12xx_vif * wlvif)3027 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3028 {
3029 	int ret;
3030 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3031 
3032 	/* make sure we are connected (sta) joined */
3033 	if (sta &&
3034 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3035 		return false;
3036 
3037 	/* make sure we are joined (ibss) */
3038 	if (!sta &&
3039 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3040 		return false;
3041 
3042 	if (sta) {
3043 		/* use defaults when not associated */
3044 		wlvif->aid = 0;
3045 
3046 		/* free probe-request template */
3047 		dev_kfree_skb(wlvif->probereq);
3048 		wlvif->probereq = NULL;
3049 
3050 		/* disable connection monitor features */
3051 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3052 		if (ret < 0)
3053 			return ret;
3054 
3055 		/* Disable the keep-alive feature */
3056 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3057 		if (ret < 0)
3058 			return ret;
3059 
3060 		/* disable beacon filtering */
3061 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3062 		if (ret < 0)
3063 			return ret;
3064 	}
3065 
3066 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3067 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3068 
3069 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3070 		ieee80211_chswitch_done(vif, false);
3071 		cancel_delayed_work(&wlvif->channel_switch_work);
3072 	}
3073 
3074 	/* invalidate keep-alive template */
3075 	wl1271_acx_keep_alive_config(wl, wlvif,
3076 				     wlvif->sta.klv_template_id,
3077 				     ACX_KEEP_ALIVE_TPL_INVALID);
3078 
3079 	return 0;
3080 }
3081 
wl1271_set_band_rate(struct wl1271 * wl,struct wl12xx_vif * wlvif)3082 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3083 {
3084 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3085 	wlvif->rate_set = wlvif->basic_rate_set;
3086 }
3087 
wl1271_sta_handle_idle(struct wl1271 * wl,struct wl12xx_vif * wlvif,bool idle)3088 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3089 				   bool idle)
3090 {
3091 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3092 
3093 	if (idle == cur_idle)
3094 		return;
3095 
3096 	if (idle) {
3097 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3098 	} else {
3099 		/* The current firmware only supports sched_scan in idle */
3100 		if (wl->sched_vif == wlvif)
3101 			wl->ops->sched_scan_stop(wl, wlvif);
3102 
3103 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3104 	}
3105 }
3106 
wl12xx_config_vif(struct wl1271 * wl,struct wl12xx_vif * wlvif,struct ieee80211_conf * conf,u32 changed)3107 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3108 			     struct ieee80211_conf *conf, u32 changed)
3109 {
3110 	int ret;
3111 
3112 	if (wlcore_is_p2p_mgmt(wlvif))
3113 		return 0;
3114 
3115 	if (conf->power_level != wlvif->power_level) {
3116 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3117 		if (ret < 0)
3118 			return ret;
3119 
3120 		wlvif->power_level = conf->power_level;
3121 	}
3122 
3123 	return 0;
3124 }
3125 
wl1271_op_config(struct ieee80211_hw * hw,u32 changed)3126 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3127 {
3128 	struct wl1271 *wl = hw->priv;
3129 	struct wl12xx_vif *wlvif;
3130 	struct ieee80211_conf *conf = &hw->conf;
3131 	int ret = 0;
3132 
3133 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3134 		     " changed 0x%x",
3135 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3136 		     conf->power_level,
3137 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3138 			 changed);
3139 
3140 	mutex_lock(&wl->mutex);
3141 
3142 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3143 		wl->power_level = conf->power_level;
3144 
3145 	if (unlikely(wl->state != WLCORE_STATE_ON))
3146 		goto out;
3147 
3148 	ret = pm_runtime_get_sync(wl->dev);
3149 	if (ret < 0) {
3150 		pm_runtime_put_noidle(wl->dev);
3151 		goto out;
3152 	}
3153 
3154 	/* configure each interface */
3155 	wl12xx_for_each_wlvif(wl, wlvif) {
3156 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3157 		if (ret < 0)
3158 			goto out_sleep;
3159 	}
3160 
3161 out_sleep:
3162 	pm_runtime_mark_last_busy(wl->dev);
3163 	pm_runtime_put_autosuspend(wl->dev);
3164 
3165 out:
3166 	mutex_unlock(&wl->mutex);
3167 
3168 	return ret;
3169 }
3170 
3171 struct wl1271_filter_params {
3172 	bool enabled;
3173 	int mc_list_length;
3174 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3175 };
3176 
wl1271_op_prepare_multicast(struct ieee80211_hw * hw,struct netdev_hw_addr_list * mc_list)3177 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3178 				       struct netdev_hw_addr_list *mc_list)
3179 {
3180 	struct wl1271_filter_params *fp;
3181 	struct netdev_hw_addr *ha;
3182 
3183 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3184 	if (!fp) {
3185 		wl1271_error("Out of memory setting filters.");
3186 		return 0;
3187 	}
3188 
3189 	/* update multicast filtering parameters */
3190 	fp->mc_list_length = 0;
3191 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3192 		fp->enabled = false;
3193 	} else {
3194 		fp->enabled = true;
3195 		netdev_hw_addr_list_for_each(ha, mc_list) {
3196 			memcpy(fp->mc_list[fp->mc_list_length],
3197 					ha->addr, ETH_ALEN);
3198 			fp->mc_list_length++;
3199 		}
3200 	}
3201 
3202 	return (u64)(unsigned long)fp;
3203 }
3204 
3205 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3206 				  FIF_FCSFAIL | \
3207 				  FIF_BCN_PRBRESP_PROMISC | \
3208 				  FIF_CONTROL | \
3209 				  FIF_OTHER_BSS)
3210 
wl1271_op_configure_filter(struct ieee80211_hw * hw,unsigned int changed,unsigned int * total,u64 multicast)3211 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3212 				       unsigned int changed,
3213 				       unsigned int *total, u64 multicast)
3214 {
3215 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3216 	struct wl1271 *wl = hw->priv;
3217 	struct wl12xx_vif *wlvif;
3218 
3219 	int ret;
3220 
3221 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3222 		     " total %x", changed, *total);
3223 
3224 	mutex_lock(&wl->mutex);
3225 
3226 	*total &= WL1271_SUPPORTED_FILTERS;
3227 	changed &= WL1271_SUPPORTED_FILTERS;
3228 
3229 	if (unlikely(wl->state != WLCORE_STATE_ON))
3230 		goto out;
3231 
3232 	ret = pm_runtime_get_sync(wl->dev);
3233 	if (ret < 0) {
3234 		pm_runtime_put_noidle(wl->dev);
3235 		goto out;
3236 	}
3237 
3238 	wl12xx_for_each_wlvif(wl, wlvif) {
3239 		if (wlcore_is_p2p_mgmt(wlvif))
3240 			continue;
3241 
3242 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3243 			if (*total & FIF_ALLMULTI)
3244 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3245 								   false,
3246 								   NULL, 0);
3247 			else if (fp)
3248 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3249 							fp->enabled,
3250 							fp->mc_list,
3251 							fp->mc_list_length);
3252 			if (ret < 0)
3253 				goto out_sleep;
3254 		}
3255 
3256 		/*
3257 		 * If interface in AP mode and created with allmulticast then disable
3258 		 * the firmware filters so that all multicast packets are passed
3259 		 * This is mandatory for MDNS based discovery protocols
3260 		 */
3261  		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3262  			if (*total & FIF_ALLMULTI) {
3263 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3264 							false,
3265 							NULL, 0);
3266 				if (ret < 0)
3267 					goto out_sleep;
3268 			}
3269 		}
3270 	}
3271 
3272 	/*
3273 	 * the fw doesn't provide an api to configure the filters. instead,
3274 	 * the filters configuration is based on the active roles / ROC
3275 	 * state.
3276 	 */
3277 
3278 out_sleep:
3279 	pm_runtime_mark_last_busy(wl->dev);
3280 	pm_runtime_put_autosuspend(wl->dev);
3281 
3282 out:
3283 	mutex_unlock(&wl->mutex);
3284 	kfree(fp);
3285 }
3286 
wl1271_record_ap_key(struct wl1271 * wl,struct wl12xx_vif * wlvif,u8 id,u8 key_type,u8 key_size,const u8 * key,u8 hlid,u32 tx_seq_32,u16 tx_seq_16)3287 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3288 				u8 id, u8 key_type, u8 key_size,
3289 				const u8 *key, u8 hlid, u32 tx_seq_32,
3290 				u16 tx_seq_16)
3291 {
3292 	struct wl1271_ap_key *ap_key;
3293 	int i;
3294 
3295 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3296 
3297 	if (key_size > MAX_KEY_SIZE)
3298 		return -EINVAL;
3299 
3300 	/*
3301 	 * Find next free entry in ap_keys. Also check we are not replacing
3302 	 * an existing key.
3303 	 */
3304 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3305 		if (wlvif->ap.recorded_keys[i] == NULL)
3306 			break;
3307 
3308 		if (wlvif->ap.recorded_keys[i]->id == id) {
3309 			wl1271_warning("trying to record key replacement");
3310 			return -EINVAL;
3311 		}
3312 	}
3313 
3314 	if (i == MAX_NUM_KEYS)
3315 		return -EBUSY;
3316 
3317 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3318 	if (!ap_key)
3319 		return -ENOMEM;
3320 
3321 	ap_key->id = id;
3322 	ap_key->key_type = key_type;
3323 	ap_key->key_size = key_size;
3324 	memcpy(ap_key->key, key, key_size);
3325 	ap_key->hlid = hlid;
3326 	ap_key->tx_seq_32 = tx_seq_32;
3327 	ap_key->tx_seq_16 = tx_seq_16;
3328 
3329 	wlvif->ap.recorded_keys[i] = ap_key;
3330 	return 0;
3331 }
3332 
wl1271_free_ap_keys(struct wl1271 * wl,struct wl12xx_vif * wlvif)3333 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3334 {
3335 	int i;
3336 
3337 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3338 		kfree(wlvif->ap.recorded_keys[i]);
3339 		wlvif->ap.recorded_keys[i] = NULL;
3340 	}
3341 }
3342 
wl1271_ap_init_hwenc(struct wl1271 * wl,struct wl12xx_vif * wlvif)3343 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3344 {
3345 	int i, ret = 0;
3346 	struct wl1271_ap_key *key;
3347 	bool wep_key_added = false;
3348 
3349 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3350 		u8 hlid;
3351 		if (wlvif->ap.recorded_keys[i] == NULL)
3352 			break;
3353 
3354 		key = wlvif->ap.recorded_keys[i];
3355 		hlid = key->hlid;
3356 		if (hlid == WL12XX_INVALID_LINK_ID)
3357 			hlid = wlvif->ap.bcast_hlid;
3358 
3359 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3360 					    key->id, key->key_type,
3361 					    key->key_size, key->key,
3362 					    hlid, key->tx_seq_32,
3363 					    key->tx_seq_16);
3364 		if (ret < 0)
3365 			goto out;
3366 
3367 		if (key->key_type == KEY_WEP)
3368 			wep_key_added = true;
3369 	}
3370 
3371 	if (wep_key_added) {
3372 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3373 						     wlvif->ap.bcast_hlid);
3374 		if (ret < 0)
3375 			goto out;
3376 	}
3377 
3378 out:
3379 	wl1271_free_ap_keys(wl, wlvif);
3380 	return ret;
3381 }
3382 
wl1271_set_key(struct wl1271 * wl,struct wl12xx_vif * wlvif,u16 action,u8 id,u8 key_type,u8 key_size,const u8 * key,u32 tx_seq_32,u16 tx_seq_16,struct ieee80211_sta * sta)3383 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3384 		       u16 action, u8 id, u8 key_type,
3385 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3386 		       u16 tx_seq_16, struct ieee80211_sta *sta)
3387 {
3388 	int ret;
3389 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3390 
3391 	if (is_ap) {
3392 		struct wl1271_station *wl_sta;
3393 		u8 hlid;
3394 
3395 		if (sta) {
3396 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3397 			hlid = wl_sta->hlid;
3398 		} else {
3399 			hlid = wlvif->ap.bcast_hlid;
3400 		}
3401 
3402 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3403 			/*
3404 			 * We do not support removing keys after AP shutdown.
3405 			 * Pretend we do to make mac80211 happy.
3406 			 */
3407 			if (action != KEY_ADD_OR_REPLACE)
3408 				return 0;
3409 
3410 			ret = wl1271_record_ap_key(wl, wlvif, id,
3411 					     key_type, key_size,
3412 					     key, hlid, tx_seq_32,
3413 					     tx_seq_16);
3414 		} else {
3415 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3416 					     id, key_type, key_size,
3417 					     key, hlid, tx_seq_32,
3418 					     tx_seq_16);
3419 		}
3420 
3421 		if (ret < 0)
3422 			return ret;
3423 	} else {
3424 		const u8 *addr;
3425 		static const u8 bcast_addr[ETH_ALEN] = {
3426 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3427 		};
3428 
3429 		addr = sta ? sta->addr : bcast_addr;
3430 
3431 		if (is_zero_ether_addr(addr)) {
3432 			/* We dont support TX only encryption */
3433 			return -EOPNOTSUPP;
3434 		}
3435 
3436 		/* The wl1271 does not allow to remove unicast keys - they
3437 		   will be cleared automatically on next CMD_JOIN. Ignore the
3438 		   request silently, as we dont want the mac80211 to emit
3439 		   an error message. */
3440 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3441 			return 0;
3442 
3443 		/* don't remove key if hlid was already deleted */
3444 		if (action == KEY_REMOVE &&
3445 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3446 			return 0;
3447 
3448 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3449 					     id, key_type, key_size,
3450 					     key, addr, tx_seq_32,
3451 					     tx_seq_16);
3452 		if (ret < 0)
3453 			return ret;
3454 
3455 	}
3456 
3457 	return 0;
3458 }
3459 
wlcore_op_set_key(struct ieee80211_hw * hw,enum set_key_cmd cmd,struct ieee80211_vif * vif,struct ieee80211_sta * sta,struct ieee80211_key_conf * key_conf)3460 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3461 			     struct ieee80211_vif *vif,
3462 			     struct ieee80211_sta *sta,
3463 			     struct ieee80211_key_conf *key_conf)
3464 {
3465 	struct wl1271 *wl = hw->priv;
3466 	int ret;
3467 	bool might_change_spare =
3468 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3469 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3470 
3471 	if (might_change_spare) {
3472 		/*
3473 		 * stop the queues and flush to ensure the next packets are
3474 		 * in sync with FW spare block accounting
3475 		 */
3476 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3477 		wl1271_tx_flush(wl);
3478 	}
3479 
3480 	mutex_lock(&wl->mutex);
3481 
3482 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3483 		ret = -EAGAIN;
3484 		goto out_wake_queues;
3485 	}
3486 
3487 	ret = pm_runtime_get_sync(wl->dev);
3488 	if (ret < 0) {
3489 		pm_runtime_put_noidle(wl->dev);
3490 		goto out_wake_queues;
3491 	}
3492 
3493 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3494 
3495 	pm_runtime_mark_last_busy(wl->dev);
3496 	pm_runtime_put_autosuspend(wl->dev);
3497 
3498 out_wake_queues:
3499 	if (might_change_spare)
3500 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3501 
3502 	mutex_unlock(&wl->mutex);
3503 
3504 	return ret;
3505 }
3506 
wlcore_set_key(struct wl1271 * wl,enum set_key_cmd cmd,struct ieee80211_vif * vif,struct ieee80211_sta * sta,struct ieee80211_key_conf * key_conf)3507 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3508 		   struct ieee80211_vif *vif,
3509 		   struct ieee80211_sta *sta,
3510 		   struct ieee80211_key_conf *key_conf)
3511 {
3512 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3513 	int ret;
3514 	u32 tx_seq_32 = 0;
3515 	u16 tx_seq_16 = 0;
3516 	u8 key_type;
3517 	u8 hlid;
3518 
3519 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3520 
3521 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3522 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3523 		     key_conf->cipher, key_conf->keyidx,
3524 		     key_conf->keylen, key_conf->flags);
3525 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3526 
3527 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3528 		if (sta) {
3529 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3530 			hlid = wl_sta->hlid;
3531 		} else {
3532 			hlid = wlvif->ap.bcast_hlid;
3533 		}
3534 	else
3535 		hlid = wlvif->sta.hlid;
3536 
3537 	if (hlid != WL12XX_INVALID_LINK_ID) {
3538 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3539 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3540 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3541 	}
3542 
3543 	switch (key_conf->cipher) {
3544 	case WLAN_CIPHER_SUITE_WEP40:
3545 	case WLAN_CIPHER_SUITE_WEP104:
3546 		key_type = KEY_WEP;
3547 
3548 		key_conf->hw_key_idx = key_conf->keyidx;
3549 		break;
3550 	case WLAN_CIPHER_SUITE_TKIP:
3551 		key_type = KEY_TKIP;
3552 		key_conf->hw_key_idx = key_conf->keyidx;
3553 		break;
3554 	case WLAN_CIPHER_SUITE_CCMP:
3555 		key_type = KEY_AES;
3556 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3557 		break;
3558 	case WL1271_CIPHER_SUITE_GEM:
3559 		key_type = KEY_GEM;
3560 		break;
3561 	default:
3562 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3563 
3564 		return -EOPNOTSUPP;
3565 	}
3566 
3567 	switch (cmd) {
3568 	case SET_KEY:
3569 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3570 				 key_conf->keyidx, key_type,
3571 				 key_conf->keylen, key_conf->key,
3572 				 tx_seq_32, tx_seq_16, sta);
3573 		if (ret < 0) {
3574 			wl1271_error("Could not add or replace key");
3575 			return ret;
3576 		}
3577 
3578 		/*
3579 		 * reconfiguring arp response if the unicast (or common)
3580 		 * encryption key type was changed
3581 		 */
3582 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3583 		    (sta || key_type == KEY_WEP) &&
3584 		    wlvif->encryption_type != key_type) {
3585 			wlvif->encryption_type = key_type;
3586 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3587 			if (ret < 0) {
3588 				wl1271_warning("build arp rsp failed: %d", ret);
3589 				return ret;
3590 			}
3591 		}
3592 		break;
3593 
3594 	case DISABLE_KEY:
3595 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3596 				     key_conf->keyidx, key_type,
3597 				     key_conf->keylen, key_conf->key,
3598 				     0, 0, sta);
3599 		if (ret < 0) {
3600 			wl1271_error("Could not remove key");
3601 			return ret;
3602 		}
3603 		break;
3604 
3605 	default:
3606 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3607 		return -EOPNOTSUPP;
3608 	}
3609 
3610 	return ret;
3611 }
3612 EXPORT_SYMBOL_GPL(wlcore_set_key);
3613 
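/*
 * mac80211 default-key-index callback: remember the index and, when
 * WEP is in use, program the default key in the FW for the station
 * link.
 */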
3614 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3615 					  struct ieee80211_vif *vif,
3616 					  int key_idx)
3617 {
3618 	struct wl1271 *wl = hw->priv;
3619 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3620 	int ret;
3621 
3622 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3623 		     key_idx);
3624 
3625 	/* we don't handle unsetting of default key */
3626 	if (key_idx == -1)
3627 		return;
3628 
3629 	mutex_lock(&wl->mutex);
3630 
3631 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3632 		ret = -EAGAIN;
3633 		goto out_unlock;
3634 	}
3635 
3636 	ret = pm_runtime_get_sync(wl->dev);
3637 	if (ret < 0) {
3638 		pm_runtime_put_noidle(wl->dev);
3639 		goto out_unlock;
3640 	}
3641 
3642 	wlvif->default_key = key_idx;
3643 
3644 	/* the default WEP key needs to be configured at least once */
3645 	if (wlvif->encryption_type == KEY_WEP) {
3646 		ret = wl12xx_cmd_set_default_wep_key(wl,
3647 				key_idx,
3648 				wlvif->sta.hlid);
3649 		if (ret < 0)
3650 			goto out_sleep;
3651 	}
3652 
3653 out_sleep:
3654 	pm_runtime_mark_last_busy(wl->dev);
3655 	pm_runtime_put_autosuspend(wl->dev);
3656 
3657 out_unlock:
3658 	mutex_unlock(&wl->mutex);
3659 }
3660 
3661 void wlcore_regdomain_config(struct wl1271 *wl)
3662 {
3663 	int ret;
3664 
3665 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3666 		return;
3667 
3668 	mutex_lock(&wl->mutex);
3669 
3670 	if (unlikely(wl->state != WLCORE_STATE_ON))
3671 		goto out;
3672 
3673 	ret = pm_runtime_get_sync(wl->dev);
3674 	if (ret < 0) {
3675 		pm_runtime_put_autosuspend(wl->dev);
3676 		goto out;
3677 	}
3678 
3679 	ret = wlcore_cmd_regdomain_config_locked(wl);
3680 	if (ret < 0) {
3681 		wl12xx_queue_recovery_work(wl);
3682 		goto out;
3683 	}
3684 
3685 	pm_runtime_mark_last_busy(wl->dev);
3686 	pm_runtime_put_autosuspend(wl->dev);
3687 out:
3688 	mutex_unlock(&wl->mutex);
3689 }
3690 
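/*
 * One-shot hardware scan. Only the first SSID of the request is passed
 * down to wlcore_scan, and scanning is refused with -EBUSY while any
 * role holds a remain-on-channel.
 */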
3691 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3692 			     struct ieee80211_vif *vif,
3693 			     struct ieee80211_scan_request *hw_req)
3694 {
3695 	struct cfg80211_scan_request *req = &hw_req->req;
3696 	struct wl1271 *wl = hw->priv;
3697 	int ret;
3698 	u8 *ssid = NULL;
3699 	size_t len = 0;
3700 
3701 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3702 
3703 	if (req->n_ssids) {
3704 		ssid = req->ssids[0].ssid;
3705 		len = req->ssids[0].ssid_len;
3706 	}
3707 
3708 	mutex_lock(&wl->mutex);
3709 
3710 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3711 		/*
3712 		 * We cannot return -EBUSY here because cfg80211 will expect
3713 		 * a call to ieee80211_scan_completed if we do - in this case
3714 		 * there won't be any call.
3715 		 */
3716 		ret = -EAGAIN;
3717 		goto out;
3718 	}
3719 
3720 	ret = pm_runtime_get_sync(wl->dev);
3721 	if (ret < 0) {
3722 		pm_runtime_put_noidle(wl->dev);
3723 		goto out;
3724 	}
3725 
3726 	/* fail if there is any role in ROC */
3727 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3728 		/* don't allow scanning right now */
3729 		ret = -EBUSY;
3730 		goto out_sleep;
3731 	}
3732 
3733 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3734 out_sleep:
3735 	pm_runtime_mark_last_busy(wl->dev);
3736 	pm_runtime_put_autosuspend(wl->dev);
3737 out:
3738 	mutex_unlock(&wl->mutex);
3739 
3740 	return ret;
3741 }
3742 
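/*
 * Cancel an ongoing hardware scan: tell the FW to stop it if it has
 * not completed yet, reset the driver scan state and report the scan
 * as aborted to mac80211. The scan_complete work is cancelled only
 * after the mutex has been dropped.
 */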
3743 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3744 				     struct ieee80211_vif *vif)
3745 {
3746 	struct wl1271 *wl = hw->priv;
3747 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3748 	struct cfg80211_scan_info info = {
3749 		.aborted = true,
3750 	};
3751 	int ret;
3752 
3753 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3754 
3755 	mutex_lock(&wl->mutex);
3756 
3757 	if (unlikely(wl->state != WLCORE_STATE_ON))
3758 		goto out;
3759 
3760 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3761 		goto out;
3762 
3763 	ret = pm_runtime_get_sync(wl->dev);
3764 	if (ret < 0) {
3765 		pm_runtime_put_noidle(wl->dev);
3766 		goto out;
3767 	}
3768 
3769 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3770 		ret = wl->ops->scan_stop(wl, wlvif);
3771 		if (ret < 0)
3772 			goto out_sleep;
3773 	}
3774 
3775 	/*
3776 	 * Rearm the tx watchdog just before idling scan. This
3777 	 * prevents just-finished scans from triggering the watchdog
3778 	 */
3779 	wl12xx_rearm_tx_watchdog_locked(wl);
3780 
3781 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3782 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3783 	wl->scan_wlvif = NULL;
3784 	wl->scan.req = NULL;
3785 	ieee80211_scan_completed(wl->hw, &info);
3786 
3787 out_sleep:
3788 	pm_runtime_mark_last_busy(wl->dev);
3789 	pm_runtime_put_autosuspend(wl->dev);
3790 out:
3791 	mutex_unlock(&wl->mutex);
3792 
3793 	cancel_delayed_work_sync(&wl->scan_complete_work);
3794 }
3795 
3796 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3797 				      struct ieee80211_vif *vif,
3798 				      struct cfg80211_sched_scan_request *req,
3799 				      struct ieee80211_scan_ies *ies)
3800 {
3801 	struct wl1271 *wl = hw->priv;
3802 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3803 	int ret;
3804 
3805 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3806 
3807 	mutex_lock(&wl->mutex);
3808 
3809 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3810 		ret = -EAGAIN;
3811 		goto out;
3812 	}
3813 
3814 	ret = pm_runtime_get_sync(wl->dev);
3815 	if (ret < 0) {
3816 		pm_runtime_put_noidle(wl->dev);
3817 		goto out;
3818 	}
3819 
3820 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3821 	if (ret < 0)
3822 		goto out_sleep;
3823 
3824 	wl->sched_vif = wlvif;
3825 
3826 out_sleep:
3827 	pm_runtime_mark_last_busy(wl->dev);
3828 	pm_runtime_put_autosuspend(wl->dev);
3829 out:
3830 	mutex_unlock(&wl->mutex);
3831 	return ret;
3832 }
3833 
3834 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3835 				     struct ieee80211_vif *vif)
3836 {
3837 	struct wl1271 *wl = hw->priv;
3838 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3839 	int ret;
3840 
3841 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3842 
3843 	mutex_lock(&wl->mutex);
3844 
3845 	if (unlikely(wl->state != WLCORE_STATE_ON))
3846 		goto out;
3847 
3848 	ret = pm_runtime_get_sync(wl->dev);
3849 	if (ret < 0) {
3850 		pm_runtime_put_noidle(wl->dev);
3851 		goto out;
3852 	}
3853 
3854 	wl->ops->sched_scan_stop(wl, wlvif);
3855 
3856 	pm_runtime_mark_last_busy(wl->dev);
3857 	pm_runtime_put_autosuspend(wl->dev);
3858 out:
3859 	mutex_unlock(&wl->mutex);
3860 
3861 	return 0;
3862 }
3863 
3864 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3865 {
3866 	struct wl1271 *wl = hw->priv;
3867 	int ret = 0;
3868 
3869 	mutex_lock(&wl->mutex);
3870 
3871 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3872 		ret = -EAGAIN;
3873 		goto out;
3874 	}
3875 
3876 	ret = pm_runtime_get_sync(wl->dev);
3877 	if (ret < 0) {
3878 		pm_runtime_put_noidle(wl->dev);
3879 		goto out;
3880 	}
3881 
3882 	ret = wl1271_acx_frag_threshold(wl, value);
3883 	if (ret < 0)
3884 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3885 
3886 	pm_runtime_mark_last_busy(wl->dev);
3887 	pm_runtime_put_autosuspend(wl->dev);
3888 
3889 out:
3890 	mutex_unlock(&wl->mutex);
3891 
3892 	return ret;
3893 }
3894 
3895 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3896 {
3897 	struct wl1271 *wl = hw->priv;
3898 	struct wl12xx_vif *wlvif;
3899 	int ret = 0;
3900 
3901 	mutex_lock(&wl->mutex);
3902 
3903 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3904 		ret = -EAGAIN;
3905 		goto out;
3906 	}
3907 
3908 	ret = pm_runtime_get_sync(wl->dev);
3909 	if (ret < 0) {
3910 		pm_runtime_put_noidle(wl->dev);
3911 		goto out;
3912 	}
3913 
3914 	wl12xx_for_each_wlvif(wl, wlvif) {
3915 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3916 		if (ret < 0)
3917 			wl1271_warning("set rts threshold failed: %d", ret);
3918 	}
3919 	pm_runtime_mark_last_busy(wl->dev);
3920 	pm_runtime_put_autosuspend(wl->dev);
3921 
3922 out:
3923 	mutex_unlock(&wl->mutex);
3924 
3925 	return ret;
3926 }
3927 
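/*
 * Strip a single information element from a template frame: locate the
 * IE starting at ieoffset, move the rest of the frame over it and trim
 * the skb by the element's total length (header + payload).
 */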
3928 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3929 {
3930 	int len;
3931 	const u8 *next, *end = skb->data + skb->len;
3932 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3933 					skb->len - ieoffset);
3934 	if (!ie)
3935 		return;
3936 	len = ie[1] + 2;
3937 	next = ie + len;
3938 	memmove(ie, next, end - next);
3939 	skb_trim(skb, skb->len - len);
3940 }
3941 
3942 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3943 					    unsigned int oui, u8 oui_type,
3944 					    int ieoffset)
3945 {
3946 	int len;
3947 	const u8 *next, *end = skb->data + skb->len;
3948 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3949 					       skb->data + ieoffset,
3950 					       skb->len - ieoffset);
3951 	if (!ie)
3952 		return;
3953 	len = ie[1] + 2;
3954 	next = ie + len;
3955 	memmove(ie, next, end - next);
3956 	skb_trim(skb, skb->len - len);
3957 }
3958 
3959 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3960 					 struct ieee80211_vif *vif)
3961 {
3962 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3963 	struct sk_buff *skb;
3964 	int ret;
3965 
3966 	skb = ieee80211_proberesp_get(wl->hw, vif);
3967 	if (!skb)
3968 		return -EOPNOTSUPP;
3969 
3970 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3971 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3972 				      skb->data,
3973 				      skb->len, 0,
3974 				      rates);
3975 	dev_kfree_skb(skb);
3976 
3977 	if (ret < 0)
3978 		goto out;
3979 
3980 	wl1271_debug(DEBUG_AP, "probe response updated");
3981 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3982 
3983 out:
3984 	return ret;
3985 }
3986 
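/*
 * Legacy probe-response template setup: when the vif has no SSID
 * stored (presumably the hidden-SSID case), rebuild the template with
 * the SSID taken from bss_conf before uploading it; otherwise upload
 * the caller-supplied frame unchanged.
 */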
3987 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3988 					     struct ieee80211_vif *vif,
3989 					     u8 *probe_rsp_data,
3990 					     size_t probe_rsp_len,
3991 					     u32 rates)
3992 {
3993 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3994 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3995 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3996 	int ssid_ie_offset, ie_offset, templ_len;
3997 	const u8 *ptr;
3998 
3999 	/* no need to change probe response if the SSID is set correctly */
4000 	if (wlvif->ssid_len > 0)
4001 		return wl1271_cmd_template_set(wl, wlvif->role_id,
4002 					       CMD_TEMPL_AP_PROBE_RESPONSE,
4003 					       probe_rsp_data,
4004 					       probe_rsp_len, 0,
4005 					       rates);
4006 
4007 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
4008 		wl1271_error("probe_rsp template too big");
4009 		return -EINVAL;
4010 	}
4011 
4012 	/* start searching from IE offset */
4013 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4014 
4015 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4016 			       probe_rsp_len - ie_offset);
4017 	if (!ptr) {
4018 		wl1271_error("No SSID in beacon!");
4019 		return -EINVAL;
4020 	}
4021 
4022 	ssid_ie_offset = ptr - probe_rsp_data;
4023 	ptr += (ptr[1] + 2);
4024 
4025 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4026 
4027 	/* insert SSID from bss_conf */
4028 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4029 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4030 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4031 	       bss_conf->ssid, bss_conf->ssid_len);
4032 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
4033 
4034 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4035 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
4036 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4037 
4038 	return wl1271_cmd_template_set(wl, wlvif->role_id,
4039 				       CMD_TEMPL_AP_PROBE_RESPONSE,
4040 				       probe_rsp_templ,
4041 				       templ_len, 0,
4042 				       rates);
4043 }
4044 
4045 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4046 				       struct ieee80211_vif *vif,
4047 				       struct ieee80211_bss_conf *bss_conf,
4048 				       u32 changed)
4049 {
4050 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4051 	int ret = 0;
4052 
4053 	if (changed & BSS_CHANGED_ERP_SLOT) {
4054 		if (bss_conf->use_short_slot)
4055 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4056 		else
4057 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4058 		if (ret < 0) {
4059 			wl1271_warning("Set slot time failed %d", ret);
4060 			goto out;
4061 		}
4062 	}
4063 
4064 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4065 		if (bss_conf->use_short_preamble)
4066 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4067 		else
4068 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4069 	}
4070 
4071 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4072 		if (bss_conf->use_cts_prot)
4073 			ret = wl1271_acx_cts_protect(wl, wlvif,
4074 						     CTSPROTECT_ENABLE);
4075 		else
4076 			ret = wl1271_acx_cts_protect(wl, wlvif,
4077 						     CTSPROTECT_DISABLE);
4078 		if (ret < 0) {
4079 			wl1271_warning("Set ctsprotect failed %d", ret);
4080 			goto out;
4081 		}
4082 	}
4083 
4084 out:
4085 	return ret;
4086 }
4087 
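/*
 * Upload the beacon template for this vif and note whether it carries
 * a WMM vendor IE. Unless userspace already set a probe-response
 * template explicitly, also derive one from the beacon: drop the TIM
 * and P2P IEs, rewrite the frame control to probe-response and upload
 * the result.
 */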
4088 static int wlcore_set_beacon_template(struct wl1271 *wl,
4089 				      struct ieee80211_vif *vif,
4090 				      bool is_ap)
4091 {
4092 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4093 	struct ieee80211_hdr *hdr;
4094 	u32 min_rate;
4095 	int ret;
4096 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4097 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4098 	u16 tmpl_id;
4099 
4100 	if (!beacon) {
4101 		ret = -EINVAL;
4102 		goto out;
4103 	}
4104 
4105 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4106 
4107 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4108 	if (ret < 0) {
4109 		dev_kfree_skb(beacon);
4110 		goto out;
4111 	}
4112 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4113 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4114 		CMD_TEMPL_BEACON;
4115 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4116 				      beacon->data,
4117 				      beacon->len, 0,
4118 				      min_rate);
4119 	if (ret < 0) {
4120 		dev_kfree_skb(beacon);
4121 		goto out;
4122 	}
4123 
4124 	wlvif->wmm_enabled =
4125 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4126 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4127 					beacon->data + ieoffset,
4128 					beacon->len - ieoffset);
4129 
4130 	/*
4131 	 * In case a probe-resp template was already set explicitly
4132 	 * by userspace, don't derive one from the beacon data.
4133 	 */
4134 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4135 		goto end_bcn;
4136 
4137 	/* remove TIM ie from probe response */
4138 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4139 
4140 	/*
4141 	 * remove p2p ie from probe response.
4142 	 * the fw responds to probe requests that don't include
4143 	 * the p2p ie. probe requests with a p2p ie will be passed,
4144 	 * and will be responded to by the supplicant (the spec
4145 	 * forbids including the p2p ie when responding to probe
4146 	 * requests that didn't include it).
4147 	 */
4148 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4149 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4150 
4151 	hdr = (struct ieee80211_hdr *) beacon->data;
4152 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4153 					 IEEE80211_STYPE_PROBE_RESP);
4154 	if (is_ap)
4155 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4156 							   beacon->data,
4157 							   beacon->len,
4158 							   min_rate);
4159 	else
4160 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4161 					      CMD_TEMPL_PROBE_RESPONSE,
4162 					      beacon->data,
4163 					      beacon->len, 0,
4164 					      min_rate);
4165 end_bcn:
4166 	dev_kfree_skb(beacon);
4167 	if (ret < 0)
4168 		goto out;
4169 
4170 out:
4171 	return ret;
4172 }
4173 
4174 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4175 					  struct ieee80211_vif *vif,
4176 					  struct ieee80211_bss_conf *bss_conf,
4177 					  u32 changed)
4178 {
4179 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4180 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4181 	int ret = 0;
4182 
4183 	if (changed & BSS_CHANGED_BEACON_INT) {
4184 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4185 			bss_conf->beacon_int);
4186 
4187 		wlvif->beacon_int = bss_conf->beacon_int;
4188 	}
4189 
4190 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4191 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4192 
4193 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4194 	}
4195 
4196 	if (changed & BSS_CHANGED_BEACON) {
4197 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4198 		if (ret < 0)
4199 			goto out;
4200 
4201 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4202 				       &wlvif->flags)) {
4203 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4204 			if (ret < 0)
4205 				goto out;
4206 		}
4207 	}
4208 out:
4209 	if (ret != 0)
4210 		wl1271_error("beacon info change failed: %d", ret);
4211 	return ret;
4212 }
4213 
4214 /* AP mode changes */
4215 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4216 				       struct ieee80211_vif *vif,
4217 				       struct ieee80211_bss_conf *bss_conf,
4218 				       u32 changed)
4219 {
4220 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4221 	int ret = 0;
4222 
4223 	if (changed & BSS_CHANGED_BASIC_RATES) {
4224 		u32 rates = bss_conf->basic_rates;
4225 
4226 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4227 								 wlvif->band);
4228 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4229 							wlvif->basic_rate_set);
4230 
4231 		ret = wl1271_init_ap_rates(wl, wlvif);
4232 		if (ret < 0) {
4233 			wl1271_error("AP rate policy change failed %d", ret);
4234 			goto out;
4235 		}
4236 
4237 		ret = wl1271_ap_init_templates(wl, vif);
4238 		if (ret < 0)
4239 			goto out;
4240 
4241 		/* No need to set probe resp template for mesh */
4242 		if (!ieee80211_vif_is_mesh(vif)) {
4243 			ret = wl1271_ap_set_probe_resp_tmpl(wl,
4244 							    wlvif->basic_rate,
4245 							    vif);
4246 			if (ret < 0)
4247 				goto out;
4248 		}
4249 
4250 		ret = wlcore_set_beacon_template(wl, vif, true);
4251 		if (ret < 0)
4252 			goto out;
4253 	}
4254 
4255 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4256 	if (ret < 0)
4257 		goto out;
4258 
4259 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4260 		if (bss_conf->enable_beacon) {
4261 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4262 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4263 				if (ret < 0)
4264 					goto out;
4265 
4266 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4267 				if (ret < 0)
4268 					goto out;
4269 
4270 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4271 				wl1271_debug(DEBUG_AP, "started AP");
4272 			}
4273 		} else {
4274 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4275 				/*
4276 				 * AP might be in ROC in case we have just
4277 				 * sent an auth reply. Handle it.
4278 				 */
4279 				if (test_bit(wlvif->role_id, wl->roc_map))
4280 					wl12xx_croc(wl, wlvif->role_id);
4281 
4282 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4283 				if (ret < 0)
4284 					goto out;
4285 
4286 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4287 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4288 					  &wlvif->flags);
4289 				wl1271_debug(DEBUG_AP, "stopped AP");
4290 			}
4291 		}
4292 	}
4293 
4294 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4295 	if (ret < 0)
4296 		goto out;
4297 
4298 	/* Handle HT information change */
4299 	if ((changed & BSS_CHANGED_HT) &&
4300 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4301 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4302 					bss_conf->ht_operation_mode);
4303 		if (ret < 0) {
4304 			wl1271_warning("Set ht information failed %d", ret);
4305 			goto out;
4306 		}
4307 	}
4308 
4309 out:
4310 	return;
4311 }
4312 
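/*
 * Apply a new BSSID on a STA vif: refresh the basic and peer rate
 * sets, stop any sched scan owned by this vif, update the rate
 * policies and the (qos) null-data templates, set the SSID and mark
 * the role as in use.
 */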
4313 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4314 			    struct ieee80211_bss_conf *bss_conf,
4315 			    u32 sta_rate_set)
4316 {
4317 	u32 rates;
4318 	int ret;
4319 
4320 	wl1271_debug(DEBUG_MAC80211,
4321 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4322 	     bss_conf->bssid, bss_conf->aid,
4323 	     bss_conf->beacon_int,
4324 	     bss_conf->basic_rates, sta_rate_set);
4325 
4326 	wlvif->beacon_int = bss_conf->beacon_int;
4327 	rates = bss_conf->basic_rates;
4328 	wlvif->basic_rate_set =
4329 		wl1271_tx_enabled_rates_get(wl, rates,
4330 					    wlvif->band);
4331 	wlvif->basic_rate =
4332 		wl1271_tx_min_rate_get(wl,
4333 				       wlvif->basic_rate_set);
4334 
4335 	if (sta_rate_set)
4336 		wlvif->rate_set =
4337 			wl1271_tx_enabled_rates_get(wl,
4338 						sta_rate_set,
4339 						wlvif->band);
4340 
4341 	/* we only support sched_scan while not connected */
4342 	if (wl->sched_vif == wlvif)
4343 		wl->ops->sched_scan_stop(wl, wlvif);
4344 
4345 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4346 	if (ret < 0)
4347 		return ret;
4348 
4349 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4350 	if (ret < 0)
4351 		return ret;
4352 
4353 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4354 	if (ret < 0)
4355 		return ret;
4356 
4357 	wlcore_set_ssid(wl, wlvif);
4358 
4359 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4360 
4361 	return 0;
4362 }
4363 
4364 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4365 {
4366 	int ret;
4367 
4368 	/* revert back to minimum rates for the current band */
4369 	wl1271_set_band_rate(wl, wlvif);
4370 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4371 
4372 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4373 	if (ret < 0)
4374 		return ret;
4375 
4376 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4377 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4378 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4379 		if (ret < 0)
4380 			return ret;
4381 	}
4382 
4383 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4384 	return 0;
4385 }
4386 /* STA/IBSS mode changes */
4387 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4388 					struct ieee80211_vif *vif,
4389 					struct ieee80211_bss_conf *bss_conf,
4390 					u32 changed)
4391 {
4392 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4393 	bool do_join = false;
4394 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4395 	bool ibss_joined = false;
4396 	u32 sta_rate_set = 0;
4397 	int ret;
4398 	struct ieee80211_sta *sta;
4399 	bool sta_exists = false;
4400 	struct ieee80211_sta_ht_cap sta_ht_cap;
4401 
4402 	if (is_ibss) {
4403 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4404 						     changed);
4405 		if (ret < 0)
4406 			goto out;
4407 	}
4408 
4409 	if (changed & BSS_CHANGED_IBSS) {
4410 		if (bss_conf->ibss_joined) {
4411 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4412 			ibss_joined = true;
4413 		} else {
4414 			wlcore_unset_assoc(wl, wlvif);
4415 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4416 		}
4417 	}
4418 
4419 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4420 		do_join = true;
4421 
4422 	/* Need to update the SSID (for filtering etc) */
4423 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4424 		do_join = true;
4425 
4426 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4427 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4428 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4429 
4430 		do_join = true;
4431 	}
4432 
4433 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4434 		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4435 
4436 	if (changed & BSS_CHANGED_CQM) {
4437 		bool enable = false;
4438 		if (bss_conf->cqm_rssi_thold)
4439 			enable = true;
4440 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4441 						  bss_conf->cqm_rssi_thold,
4442 						  bss_conf->cqm_rssi_hyst);
4443 		if (ret < 0)
4444 			goto out;
4445 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4446 	}
4447 
4448 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4449 		       BSS_CHANGED_ASSOC)) {
4450 		rcu_read_lock();
4451 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4452 		if (sta) {
4453 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4454 
4455 			/* save the supp_rates of the ap */
4456 			sta_rate_set = sta->supp_rates[wlvif->band];
4457 			if (sta->ht_cap.ht_supported)
4458 				sta_rate_set |=
4459 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4460 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4461 			sta_ht_cap = sta->ht_cap;
4462 			sta_exists = true;
4463 		}
4464 
4465 		rcu_read_unlock();
4466 	}
4467 
4468 	if (changed & BSS_CHANGED_BSSID) {
4469 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4470 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4471 					       sta_rate_set);
4472 			if (ret < 0)
4473 				goto out;
4474 
4475 			/* Need to update the BSSID (for filtering etc) */
4476 			do_join = true;
4477 		} else {
4478 			ret = wlcore_clear_bssid(wl, wlvif);
4479 			if (ret < 0)
4480 				goto out;
4481 		}
4482 	}
4483 
4484 	if (changed & BSS_CHANGED_IBSS) {
4485 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4486 			     bss_conf->ibss_joined);
4487 
4488 		if (bss_conf->ibss_joined) {
4489 			u32 rates = bss_conf->basic_rates;
4490 			wlvif->basic_rate_set =
4491 				wl1271_tx_enabled_rates_get(wl, rates,
4492 							    wlvif->band);
4493 			wlvif->basic_rate =
4494 				wl1271_tx_min_rate_get(wl,
4495 						       wlvif->basic_rate_set);
4496 
4497 			/* by default, use 11b + OFDM rates */
4498 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4499 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4500 			if (ret < 0)
4501 				goto out;
4502 		}
4503 	}
4504 
4505 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4506 		/* enable beacon filtering */
4507 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4508 		if (ret < 0)
4509 			goto out;
4510 	}
4511 
4512 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4513 	if (ret < 0)
4514 		goto out;
4515 
4516 	if (do_join) {
4517 		ret = wlcore_join(wl, wlvif);
4518 		if (ret < 0) {
4519 			wl1271_warning("cmd join failed %d", ret);
4520 			goto out;
4521 		}
4522 	}
4523 
4524 	if (changed & BSS_CHANGED_ASSOC) {
4525 		if (bss_conf->assoc) {
4526 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4527 					       sta_rate_set);
4528 			if (ret < 0)
4529 				goto out;
4530 
4531 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4532 				wl12xx_set_authorized(wl, wlvif);
4533 		} else {
4534 			wlcore_unset_assoc(wl, wlvif);
4535 		}
4536 	}
4537 
4538 	if (changed & BSS_CHANGED_PS) {
4539 		if ((bss_conf->ps) &&
4540 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4541 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4542 			int ps_mode;
4543 			char *ps_mode_str;
4544 
4545 			if (wl->conf.conn.forced_ps) {
4546 				ps_mode = STATION_POWER_SAVE_MODE;
4547 				ps_mode_str = "forced";
4548 			} else {
4549 				ps_mode = STATION_AUTO_PS_MODE;
4550 				ps_mode_str = "auto";
4551 			}
4552 
4553 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4554 
4555 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4556 			if (ret < 0)
4557 				wl1271_warning("enter %s ps failed %d",
4558 					       ps_mode_str, ret);
4559 		} else if (!bss_conf->ps &&
4560 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4561 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4562 
4563 			ret = wl1271_ps_set_mode(wl, wlvif,
4564 						 STATION_ACTIVE_MODE);
4565 			if (ret < 0)
4566 				wl1271_warning("exit auto ps failed %d", ret);
4567 		}
4568 	}
4569 
4570 	/* Handle new association with HT. Do this after join. */
4571 	if (sta_exists) {
4572 		bool enabled =
4573 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4574 
4575 		ret = wlcore_hw_set_peer_cap(wl,
4576 					     &sta_ht_cap,
4577 					     enabled,
4578 					     wlvif->rate_set,
4579 					     wlvif->sta.hlid);
4580 		if (ret < 0) {
4581 			wl1271_warning("Set ht cap failed %d", ret);
4582 			goto out;
4583 
4584 		}
4585 
4586 		if (enabled) {
4587 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4588 						bss_conf->ht_operation_mode);
4589 			if (ret < 0) {
4590 				wl1271_warning("Set ht information failed %d",
4591 					       ret);
4592 				goto out;
4593 			}
4594 		}
4595 	}
4596 
4597 	/* Handle arp filtering. Done after join. */
4598 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4599 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4600 		__be32 addr = bss_conf->arp_addr_list[0];
4601 		wlvif->sta.qos = bss_conf->qos;
4602 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4603 
4604 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4605 			wlvif->ip_addr = addr;
4606 			/*
4607 			 * The template should have been configured only upon
4608 			 * association. However, it seems that the correct IP
4609 			 * isn't being set (when sending), so we have to
4610 			 * reconfigure the template upon every IP change.
4611 			 */
4612 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4613 			if (ret < 0) {
4614 				wl1271_warning("build arp rsp failed: %d", ret);
4615 				goto out;
4616 			}
4617 
4618 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4619 				(ACX_ARP_FILTER_ARP_FILTERING |
4620 				 ACX_ARP_FILTER_AUTO_ARP),
4621 				addr);
4622 		} else {
4623 			wlvif->ip_addr = 0;
4624 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4625 		}
4626 
4627 		if (ret < 0)
4628 			goto out;
4629 	}
4630 
4631 out:
4632 	return;
4633 }
4634 
4635 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4636 				       struct ieee80211_vif *vif,
4637 				       struct ieee80211_bss_conf *bss_conf,
4638 				       u32 changed)
4639 {
4640 	struct wl1271 *wl = hw->priv;
4641 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4642 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4643 	int ret;
4644 
4645 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4646 		     wlvif->role_id, (int)changed);
4647 
4648 	/*
4649 	 * make sure to cancel pending disconnections if our association
4650 	 * state changed
4651 	 */
4652 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4653 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4654 
4655 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4656 	    !bss_conf->enable_beacon)
4657 		wl1271_tx_flush(wl);
4658 
4659 	mutex_lock(&wl->mutex);
4660 
4661 	if (unlikely(wl->state != WLCORE_STATE_ON))
4662 		goto out;
4663 
4664 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4665 		goto out;
4666 
4667 	ret = pm_runtime_get_sync(wl->dev);
4668 	if (ret < 0) {
4669 		pm_runtime_put_noidle(wl->dev);
4670 		goto out;
4671 	}
4672 
4673 	if ((changed & BSS_CHANGED_TXPOWER) &&
4674 	    bss_conf->txpower != wlvif->power_level) {
4675 
4676 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4677 		if (ret < 0)
4678 			goto out;
4679 
4680 		wlvif->power_level = bss_conf->txpower;
4681 	}
4682 
4683 	if (is_ap)
4684 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4685 	else
4686 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4687 
4688 	pm_runtime_mark_last_busy(wl->dev);
4689 	pm_runtime_put_autosuspend(wl->dev);
4690 
4691 out:
4692 	mutex_unlock(&wl->mutex);
4693 }
4694 
4695 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4696 				 struct ieee80211_chanctx_conf *ctx)
4697 {
4698 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4699 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4700 		     cfg80211_get_chandef_type(&ctx->def));
4701 	return 0;
4702 }
4703 
4704 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4705 				     struct ieee80211_chanctx_conf *ctx)
4706 {
4707 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4708 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4709 		     cfg80211_get_chandef_type(&ctx->def));
4710 }
4711 
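/*
 * Channel context parameters changed. For every AP vif bound to this
 * context, start radar detection (CAC) if the radar flag has just been
 * enabled and the channel is still in the DFS_USABLE state.
 */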
4712 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4713 				     struct ieee80211_chanctx_conf *ctx,
4714 				     u32 changed)
4715 {
4716 	struct wl1271 *wl = hw->priv;
4717 	struct wl12xx_vif *wlvif;
4718 	int ret;
4719 	int channel = ieee80211_frequency_to_channel(
4720 		ctx->def.chan->center_freq);
4721 
4722 	wl1271_debug(DEBUG_MAC80211,
4723 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4724 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4725 
4726 	mutex_lock(&wl->mutex);
4727 
4728 	ret = pm_runtime_get_sync(wl->dev);
4729 	if (ret < 0) {
4730 		pm_runtime_put_noidle(wl->dev);
4731 		goto out;
4732 	}
4733 
4734 	wl12xx_for_each_wlvif(wl, wlvif) {
4735 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4736 
4737 		rcu_read_lock();
4738 		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4739 			rcu_read_unlock();
4740 			continue;
4741 		}
4742 		rcu_read_unlock();
4743 
4744 		/* start radar if needed */
4745 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4746 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4747 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4748 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4749 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4750 			wlcore_hw_set_cac(wl, wlvif, true);
4751 			wlvif->radar_enabled = true;
4752 		}
4753 	}
4754 
4755 	pm_runtime_mark_last_busy(wl->dev);
4756 	pm_runtime_put_autosuspend(wl->dev);
4757 out:
4758 	mutex_unlock(&wl->mutex);
4759 }
4760 
4761 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4762 					struct ieee80211_vif *vif,
4763 					struct ieee80211_chanctx_conf *ctx)
4764 {
4765 	struct wl1271 *wl = hw->priv;
4766 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4767 	int channel = ieee80211_frequency_to_channel(
4768 		ctx->def.chan->center_freq);
4769 	int ret = -EINVAL;
4770 
4771 	wl1271_debug(DEBUG_MAC80211,
4772 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4773 		     wlvif->role_id, channel,
4774 		     cfg80211_get_chandef_type(&ctx->def),
4775 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4776 
4777 	mutex_lock(&wl->mutex);
4778 
4779 	if (unlikely(wl->state != WLCORE_STATE_ON))
4780 		goto out;
4781 
4782 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4783 		goto out;
4784 
4785 	ret = pm_runtime_get_sync(wl->dev);
4786 	if (ret < 0) {
4787 		pm_runtime_put_noidle(wl->dev);
4788 		goto out;
4789 	}
4790 
4791 	wlvif->band = ctx->def.chan->band;
4792 	wlvif->channel = channel;
4793 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4794 
4795 	/* update default rates according to the band */
4796 	wl1271_set_band_rate(wl, wlvif);
4797 
4798 	if (ctx->radar_enabled &&
4799 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4800 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4801 		wlcore_hw_set_cac(wl, wlvif, true);
4802 		wlvif->radar_enabled = true;
4803 	}
4804 
4805 	pm_runtime_mark_last_busy(wl->dev);
4806 	pm_runtime_put_autosuspend(wl->dev);
4807 out:
4808 	mutex_unlock(&wl->mutex);
4809 
4810 	return 0;
4811 }
4812 
4813 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4814 					   struct ieee80211_vif *vif,
4815 					   struct ieee80211_chanctx_conf *ctx)
4816 {
4817 	struct wl1271 *wl = hw->priv;
4818 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4819 	int ret;
4820 
4821 	wl1271_debug(DEBUG_MAC80211,
4822 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4823 		     wlvif->role_id,
4824 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4825 		     cfg80211_get_chandef_type(&ctx->def));
4826 
4827 	wl1271_tx_flush(wl);
4828 
4829 	mutex_lock(&wl->mutex);
4830 
4831 	if (unlikely(wl->state != WLCORE_STATE_ON))
4832 		goto out;
4833 
4834 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4835 		goto out;
4836 
4837 	ret = pm_runtime_get_sync(wl->dev);
4838 	if (ret < 0) {
4839 		pm_runtime_put_noidle(wl->dev);
4840 		goto out;
4841 	}
4842 
4843 	if (wlvif->radar_enabled) {
4844 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4845 		wlcore_hw_set_cac(wl, wlvif, false);
4846 		wlvif->radar_enabled = false;
4847 	}
4848 
4849 	pm_runtime_mark_last_busy(wl->dev);
4850 	pm_runtime_put_autosuspend(wl->dev);
4851 out:
4852 	mutex_unlock(&wl->mutex);
4853 }
4854 
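/*
 * Move an AP vif to a new channel during a chanctx switch. Beaconing
 * is expected to be disabled at this point; stop any running CAC,
 * update the cached band/channel/channel_type and restart radar
 * detection if the new context requires it.
 */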
4855 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4856 				    struct wl12xx_vif *wlvif,
4857 				    struct ieee80211_chanctx_conf *new_ctx)
4858 {
4859 	int channel = ieee80211_frequency_to_channel(
4860 		new_ctx->def.chan->center_freq);
4861 
4862 	wl1271_debug(DEBUG_MAC80211,
4863 		     "switch vif (role %d) %d -> %d chan_type: %d",
4864 		     wlvif->role_id, wlvif->channel, channel,
4865 		     cfg80211_get_chandef_type(&new_ctx->def));
4866 
4867 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4868 		return 0;
4869 
4870 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4871 
4872 	if (wlvif->radar_enabled) {
4873 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4874 		wlcore_hw_set_cac(wl, wlvif, false);
4875 		wlvif->radar_enabled = false;
4876 	}
4877 
4878 	wlvif->band = new_ctx->def.chan->band;
4879 	wlvif->channel = channel;
4880 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4881 
4882 	/* start radar if needed */
4883 	if (new_ctx->radar_enabled) {
4884 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4885 		wlcore_hw_set_cac(wl, wlvif, true);
4886 		wlvif->radar_enabled = true;
4887 	}
4888 
4889 	return 0;
4890 }
4891 
4892 static int
4893 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4894 			     struct ieee80211_vif_chanctx_switch *vifs,
4895 			     int n_vifs,
4896 			     enum ieee80211_chanctx_switch_mode mode)
4897 {
4898 	struct wl1271 *wl = hw->priv;
4899 	int i, ret;
4900 
4901 	wl1271_debug(DEBUG_MAC80211,
4902 		     "mac80211 switch chanctx n_vifs %d mode %d",
4903 		     n_vifs, mode);
4904 
4905 	mutex_lock(&wl->mutex);
4906 
4907 	ret = pm_runtime_get_sync(wl->dev);
4908 	if (ret < 0) {
4909 		pm_runtime_put_noidle(wl->dev);
4910 		goto out;
4911 	}
4912 
4913 	for (i = 0; i < n_vifs; i++) {
4914 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4915 
4916 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4917 		if (ret)
4918 			goto out_sleep;
4919 	}
4920 out_sleep:
4921 	pm_runtime_mark_last_busy(wl->dev);
4922 	pm_runtime_put_autosuspend(wl->dev);
4923 out:
4924 	mutex_unlock(&wl->mutex);
4925 
4926 	return 0;
4927 }
4928 
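/*
 * Configure one EDCA queue: translate the mac80211 AC parameters into
 * the FW's AC and TID configuration. UAPSD maps to the UPSD-trigger PS
 * scheme, everything else to the legacy scheme.
 */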
4929 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4930 			     struct ieee80211_vif *vif, u16 queue,
4931 			     const struct ieee80211_tx_queue_params *params)
4932 {
4933 	struct wl1271 *wl = hw->priv;
4934 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4935 	u8 ps_scheme;
4936 	int ret = 0;
4937 
4938 	if (wlcore_is_p2p_mgmt(wlvif))
4939 		return 0;
4940 
4941 	mutex_lock(&wl->mutex);
4942 
4943 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4944 
4945 	if (params->uapsd)
4946 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4947 	else
4948 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4949 
4950 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4951 		goto out;
4952 
4953 	ret = pm_runtime_get_sync(wl->dev);
4954 	if (ret < 0) {
4955 		pm_runtime_put_noidle(wl->dev);
4956 		goto out;
4957 	}
4958 
4959 	/*
4960 	 * the txop is configured by mac80211 in units of 32us,
4961 	 * but the firmware expects microseconds
4962 	 */
4963 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4964 				params->cw_min, params->cw_max,
4965 				params->aifs, params->txop << 5);
4966 	if (ret < 0)
4967 		goto out_sleep;
4968 
4969 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4970 				 CONF_CHANNEL_TYPE_EDCF,
4971 				 wl1271_tx_get_queue(queue),
4972 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4973 				 0, 0);
4974 
4975 out_sleep:
4976 	pm_runtime_mark_last_busy(wl->dev);
4977 	pm_runtime_put_autosuspend(wl->dev);
4978 
4979 out:
4980 	mutex_unlock(&wl->mutex);
4981 
4982 	return ret;
4983 }
4984 
4985 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4986 			     struct ieee80211_vif *vif)
4987 {
4988 
4989 	struct wl1271 *wl = hw->priv;
4990 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4991 	u64 mactime = ULLONG_MAX;
4992 	int ret;
4993 
4994 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4995 
4996 	mutex_lock(&wl->mutex);
4997 
4998 	if (unlikely(wl->state != WLCORE_STATE_ON))
4999 		goto out;
5000 
5001 	ret = pm_runtime_get_sync(wl->dev);
5002 	if (ret < 0) {
5003 		pm_runtime_put_noidle(wl->dev);
5004 		goto out;
5005 	}
5006 
5007 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
5008 	if (ret < 0)
5009 		goto out_sleep;
5010 
5011 out_sleep:
5012 	pm_runtime_mark_last_busy(wl->dev);
5013 	pm_runtime_put_autosuspend(wl->dev);
5014 
5015 out:
5016 	mutex_unlock(&wl->mutex);
5017 	return mactime;
5018 }
5019 
5020 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5021 				struct survey_info *survey)
5022 {
5023 	struct ieee80211_conf *conf = &hw->conf;
5024 
5025 	if (idx != 0)
5026 		return -ENOENT;
5027 
5028 	survey->channel = conf->chandef.chan;
5029 	survey->filled = 0;
5030 	return 0;
5031 }
5032 
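/*
 * Reserve an HLID for a new AP-mode station and hook it into the link
 * bookkeeping. Fails with -EBUSY when the AP already serves the
 * maximum number of stations or no free link is available. The link's
 * freed-packet counter is restored from the station's private data so
 * that security sequence numbers survive a recovery or resume.
 */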
5033 static int wl1271_allocate_sta(struct wl1271 *wl,
5034 			     struct wl12xx_vif *wlvif,
5035 			     struct ieee80211_sta *sta)
5036 {
5037 	struct wl1271_station *wl_sta;
5038 	int ret;
5039 
5040 
5041 	if (wl->active_sta_count >= wl->max_ap_stations) {
5042 		wl1271_warning("could not allocate HLID - too many stations");
5043 		return -EBUSY;
5044 	}
5045 
5046 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5047 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5048 	if (ret < 0) {
5049 		wl1271_warning("could not allocate HLID - too many links");
5050 		return -EBUSY;
5051 	}
5052 
5053 	/* use the previous security seq, if this is a recovery/resume */
5054 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5055 
5056 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5057 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5058 	wl->active_sta_count++;
5059 	return 0;
5060 }
5061 
5062 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5063 {
5064 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5065 		return;
5066 
5067 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
5068 	__clear_bit(hlid, &wl->ap_ps_map);
5069 	__clear_bit(hlid, &wl->ap_fw_ps_map);
5070 
5071 	/*
5072 	 * save the last used PN in the private part of ieee80211_sta,
5073 	 * in case of recovery/suspend
5074 	 */
5075 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5076 
5077 	wl12xx_free_link(wl, wlvif, &hlid);
5078 	wl->active_sta_count--;
5079 
5080 	/*
5081 	 * rearm the tx watchdog when the last STA is freed - give the FW a
5082 	 * chance to return STA-buffered packets before complaining.
5083 	 */
5084 	if (wl->active_sta_count == 0)
5085 		wl12xx_rearm_tx_watchdog_locked(wl);
5086 }
5087 
5088 static int wl12xx_sta_add(struct wl1271 *wl,
5089 			  struct wl12xx_vif *wlvif,
5090 			  struct ieee80211_sta *sta)
5091 {
5092 	struct wl1271_station *wl_sta;
5093 	int ret = 0;
5094 	u8 hlid;
5095 
5096 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5097 
5098 	ret = wl1271_allocate_sta(wl, wlvif, sta);
5099 	if (ret < 0)
5100 		return ret;
5101 
5102 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5103 	hlid = wl_sta->hlid;
5104 
5105 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5106 	if (ret < 0)
5107 		wl1271_free_sta(wl, wlvif, hlid);
5108 
5109 	return ret;
5110 }
5111 
5112 static int wl12xx_sta_remove(struct wl1271 *wl,
5113 			     struct wl12xx_vif *wlvif,
5114 			     struct ieee80211_sta *sta)
5115 {
5116 	struct wl1271_station *wl_sta;
5117 	int ret = 0, id;
5118 
5119 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5120 
5121 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5122 	id = wl_sta->hlid;
5123 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5124 		return -EINVAL;
5125 
5126 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5127 	if (ret < 0)
5128 		return ret;
5129 
5130 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5131 	return ret;
5132 }
5133 
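/*
 * Start a remain-on-channel on the vif's own role and channel, but
 * only if no role currently holds one (i.e. the roc_map is empty).
 */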
5134 static void wlcore_roc_if_possible(struct wl1271 *wl,
5135 				   struct wl12xx_vif *wlvif)
5136 {
5137 	if (find_first_bit(wl->roc_map,
5138 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5139 		return;
5140 
5141 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5142 		return;
5143 
5144 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5145 }
5146 
5147 /*
5148  * when wl_sta is NULL, we treat this call as if coming from a
5149  * pending auth reply.
5150  * wl->mutex must be taken and the FW must be awake when the call
5151  * takes place.
5152  */
5153 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5154 			      struct wl1271_station *wl_sta, bool in_conn)
5155 {
5156 	if (in_conn) {
5157 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5158 			return;
5159 
5160 		if (!wlvif->ap_pending_auth_reply &&
5161 		    !wlvif->inconn_count)
5162 			wlcore_roc_if_possible(wl, wlvif);
5163 
5164 		if (wl_sta) {
5165 			wl_sta->in_connection = true;
5166 			wlvif->inconn_count++;
5167 		} else {
5168 			wlvif->ap_pending_auth_reply = true;
5169 		}
5170 	} else {
5171 		if (wl_sta && !wl_sta->in_connection)
5172 			return;
5173 
5174 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5175 			return;
5176 
5177 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5178 			return;
5179 
5180 		if (wl_sta) {
5181 			wl_sta->in_connection = false;
5182 			wlvif->inconn_count--;
5183 		} else {
5184 			wlvif->ap_pending_auth_reply = false;
5185 		}
5186 
5187 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5188 		    test_bit(wlvif->role_id, wl->roc_map))
5189 			wl12xx_croc(wl, wlvif->role_id);
5190 	}
5191 }
5192 
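/*
 * Translate mac80211 station state transitions into FW commands:
 * add/remove/authorize peers in AP mode, send peer-state and
 * authorization updates in STA mode, save and restore the security
 * sequence counter across disassoc/assoc, and manage the ROC that
 * covers the connection setup phase.
 */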
5193 static int wl12xx_update_sta_state(struct wl1271 *wl,
5194 				   struct wl12xx_vif *wlvif,
5195 				   struct ieee80211_sta *sta,
5196 				   enum ieee80211_sta_state old_state,
5197 				   enum ieee80211_sta_state new_state)
5198 {
5199 	struct wl1271_station *wl_sta;
5200 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5201 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5202 	int ret;
5203 
5204 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5205 
5206 	/* Add station (AP mode) */
5207 	if (is_ap &&
5208 	    old_state == IEEE80211_STA_NOTEXIST &&
5209 	    new_state == IEEE80211_STA_NONE) {
5210 		ret = wl12xx_sta_add(wl, wlvif, sta);
5211 		if (ret)
5212 			return ret;
5213 
5214 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5215 	}
5216 
5217 	/* Remove station (AP mode) */
5218 	if (is_ap &&
5219 	    old_state == IEEE80211_STA_NONE &&
5220 	    new_state == IEEE80211_STA_NOTEXIST) {
5221 		/* must not fail */
5222 		wl12xx_sta_remove(wl, wlvif, sta);
5223 
5224 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5225 	}
5226 
5227 	/* Authorize station (AP mode) */
5228 	if (is_ap &&
5229 	    new_state == IEEE80211_STA_AUTHORIZED) {
5230 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5231 		if (ret < 0)
5232 			return ret;
5233 
5234 		/* reconfigure rates */
5235 		ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5236 		if (ret < 0)
5237 			return ret;
5238 
5239 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5240 						     wl_sta->hlid);
5241 		if (ret)
5242 			return ret;
5243 
5244 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5245 	}
5246 
5247 	/* Authorize station */
5248 	if (is_sta &&
5249 	    new_state == IEEE80211_STA_AUTHORIZED) {
5250 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5251 		ret = wl12xx_set_authorized(wl, wlvif);
5252 		if (ret)
5253 			return ret;
5254 	}
5255 
5256 	if (is_sta &&
5257 	    old_state == IEEE80211_STA_AUTHORIZED &&
5258 	    new_state == IEEE80211_STA_ASSOC) {
5259 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5260 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5261 	}
5262 
5263 	/* save seq number on disassoc (suspend) */
5264 	if (is_sta &&
5265 	    old_state == IEEE80211_STA_ASSOC &&
5266 	    new_state == IEEE80211_STA_AUTH) {
5267 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5268 		wlvif->total_freed_pkts = 0;
5269 	}
5270 
5271 	/* restore seq number on assoc (resume) */
5272 	if (is_sta &&
5273 	    old_state == IEEE80211_STA_AUTH &&
5274 	    new_state == IEEE80211_STA_ASSOC) {
5275 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5276 	}
5277 
5278 	/* clear ROCs on failure or authorization */
5279 	if (is_sta &&
5280 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5281 	     new_state == IEEE80211_STA_NOTEXIST)) {
5282 		if (test_bit(wlvif->role_id, wl->roc_map))
5283 			wl12xx_croc(wl, wlvif->role_id);
5284 	}
5285 
5286 	if (is_sta &&
5287 	    old_state == IEEE80211_STA_NOTEXIST &&
5288 	    new_state == IEEE80211_STA_NONE) {
5289 		if (find_first_bit(wl->roc_map,
5290 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5291 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5292 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5293 				   wlvif->band, wlvif->channel);
5294 		}
5295 	}
5296 	return 0;
5297 }
5298 
5299 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5300 			       struct ieee80211_vif *vif,
5301 			       struct ieee80211_sta *sta,
5302 			       enum ieee80211_sta_state old_state,
5303 			       enum ieee80211_sta_state new_state)
5304 {
5305 	struct wl1271 *wl = hw->priv;
5306 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5307 	int ret;
5308 
5309 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5310 		     sta->aid, old_state, new_state);
5311 
5312 	mutex_lock(&wl->mutex);
5313 
5314 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5315 		ret = -EBUSY;
5316 		goto out;
5317 	}
5318 
5319 	ret = pm_runtime_get_sync(wl->dev);
5320 	if (ret < 0) {
5321 		pm_runtime_put_noidle(wl->dev);
5322 		goto out;
5323 	}
5324 
5325 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5326 
5327 	pm_runtime_mark_last_busy(wl->dev);
5328 	pm_runtime_put_autosuspend(wl->dev);
5329 out:
5330 	mutex_unlock(&wl->mutex);
5331 	if (new_state < old_state)
5332 		return 0;
5333 	return ret;
5334 }
5335 
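/*
 * mac80211 AMPDU hook. Only RX BA sessions are managed here, bounded
 * by ba_rx_session_count_max and tracked in the per-link ba_bitmap;
 * TX BA sessions are handled by the FW on its own, so all TX actions
 * are rejected.
 */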
5336 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5337 				  struct ieee80211_vif *vif,
5338 				  struct ieee80211_ampdu_params *params)
5339 {
5340 	struct wl1271 *wl = hw->priv;
5341 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5342 	int ret;
5343 	u8 hlid, *ba_bitmap;
5344 	struct ieee80211_sta *sta = params->sta;
5345 	enum ieee80211_ampdu_mlme_action action = params->action;
5346 	u16 tid = params->tid;
5347 	u16 *ssn = &params->ssn;
5348 
5349 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5350 		     tid);
5351 
5352 	/* sanity check - the fields in FW are only 8 bits wide */
5353 	if (WARN_ON(tid > 0xFF))
5354 		return -ENOTSUPP;
5355 
5356 	mutex_lock(&wl->mutex);
5357 
5358 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5359 		ret = -EAGAIN;
5360 		goto out;
5361 	}
5362 
5363 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5364 		hlid = wlvif->sta.hlid;
5365 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5366 		struct wl1271_station *wl_sta;
5367 
5368 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5369 		hlid = wl_sta->hlid;
5370 	} else {
5371 		ret = -EINVAL;
5372 		goto out;
5373 	}
5374 
5375 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5376 
5377 	ret = pm_runtime_get_sync(wl->dev);
5378 	if (ret < 0) {
5379 		pm_runtime_put_noidle(wl->dev);
5380 		goto out;
5381 	}
5382 
5383 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5384 		     tid, action);
5385 
5386 	switch (action) {
5387 	case IEEE80211_AMPDU_RX_START:
5388 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5389 			ret = -ENOTSUPP;
5390 			break;
5391 		}
5392 
5393 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5394 			ret = -EBUSY;
5395 			wl1271_error("exceeded max RX BA sessions");
5396 			break;
5397 		}
5398 
5399 		if (*ba_bitmap & BIT(tid)) {
5400 			ret = -EINVAL;
5401 			wl1271_error("cannot enable RX BA session on active "
5402 				     "tid: %d", tid);
5403 			break;
5404 		}
5405 
5406 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5407 				hlid,
5408 				params->buf_size);
5409 
5410 		if (!ret) {
5411 			*ba_bitmap |= BIT(tid);
5412 			wl->ba_rx_session_count++;
5413 		}
5414 		break;
5415 
5416 	case IEEE80211_AMPDU_RX_STOP:
5417 		if (!(*ba_bitmap & BIT(tid))) {
5418 			/*
5419 			 * this happens on reconfig - so only output a debug
5420 			 * message for now, and don't fail the function.
5421 			 */
5422 			wl1271_debug(DEBUG_MAC80211,
5423 				     "no active RX BA session on tid: %d",
5424 				     tid);
5425 			ret = 0;
5426 			break;
5427 		}
5428 
5429 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5430 							 hlid, 0);
5431 		if (!ret) {
5432 			*ba_bitmap &= ~BIT(tid);
5433 			wl->ba_rx_session_count--;
5434 		}
5435 		break;
5436 
5437 	/*
5438 	 * The BA initiator (TX) sessions are managed by the FW independently,
5439 	 * so all TX AMPDU actions deliberately fall through to -EINVAL below.
5440 	 */
5441 	case IEEE80211_AMPDU_TX_START:
5442 	case IEEE80211_AMPDU_TX_STOP_CONT:
5443 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5444 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5445 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5446 		ret = -EINVAL;
5447 		break;
5448 
5449 	default:
5450 		wl1271_error("Incorrect ampdu action id=%x", action);
5451 		ret = -EINVAL;
5452 	}
5453 
5454 	pm_runtime_mark_last_busy(wl->dev);
5455 	pm_runtime_put_autosuspend(wl->dev);
5456 
5457 out:
5458 	mutex_unlock(&wl->mutex);
5459 
5460 	return ret;
5461 }
5462 
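/*
 * mac80211 set_bitrate_mask callback.  The legacy masks are always cached
 * per band in wlvif->bitrate_masks; they are pushed to the firmware as
 * updated rate policies only for a station interface that is not yet
 * associated.
 */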
5463 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5464 				   struct ieee80211_vif *vif,
5465 				   const struct cfg80211_bitrate_mask *mask)
5466 {
5467 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5468 	struct wl1271 *wl = hw->priv;
5469 	int i, ret = 0;
5470 
5471 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5472 		mask->control[NL80211_BAND_2GHZ].legacy,
5473 		mask->control[NL80211_BAND_5GHZ].legacy);
5474 
5475 	mutex_lock(&wl->mutex);
5476 
5477 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5478 		wlvif->bitrate_masks[i] =
5479 			wl1271_tx_enabled_rates_get(wl,
5480 						    mask->control[i].legacy,
5481 						    i);
5482 
5483 	if (unlikely(wl->state != WLCORE_STATE_ON))
5484 		goto out;
5485 
5486 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5487 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5488 
5489 		ret = pm_runtime_get_sync(wl->dev);
5490 		if (ret < 0) {
5491 			pm_runtime_put_noidle(wl->dev);
5492 			goto out;
5493 		}
5494 
5495 		wl1271_set_band_rate(wl, wlvif);
5496 		wlvif->basic_rate =
5497 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5498 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5499 
5500 		pm_runtime_mark_last_busy(wl->dev);
5501 		pm_runtime_put_autosuspend(wl->dev);
5502 	}
5503 out:
5504 	mutex_unlock(&wl->mutex);
5505 
5506 	return ret;
5507 }
5508 
5509 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5510 				     struct ieee80211_vif *vif,
5511 				     struct ieee80211_channel_switch *ch_switch)
5512 {
5513 	struct wl1271 *wl = hw->priv;
5514 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5515 	int ret;
5516 
5517 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5518 
5519 	wl1271_tx_flush(wl);
5520 
5521 	mutex_lock(&wl->mutex);
5522 
5523 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5524 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5525 			ieee80211_chswitch_done(vif, false);
5526 		goto out;
5527 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5528 		goto out;
5529 	}
5530 
5531 	ret = pm_runtime_get_sync(wl->dev);
5532 	if (ret < 0) {
5533 		pm_runtime_put_noidle(wl->dev);
5534 		goto out;
5535 	}
5536 
5537 	/* TODO: change mac80211 to pass vif as param */
5538 
5539 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5540 		unsigned long delay_usec;
5541 
5542 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5543 		if (ret)
5544 			goto out_sleep;
5545 
5546 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5547 
5548 		/* indicate failure 5 seconds after channel switch time */
5549 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5550 			ch_switch->count;
5551 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5552 					     usecs_to_jiffies(delay_usec) +
5553 					     msecs_to_jiffies(5000));
5554 	}
5555 
5556 out_sleep:
5557 	pm_runtime_mark_last_busy(wl->dev);
5558 	pm_runtime_put_autosuspend(wl->dev);
5559 
5560 out:
5561 	mutex_unlock(&wl->mutex);
5562 }
5563 
5564 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5565 					struct wl12xx_vif *wlvif,
5566 					u8 eid)
5567 {
5568 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5569 	struct sk_buff *beacon =
5570 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5571 
5572 	if (!beacon)
5573 		return NULL;
5574 
5575 	return cfg80211_find_ie(eid,
5576 				beacon->data + ieoffset,
5577 				beacon->len - ieoffset);
5578 }
5579 
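/*
 * Read the CSA count out of our own beacon: fetch the beacon template from
 * mac80211, locate the Channel Switch Announcement IE and return its count
 * field (&ie[2] skips the two-byte element ID/length header).
 */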
5580 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5581 				u8 *csa_count)
5582 {
5583 	const u8 *ie;
5584 	const struct ieee80211_channel_sw_ie *ie_csa;
5585 
5586 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5587 	if (!ie)
5588 		return -EINVAL;
5589 
5590 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5591 	*csa_count = ie_csa->count;
5592 
5593 	return 0;
5594 }
5595 
5596 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5597 					    struct ieee80211_vif *vif,
5598 					    struct cfg80211_chan_def *chandef)
5599 {
5600 	struct wl1271 *wl = hw->priv;
5601 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5602 	struct ieee80211_channel_switch ch_switch = {
5603 		.block_tx = true,
5604 		.chandef = *chandef,
5605 	};
5606 	int ret;
5607 
5608 	wl1271_debug(DEBUG_MAC80211,
5609 		     "mac80211 channel switch beacon (role %d)",
5610 		     wlvif->role_id);
5611 
5612 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5613 	if (ret < 0) {
5614 		wl1271_error("error getting beacon (for CSA counter)");
5615 		return;
5616 	}
5617 
5618 	mutex_lock(&wl->mutex);
5619 
5620 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5621 		ret = -EBUSY;
5622 		goto out;
5623 	}
5624 
5625 	ret = pm_runtime_get_sync(wl->dev);
5626 	if (ret < 0) {
5627 		pm_runtime_put_noidle(wl->dev);
5628 		goto out;
5629 	}
5630 
5631 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5632 	if (ret)
5633 		goto out_sleep;
5634 
5635 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5636 
5637 out_sleep:
5638 	pm_runtime_mark_last_busy(wl->dev);
5639 	pm_runtime_put_autosuspend(wl->dev);
5640 out:
5641 	mutex_unlock(&wl->mutex);
5642 }
5643 
5644 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5645 			    u32 queues, bool drop)
5646 {
5647 	struct wl1271 *wl = hw->priv;
5648 
5649 	wl1271_tx_flush(wl);
5650 }
5651 
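/*
 * mac80211 remain_on_channel callback.  Only a single ROC is supported at
 * a time: the request is rejected with -EBUSY if any role is already on
 * channel (roc_map) or a ROC vif is still pending.  Otherwise the device
 * role is started on the requested channel and roc_complete_work is
 * scheduled to expire the ROC after 'duration' ms.
 */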
5652 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5653 				       struct ieee80211_vif *vif,
5654 				       struct ieee80211_channel *chan,
5655 				       int duration,
5656 				       enum ieee80211_roc_type type)
5657 {
5658 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5659 	struct wl1271 *wl = hw->priv;
5660 	int channel, active_roc, ret = 0;
5661 
5662 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5663 
5664 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5665 		     channel, wlvif->role_id);
5666 
5667 	mutex_lock(&wl->mutex);
5668 
5669 	if (unlikely(wl->state != WLCORE_STATE_ON))
5670 		goto out;
5671 
5672 	/* return EBUSY if we can't ROC right now */
5673 	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5674 	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5675 		wl1271_warning("active roc on role %d", active_roc);
5676 		ret = -EBUSY;
5677 		goto out;
5678 	}
5679 
5680 	ret = pm_runtime_get_sync(wl->dev);
5681 	if (ret < 0) {
5682 		pm_runtime_put_noidle(wl->dev);
5683 		goto out;
5684 	}
5685 
5686 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5687 	if (ret < 0)
5688 		goto out_sleep;
5689 
5690 	wl->roc_vif = vif;
5691 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5692 				     msecs_to_jiffies(duration));
5693 out_sleep:
5694 	pm_runtime_mark_last_busy(wl->dev);
5695 	pm_runtime_put_autosuspend(wl->dev);
5696 out:
5697 	mutex_unlock(&wl->mutex);
5698 	return ret;
5699 }
5700 
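/*
 * ROC completion helpers: __wlcore_roc_completed() does the actual work
 * (stop the device role and clear roc_vif) and expects wl->mutex to be
 * held with the chip awake, while wlcore_roc_completed() wraps it with
 * the state check and the runtime-PM get/put.
 */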
5701 static int __wlcore_roc_completed(struct wl1271 *wl)
5702 {
5703 	struct wl12xx_vif *wlvif;
5704 	int ret;
5705 
5706 	/* already completed */
5707 	if (unlikely(!wl->roc_vif))
5708 		return 0;
5709 
5710 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5711 
5712 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5713 		return -EBUSY;
5714 
5715 	ret = wl12xx_stop_dev(wl, wlvif);
5716 	if (ret < 0)
5717 		return ret;
5718 
5719 	wl->roc_vif = NULL;
5720 
5721 	return 0;
5722 }
5723 
5724 static int wlcore_roc_completed(struct wl1271 *wl)
5725 {
5726 	int ret;
5727 
5728 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5729 
5730 	mutex_lock(&wl->mutex);
5731 
5732 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5733 		ret = -EBUSY;
5734 		goto out;
5735 	}
5736 
5737 	ret = pm_runtime_get_sync(wl->dev);
5738 	if (ret < 0) {
5739 		pm_runtime_put_noidle(wl->dev);
5740 		goto out;
5741 	}
5742 
5743 	ret = __wlcore_roc_completed(wl);
5744 
5745 	pm_runtime_mark_last_busy(wl->dev);
5746 	pm_runtime_put_autosuspend(wl->dev);
5747 out:
5748 	mutex_unlock(&wl->mutex);
5749 
5750 	return ret;
5751 }
5752 
5753 static void wlcore_roc_complete_work(struct work_struct *work)
5754 {
5755 	struct delayed_work *dwork;
5756 	struct wl1271 *wl;
5757 	int ret;
5758 
5759 	dwork = to_delayed_work(work);
5760 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5761 
5762 	ret = wlcore_roc_completed(wl);
5763 	if (!ret)
5764 		ieee80211_remain_on_channel_expired(wl->hw);
5765 }
5766 
5767 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5768 {
5769 	struct wl1271 *wl = hw->priv;
5770 
5771 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5772 
5773 	/* TODO: per-vif */
5774 	wl1271_tx_flush(wl);
5775 
5776 	/*
5777 	 * we can't just flush_work here, because it might deadlock
5778 	 * (as we might get called from the same workqueue)
5779 	 */
5780 	cancel_delayed_work_sync(&wl->roc_complete_work);
5781 	wlcore_roc_completed(wl);
5782 
5783 	return 0;
5784 }
5785 
5786 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5787 				    struct ieee80211_vif *vif,
5788 				    struct ieee80211_sta *sta,
5789 				    u32 changed)
5790 {
5791 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5792 
5793 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5794 
5795 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5796 		return;
5797 
5798 	/* this callback is atomic, so schedule a new work */
5799 	wlvif->rc_update_bw = sta->bandwidth;
5800 	memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5801 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5802 }
5803 
5804 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5805 				     struct ieee80211_vif *vif,
5806 				     struct ieee80211_sta *sta,
5807 				     struct station_info *sinfo)
5808 {
5809 	struct wl1271 *wl = hw->priv;
5810 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5811 	s8 rssi_dbm;
5812 	int ret;
5813 
5814 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5815 
5816 	mutex_lock(&wl->mutex);
5817 
5818 	if (unlikely(wl->state != WLCORE_STATE_ON))
5819 		goto out;
5820 
5821 	ret = pm_runtime_get_sync(wl->dev);
5822 	if (ret < 0) {
5823 		pm_runtime_put_noidle(wl->dev);
5824 		goto out_sleep;
5825 	}
5826 
5827 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5828 	if (ret < 0)
5829 		goto out_sleep;
5830 
5831 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5832 	sinfo->signal = rssi_dbm;
5833 
5834 out_sleep:
5835 	pm_runtime_mark_last_busy(wl->dev);
5836 	pm_runtime_put_autosuspend(wl->dev);
5837 
5838 out:
5839 	mutex_unlock(&wl->mutex);
5840 }
5841 
5842 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5843 					     struct ieee80211_sta *sta)
5844 {
5845 	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5846 	struct wl1271 *wl = hw->priv;
5847 	u8 hlid = wl_sta->hlid;
5848 
5849 	/* return in units of Kbps */
5850 	return (wl->links[hlid].fw_rate_mbps * 1000);
5851 }
5852 
5853 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5854 {
5855 	struct wl1271 *wl = hw->priv;
5856 	bool ret = false;
5857 
5858 	mutex_lock(&wl->mutex);
5859 
5860 	if (unlikely(wl->state != WLCORE_STATE_ON))
5861 		goto out;
5862 
5863 	/* packets are considered pending if in the TX queue or the FW */
5864 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5865 out:
5866 	mutex_unlock(&wl->mutex);
5867 
5868 	return ret;
5869 }
5870 
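/*
 * Legacy rate tables exposed to mac80211.  Note that ieee80211_rate
 * .bitrate values are in units of 100 kbps (so 10 means 1 Mbps), while
 * hw_value/hw_value_short carry the matching CONF_HW_BIT_RATE_* firmware
 * rate bits.
 */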
5871 /* can't be const, mac80211 writes to this */
5872 static struct ieee80211_rate wl1271_rates[] = {
5873 	{ .bitrate = 10,
5874 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5875 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5876 	{ .bitrate = 20,
5877 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5878 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5879 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5880 	{ .bitrate = 55,
5881 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5882 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5883 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5884 	{ .bitrate = 110,
5885 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5886 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5887 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5888 	{ .bitrate = 60,
5889 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5890 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5891 	{ .bitrate = 90,
5892 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5893 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5894 	{ .bitrate = 120,
5895 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5896 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5897 	{ .bitrate = 180,
5898 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5899 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5900 	{ .bitrate = 240,
5901 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5902 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5903 	{ .bitrate = 360,
5904 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5905 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5906 	{ .bitrate = 480,
5907 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5908 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5909 	{ .bitrate = 540,
5910 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5911 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5912 };
5913 
5914 /* can't be const, mac80211 writes to this */
5915 static struct ieee80211_channel wl1271_channels[] = {
5916 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5917 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5918 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5919 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5920 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5921 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5922 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5923 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5924 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5925 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5926 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5927 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5928 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5929 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5930 };
5931 
5932 /* can't be const, mac80211 writes to this */
5933 static struct ieee80211_supported_band wl1271_band_2ghz = {
5934 	.channels = wl1271_channels,
5935 	.n_channels = ARRAY_SIZE(wl1271_channels),
5936 	.bitrates = wl1271_rates,
5937 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5938 };
5939 
5940 /* 5 GHz data rates for WL1273 */
5941 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5942 	{ .bitrate = 60,
5943 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5944 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5945 	{ .bitrate = 90,
5946 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5947 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5948 	{ .bitrate = 120,
5949 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5950 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5951 	{ .bitrate = 180,
5952 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5953 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5954 	{ .bitrate = 240,
5955 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5956 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5957 	{ .bitrate = 360,
5958 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5959 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5960 	{ .bitrate = 480,
5961 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5962 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5963 	{ .bitrate = 540,
5964 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5965 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5966 };
5967 
5968 /* 5 GHz band channels for WL1273 */
5969 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5970 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5971 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5972 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5973 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5974 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5975 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5976 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5977 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5978 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5979 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5980 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5981 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5982 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5983 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5984 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5985 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5986 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5987 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5988 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5989 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5990 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5991 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5992 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5993 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5994 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5995 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5996 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5997 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5998 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5999 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
6000 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
6001 };
6002 
6003 static struct ieee80211_supported_band wl1271_band_5ghz = {
6004 	.channels = wl1271_channels_5ghz,
6005 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
6006 	.bitrates = wl1271_rates_5ghz,
6007 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
6008 };
6009 
6010 static const struct ieee80211_ops wl1271_ops = {
6011 	.start = wl1271_op_start,
6012 	.stop = wlcore_op_stop,
6013 	.add_interface = wl1271_op_add_interface,
6014 	.remove_interface = wl1271_op_remove_interface,
6015 	.change_interface = wl12xx_op_change_interface,
6016 #ifdef CONFIG_PM
6017 	.suspend = wl1271_op_suspend,
6018 	.resume = wl1271_op_resume,
6019 #endif
6020 	.config = wl1271_op_config,
6021 	.prepare_multicast = wl1271_op_prepare_multicast,
6022 	.configure_filter = wl1271_op_configure_filter,
6023 	.tx = wl1271_op_tx,
6024 	.set_key = wlcore_op_set_key,
6025 	.hw_scan = wl1271_op_hw_scan,
6026 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
6027 	.sched_scan_start = wl1271_op_sched_scan_start,
6028 	.sched_scan_stop = wl1271_op_sched_scan_stop,
6029 	.bss_info_changed = wl1271_op_bss_info_changed,
6030 	.set_frag_threshold = wl1271_op_set_frag_threshold,
6031 	.set_rts_threshold = wl1271_op_set_rts_threshold,
6032 	.conf_tx = wl1271_op_conf_tx,
6033 	.get_tsf = wl1271_op_get_tsf,
6034 	.get_survey = wl1271_op_get_survey,
6035 	.sta_state = wl12xx_op_sta_state,
6036 	.ampdu_action = wl1271_op_ampdu_action,
6037 	.tx_frames_pending = wl1271_tx_frames_pending,
6038 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
6039 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
6040 	.channel_switch = wl12xx_op_channel_switch,
6041 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
6042 	.flush = wlcore_op_flush,
6043 	.remain_on_channel = wlcore_op_remain_on_channel,
6044 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6045 	.add_chanctx = wlcore_op_add_chanctx,
6046 	.remove_chanctx = wlcore_op_remove_chanctx,
6047 	.change_chanctx = wlcore_op_change_chanctx,
6048 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6049 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6050 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6051 	.sta_rc_update = wlcore_op_sta_rc_update,
6052 	.sta_statistics = wlcore_op_sta_statistics,
6053 	.get_expected_throughput = wlcore_op_get_expected_throughput,
6054 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
6055 };
6056 
6057 
6058 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6059 {
6060 	u8 idx;
6061 
6062 	BUG_ON(band >= 2);
6063 
6064 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6065 		wl1271_error("Illegal RX rate from HW: %d", rate);
6066 		return 0;
6067 	}
6068 
6069 	idx = wl->band_rate_to_idx[band][rate];
6070 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6071 		wl1271_error("Unsupported RX rate from HW: %d", rate);
6072 		return 0;
6073 	}
6074 
6075 	return idx;
6076 }
6077 
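/*
 * Derive the set of MAC addresses advertised to mac80211 from a single
 * OUI/NIC base: each consecutive address increments the NIC part by one.
 * If the hardware provides fewer addresses than WLCORE_NUM_MAC_ADDRESSES,
 * the last slot reuses the first address with the locally-administered
 * bit set so the full set can still be advertised.
 */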
6078 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6079 {
6080 	int i;
6081 
6082 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6083 		     oui, nic);
6084 
6085 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6086 		wl1271_warning("NIC part of the MAC address wraps around!");
6087 
6088 	for (i = 0; i < wl->num_mac_addr; i++) {
6089 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
6090 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
6091 		wl->addresses[i].addr[2] = (u8) oui;
6092 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
6093 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
6094 		wl->addresses[i].addr[5] = (u8) nic;
6095 		nic++;
6096 	}
6097 
6098 	/* we may be one address short at the most */
6099 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6100 
6101 	/*
6102 	 * turn on the LAA bit in the first address and use it as
6103 	 * the last address.
6104 	 */
6105 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6106 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6107 		memcpy(&wl->addresses[idx], &wl->addresses[0],
6108 		       sizeof(wl->addresses[0]));
6109 		/* LAA bit */
6110 		wl->addresses[idx].addr[0] |= BIT(1);
6111 	}
6112 
6113 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6114 	wl->hw->wiphy->addresses = wl->addresses;
6115 }
6116 
6117 static int wl12xx_get_hw_info(struct wl1271 *wl)
6118 {
6119 	int ret;
6120 
6121 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6122 	if (ret < 0)
6123 		goto out;
6124 
6125 	wl->fuse_oui_addr = 0;
6126 	wl->fuse_nic_addr = 0;
6127 
6128 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6129 	if (ret < 0)
6130 		goto out;
6131 
6132 	if (wl->ops->get_mac)
6133 		ret = wl->ops->get_mac(wl);
6134 
6135 out:
6136 	return ret;
6137 }
6138 
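/*
 * Register the mac80211 hw.  The base MAC address is picked in this order:
 * the address stored in the NVS file, the fuse ROM when the NVS address is
 * zeroed, and finally a random NIC part under the TI OUI when the NVS
 * still carries the well-known default (de:ad:be:ef:00:00) and no fuse
 * address is available either.
 */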
6139 static int wl1271_register_hw(struct wl1271 *wl)
6140 {
6141 	int ret;
6142 	u32 oui_addr = 0, nic_addr = 0;
6143 	struct platform_device *pdev = wl->pdev;
6144 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6145 
6146 	if (wl->mac80211_registered)
6147 		return 0;
6148 
6149 	if (wl->nvs_len >= 12) {
6150 		/* NOTE: to simplify the casting, we assume that the
6151 		 * wl->nvs->nvs element comes first, i.e. at the very
6152 		 * beginning of the wl->nvs structure.
6153 		 */
6154 		u8 *nvs_ptr = (u8 *)wl->nvs;
6155 
6156 		oui_addr =
6157 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6158 		nic_addr =
6159 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6160 	}
6161 
6162 	/* if the MAC address is zeroed in the NVS, derive it from the fuse */
6163 	if (oui_addr == 0 && nic_addr == 0) {
6164 		oui_addr = wl->fuse_oui_addr;
6165 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6166 		nic_addr = wl->fuse_nic_addr + 1;
6167 	}
6168 
6169 	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6170 		wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6171 		if (!strcmp(pdev_data->family->name, "wl18xx")) {
6172 			wl1271_warning("This default nvs file can be removed from the file system");
6173 		} else {
6174 			wl1271_warning("Your device performance is not optimized.");
6175 			wl1271_warning("Please use the calibrator tool to configure your device.");
6176 		}
6177 
6178 		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6179 			wl1271_warning("Fuse mac address is zero. using random mac");
6180 			/* Use TI oui and a random nic */
6181 			oui_addr = WLCORE_TI_OUI_ADDRESS;
6182 			nic_addr = get_random_int();
6183 		} else {
6184 			oui_addr = wl->fuse_oui_addr;
6185 			/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6186 			nic_addr = wl->fuse_nic_addr + 1;
6187 		}
6188 	}
6189 
6190 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6191 
6192 	ret = ieee80211_register_hw(wl->hw);
6193 	if (ret < 0) {
6194 		wl1271_error("unable to register mac80211 hw: %d", ret);
6195 		goto out;
6196 	}
6197 
6198 	wl->mac80211_registered = true;
6199 
6200 	wl1271_debugfs_init(wl);
6201 
6202 	wl1271_notice("loaded");
6203 
6204 out:
6205 	return ret;
6206 }
6207 
6208 static void wl1271_unregister_hw(struct wl1271 *wl)
6209 {
6210 	if (wl->plt)
6211 		wl1271_plt_stop(wl);
6212 
6213 	ieee80211_unregister_hw(wl->hw);
6214 	wl->mac80211_registered = false;
6215 
6216 }
6217 
6218 static int wl1271_init_ieee80211(struct wl1271 *wl)
6219 {
6220 	int i;
6221 	static const u32 cipher_suites[] = {
6222 		WLAN_CIPHER_SUITE_WEP40,
6223 		WLAN_CIPHER_SUITE_WEP104,
6224 		WLAN_CIPHER_SUITE_TKIP,
6225 		WLAN_CIPHER_SUITE_CCMP,
6226 		WL1271_CIPHER_SUITE_GEM,
6227 	};
6228 
6229 	/* The tx descriptor buffer */
6230 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6231 
6232 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6233 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6234 
6235 	/* unit us */
6236 	/* FIXME: find a proper value */
6237 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6238 
6239 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6240 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6241 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6242 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6243 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6244 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6245 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6246 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6247 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6248 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6249 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6250 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6251 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6252 	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6253 
6254 	wl->hw->wiphy->cipher_suites = cipher_suites;
6255 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6256 
6257 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6258 					 BIT(NL80211_IFTYPE_AP) |
6259 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6260 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6261 #ifdef CONFIG_MAC80211_MESH
6262 					 BIT(NL80211_IFTYPE_MESH_POINT) |
6263 #endif
6264 					 BIT(NL80211_IFTYPE_P2P_GO);
6265 
6266 	wl->hw->wiphy->max_scan_ssids = 1;
6267 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6268 	wl->hw->wiphy->max_match_sets = 16;
6269 	/*
6270 	 * Maximum length of elements in scanning probe request templates
6271 	 * should be the maximum length possible for a template, without
6272 	 * the IEEE80211 header of the template
6273 	 */
6274 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6275 			sizeof(struct ieee80211_header);
6276 
6277 	wl->hw->wiphy->max_sched_scan_reqs = 1;
6278 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6279 		sizeof(struct ieee80211_header);
6280 
6281 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6282 
6283 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6284 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6285 				WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6286 
6287 	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6288 
6289 	/* make sure all our channels fit in the scanned_ch bitmask */
6290 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6291 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6292 		     WL1271_MAX_CHANNELS);
6293 	/*
6294 	 * clear channel flags from the previous usage
6295 	 * and restore max_power & max_antenna_gain values.
6296 	 */
6297 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6298 		wl1271_band_2ghz.channels[i].flags = 0;
6299 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6300 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6301 	}
6302 
6303 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6304 		wl1271_band_5ghz.channels[i].flags = 0;
6305 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6306 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6307 	}
6308 
6309 	/*
6310 	 * We keep local copies of the band structs because we need to
6311 	 * modify them on a per-device basis.
6312 	 */
6313 	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6314 	       sizeof(wl1271_band_2ghz));
6315 	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6316 	       &wl->ht_cap[NL80211_BAND_2GHZ],
6317 	       sizeof(*wl->ht_cap));
6318 	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6319 	       sizeof(wl1271_band_5ghz));
6320 	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6321 	       &wl->ht_cap[NL80211_BAND_5GHZ],
6322 	       sizeof(*wl->ht_cap));
6323 
6324 	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6325 		&wl->bands[NL80211_BAND_2GHZ];
6326 	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6327 		&wl->bands[NL80211_BAND_5GHZ];
6328 
6329 	/*
6330 	 * allow 4 queues per mac address we support +
6331 	 * 1 cab queue per mac + one global offchannel Tx queue
6332 	 */
6333 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
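	/*
	 * For example, with NUM_TX_QUEUES == 4 and WLCORE_NUM_MAC_ADDRESSES
	 * == 3 (the values used at the time of writing) this works out to
	 * (4 + 1) * 3 + 1 = 16 mac80211 hw queues.
	 */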
6334 
6335 	/* the last queue is the offchannel queue */
6336 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6337 	wl->hw->max_rates = 1;
6338 
6339 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6340 
6341 	/* the FW answers probe-requests in AP-mode */
6342 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6343 	wl->hw->wiphy->probe_resp_offload =
6344 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6345 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6346 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6347 
6348 	/* allowed interface combinations */
6349 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6350 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6351 
6352 	/* register vendor commands */
6353 	wlcore_set_vendor_commands(wl->hw->wiphy);
6354 
6355 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6356 
6357 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6358 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6359 
6360 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6361 
6362 	return 0;
6363 }
6364 
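/*
 * Allocate the ieee80211_hw and the wl1271 private data together with all
 * fixed buffers (freezable workqueue, aggregation buffer pages, dummy
 * packet, firmware log page, event mailbox and the buffer_32 scratch
 * buffer).  The err_* labels below unwind these allocations in reverse
 * order.
 */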
6365 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6366 				     u32 mbox_size)
6367 {
6368 	struct ieee80211_hw *hw;
6369 	struct wl1271 *wl;
6370 	int i, j, ret;
6371 	unsigned int order;
6372 
6373 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6374 	if (!hw) {
6375 		wl1271_error("could not alloc ieee80211_hw");
6376 		ret = -ENOMEM;
6377 		goto err_hw_alloc;
6378 	}
6379 
6380 	wl = hw->priv;
6381 	memset(wl, 0, sizeof(*wl));
6382 
6383 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6384 	if (!wl->priv) {
6385 		wl1271_error("could not alloc wl priv");
6386 		ret = -ENOMEM;
6387 		goto err_priv_alloc;
6388 	}
6389 
6390 	INIT_LIST_HEAD(&wl->wlvif_list);
6391 
6392 	wl->hw = hw;
6393 
6394 	/*
6395 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6396 	 * We don't allocate any additional resources here, so that's fine.
6397 	 */
6398 	for (i = 0; i < NUM_TX_QUEUES; i++)
6399 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6400 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6401 
6402 	skb_queue_head_init(&wl->deferred_rx_queue);
6403 	skb_queue_head_init(&wl->deferred_tx_queue);
6404 
6405 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6406 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6407 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6408 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6409 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6410 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6411 
6412 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6413 	if (!wl->freezable_wq) {
6414 		ret = -ENOMEM;
6415 		goto err_hw;
6416 	}
6417 
6418 	wl->channel = 0;
6419 	wl->rx_counter = 0;
6420 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6421 	wl->band = NL80211_BAND_2GHZ;
6422 	wl->channel_type = NL80211_CHAN_NO_HT;
6423 	wl->flags = 0;
6424 	wl->sg_enabled = true;
6425 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6426 	wl->recovery_count = 0;
6427 	wl->hw_pg_ver = -1;
6428 	wl->ap_ps_map = 0;
6429 	wl->ap_fw_ps_map = 0;
6430 	wl->quirks = 0;
6431 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6432 	wl->active_sta_count = 0;
6433 	wl->active_link_count = 0;
6434 	wl->fwlog_size = 0;
6435 
6436 	/* The system link is always allocated */
6437 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6438 
6439 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6440 	for (i = 0; i < wl->num_tx_desc; i++)
6441 		wl->tx_frames[i] = NULL;
6442 
6443 	spin_lock_init(&wl->wl_lock);
6444 
6445 	wl->state = WLCORE_STATE_OFF;
6446 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6447 	mutex_init(&wl->mutex);
6448 	mutex_init(&wl->flush_mutex);
6449 	init_completion(&wl->nvs_loading_complete);
6450 
6451 	order = get_order(aggr_buf_size);
6452 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6453 	if (!wl->aggr_buf) {
6454 		ret = -ENOMEM;
6455 		goto err_wq;
6456 	}
6457 	wl->aggr_buf_size = aggr_buf_size;
6458 
6459 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6460 	if (!wl->dummy_packet) {
6461 		ret = -ENOMEM;
6462 		goto err_aggr;
6463 	}
6464 
6465 	/* Allocate one page for the FW log */
6466 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6467 	if (!wl->fwlog) {
6468 		ret = -ENOMEM;
6469 		goto err_dummy_packet;
6470 	}
6471 
6472 	wl->mbox_size = mbox_size;
6473 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6474 	if (!wl->mbox) {
6475 		ret = -ENOMEM;
6476 		goto err_fwlog;
6477 	}
6478 
6479 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6480 	if (!wl->buffer_32) {
6481 		ret = -ENOMEM;
6482 		goto err_mbox;
6483 	}
6484 
6485 	return hw;
6486 
6487 err_mbox:
6488 	kfree(wl->mbox);
6489 
6490 err_fwlog:
6491 	free_page((unsigned long)wl->fwlog);
6492 
6493 err_dummy_packet:
6494 	dev_kfree_skb(wl->dummy_packet);
6495 
6496 err_aggr:
6497 	free_pages((unsigned long)wl->aggr_buf, order);
6498 
6499 err_wq:
6500 	destroy_workqueue(wl->freezable_wq);
6501 
6502 err_hw:
6503 	wl1271_debugfs_exit(wl);
6504 	kfree(wl->priv);
6505 
6506 err_priv_alloc:
6507 	ieee80211_free_hw(hw);
6508 
6509 err_hw_alloc:
6510 
6511 	return ERR_PTR(ret);
6512 }
6513 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6514 
6515 int wlcore_free_hw(struct wl1271 *wl)
6516 {
6517 	/* Unblock any fwlog readers */
6518 	mutex_lock(&wl->mutex);
6519 	wl->fwlog_size = -1;
6520 	mutex_unlock(&wl->mutex);
6521 
6522 	wlcore_sysfs_free(wl);
6523 
6524 	kfree(wl->buffer_32);
6525 	kfree(wl->mbox);
6526 	free_page((unsigned long)wl->fwlog);
6527 	dev_kfree_skb(wl->dummy_packet);
6528 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6529 
6530 	wl1271_debugfs_exit(wl);
6531 
6532 	vfree(wl->fw);
6533 	wl->fw = NULL;
6534 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6535 	kfree(wl->nvs);
6536 	wl->nvs = NULL;
6537 
6538 	kfree(wl->raw_fw_status);
6539 	kfree(wl->fw_status);
6540 	kfree(wl->tx_res_if);
6541 	destroy_workqueue(wl->freezable_wq);
6542 
6543 	kfree(wl->priv);
6544 	ieee80211_free_hw(wl->hw);
6545 
6546 	return 0;
6547 }
6548 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6549 
6550 #ifdef CONFIG_PM
6551 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6552 	.flags = WIPHY_WOWLAN_ANY,
6553 	.n_patterns = WL1271_MAX_RX_FILTERS,
6554 	.pattern_min_len = 1,
6555 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6556 };
6557 #endif
6558 
6559 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6560 {
6561 	return IRQ_WAKE_THREAD;
6562 }
6563 
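/*
 * Continuation of wlcore_probe(), run once the (optional) NVS request
 * completes.  It sets up the low-level ops, powers the chip on to read the
 * chip ID and MAC, requests the threaded IRQ, and registers the mac80211
 * hw, sysfs entries and vendor commands.  On failure the partial setup is
 * unwound, and nvs_loading_complete is always completed so that
 * wlcore_remove() never blocks on it.
 */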
6564 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6565 {
6566 	struct wl1271 *wl = context;
6567 	struct platform_device *pdev = wl->pdev;
6568 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6569 	struct resource *res;
6570 
6571 	int ret;
6572 	irq_handler_t hardirq_fn = NULL;
6573 
6574 	if (fw) {
6575 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6576 		if (!wl->nvs) {
6577 			wl1271_error("Could not allocate nvs data");
6578 			goto out;
6579 		}
6580 		wl->nvs_len = fw->size;
6581 	} else if (pdev_data->family->nvs_name) {
6582 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6583 			     pdev_data->family->nvs_name);
6584 		wl->nvs = NULL;
6585 		wl->nvs_len = 0;
6586 	} else {
6587 		wl->nvs = NULL;
6588 		wl->nvs_len = 0;
6589 	}
6590 
6591 	ret = wl->ops->setup(wl);
6592 	if (ret < 0)
6593 		goto out_free_nvs;
6594 
6595 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6596 
6597 	/* adjust some runtime configuration parameters */
6598 	wlcore_adjust_conf(wl);
6599 
6600 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6601 	if (!res) {
6602 		wl1271_error("Could not get IRQ resource");
6603 		goto out_free_nvs;
6604 	}
6605 
6606 	wl->irq = res->start;
6607 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6608 	wl->if_ops = pdev_data->if_ops;
6609 
6610 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6611 		hardirq_fn = wlcore_hardirq;
6612 	else
6613 		wl->irq_flags |= IRQF_ONESHOT;
6614 
6615 	ret = wl12xx_set_power_on(wl);
6616 	if (ret < 0)
6617 		goto out_free_nvs;
6618 
6619 	ret = wl12xx_get_hw_info(wl);
6620 	if (ret < 0) {
6621 		wl1271_error("couldn't get hw info");
6622 		wl1271_power_off(wl);
6623 		goto out_free_nvs;
6624 	}
6625 
6626 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6627 				   wl->irq_flags, pdev->name, wl);
6628 	if (ret < 0) {
6629 		wl1271_error("interrupt configuration failed");
6630 		wl1271_power_off(wl);
6631 		goto out_free_nvs;
6632 	}
6633 
6634 #ifdef CONFIG_PM
6635 	ret = enable_irq_wake(wl->irq);
6636 	if (!ret) {
6637 		wl->irq_wake_enabled = true;
6638 		device_init_wakeup(wl->dev, 1);
6639 		if (pdev_data->pwr_in_suspend)
6640 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6641 	}
6642 #endif
6643 	disable_irq(wl->irq);
6644 	wl1271_power_off(wl);
6645 
6646 	ret = wl->ops->identify_chip(wl);
6647 	if (ret < 0)
6648 		goto out_irq;
6649 
6650 	ret = wl1271_init_ieee80211(wl);
6651 	if (ret)
6652 		goto out_irq;
6653 
6654 	ret = wl1271_register_hw(wl);
6655 	if (ret)
6656 		goto out_irq;
6657 
6658 	ret = wlcore_sysfs_init(wl);
6659 	if (ret)
6660 		goto out_unreg;
6661 
6662 	wl->initialized = true;
6663 	goto out;
6664 
6665 out_unreg:
6666 	wl1271_unregister_hw(wl);
6667 
6668 out_irq:
6669 	free_irq(wl->irq, wl);
6670 
6671 out_free_nvs:
6672 	kfree(wl->nvs);
6673 
6674 out:
6675 	release_firmware(fw);
6676 	complete_all(&wl->nvs_loading_complete);
6677 }
6678 
6679 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6680 {
6681 	struct wl1271 *wl = dev_get_drvdata(dev);
6682 	struct wl12xx_vif *wlvif;
6683 	int error;
6684 
6685 	/* We do not enter elp sleep in PLT mode */
6686 	if (wl->plt)
6687 		return 0;
6688 
6689 	/* Nothing to do if no ELP mode requested */
6690 	if (wl->sleep_auth != WL1271_PSM_ELP)
6691 		return 0;
6692 
6693 	wl12xx_for_each_wlvif(wl, wlvif) {
6694 		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6695 		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6696 			return -EBUSY;
6697 	}
6698 
6699 	wl1271_debug(DEBUG_PSM, "chip to elp");
6700 	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6701 	if (error < 0) {
6702 		wl12xx_queue_recovery_work(wl);
6703 
6704 		return error;
6705 	}
6706 
6707 	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6708 
6709 	return 0;
6710 }
6711 
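/*
 * Runtime resume: wake the chip from ELP.  The ELPCTRL_WAKE_UP write
 * triggers a wakeup interrupt from the firmware, which completes
 * wl->elp_compl; the wait is skipped if an interrupt is already being
 * handled.  A wakeup timeout queues a recovery but still returns 0 so
 * that runtime PM is not left in an error state.
 */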
6712 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6713 {
6714 	struct wl1271 *wl = dev_get_drvdata(dev);
6715 	DECLARE_COMPLETION_ONSTACK(compl);
6716 	unsigned long flags;
6717 	int ret;
6718 	unsigned long start_time = jiffies;
6719 	bool pending = false;
6720 	bool recovery = false;
6721 
6722 	/* Nothing to do if no ELP mode requested */
6723 	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6724 		return 0;
6725 
6726 	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6727 
6728 	spin_lock_irqsave(&wl->wl_lock, flags);
6729 	if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
6730 		pending = true;
6731 	else
6732 		wl->elp_compl = &compl;
6733 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6734 
6735 	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6736 	if (ret < 0) {
6737 		recovery = true;
6738 		goto err;
6739 	}
6740 
6741 	if (!pending) {
6742 		ret = wait_for_completion_timeout(&compl,
6743 			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6744 		if (ret == 0) {
6745 			wl1271_warning("ELP wakeup timeout!");
6746 
6747 			/* Return no error for runtime PM for recovery */
6748 			ret = 0;
6749 			recovery = true;
6750 			goto err;
6751 		}
6752 	}
6753 
6754 	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6755 
6756 	wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6757 		     jiffies_to_msecs(jiffies - start_time));
6758 
6759 	return 0;
6760 
6761 err:
6762 	spin_lock_irqsave(&wl->wl_lock, flags);
6763 	wl->elp_compl = NULL;
6764 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6765 
6766 	if (recovery) {
6767 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6768 		wl12xx_queue_recovery_work(wl);
6769 	}
6770 
6771 	return ret;
6772 }
6773 
6774 static const struct dev_pm_ops wlcore_pm_ops = {
6775 	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6776 			   wlcore_runtime_resume,
6777 			   NULL)
6778 };
6779 
6780 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6781 {
6782 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6783 	const char *nvs_name;
6784 	int ret = 0;
6785 
6786 	if (!wl->ops || !wl->ptable || !pdev_data)
6787 		return -EINVAL;
6788 
6789 	wl->dev = &pdev->dev;
6790 	wl->pdev = pdev;
6791 	platform_set_drvdata(pdev, wl);
6792 
6793 	if (pdev_data->family && pdev_data->family->nvs_name) {
6794 		nvs_name = pdev_data->family->nvs_name;
6795 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6796 					      nvs_name, &pdev->dev, GFP_KERNEL,
6797 					      wl, wlcore_nvs_cb);
6798 		if (ret < 0) {
6799 			wl1271_error("request_firmware_nowait failed for %s: %d",
6800 				     nvs_name, ret);
6801 			complete_all(&wl->nvs_loading_complete);
6802 		}
6803 	} else {
6804 		wlcore_nvs_cb(NULL, wl);
6805 	}
6806 
6807 	wl->dev->driver->pm = &wlcore_pm_ops;
6808 	pm_runtime_set_autosuspend_delay(wl->dev, 50);
6809 	pm_runtime_use_autosuspend(wl->dev);
6810 	pm_runtime_enable(wl->dev);
6811 
6812 	return ret;
6813 }
6814 EXPORT_SYMBOL_GPL(wlcore_probe);
6815 
6816 int wlcore_remove(struct platform_device *pdev)
6817 {
6818 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6819 	struct wl1271 *wl = platform_get_drvdata(pdev);
6820 	int error;
6821 
6822 	error = pm_runtime_get_sync(wl->dev);
6823 	if (error < 0)
6824 		dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6825 
6826 	wl->dev->driver->pm = NULL;
6827 
6828 	if (pdev_data->family && pdev_data->family->nvs_name)
6829 		wait_for_completion(&wl->nvs_loading_complete);
6830 	if (!wl->initialized)
6831 		return 0;
6832 
6833 	if (wl->irq_wake_enabled) {
6834 		device_init_wakeup(wl->dev, 0);
6835 		disable_irq_wake(wl->irq);
6836 	}
6837 	wl1271_unregister_hw(wl);
6838 
6839 	pm_runtime_put_sync(wl->dev);
6840 	pm_runtime_dont_use_autosuspend(wl->dev);
6841 	pm_runtime_disable(wl->dev);
6842 
6843 	free_irq(wl->irq, wl);
6844 	wlcore_free_hw(wl);
6845 
6846 	return 0;
6847 }
6848 EXPORT_SYMBOL_GPL(wlcore_remove);
6849 
6850 u32 wl12xx_debug_level = DEBUG_NONE;
6851 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6852 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6853 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6854 
6855 module_param_named(fwlog, fwlog_param, charp, 0);
6856 MODULE_PARM_DESC(fwlog,
6857 		 "FW logger options: continuous, dbgpins or disable");
6858 
6859 module_param(fwlog_mem_blocks, int, 0600);
6860 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6861 
6862 module_param(bug_on_recovery, int, 0600);
6863 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6864 
6865 module_param(no_recovery, int, 0600);
6866 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
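/*
 * Example usage (hypothetical values): these parameters can be given at
 * load time, e.g. "modprobe wlcore debug_level=0x3 fwlog=continuous
 * no_recovery=1"; the ones registered with mode 0600 can also be changed
 * at runtime via /sys/module/wlcore/parameters/.
 */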
6867 
6868 MODULE_LICENSE("GPL");
6869 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6870 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6871