1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file is part of wlcore
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  * Copyright (C) 2011-2013 Texas Instruments Inc.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/firmware.h>
11 #include <linux/etherdevice.h>
12 #include <linux/vmalloc.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_wakeirq.h>
17 
18 #include "wlcore.h"
19 #include "debug.h"
20 #include "wl12xx_80211.h"
21 #include "io.h"
22 #include "tx.h"
23 #include "ps.h"
24 #include "init.h"
25 #include "debugfs.h"
26 #include "testmode.h"
27 #include "vendor_cmd.h"
28 #include "scan.h"
29 #include "hw_ops.h"
30 #include "sysfs.h"
31 
32 #define WL1271_BOOT_RETRIES 3
33 #define WL1271_WAKEUP_TIMEOUT 500
34 
35 static char *fwlog_param;
36 static int fwlog_mem_blocks = -1;
37 static int bug_on_recovery = -1;
38 static int no_recovery     = -1;
39 
40 static void __wl1271_op_remove_interface(struct wl1271 *wl,
41 					 struct ieee80211_vif *vif,
42 					 bool reset_tx_queues);
43 static void wlcore_op_stop_locked(struct wl1271 *wl);
44 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
45 
46 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
47 {
48 	int ret;
49 
50 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
51 		return -EINVAL;
52 
53 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
54 		return 0;
55 
56 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
57 		return 0;
58 
59 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
60 	if (ret < 0)
61 		return ret;
62 
63 	wl1271_info("Association completed.");
64 	return 0;
65 }
66 
67 static void wl1271_reg_notify(struct wiphy *wiphy,
68 			      struct regulatory_request *request)
69 {
70 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
71 	struct wl1271 *wl = hw->priv;
72 
73 	/* copy the current dfs region */
74 	if (request)
75 		wl->dfs_region = request->dfs_region;
76 
77 	wlcore_regdomain_config(wl);
78 }
79 
80 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
81 				   bool enable)
82 {
83 	int ret = 0;
84 
85 	/* we should hold wl->mutex */
86 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
87 	if (ret < 0)
88 		goto out;
89 
90 	if (enable)
91 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
92 	else
93 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
94 out:
95 	return ret;
96 }
97 
98 /*
99  * this function is called when the rx_streaming interval
100  * has been changed or rx_streaming should be disabled
101  */
102 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
103 {
104 	int ret = 0;
105 	int period = wl->conf.rx_streaming.interval;
106 
107 	/* don't reconfigure if rx_streaming is disabled */
108 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
109 		goto out;
110 
111 	/* reconfigure/disable according to new streaming_period */
112 	if (period &&
113 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
114 	    (wl->conf.rx_streaming.always ||
115 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
116 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
117 	else {
118 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
119 		/* don't cancel_work_sync since we might deadlock */
120 		del_timer_sync(&wlvif->rx_streaming_timer);
121 	}
122 out:
123 	return ret;
124 }
125 
126 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
127 {
128 	int ret;
129 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
130 						rx_streaming_enable_work);
131 	struct wl1271 *wl = wlvif->wl;
132 
133 	mutex_lock(&wl->mutex);
134 
135 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
136 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
137 	    (!wl->conf.rx_streaming.always &&
138 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
139 		goto out;
140 
141 	if (!wl->conf.rx_streaming.interval)
142 		goto out;
143 
144 	ret = pm_runtime_resume_and_get(wl->dev);
145 	if (ret < 0)
146 		goto out;
147 
148 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
149 	if (ret < 0)
150 		goto out_sleep;
151 
152 	/* stop it after some time of inactivity */
153 	mod_timer(&wlvif->rx_streaming_timer,
154 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
155 
156 out_sleep:
157 	pm_runtime_mark_last_busy(wl->dev);
158 	pm_runtime_put_autosuspend(wl->dev);
159 out:
160 	mutex_unlock(&wl->mutex);
161 }
162 
163 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
164 {
165 	int ret;
166 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
167 						rx_streaming_disable_work);
168 	struct wl1271 *wl = wlvif->wl;
169 
170 	mutex_lock(&wl->mutex);
171 
172 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
173 		goto out;
174 
175 	ret = pm_runtime_resume_and_get(wl->dev);
176 	if (ret < 0)
177 		goto out;
178 
179 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
180 	if (ret)
181 		goto out_sleep;
182 
183 out_sleep:
184 	pm_runtime_mark_last_busy(wl->dev);
185 	pm_runtime_put_autosuspend(wl->dev);
186 out:
187 	mutex_unlock(&wl->mutex);
188 }
189 
190 static void wl1271_rx_streaming_timer(struct timer_list *t)
191 {
192 	struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
193 	struct wl1271 *wl = wlvif->wl;
194 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
195 }
196 
197 /* wl->mutex must be taken */
198 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
199 {
200 	/* if the watchdog is not armed, don't do anything */
201 	if (wl->tx_allocated_blocks == 0)
202 		return;
203 
204 	cancel_delayed_work(&wl->tx_watchdog_work);
205 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
206 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
207 }
208 
209 static void wlcore_rc_update_work(struct work_struct *work)
210 {
211 	int ret;
212 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
213 						rc_update_work);
214 	struct wl1271 *wl = wlvif->wl;
215 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
216 
217 	mutex_lock(&wl->mutex);
218 
219 	if (unlikely(wl->state != WLCORE_STATE_ON))
220 		goto out;
221 
222 	ret = pm_runtime_resume_and_get(wl->dev);
223 	if (ret < 0)
224 		goto out;
225 
226 	if (ieee80211_vif_is_mesh(vif)) {
227 		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
228 						     true, wlvif->sta.hlid);
229 		if (ret < 0)
230 			goto out_sleep;
231 	} else {
232 		wlcore_hw_sta_rc_update(wl, wlvif);
233 	}
234 
235 out_sleep:
236 	pm_runtime_mark_last_busy(wl->dev);
237 	pm_runtime_put_autosuspend(wl->dev);
238 out:
239 	mutex_unlock(&wl->mutex);
240 }
241 
242 static void wl12xx_tx_watchdog_work(struct work_struct *work)
243 {
244 	struct delayed_work *dwork;
245 	struct wl1271 *wl;
246 
247 	dwork = to_delayed_work(work);
248 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
249 
250 	mutex_lock(&wl->mutex);
251 
252 	if (unlikely(wl->state != WLCORE_STATE_ON))
253 		goto out;
254 
255 	/* Tx went out in the meantime - everything is ok */
256 	if (unlikely(wl->tx_allocated_blocks == 0))
257 		goto out;
258 
259 	/*
260 	 * if a ROC is in progress, we might not have any Tx for a long
261 	 * time (e.g. pending Tx on the non-ROC channels)
262 	 */
263 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
264 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
265 			     wl->conf.tx.tx_watchdog_timeout);
266 		wl12xx_rearm_tx_watchdog_locked(wl);
267 		goto out;
268 	}
269 
270 	/*
271 	 * if a scan is in progress, we might not have any Tx for a long
272 	 * time
273 	 */
274 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
275 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
276 			     wl->conf.tx.tx_watchdog_timeout);
277 		wl12xx_rearm_tx_watchdog_locked(wl);
278 		goto out;
279 	}
280 
281 	/*
282 	* AP might cache a frame for a long time for a sleeping station,
283 	* so rearm the timer if there's an AP interface with stations. If
285 	* Tx is genuinely stuck we will hopefully discover it when all
285 	* stations are removed due to inactivity.
286 	*/
287 	if (wl->active_sta_count) {
288 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
289 			     " %d stations",
290 			      wl->conf.tx.tx_watchdog_timeout,
291 			      wl->active_sta_count);
292 		wl12xx_rearm_tx_watchdog_locked(wl);
293 		goto out;
294 	}
295 
296 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
297 		     wl->conf.tx.tx_watchdog_timeout);
298 	wl12xx_queue_recovery_work(wl);
299 
300 out:
301 	mutex_unlock(&wl->mutex);
302 }
303 
304 static void wlcore_adjust_conf(struct wl1271 *wl)
305 {
306 
307 	if (fwlog_param) {
308 		if (!strcmp(fwlog_param, "continuous")) {
309 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
310 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
311 		} else if (!strcmp(fwlog_param, "dbgpins")) {
312 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
314 		} else if (!strcmp(fwlog_param, "disable")) {
315 			wl->conf.fwlog.mem_blocks = 0;
316 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
317 		} else {
318 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
319 		}
320 	}
321 
322 	if (bug_on_recovery != -1)
323 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
324 
325 	if (no_recovery != -1)
326 		wl->conf.recovery.no_recovery = (u8) no_recovery;
327 }
328 
329 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
330 					struct wl12xx_vif *wlvif,
331 					u8 hlid, u8 tx_pkts)
332 {
333 	bool fw_ps;
334 
335 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
336 
337 	/*
338 	 * Wake up from high level PS if the STA is asleep with too few
339 	 * packets in FW or if the STA is awake.
340 	 */
341 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
342 		wl12xx_ps_link_end(wl, wlvif, hlid);
343 
344 	/*
345 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
346 	 * Make an exception if this is the only connected link. In this
347 	 * case FW-memory congestion is less of a problem.
348 	 * Note that a single connected STA means 2*ap_count + 1 active links,
349 	 * since we must account for the global and broadcast AP links
350 	 * for each AP. The "fw_ps" check assures us the other link is a STA
351 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
352 	 */
353 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
354 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
355 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
356 }
357 
358 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
359 					   struct wl12xx_vif *wlvif,
360 					   struct wl_fw_status *status)
361 {
362 	unsigned long cur_fw_ps_map;
363 	u8 hlid;
364 
365 	cur_fw_ps_map = status->link_ps_bitmap;
366 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
367 		wl1271_debug(DEBUG_PSM,
368 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
369 			     wl->ap_fw_ps_map, cur_fw_ps_map,
370 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
371 
372 		wl->ap_fw_ps_map = cur_fw_ps_map;
373 	}
374 
375 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
376 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
377 					    wl->links[hlid].allocated_pkts);
378 }
379 
380 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
381 {
382 	struct wl12xx_vif *wlvif;
383 	u32 old_tx_blk_count = wl->tx_blocks_available;
384 	int avail, freed_blocks;
385 	int i;
386 	int ret;
387 	struct wl1271_link *lnk;
388 
389 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
390 				   wl->raw_fw_status,
391 				   wl->fw_status_len, false);
392 	if (ret < 0)
393 		return ret;
394 
395 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
396 
397 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
398 		     "drv_rx_counter = %d, tx_results_counter = %d)",
399 		     status->intr,
400 		     status->fw_rx_counter,
401 		     status->drv_rx_counter,
402 		     status->tx_results_counter);
403 
404 	for (i = 0; i < NUM_TX_QUEUES; i++) {
405 		/* prevent wrap-around in freed-packets counter */
406 		wl->tx_allocated_pkts[i] -=
407 				(status->counters.tx_released_pkts[i] -
408 				wl->tx_pkts_freed[i]) & 0xff;
409 
410 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
411 	}
412 
413 
414 	for_each_set_bit(i, wl->links_map, wl->num_links) {
415 		u8 diff;
416 		lnk = &wl->links[i];
417 
418 		/* prevent wrap-around in freed-packets counter */
419 		diff = (status->counters.tx_lnk_free_pkts[i] -
420 		       lnk->prev_freed_pkts) & 0xff;
421 
422 		if (diff == 0)
423 			continue;
424 
425 		lnk->allocated_pkts -= diff;
426 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
427 
428 		/* accumulate the prev_freed_pkts counter */
429 		lnk->total_freed_pkts += diff;
430 	}
431 
432 	/* prevent wrap-around in total blocks counter */
433 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
434 		freed_blocks = status->total_released_blks -
435 			       wl->tx_blocks_freed;
436 	else
437 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
438 			       status->total_released_blks;
439 
440 	wl->tx_blocks_freed = status->total_released_blks;
441 
442 	wl->tx_allocated_blocks -= freed_blocks;
443 
444 	/*
445 	 * If the FW freed some blocks:
446 	 * If we still have allocated blocks - re-arm the timer, Tx is
447 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
448 	 */
449 	if (freed_blocks) {
450 		if (wl->tx_allocated_blocks)
451 			wl12xx_rearm_tx_watchdog_locked(wl);
452 		else
453 			cancel_delayed_work(&wl->tx_watchdog_work);
454 	}
455 
456 	avail = status->tx_total - wl->tx_allocated_blocks;
457 
458 	/*
459 	 * The FW might change the total number of TX memblocks before
460 	 * we get a notification about blocks being released. Thus, the
461 	 * available blocks calculation might yield a temporary result
462 	 * which is lower than the actual available blocks. Keeping in
463 	 * mind that only blocks that were allocated can be moved from
464 	 * TX to RX, tx_blocks_available should never decrease here.
465 	 */
466 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
467 				      avail);
468 
469 	/* if more blocks are available now, tx work can be scheduled */
470 	if (wl->tx_blocks_available > old_tx_blk_count)
471 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
472 
473 	/* for AP update num of allocated TX blocks per link and ps status */
474 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
475 		wl12xx_irq_update_links_status(wl, wlvif, status);
476 	}
477 
478 	/* update the host-chipset time offset */
479 	wl->time_offset = (ktime_get_boottime_ns() >> 10) -
480 		(s64)(status->fw_localtime);
481 
482 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
483 
484 	return 0;
485 }
486 
487 static void wl1271_flush_deferred_work(struct wl1271 *wl)
488 {
489 	struct sk_buff *skb;
490 
491 	/* Pass all received frames to the network stack */
492 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
493 		ieee80211_rx_ni(wl->hw, skb);
494 
495 	/* Return sent skbs to the network stack */
496 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
497 		ieee80211_tx_status_ni(wl->hw, skb);
498 }
499 
500 static void wl1271_netstack_work(struct work_struct *work)
501 {
502 	struct wl1271 *wl =
503 		container_of(work, struct wl1271, netstack_work);
504 
505 	do {
506 		wl1271_flush_deferred_work(wl);
507 	} while (skb_queue_len(&wl->deferred_rx_queue));
508 }
509 
510 #define WL1271_IRQ_MAX_LOOPS 256
511 
512 static int wlcore_irq_locked(struct wl1271 *wl)
513 {
514 	int ret = 0;
515 	u32 intr;
516 	int loopcount = WL1271_IRQ_MAX_LOOPS;
517 	bool run_tx_queue = true;
518 	bool done = false;
519 	unsigned int defer_count;
520 	unsigned long flags;
521 
522 	/*
523 	 * If an edge-triggered interrupt must be used, we cannot iterate
524 	 * more than once without introducing race conditions with the hardirq.
525 	 */
526 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
527 		loopcount = 1;
528 
529 	wl1271_debug(DEBUG_IRQ, "IRQ work");
530 
531 	if (unlikely(wl->state != WLCORE_STATE_ON))
532 		goto out;
533 
534 	ret = pm_runtime_resume_and_get(wl->dev);
535 	if (ret < 0)
536 		goto out;
537 
538 	while (!done && loopcount--) {
539 		smp_mb__after_atomic();
540 
541 		ret = wlcore_fw_status(wl, wl->fw_status);
542 		if (ret < 0)
543 			goto err_ret;
544 
545 		wlcore_hw_tx_immediate_compl(wl);
546 
547 		intr = wl->fw_status->intr;
548 		intr &= WLCORE_ALL_INTR_MASK;
549 		if (!intr) {
550 			done = true;
551 			continue;
552 		}
553 
554 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
555 			wl1271_error("HW watchdog interrupt received! starting recovery.");
556 			wl->watchdog_recovery = true;
557 			ret = -EIO;
558 
559 			/* restarting the chip. ignore any other interrupt. */
560 			goto err_ret;
561 		}
562 
563 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
564 			wl1271_error("SW watchdog interrupt received! "
565 				     "starting recovery.");
566 			wl->watchdog_recovery = true;
567 			ret = -EIO;
568 
569 			/* restarting the chip. ignore any other interrupt. */
570 			goto err_ret;
571 		}
572 
573 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
574 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
575 
576 			ret = wlcore_rx(wl, wl->fw_status);
577 			if (ret < 0)
578 				goto err_ret;
579 
580 			/* Check if any tx blocks were freed */
581 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
582 				if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
583 					if (!wl1271_tx_total_queue_count(wl))
584 						run_tx_queue = false;
585 					spin_unlock_irqrestore(&wl->wl_lock, flags);
586 				}
587 
588 				/*
589 				 * In order to avoid starvation of the TX path,
590 				 * call the work function directly.
591 				 */
592 				if (run_tx_queue) {
593 					ret = wlcore_tx_work_locked(wl);
594 					if (ret < 0)
595 						goto err_ret;
596 				}
597 			}
598 
599 			/* check for tx results */
600 			ret = wlcore_hw_tx_delayed_compl(wl);
601 			if (ret < 0)
602 				goto err_ret;
603 
604 			/* Make sure the deferred queues don't get too long */
605 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
606 				      skb_queue_len(&wl->deferred_rx_queue);
607 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
608 				wl1271_flush_deferred_work(wl);
609 		}
610 
611 		if (intr & WL1271_ACX_INTR_EVENT_A) {
612 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
613 			ret = wl1271_event_handle(wl, 0);
614 			if (ret < 0)
615 				goto err_ret;
616 		}
617 
618 		if (intr & WL1271_ACX_INTR_EVENT_B) {
619 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
620 			ret = wl1271_event_handle(wl, 1);
621 			if (ret < 0)
622 				goto err_ret;
623 		}
624 
625 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
626 			wl1271_debug(DEBUG_IRQ,
627 				     "WL1271_ACX_INTR_INIT_COMPLETE");
628 
629 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
630 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
631 	}
632 
633 err_ret:
634 	pm_runtime_mark_last_busy(wl->dev);
635 	pm_runtime_put_autosuspend(wl->dev);
636 
637 out:
638 	return ret;
639 }
640 
641 static irqreturn_t wlcore_irq(int irq, void *cookie)
642 {
643 	int ret;
644 	unsigned long flags;
645 	struct wl1271 *wl = cookie;
646 	bool queue_tx_work = true;
647 
648 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
649 
650 	/* complete the ELP completion */
651 	if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) {
652 		spin_lock_irqsave(&wl->wl_lock, flags);
653 		if (wl->elp_compl)
654 			complete(wl->elp_compl);
655 		spin_unlock_irqrestore(&wl->wl_lock, flags);
656 	}
657 
658 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
659 		/* don't enqueue a work right now. mark it as pending */
660 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
661 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
662 		spin_lock_irqsave(&wl->wl_lock, flags);
663 		disable_irq_nosync(wl->irq);
664 		pm_wakeup_event(wl->dev, 0);
665 		spin_unlock_irqrestore(&wl->wl_lock, flags);
666 		goto out_handled;
667 	}
668 
669 	/* TX might be handled here, avoid redundant work */
670 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
671 	cancel_work_sync(&wl->tx_work);
672 
673 	mutex_lock(&wl->mutex);
674 
675 	ret = wlcore_irq_locked(wl);
676 	if (ret)
677 		wl12xx_queue_recovery_work(wl);
678 
679 	/* In case TX was not handled in wlcore_irq_locked(), queue TX work */
680 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
681 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
682 		if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
683 			if (!wl1271_tx_total_queue_count(wl))
684 				queue_tx_work = false;
685 			spin_unlock_irqrestore(&wl->wl_lock, flags);
686 		}
687 		if (queue_tx_work)
688 			ieee80211_queue_work(wl->hw, &wl->tx_work);
689 	}
690 
691 	mutex_unlock(&wl->mutex);
692 
693 out_handled:
694 	clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
695 
696 	return IRQ_HANDLED;
697 }
698 
699 struct vif_counter_data {
700 	u8 counter;
701 
702 	struct ieee80211_vif *cur_vif;
703 	bool cur_vif_running;
704 };
705 
706 static void wl12xx_vif_count_iter(void *data, u8 *mac,
707 				  struct ieee80211_vif *vif)
708 {
709 	struct vif_counter_data *counter = data;
710 
711 	counter->counter++;
712 	if (counter->cur_vif == vif)
713 		counter->cur_vif_running = true;
714 }
715 
716 /* caller must not hold wl->mutex, as it might deadlock */
717 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
718 			       struct ieee80211_vif *cur_vif,
719 			       struct vif_counter_data *data)
720 {
721 	memset(data, 0, sizeof(*data));
722 	data->cur_vif = cur_vif;
723 
724 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
725 					    wl12xx_vif_count_iter, data);
726 }
727 
728 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
729 {
730 	const struct firmware *fw;
731 	const char *fw_name;
732 	enum wl12xx_fw_type fw_type;
733 	int ret;
734 
735 	if (plt) {
736 		fw_type = WL12XX_FW_TYPE_PLT;
737 		fw_name = wl->plt_fw_name;
738 	} else {
739 		/*
740 		 * we can't call wl12xx_get_vif_count() here because
741 		 * wl->mutex is taken, so use the cached last_vif_count value
742 		 */
743 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
744 			fw_type = WL12XX_FW_TYPE_MULTI;
745 			fw_name = wl->mr_fw_name;
746 		} else {
747 			fw_type = WL12XX_FW_TYPE_NORMAL;
748 			fw_name = wl->sr_fw_name;
749 		}
750 	}
751 
752 	if (wl->fw_type == fw_type)
753 		return 0;
754 
755 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
756 
757 	ret = request_firmware(&fw, fw_name, wl->dev);
758 
759 	if (ret < 0) {
760 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
761 		return ret;
762 	}
763 
764 	if (fw->size % 4) {
765 		wl1271_error("firmware size is not multiple of 32 bits: %zu",
766 			     fw->size);
767 		ret = -EILSEQ;
768 		goto out;
769 	}
770 
771 	vfree(wl->fw);
772 	wl->fw_type = WL12XX_FW_TYPE_NONE;
773 	wl->fw_len = fw->size;
774 	wl->fw = vmalloc(wl->fw_len);
775 
776 	if (!wl->fw) {
777 		wl1271_error("could not allocate memory for the firmware");
778 		ret = -ENOMEM;
779 		goto out;
780 	}
781 
782 	memcpy(wl->fw, fw->data, wl->fw_len);
783 	ret = 0;
784 	wl->fw_type = fw_type;
785 out:
786 	release_firmware(fw);
787 
788 	return ret;
789 }
790 
791 void wl12xx_queue_recovery_work(struct wl1271 *wl)
792 {
793 	/* Avoid a recursive recovery */
794 	if (wl->state == WLCORE_STATE_ON) {
795 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
796 				  &wl->flags));
797 
798 		wl->state = WLCORE_STATE_RESTARTING;
799 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
800 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
801 	}
802 }
803 
804 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
805 {
806 	size_t len;
807 
808 	/* Make sure we have enough room */
809 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
810 
811 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
812 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
813 	wl->fwlog_size += len;
814 
815 	return len;
816 }
817 
818 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
819 {
820 	u32 end_of_log = 0;
821 	int error;
822 
823 	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
824 		return;
825 
826 	wl1271_info("Reading FW panic log");
827 
828 	/*
829 	 * Make sure the chip is awake and the logger isn't active.
830 	 * Do not send a stop fwlog command if the fw is hung or if
831 	 * dbgpins are used (due to some fw bug).
832 	 */
833 	error = pm_runtime_resume_and_get(wl->dev);
834 	if (error < 0)
835 		return;
836 	if (!wl->watchdog_recovery &&
837 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
838 		wl12xx_cmd_stop_fwlog(wl);
839 
840 	/* Traverse the memory blocks linked list */
841 	do {
842 		end_of_log = wlcore_event_fw_logger(wl);
843 		if (end_of_log == 0) {
844 			msleep(100);
845 			end_of_log = wlcore_event_fw_logger(wl);
846 		}
847 	} while (end_of_log != 0);
848 }
849 
850 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
851 				   u8 hlid, struct ieee80211_sta *sta)
852 {
853 	struct wl1271_station *wl_sta;
854 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
855 
856 	wl_sta = (void *)sta->drv_priv;
857 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
858 
859 	/*
860 	 * increment the initial seq number on recovery to account for
861 	 * transmitted packets that we haven't yet got in the FW status
862 	 */
863 	if (wlvif->encryption_type == KEY_GEM)
864 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
865 
866 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
867 		wl_sta->total_freed_pkts += sqn_recovery_padding;
868 }
869 
870 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
871 					struct wl12xx_vif *wlvif,
872 					u8 hlid, const u8 *addr)
873 {
874 	struct ieee80211_sta *sta;
875 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
876 
877 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
878 		    is_zero_ether_addr(addr)))
879 		return;
880 
881 	rcu_read_lock();
882 	sta = ieee80211_find_sta(vif, addr);
883 	if (sta)
884 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
885 	rcu_read_unlock();
886 }
887 
888 static void wlcore_print_recovery(struct wl1271 *wl)
889 {
890 	u32 pc = 0;
891 	u32 hint_sts = 0;
892 	int ret;
893 
894 	wl1271_info("Hardware recovery in progress. FW ver: %s",
895 		    wl->chip.fw_ver_str);
896 
897 	/* change partitions momentarily so we can read the FW pc */
898 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
899 	if (ret < 0)
900 		return;
901 
902 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
903 	if (ret < 0)
904 		return;
905 
906 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
907 	if (ret < 0)
908 		return;
909 
910 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
911 				pc, hint_sts, ++wl->recovery_count);
912 
913 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
914 }
915 
916 
917 static void wl1271_recovery_work(struct work_struct *work)
918 {
919 	struct wl1271 *wl =
920 		container_of(work, struct wl1271, recovery_work);
921 	struct wl12xx_vif *wlvif;
922 	struct ieee80211_vif *vif;
923 	int error;
924 
925 	mutex_lock(&wl->mutex);
926 
927 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
928 		goto out_unlock;
929 
930 	error = pm_runtime_resume_and_get(wl->dev);
931 	if (error < 0)
932 		wl1271_warning("Enable for recovery failed");
933 	wlcore_disable_interrupts_nosync(wl);
934 
935 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
936 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
937 			wl12xx_read_fwlog_panic(wl);
938 		wlcore_print_recovery(wl);
939 	}
940 
941 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
942 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
943 
944 	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
945 
946 	if (wl->conf.recovery.no_recovery) {
947 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
948 		goto out_unlock;
949 	}
950 
951 	/* Prevent spurious TX during FW restart */
952 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
953 
954 	/* reboot the chipset */
955 	while (!list_empty(&wl->wlvif_list)) {
956 		wlvif = list_first_entry(&wl->wlvif_list,
957 				       struct wl12xx_vif, list);
958 		vif = wl12xx_wlvif_to_vif(wlvif);
959 
960 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
961 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
962 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
963 						    vif->bss_conf.bssid);
964 		}
965 
966 		__wl1271_op_remove_interface(wl, vif, false);
967 	}
968 
969 	wlcore_op_stop_locked(wl);
970 	pm_runtime_mark_last_busy(wl->dev);
971 	pm_runtime_put_autosuspend(wl->dev);
972 
973 	ieee80211_restart_hw(wl->hw);
974 
975 	/*
976 	 * It's safe to enable TX now - the queues are stopped after a request
977 	 * to restart the HW.
978 	 */
979 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
980 
981 out_unlock:
982 	wl->watchdog_recovery = false;
983 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
984 	mutex_unlock(&wl->mutex);
985 }
986 
987 static int wlcore_fw_wakeup(struct wl1271 *wl)
988 {
989 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
990 }
991 
992 static int wl1271_setup(struct wl1271 *wl)
993 {
994 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
995 	if (!wl->raw_fw_status)
996 		goto err;
997 
998 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
999 	if (!wl->fw_status)
1000 		goto err;
1001 
1002 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1003 	if (!wl->tx_res_if)
1004 		goto err;
1005 
1006 	return 0;
1007 err:
1008 	kfree(wl->fw_status);
1009 	kfree(wl->raw_fw_status);
1010 	return -ENOMEM;
1011 }
1012 
1013 static int wl12xx_set_power_on(struct wl1271 *wl)
1014 {
1015 	int ret;
1016 
1017 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1018 	ret = wl1271_power_on(wl);
1019 	if (ret < 0)
1020 		goto out;
1021 	msleep(WL1271_POWER_ON_SLEEP);
1022 	wl1271_io_reset(wl);
1023 	wl1271_io_init(wl);
1024 
1025 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1026 	if (ret < 0)
1027 		goto fail;
1028 
1029 	/* ELP module wake up */
1030 	ret = wlcore_fw_wakeup(wl);
1031 	if (ret < 0)
1032 		goto fail;
1033 
1034 out:
1035 	return ret;
1036 
1037 fail:
1038 	wl1271_power_off(wl);
1039 	return ret;
1040 }
1041 
1042 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1043 {
1044 	int ret = 0;
1045 
1046 	ret = wl12xx_set_power_on(wl);
1047 	if (ret < 0)
1048 		goto out;
1049 
1050 	/*
1051 	 * For wl127x based devices we could use the default block
1052 	 * size (512 bytes), but due to a bug in the sdio driver, we
1053 	 * need to set it explicitly after the chip is powered on.  To
1054 	 * simplify the code and since the performance impact is
1055 	 * negligible, we use the same block size for all different
1056 	 * chip types.
1057 	 *
1058 	 * Check if the bus supports blocksize alignment and, if it
1059 	 * doesn't, make sure we don't have the quirk.
1060 	 */
1061 	if (!wl1271_set_block_size(wl))
1062 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1063 
1064 	/* TODO: make sure the lower driver has set things up correctly */
1065 
1066 	ret = wl1271_setup(wl);
1067 	if (ret < 0)
1068 		goto out;
1069 
1070 	ret = wl12xx_fetch_firmware(wl, plt);
1071 	if (ret < 0) {
1072 		kfree(wl->fw_status);
1073 		kfree(wl->raw_fw_status);
1074 		kfree(wl->tx_res_if);
1075 	}
1076 
1077 out:
1078 	return ret;
1079 }
1080 
1081 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1082 {
1083 	int retries = WL1271_BOOT_RETRIES;
1084 	struct wiphy *wiphy = wl->hw->wiphy;
1085 
1086 	static const char* const PLT_MODE[] = {
1087 		"PLT_OFF",
1088 		"PLT_ON",
1089 		"PLT_FEM_DETECT",
1090 		"PLT_CHIP_AWAKE"
1091 	};
1092 
1093 	int ret;
1094 
1095 	mutex_lock(&wl->mutex);
1096 
1097 	wl1271_notice("power up");
1098 
1099 	if (wl->state != WLCORE_STATE_OFF) {
1100 		wl1271_error("cannot go into PLT state because not "
1101 			     "in off state: %d", wl->state);
1102 		ret = -EBUSY;
1103 		goto out;
1104 	}
1105 
1106 	/* Indicate to lower levels that we are now in PLT mode */
1107 	wl->plt = true;
1108 	wl->plt_mode = plt_mode;
1109 
1110 	while (retries) {
1111 		retries--;
1112 		ret = wl12xx_chip_wakeup(wl, true);
1113 		if (ret < 0)
1114 			goto power_off;
1115 
1116 		if (plt_mode != PLT_CHIP_AWAKE) {
1117 			ret = wl->ops->plt_init(wl);
1118 			if (ret < 0)
1119 				goto power_off;
1120 		}
1121 
1122 		wl->state = WLCORE_STATE_ON;
1123 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1124 			      PLT_MODE[plt_mode],
1125 			      wl->chip.fw_ver_str);
1126 
1127 		/* update hw/fw version info in wiphy struct */
1128 		wiphy->hw_version = wl->chip.id;
1129 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1130 			sizeof(wiphy->fw_version));
1131 
1132 		goto out;
1133 
1134 power_off:
1135 		wl1271_power_off(wl);
1136 	}
1137 
1138 	wl->plt = false;
1139 	wl->plt_mode = PLT_OFF;
1140 
1141 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1142 		     WL1271_BOOT_RETRIES);
1143 out:
1144 	mutex_unlock(&wl->mutex);
1145 
1146 	return ret;
1147 }
1148 
1149 int wl1271_plt_stop(struct wl1271 *wl)
1150 {
1151 	int ret = 0;
1152 
1153 	wl1271_notice("power down");
1154 
1155 	/*
1156 	 * Interrupts must be disabled before setting the state to OFF.
1157 	 * Otherwise, the interrupt handler might be called and exit without
1158 	 * reading the interrupt status.
1159 	 */
1160 	wlcore_disable_interrupts(wl);
1161 	mutex_lock(&wl->mutex);
1162 	if (!wl->plt) {
1163 		mutex_unlock(&wl->mutex);
1164 
1165 		/*
1166 		 * This will not necessarily enable interrupts as interrupts
1167 		 * may have been disabled when op_stop was called. It will,
1168 		 * however, balance the above call to disable_interrupts().
1169 		 */
1170 		wlcore_enable_interrupts(wl);
1171 
1172 		wl1271_error("cannot power down because not in PLT "
1173 			     "state: %d", wl->state);
1174 		ret = -EBUSY;
1175 		goto out;
1176 	}
1177 
1178 	mutex_unlock(&wl->mutex);
1179 
1180 	wl1271_flush_deferred_work(wl);
1181 	cancel_work_sync(&wl->netstack_work);
1182 	cancel_work_sync(&wl->recovery_work);
1183 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1184 
1185 	mutex_lock(&wl->mutex);
1186 	wl1271_power_off(wl);
1187 	wl->flags = 0;
1188 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1189 	wl->state = WLCORE_STATE_OFF;
1190 	wl->plt = false;
1191 	wl->plt_mode = PLT_OFF;
1192 	wl->rx_counter = 0;
1193 	mutex_unlock(&wl->mutex);
1194 
1195 out:
1196 	return ret;
1197 }
1198 
1199 static void wl1271_op_tx(struct ieee80211_hw *hw,
1200 			 struct ieee80211_tx_control *control,
1201 			 struct sk_buff *skb)
1202 {
1203 	struct wl1271 *wl = hw->priv;
1204 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1205 	struct ieee80211_vif *vif = info->control.vif;
1206 	struct wl12xx_vif *wlvif = NULL;
1207 	unsigned long flags;
1208 	int q, mapping;
1209 	u8 hlid;
1210 
1211 	if (!vif) {
1212 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1213 		ieee80211_free_txskb(hw, skb);
1214 		return;
1215 	}
1216 
1217 	wlvif = wl12xx_vif_to_data(vif);
1218 	mapping = skb_get_queue_mapping(skb);
1219 	q = wl1271_tx_get_queue(mapping);
1220 
1221 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1222 
1223 	spin_lock_irqsave(&wl->wl_lock, flags);
1224 
1225 	/*
1226 	 * drop the packet if the link is invalid or the queue is stopped
1227 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1228 	 * allow these packets through.
1229 	 */
1230 	if (hlid == WL12XX_INVALID_LINK_ID ||
1231 	    (!test_bit(hlid, wlvif->links_map)) ||
1232 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1233 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1234 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1235 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1236 		ieee80211_free_txskb(hw, skb);
1237 		goto out;
1238 	}
1239 
1240 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1241 		     hlid, q, skb->len);
1242 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1243 
1244 	wl->tx_queue_count[q]++;
1245 	wlvif->tx_queue_count[q]++;
1246 
1247 	/*
1248 	 * The workqueue is slow to process the tx_queue and we need to stop
1249 	 * the queue here, otherwise the queue will get too long.
1250 	 */
1251 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1252 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1253 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1254 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1255 		wlcore_stop_queue_locked(wl, wlvif, q,
1256 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1257 	}
1258 
1259 	/*
1260 	 * The chip specific setup must run before the first TX packet -
1261 	 * before that, the tx_work will not be initialized!
1262 	 */
1263 
1264 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1265 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1266 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1267 
1268 out:
1269 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1270 }
1271 
1272 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1273 {
1274 	unsigned long flags;
1275 	int q;
1276 
1277 	/* no need to queue a new dummy packet if one is already pending */
1278 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1279 		return 0;
1280 
1281 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1282 
1283 	spin_lock_irqsave(&wl->wl_lock, flags);
1284 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1285 	wl->tx_queue_count[q]++;
1286 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1287 
1288 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1289 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1290 		return wlcore_tx_work_locked(wl);
1291 
1292 	/*
1293 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1294 	 * interrupt handler function
1295 	 */
1296 	return 0;
1297 }
1298 
1299 /*
1300  * The size of the dummy packet should be at least 1400 bytes. However, in
1301  * order to minimize the number of bus transactions, aligning it to 512-byte
1302  * boundaries could be beneficial, performance-wise
1303  */
1304 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1305 
1306 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1307 {
1308 	struct sk_buff *skb;
1309 	struct ieee80211_hdr_3addr *hdr;
1310 	unsigned int dummy_packet_size;
1311 
1312 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1313 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1314 
1315 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1316 	if (!skb) {
1317 		wl1271_warning("Failed to allocate a dummy packet skb");
1318 		return NULL;
1319 	}
1320 
1321 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1322 
1323 	hdr = skb_put_zero(skb, sizeof(*hdr));
1324 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1325 					 IEEE80211_STYPE_NULLFUNC |
1326 					 IEEE80211_FCTL_TODS);
1327 
1328 	skb_put_zero(skb, dummy_packet_size);
1329 
1330 	/* Dummy packets require the TID to be management */
1331 	skb->priority = WL1271_TID_MGMT;
1332 
1333 	/* Initialize all fields that might be used */
1334 	skb_set_queue_mapping(skb, 0);
1335 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1336 
1337 	return skb;
1338 }
1339 
1340 
1341 static int
1342 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1343 {
1344 	int num_fields = 0, in_field = 0, fields_size = 0;
1345 	int i, pattern_len = 0;
1346 
1347 	if (!p->mask) {
1348 		wl1271_warning("No mask in WoWLAN pattern");
1349 		return -EINVAL;
1350 	}
1351 
1352 	/*
1353 	 * The pattern is broken up into segments of bytes at different offsets
1354 	 * that need to be checked by the FW filter. Each segment is called
1355 	 * a field in the FW API. We verify that the total number of fields
1356 	 * required for this pattern won't exceed FW limits (8),
1357 	 * and that the total fields buffer won't exceed the FW limit.
1358 	 * Note that if there's a pattern which crosses Ethernet/IP header
1359 	 * boundary a new field is required.
1360 	 */
1361 	for (i = 0; i < p->pattern_len; i++) {
1362 		if (test_bit(i, (unsigned long *)p->mask)) {
1363 			if (!in_field) {
1364 				in_field = 1;
1365 				pattern_len = 1;
1366 			} else {
1367 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1368 					num_fields++;
1369 					fields_size += pattern_len +
1370 						RX_FILTER_FIELD_OVERHEAD;
1371 					pattern_len = 1;
1372 				} else
1373 					pattern_len++;
1374 			}
1375 		} else {
1376 			if (in_field) {
1377 				in_field = 0;
1378 				fields_size += pattern_len +
1379 					RX_FILTER_FIELD_OVERHEAD;
1380 				num_fields++;
1381 			}
1382 		}
1383 	}
1384 
1385 	if (in_field) {
1386 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1387 		num_fields++;
1388 	}
1389 
1390 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1391 		wl1271_warning("RX Filter too complex. Too many segments");
1392 		return -EINVAL;
1393 	}
1394 
1395 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1396 		wl1271_warning("RX filter pattern is too big");
1397 		return -E2BIG;
1398 	}
1399 
1400 	return 0;
1401 }
1402 
1403 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1404 {
1405 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1406 }
1407 
1408 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1409 {
1410 	int i;
1411 
1412 	if (filter == NULL)
1413 		return;
1414 
1415 	for (i = 0; i < filter->num_fields; i++)
1416 		kfree(filter->fields[i].pattern);
1417 
1418 	kfree(filter);
1419 }
1420 
1421 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1422 				 u16 offset, u8 flags,
1423 				 const u8 *pattern, u8 len)
1424 {
1425 	struct wl12xx_rx_filter_field *field;
1426 
1427 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1428 		wl1271_warning("Max fields per RX filter. can't alloc another");
1429 		return -EINVAL;
1430 	}
1431 
1432 	field = &filter->fields[filter->num_fields];
1433 
1434 	field->pattern = kmemdup(pattern, len, GFP_KERNEL);
1435 	if (!field->pattern) {
1436 		wl1271_warning("Failed to allocate RX filter pattern");
1437 		return -ENOMEM;
1438 	}
1439 
1440 	filter->num_fields++;
1441 
1442 	field->offset = cpu_to_le16(offset);
1443 	field->flags = flags;
1444 	field->len = len;
1445 
1446 	return 0;
1447 }
1448 
1449 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1450 {
1451 	int i, fields_size = 0;
1452 
1453 	for (i = 0; i < filter->num_fields; i++)
1454 		fields_size += filter->fields[i].len +
1455 			sizeof(struct wl12xx_rx_filter_field) -
1456 			sizeof(u8 *);
1457 
1458 	return fields_size;
1459 }
1460 
1461 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1462 				    u8 *buf)
1463 {
1464 	int i;
1465 	struct wl12xx_rx_filter_field *field;
1466 
1467 	for (i = 0; i < filter->num_fields; i++) {
1468 		field = (struct wl12xx_rx_filter_field *)buf;
1469 
1470 		field->offset = filter->fields[i].offset;
1471 		field->flags = filter->fields[i].flags;
1472 		field->len = filter->fields[i].len;
1473 
1474 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1475 		buf += sizeof(struct wl12xx_rx_filter_field) -
1476 			sizeof(u8 *) + field->len;
1477 	}
1478 }
1479 
1480 /*
1481  * Allocates an RX filter returned through f
1482  * which needs to be freed using rx_filter_free()
1483  */
1484 static int
1485 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1486 					   struct wl12xx_rx_filter **f)
1487 {
1488 	int i, j, ret = 0;
1489 	struct wl12xx_rx_filter *filter;
1490 	u16 offset;
1491 	u8 flags, len;
1492 
1493 	filter = wl1271_rx_filter_alloc();
1494 	if (!filter) {
1495 		wl1271_warning("Failed to alloc rx filter");
1496 		ret = -ENOMEM;
1497 		goto err;
1498 	}
1499 
1500 	i = 0;
1501 	while (i < p->pattern_len) {
1502 		if (!test_bit(i, (unsigned long *)p->mask)) {
1503 			i++;
1504 			continue;
1505 		}
1506 
1507 		for (j = i; j < p->pattern_len; j++) {
1508 			if (!test_bit(j, (unsigned long *)p->mask))
1509 				break;
1510 
1511 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1512 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1513 				break;
1514 		}
1515 
1516 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1517 			offset = i;
1518 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1519 		} else {
1520 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1521 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1522 		}
1523 
1524 		len = j - i;
1525 
1526 		ret = wl1271_rx_filter_alloc_field(filter,
1527 						   offset,
1528 						   flags,
1529 						   &p->pattern[i], len);
1530 		if (ret)
1531 			goto err;
1532 
1533 		i = j;
1534 	}
1535 
1536 	filter->action = FILTER_SIGNAL;
1537 
1538 	*f = filter;
1539 	return 0;
1540 
1541 err:
1542 	wl1271_rx_filter_free(filter);
1543 	*f = NULL;
1544 
1545 	return ret;
1546 }
1547 
1548 static int wl1271_configure_wowlan(struct wl1271 *wl,
1549 				   struct cfg80211_wowlan *wow)
1550 {
1551 	int i, ret;
1552 
1553 	if (!wow || wow->any || !wow->n_patterns) {
1554 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1555 							  FILTER_SIGNAL);
1556 		if (ret)
1557 			goto out;
1558 
1559 		ret = wl1271_rx_filter_clear_all(wl);
1560 		if (ret)
1561 			goto out;
1562 
1563 		return 0;
1564 	}
1565 
1566 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1567 		return -EINVAL;
1568 
1569 	/* Validate all incoming patterns before clearing current FW state */
1570 	for (i = 0; i < wow->n_patterns; i++) {
1571 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1572 		if (ret) {
1573 			wl1271_warning("Bad wowlan pattern %d", i);
1574 			return ret;
1575 		}
1576 	}
1577 
1578 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1579 	if (ret)
1580 		goto out;
1581 
1582 	ret = wl1271_rx_filter_clear_all(wl);
1583 	if (ret)
1584 		goto out;
1585 
1586 	/* Translate WoWLAN patterns into filters */
1587 	for (i = 0; i < wow->n_patterns; i++) {
1588 		struct cfg80211_pkt_pattern *p;
1589 		struct wl12xx_rx_filter *filter = NULL;
1590 
1591 		p = &wow->patterns[i];
1592 
1593 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1594 		if (ret) {
1595 			wl1271_warning("Failed to create an RX filter from "
1596 				       "wowlan pattern %d", i);
1597 			goto out;
1598 		}
1599 
1600 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1601 
1602 		wl1271_rx_filter_free(filter);
1603 		if (ret)
1604 			goto out;
1605 	}
1606 
1607 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1608 
1609 out:
1610 	return ret;
1611 }
1612 
1613 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1614 					struct wl12xx_vif *wlvif,
1615 					struct cfg80211_wowlan *wow)
1616 {
1617 	int ret = 0;
1618 
1619 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1620 		goto out;
1621 
1622 	ret = wl1271_configure_wowlan(wl, wow);
1623 	if (ret < 0)
1624 		goto out;
1625 
1626 	if ((wl->conf.conn.suspend_wake_up_event ==
1627 	     wl->conf.conn.wake_up_event) &&
1628 	    (wl->conf.conn.suspend_listen_interval ==
1629 	     wl->conf.conn.listen_interval))
1630 		goto out;
1631 
1632 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1633 				    wl->conf.conn.suspend_wake_up_event,
1634 				    wl->conf.conn.suspend_listen_interval);
1635 
1636 	if (ret < 0)
1637 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1638 out:
1639 	return ret;
1640 
1641 }
1642 
1643 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1644 					struct wl12xx_vif *wlvif,
1645 					struct cfg80211_wowlan *wow)
1646 {
1647 	int ret = 0;
1648 
1649 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1650 		goto out;
1651 
1652 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1653 	if (ret < 0)
1654 		goto out;
1655 
1656 	ret = wl1271_configure_wowlan(wl, wow);
1657 	if (ret < 0)
1658 		goto out;
1659 
1660 out:
1661 	return ret;
1662 
1663 }
1664 
1665 static int wl1271_configure_suspend(struct wl1271 *wl,
1666 				    struct wl12xx_vif *wlvif,
1667 				    struct cfg80211_wowlan *wow)
1668 {
1669 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1670 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1671 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1672 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1673 	return 0;
1674 }
1675 
1676 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1677 {
1678 	int ret = 0;
1679 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1680 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1681 
1682 	if ((!is_ap) && (!is_sta))
1683 		return;
1684 
1685 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1686 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1687 		return;
1688 
1689 	wl1271_configure_wowlan(wl, NULL);
1690 
1691 	if (is_sta) {
1692 		if ((wl->conf.conn.suspend_wake_up_event ==
1693 		     wl->conf.conn.wake_up_event) &&
1694 		    (wl->conf.conn.suspend_listen_interval ==
1695 		     wl->conf.conn.listen_interval))
1696 			return;
1697 
1698 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1699 				    wl->conf.conn.wake_up_event,
1700 				    wl->conf.conn.listen_interval);
1701 
1702 		if (ret < 0)
1703 			wl1271_error("resume: wake up conditions failed: %d",
1704 				     ret);
1705 
1706 	} else if (is_ap) {
1707 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1708 	}
1709 }
1710 
1711 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1712 					    struct cfg80211_wowlan *wow)
1713 {
1714 	struct wl1271 *wl = hw->priv;
1715 	struct wl12xx_vif *wlvif;
1716 	unsigned long flags;
1717 	int ret;
1718 
1719 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1720 	WARN_ON(!wow);
1721 
1722 	/* we want to perform the recovery before suspending */
1723 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1724 		wl1271_warning("postponing suspend to perform recovery");
1725 		return -EBUSY;
1726 	}
1727 
1728 	wl1271_tx_flush(wl);
1729 
1730 	mutex_lock(&wl->mutex);
1731 
1732 	ret = pm_runtime_resume_and_get(wl->dev);
1733 	if (ret < 0) {
1734 		mutex_unlock(&wl->mutex);
1735 		return ret;
1736 	}
1737 
1738 	wl->wow_enabled = true;
1739 	wl12xx_for_each_wlvif(wl, wlvif) {
1740 		if (wlcore_is_p2p_mgmt(wlvif))
1741 			continue;
1742 
1743 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1744 		if (ret < 0) {
1745 			goto out_sleep;
1746 		}
1747 	}
1748 
1749 	/* disable fast link flow control notifications from FW */
1750 	ret = wlcore_hw_interrupt_notify(wl, false);
1751 	if (ret < 0)
1752 		goto out_sleep;
1753 
1754 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1755 	ret = wlcore_hw_rx_ba_filter(wl,
1756 				     !!wl->conf.conn.suspend_rx_ba_activity);
1757 	if (ret < 0)
1758 		goto out_sleep;
1759 
1760 out_sleep:
1761 	pm_runtime_put_noidle(wl->dev);
1762 	mutex_unlock(&wl->mutex);
1763 
1764 	if (ret < 0) {
1765 		wl1271_warning("couldn't prepare device to suspend");
1766 		return ret;
1767 	}
1768 
1769 	/* flush any remaining work */
1770 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1771 
1772 	flush_work(&wl->tx_work);
1773 
1774 	/*
1775 	 * Cancel the watchdog even if above tx_flush failed. We will detect
1776 	 * it on resume anyway.
1777 	 */
1778 	cancel_delayed_work(&wl->tx_watchdog_work);
1779 
1780 	/*
1781 	 * set suspended flag to avoid triggering a new threaded_irq
1782 	 * work.
1783 	 */
1784 	spin_lock_irqsave(&wl->wl_lock, flags);
1785 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1786 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1787 
1788 	return pm_runtime_force_suspend(wl->dev);
1789 }
1790 
1791 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1792 {
1793 	struct wl1271 *wl = hw->priv;
1794 	struct wl12xx_vif *wlvif;
1795 	unsigned long flags;
1796 	bool run_irq_work = false, pending_recovery;
1797 	int ret;
1798 
1799 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1800 		     wl->wow_enabled);
1801 	WARN_ON(!wl->wow_enabled);
1802 
1803 	ret = pm_runtime_force_resume(wl->dev);
1804 	if (ret < 0) {
1805 		wl1271_error("ELP wakeup failure!");
1806 		goto out_sleep;
1807 	}
1808 
1809 	/*
1810 	 * re-enable irq_work enqueuing, and call irq_work directly if
1811 	 * there is a pending work.
1812 	 */
1813 	spin_lock_irqsave(&wl->wl_lock, flags);
1814 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1815 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1816 		run_irq_work = true;
1817 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1818 
1819 	mutex_lock(&wl->mutex);
1820 
1821 	/* test the recovery flag before calling any SDIO functions */
1822 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1823 				    &wl->flags);
1824 
1825 	if (run_irq_work) {
1826 		wl1271_debug(DEBUG_MAC80211,
1827 			     "run postponed irq_work directly");
1828 
1829 		/* don't talk to the HW if recovery is pending */
1830 		if (!pending_recovery) {
1831 			ret = wlcore_irq_locked(wl);
1832 			if (ret)
1833 				wl12xx_queue_recovery_work(wl);
1834 		}
1835 
1836 		wlcore_enable_interrupts(wl);
1837 	}
1838 
1839 	if (pending_recovery) {
1840 		wl1271_warning("queuing forgotten recovery on resume");
1841 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1842 		goto out_sleep;
1843 	}
1844 
1845 	ret = pm_runtime_resume_and_get(wl->dev);
1846 	if (ret < 0)
1847 		goto out;
1848 
1849 	wl12xx_for_each_wlvif(wl, wlvif) {
1850 		if (wlcore_is_p2p_mgmt(wlvif))
1851 			continue;
1852 
1853 		wl1271_configure_resume(wl, wlvif);
1854 	}
1855 
1856 	ret = wlcore_hw_interrupt_notify(wl, true);
1857 	if (ret < 0)
1858 		goto out_sleep;
1859 
1860 	/* disable the suspend-time RX BA filtering, if it was enabled */
1861 	ret = wlcore_hw_rx_ba_filter(wl, false);
1862 	if (ret < 0)
1863 		goto out_sleep;
1864 
1865 out_sleep:
1866 	pm_runtime_mark_last_busy(wl->dev);
1867 	pm_runtime_put_autosuspend(wl->dev);
1868 
1869 out:
1870 	wl->wow_enabled = false;
1871 
1872 	/*
1873 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1874 	 * That way we avoid possible conditions where Tx-complete interrupts
1875 	 * fail to arrive and we perform a spurious recovery.
1876 	 */
1877 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1878 	mutex_unlock(&wl->mutex);
1879 
1880 	return 0;
1881 }
1882 
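/*
 * Note on the suspend/resume handshake above: wl1271_op_suspend() sets
 * WL1271_FLAG_SUSPENDED under wl_lock so that the interrupt path defers its
 * work instead of touching the suspended bus, and wl1271_op_resume() clears
 * the flag and replays any deferred work via wlcore_irq_locked() before
 * re-enabling interrupts. A minimal sketch of the producer side, assuming
 * the threaded IRQ handler follows this convention (it is not shown in this
 * excerpt):
 *
 *	spin_lock_irqsave(&wl->wl_lock, flags);
 *	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
 *		// don't run now; mark it so resume can replay the work
 *		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
 *		disable_irq_nosync(wl->irq);
 *	}
 *	spin_unlock_irqrestore(&wl->wl_lock, flags);
 */
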
1883 static int wl1271_op_start(struct ieee80211_hw *hw)
1884 {
1885 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1886 
1887 	/*
1888 	 * We have to delay the booting of the hardware because
1889 	 * we need to know the local MAC address before downloading and
1890 	 * initializing the firmware. The MAC address cannot be changed
1891 	 * after boot, and without the proper MAC address, the firmware
1892 	 * will not function properly.
1893 	 *
1894 	 * The MAC address is first known when the corresponding interface
1895 	 * is added. That is where we will initialize the hardware.
1896 	 */
1897 
1898 	return 0;
1899 }
1900 
1901 static void wlcore_op_stop_locked(struct wl1271 *wl)
1902 {
1903 	int i;
1904 
1905 	if (wl->state == WLCORE_STATE_OFF) {
1906 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1907 					&wl->flags))
1908 			wlcore_enable_interrupts(wl);
1909 
1910 		return;
1911 	}
1912 
1913 	/*
1914 	 * this must be before the cancel_work calls below, so that the work
1915 	 * functions don't perform further work.
1916 	 */
1917 	wl->state = WLCORE_STATE_OFF;
1918 
1919 	/*
1920 	 * Use the nosync variant to disable interrupts, so the mutex could be
1921 	 * held while doing so without deadlocking.
1922 	 */
1923 	wlcore_disable_interrupts_nosync(wl);
1924 
1925 	mutex_unlock(&wl->mutex);
1926 
1927 	wlcore_synchronize_interrupts(wl);
1928 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1929 		cancel_work_sync(&wl->recovery_work);
1930 	wl1271_flush_deferred_work(wl);
1931 	cancel_delayed_work_sync(&wl->scan_complete_work);
1932 	cancel_work_sync(&wl->netstack_work);
1933 	cancel_work_sync(&wl->tx_work);
1934 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1935 
1936 	/* let's notify MAC80211 about the remaining pending TX frames */
1937 	mutex_lock(&wl->mutex);
1938 	wl12xx_tx_reset(wl);
1939 
1940 	wl1271_power_off(wl);
1941 	/*
1942 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1943 	 * an interrupt storm. Now that the power is down, it is safe to
1944 	 * re-enable interrupts to balance the disable depth
1945 	 */
1946 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1947 		wlcore_enable_interrupts(wl);
1948 
1949 	wl->band = NL80211_BAND_2GHZ;
1950 
1951 	wl->rx_counter = 0;
1952 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1953 	wl->channel_type = NL80211_CHAN_NO_HT;
1954 	wl->tx_blocks_available = 0;
1955 	wl->tx_allocated_blocks = 0;
1956 	wl->tx_results_count = 0;
1957 	wl->tx_packets_count = 0;
1958 	wl->time_offset = 0;
1959 	wl->ap_fw_ps_map = 0;
1960 	wl->ap_ps_map = 0;
1961 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1962 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1963 	memset(wl->links_map, 0, sizeof(wl->links_map));
1964 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1965 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1966 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1967 	wl->active_sta_count = 0;
1968 	wl->active_link_count = 0;
1969 
1970 	/* The system link is always allocated */
1971 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1972 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1973 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1974 
1975 	/*
1976 	 * this is performed after the cancel_work calls and the associated
1977 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1978 	 * get executed before all these vars have been reset.
1979 	 */
1980 	wl->flags = 0;
1981 
1982 	wl->tx_blocks_freed = 0;
1983 
1984 	for (i = 0; i < NUM_TX_QUEUES; i++) {
1985 		wl->tx_pkts_freed[i] = 0;
1986 		wl->tx_allocated_pkts[i] = 0;
1987 	}
1988 
1989 	wl1271_debugfs_reset(wl);
1990 
1991 	kfree(wl->raw_fw_status);
1992 	wl->raw_fw_status = NULL;
1993 	kfree(wl->fw_status);
1994 	wl->fw_status = NULL;
1995 	kfree(wl->tx_res_if);
1996 	wl->tx_res_if = NULL;
1997 	kfree(wl->target_mem_map);
1998 	wl->target_mem_map = NULL;
1999 
2000 	/*
2001 	 * FW channels must be re-calibrated after recovery,
2002 	 * save current Reg-Domain channel configuration and clear it.
2003 	 */
2004 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2005 	       sizeof(wl->reg_ch_conf_pending));
2006 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2007 }
2008 
2009 static void wlcore_op_stop(struct ieee80211_hw *hw)
2010 {
2011 	struct wl1271 *wl = hw->priv;
2012 
2013 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2014 
2015 	mutex_lock(&wl->mutex);
2016 
2017 	wlcore_op_stop_locked(wl);
2018 
2019 	mutex_unlock(&wl->mutex);
2020 }
2021 
2022 static void wlcore_channel_switch_work(struct work_struct *work)
2023 {
2024 	struct delayed_work *dwork;
2025 	struct wl1271 *wl;
2026 	struct ieee80211_vif *vif;
2027 	struct wl12xx_vif *wlvif;
2028 	int ret;
2029 
2030 	dwork = to_delayed_work(work);
2031 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2032 	wl = wlvif->wl;
2033 
2034 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2035 
2036 	mutex_lock(&wl->mutex);
2037 
2038 	if (unlikely(wl->state != WLCORE_STATE_ON))
2039 		goto out;
2040 
2041 	/* check the channel switch is still ongoing */
2042 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2043 		goto out;
2044 
2045 	vif = wl12xx_wlvif_to_vif(wlvif);
2046 	ieee80211_chswitch_done(vif, false);
2047 
2048 	ret = pm_runtime_resume_and_get(wl->dev);
2049 	if (ret < 0)
2050 		goto out;
2051 
2052 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2053 
2054 	pm_runtime_mark_last_busy(wl->dev);
2055 	pm_runtime_put_autosuspend(wl->dev);
2056 out:
2057 	mutex_unlock(&wl->mutex);
2058 }
2059 
2060 static void wlcore_connection_loss_work(struct work_struct *work)
2061 {
2062 	struct delayed_work *dwork;
2063 	struct wl1271 *wl;
2064 	struct ieee80211_vif *vif;
2065 	struct wl12xx_vif *wlvif;
2066 
2067 	dwork = to_delayed_work(work);
2068 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2069 	wl = wlvif->wl;
2070 
2071 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2072 
2073 	mutex_lock(&wl->mutex);
2074 
2075 	if (unlikely(wl->state != WLCORE_STATE_ON))
2076 		goto out;
2077 
2078 	/* Call mac80211 connection loss */
2079 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2080 		goto out;
2081 
2082 	vif = wl12xx_wlvif_to_vif(wlvif);
2083 	ieee80211_connection_loss(vif);
2084 out:
2085 	mutex_unlock(&wl->mutex);
2086 }
2087 
2088 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2089 {
2090 	struct delayed_work *dwork;
2091 	struct wl1271 *wl;
2092 	struct wl12xx_vif *wlvif;
2093 	unsigned long time_spare;
2094 	int ret;
2095 
2096 	dwork = to_delayed_work(work);
2097 	wlvif = container_of(dwork, struct wl12xx_vif,
2098 			     pending_auth_complete_work);
2099 	wl = wlvif->wl;
2100 
2101 	mutex_lock(&wl->mutex);
2102 
2103 	if (unlikely(wl->state != WLCORE_STATE_ON))
2104 		goto out;
2105 
2106 	/*
2107 	 * Make sure a second really passed since the last auth reply. Maybe
2108 	 * a second auth reply arrived while we were stuck on the mutex.
2109 	 * Check for a little less than the timeout to protect from scheduler
2110 	 * irregularities.
2111 	 */
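	/*
	 * (time_after() is used instead of a plain '>' so the check below
	 * stays correct across jiffies wraparound: proceed only when
	 * "now + (timeout - 50ms)" lies after the recorded
	 * pending_auth_reply_time.)
	 */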
2112 	time_spare = jiffies +
2113 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2114 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2115 		goto out;
2116 
2117 	ret = pm_runtime_resume_and_get(wl->dev);
2118 	if (ret < 0)
2119 		goto out;
2120 
2121 	/* cancel the ROC if active */
2122 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2123 
2124 	pm_runtime_mark_last_busy(wl->dev);
2125 	pm_runtime_put_autosuspend(wl->dev);
2126 out:
2127 	mutex_unlock(&wl->mutex);
2128 }
2129 
2130 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2131 {
2132 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2133 					WL12XX_MAX_RATE_POLICIES);
2134 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2135 		return -EBUSY;
2136 
2137 	__set_bit(policy, wl->rate_policies_map);
2138 	*idx = policy;
2139 	return 0;
2140 }
2141 
2142 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2143 {
2144 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2145 		return;
2146 
2147 	__clear_bit(*idx, wl->rate_policies_map);
2148 	*idx = WL12XX_MAX_RATE_POLICIES;
2149 }
2150 
2151 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2152 {
2153 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2154 					WLCORE_MAX_KLV_TEMPLATES);
2155 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2156 		return -EBUSY;
2157 
2158 	__set_bit(policy, wl->klv_templates_map);
2159 	*idx = policy;
2160 	return 0;
2161 }
2162 
2163 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2164 {
2165 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2166 		return;
2167 
2168 	__clear_bit(*idx, wl->klv_templates_map);
2169 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2170 }
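
/*
 * The four helpers above share one idiom: a fixed-size bitmap used as an
 * ID allocator. find_first_zero_bit() picks the lowest free slot, the
 * non-atomic __set_bit()/__clear_bit() mark it used or free (the callers
 * here serialize on wl->mutex), and the map size doubles as the invalid
 * sentinel. Minimal usage sketch with a hypothetical caller:
 *
 *	u8 idx;
 *
 *	if (wl12xx_allocate_rate_policy(wl, &idx) < 0)
 *		return -EBUSY;	// all WL12XX_MAX_RATE_POLICIES in use
 *	// ... program the policy referenced by idx ...
 *	wl12xx_free_rate_policy(wl, &idx);	// idx becomes the sentinel again
 */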
2171 
2172 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2173 {
2174 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2175 
2176 	switch (wlvif->bss_type) {
2177 	case BSS_TYPE_AP_BSS:
2178 		if (wlvif->p2p)
2179 			return WL1271_ROLE_P2P_GO;
2180 		else if (ieee80211_vif_is_mesh(vif))
2181 			return WL1271_ROLE_MESH_POINT;
2182 		else
2183 			return WL1271_ROLE_AP;
2184 
2185 	case BSS_TYPE_STA_BSS:
2186 		if (wlvif->p2p)
2187 			return WL1271_ROLE_P2P_CL;
2188 		else
2189 			return WL1271_ROLE_STA;
2190 
2191 	case BSS_TYPE_IBSS:
2192 		return WL1271_ROLE_IBSS;
2193 
2194 	default:
2195 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2196 	}
2197 	return WL12XX_INVALID_ROLE_TYPE;
2198 }
2199 
2200 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2201 {
2202 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2203 	int i;
2204 
2205 	/* clear everything but the persistent data */
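	/*
	 * Note: this relies on struct wl12xx_vif keeping every field that
	 * must survive an interface reset at or after the 'persistent'
	 * member, so only the bytes before offsetof(..., persistent) are
	 * zeroed below. Illustrative layout (not the real definition):
	 *
	 *	struct wl12xx_vif {
	 *		... transient state, cleared here ...
	 *		u8 persistent[0];
	 *		... state preserved across resets ...
	 *	};
	 */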
2206 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2207 
2208 	switch (ieee80211_vif_type_p2p(vif)) {
2209 	case NL80211_IFTYPE_P2P_CLIENT:
2210 		wlvif->p2p = 1;
2211 		fallthrough;
2212 	case NL80211_IFTYPE_STATION:
2213 	case NL80211_IFTYPE_P2P_DEVICE:
2214 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2215 		break;
2216 	case NL80211_IFTYPE_ADHOC:
2217 		wlvif->bss_type = BSS_TYPE_IBSS;
2218 		break;
2219 	case NL80211_IFTYPE_P2P_GO:
2220 		wlvif->p2p = 1;
2221 		fallthrough;
2222 	case NL80211_IFTYPE_AP:
2223 	case NL80211_IFTYPE_MESH_POINT:
2224 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2225 		break;
2226 	default:
2227 		wlvif->bss_type = MAX_BSS_TYPE;
2228 		return -EOPNOTSUPP;
2229 	}
2230 
2231 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2232 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2233 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2234 
2235 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2236 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2237 		/* init sta/ibss data */
2238 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2239 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2240 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2241 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2242 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2243 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2244 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2245 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2246 	} else {
2247 		/* init ap data */
2248 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2249 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2250 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2251 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2252 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2253 			wl12xx_allocate_rate_policy(wl,
2254 						&wlvif->ap.ucast_rate_idx[i]);
2255 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2256 		/*
2257 		 * TODO: check if basic_rate shouldn't be
2258 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2259 		 * instead (the same thing for STA above).
2260 		 */
2261 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2262 		/* TODO: this seems to be used only for STA, check it */
2263 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2264 	}
2265 
2266 	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2267 	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2268 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2269 
2270 	/*
2271 	 * mac80211 configures some values globally, while we treat them
2272 	 * per-interface. thus, on init, we have to copy them from wl
2273 	 */
2274 	wlvif->band = wl->band;
2275 	wlvif->channel = wl->channel;
2276 	wlvif->power_level = wl->power_level;
2277 	wlvif->channel_type = wl->channel_type;
2278 
2279 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2280 		  wl1271_rx_streaming_enable_work);
2281 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2282 		  wl1271_rx_streaming_disable_work);
2283 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2284 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2285 			  wlcore_channel_switch_work);
2286 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2287 			  wlcore_connection_loss_work);
2288 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2289 			  wlcore_pending_auth_complete_work);
2290 	INIT_LIST_HEAD(&wlvif->list);
2291 
2292 	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2293 	return 0;
2294 }
2295 
2296 static int wl12xx_init_fw(struct wl1271 *wl)
2297 {
2298 	int retries = WL1271_BOOT_RETRIES;
2299 	bool booted = false;
2300 	struct wiphy *wiphy = wl->hw->wiphy;
2301 	int ret;
2302 
2303 	while (retries) {
2304 		retries--;
2305 		ret = wl12xx_chip_wakeup(wl, false);
2306 		if (ret < 0)
2307 			goto power_off;
2308 
2309 		ret = wl->ops->boot(wl);
2310 		if (ret < 0)
2311 			goto power_off;
2312 
2313 		ret = wl1271_hw_init(wl);
2314 		if (ret < 0)
2315 			goto irq_disable;
2316 
2317 		booted = true;
2318 		break;
2319 
2320 irq_disable:
2321 		mutex_unlock(&wl->mutex);
2322 		/* Unlocking the mutex in the middle of handling is
2323 		   inherently unsafe. In this case we deem it safe to do,
2324 		   because we need to let any possibly pending IRQ out of
2325 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2326 		   work function will not do anything.) Also, any other
2327 		   possible concurrent operations will fail due to the
2328 		   current state, hence the wl1271 struct should be safe. */
2329 		wlcore_disable_interrupts(wl);
2330 		wl1271_flush_deferred_work(wl);
2331 		cancel_work_sync(&wl->netstack_work);
2332 		mutex_lock(&wl->mutex);
2333 power_off:
2334 		wl1271_power_off(wl);
2335 	}
2336 
2337 	if (!booted) {
2338 		wl1271_error("firmware boot failed despite %d retries",
2339 			     WL1271_BOOT_RETRIES);
2340 		goto out;
2341 	}
2342 
2343 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2344 
2345 	/* update hw/fw version info in wiphy struct */
2346 	wiphy->hw_version = wl->chip.id;
2347 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2348 		sizeof(wiphy->fw_version));
2349 
2350 	/*
2351 	 * Now we know if 11a is supported (info from the NVS), so disable
2352 	 * 11a channels if not supported
2353 	 */
2354 	if (!wl->enable_11a)
2355 		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2356 
2357 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2358 		     wl->enable_11a ? "" : "not ");
2359 
2360 	wl->state = WLCORE_STATE_ON;
2361 out:
2362 	return ret;
2363 }
2364 
2365 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2366 {
2367 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2368 }
2369 
2370 /*
2371  * Check whether a fw switch (i.e. moving from one loaded
2372  * fw to another) is needed. This function is also responsible
2373  * for updating wl->last_vif_count, so it must be called before
2374  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2375  * will be used).
2376  */
2377 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2378 				  struct vif_counter_data vif_counter_data,
2379 				  bool add)
2380 {
2381 	enum wl12xx_fw_type current_fw = wl->fw_type;
2382 	u8 vif_count = vif_counter_data.counter;
2383 
2384 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2385 		return false;
2386 
2387 	/* increase the vif count if this is a new vif */
2388 	if (add && !vif_counter_data.cur_vif_running)
2389 		vif_count++;
2390 
2391 	wl->last_vif_count = vif_count;
2392 
2393 	/* no need for fw change if the device is OFF */
2394 	if (wl->state == WLCORE_STATE_OFF)
2395 		return false;
2396 
2397 	/* no need for fw change if a single fw is used */
2398 	if (!wl->mr_fw_name)
2399 		return false;
2400 
2401 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2402 		return true;
2403 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2404 		return true;
2405 
2406 	return false;
2407 }
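
/*
 * Decision summary for the checks above, for quick reference: never switch
 * while a vif change is in progress, while the device is off, or when only
 * a single firmware image exists (!wl->mr_fw_name). Otherwise:
 *
 *	vif_count > 1  && single-role (NORMAL) fw loaded -> switch to multi-role
 *	vif_count <= 1 && multi-role (MULTI) fw loaded   -> switch to single-role
 *	anything else                                    -> keep the current fw
 */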
2408 
2409 /*
2410  * Enter "forced psm". Make sure the sta is in psm against the ap,
2411  * to make the fw switch a bit more disconnection-persistent.
2412  */
2413 static void wl12xx_force_active_psm(struct wl1271 *wl)
2414 {
2415 	struct wl12xx_vif *wlvif;
2416 
2417 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2418 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2419 	}
2420 }
2421 
2422 struct wlcore_hw_queue_iter_data {
2423 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2424 	/* current vif */
2425 	struct ieee80211_vif *vif;
2426 	/* is the current vif among those iterated */
2427 	bool cur_running;
2428 };
2429 
2430 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2431 				 struct ieee80211_vif *vif)
2432 {
2433 	struct wlcore_hw_queue_iter_data *iter_data = data;
2434 
2435 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2436 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2437 		return;
2438 
2439 	if (iter_data->cur_running || vif == iter_data->vif) {
2440 		iter_data->cur_running = true;
2441 		return;
2442 	}
2443 
2444 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2445 }
2446 
2447 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2448 					 struct wl12xx_vif *wlvif)
2449 {
2450 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2451 	struct wlcore_hw_queue_iter_data iter_data = {};
2452 	int i, q_base;
2453 
2454 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2455 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2456 		return 0;
2457 	}
2458 
2459 	iter_data.vif = vif;
2460 
2461 	/* mark all bits taken by active interfaces */
2462 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2463 					IEEE80211_IFACE_ITER_RESUME_ALL,
2464 					wlcore_hw_queue_iter, &iter_data);
2465 
2466 	/* the current vif is already running in mac80211 (resume/recovery) */
2467 	if (iter_data.cur_running) {
2468 		wlvif->hw_queue_base = vif->hw_queue[0];
2469 		wl1271_debug(DEBUG_MAC80211,
2470 			     "using pre-allocated hw queue base %d",
2471 			     wlvif->hw_queue_base);
2472 
2473 		/* the interface type might have changed */
2474 		goto adjust_cab_queue;
2475 	}
2476 
2477 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2478 				     WLCORE_NUM_MAC_ADDRESSES);
2479 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2480 		return -EBUSY;
2481 
2482 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2483 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2484 		     wlvif->hw_queue_base);
2485 
2486 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2487 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2488 		/* register hw queues in mac80211 */
2489 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2490 	}
2491 
2492 adjust_cab_queue:
2493 	/* the last places are reserved for cab queues per interface */
2494 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2495 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2496 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2497 	else
2498 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2499 
2500 	return 0;
2501 }
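
/*
 * Worked example of the queue layout computed above, assuming the usual
 * wlcore values NUM_TX_QUEUES == 4 and WLCORE_NUM_MAC_ADDRESSES == 3
 * (check the headers in your tree before relying on these numbers):
 *
 *	vif 0: hw_queue_base = 0, data queues 0..3
 *	vif 1: hw_queue_base = 4, data queues 4..7
 *	vif 2: hw_queue_base = 8, data queues 8..11
 *	cab (content-after-beacon) queues: 12, 13 and 14, one per possible
 *	AP interface, placed after all data queues by adjust_cab_queue.
 */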
2502 
2503 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2504 				   struct ieee80211_vif *vif)
2505 {
2506 	struct wl1271 *wl = hw->priv;
2507 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2508 	struct vif_counter_data vif_count;
2509 	int ret = 0;
2510 	u8 role_type;
2511 
2512 	if (wl->plt) {
2513 		wl1271_error("Adding Interface not allowed while in PLT mode");
2514 		return -EBUSY;
2515 	}
2516 
2517 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2518 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2519 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2520 
2521 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2522 		     ieee80211_vif_type_p2p(vif), vif->addr);
2523 
2524 	wl12xx_get_vif_count(hw, vif, &vif_count);
2525 
2526 	mutex_lock(&wl->mutex);
2527 
2528 	/*
2529 	 * in some corner-case HW recovery scenarios it's possible to
2530 	 * get here before __wl1271_op_remove_interface is complete, so
2531 	 * opt out if that is the case.
2532 	 */
2533 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2534 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2535 		ret = -EBUSY;
2536 		goto out;
2537 	}
2538 
2539 
2540 	ret = wl12xx_init_vif_data(wl, vif);
2541 	if (ret < 0)
2542 		goto out;
2543 
2544 	wlvif->wl = wl;
2545 	role_type = wl12xx_get_role_type(wl, wlvif);
2546 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2547 		ret = -EINVAL;
2548 		goto out;
2549 	}
2550 
2551 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2552 	if (ret < 0)
2553 		goto out;
2554 
2555 	/*
2556 	 * TODO: after the nvs issue is solved, move this block
2557 	 * to start(), and make sure here the driver is ON.
2558 	 */
2559 	if (wl->state == WLCORE_STATE_OFF) {
2560 		/*
2561 		 * we still need this in order to configure the fw
2562 		 * while uploading the nvs
2563 		 */
2564 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2565 
2566 		ret = wl12xx_init_fw(wl);
2567 		if (ret < 0)
2568 			goto out;
2569 	}
2570 
2571 	/*
2572 	 * Call runtime PM only after possible wl12xx_init_fw() above
2573 	 * is done. Otherwise we do not have interrupts enabled.
2574 	 */
2575 	ret = pm_runtime_resume_and_get(wl->dev);
2576 	if (ret < 0)
2577 		goto out_unlock;
2578 
2579 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2580 		wl12xx_force_active_psm(wl);
2581 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2582 		mutex_unlock(&wl->mutex);
2583 		wl1271_recovery_work(&wl->recovery_work);
2584 		return 0;
2585 	}
2586 
2587 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2588 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2589 					     role_type, &wlvif->role_id);
2590 		if (ret < 0)
2591 			goto out;
2592 
2593 		ret = wl1271_init_vif_specific(wl, vif);
2594 		if (ret < 0)
2595 			goto out;
2596 
2597 	} else {
2598 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2599 					     &wlvif->dev_role_id);
2600 		if (ret < 0)
2601 			goto out;
2602 
2603 		/* needed mainly for configuring rate policies */
2604 		ret = wl1271_sta_hw_init(wl, wlvif);
2605 		if (ret < 0)
2606 			goto out;
2607 	}
2608 
2609 	list_add(&wlvif->list, &wl->wlvif_list);
2610 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2611 
2612 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2613 		wl->ap_count++;
2614 	else
2615 		wl->sta_count++;
2616 out:
2617 	pm_runtime_mark_last_busy(wl->dev);
2618 	pm_runtime_put_autosuspend(wl->dev);
2619 out_unlock:
2620 	mutex_unlock(&wl->mutex);
2621 
2622 	return ret;
2623 }
2624 
2625 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2626 					 struct ieee80211_vif *vif,
2627 					 bool reset_tx_queues)
2628 {
2629 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2630 	int i, ret;
2631 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2632 
2633 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2634 
2635 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2636 		return;
2637 
2638 	/* because of hardware recovery, we may get here twice */
2639 	if (wl->state == WLCORE_STATE_OFF)
2640 		return;
2641 
2642 	wl1271_info("down");
2643 
2644 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2645 	    wl->scan_wlvif == wlvif) {
2646 		struct cfg80211_scan_info info = {
2647 			.aborted = true,
2648 		};
2649 
2650 		/*
2651 		 * Rearm the tx watchdog just before idling scan. This
2652 		 * prevents just-finished scans from triggering the watchdog
2653 		 */
2654 		wl12xx_rearm_tx_watchdog_locked(wl);
2655 
2656 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2657 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2658 		wl->scan_wlvif = NULL;
2659 		wl->scan.req = NULL;
2660 		ieee80211_scan_completed(wl->hw, &info);
2661 	}
2662 
2663 	if (wl->sched_vif == wlvif)
2664 		wl->sched_vif = NULL;
2665 
2666 	if (wl->roc_vif == vif) {
2667 		wl->roc_vif = NULL;
2668 		ieee80211_remain_on_channel_expired(wl->hw);
2669 	}
2670 
2671 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2672 		/* disable active roles */
2673 		ret = pm_runtime_resume_and_get(wl->dev);
2674 		if (ret < 0)
2675 			goto deinit;
2676 
2677 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2678 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2679 			if (wl12xx_dev_role_started(wlvif))
2680 				wl12xx_stop_dev(wl, wlvif);
2681 		}
2682 
2683 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2684 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2685 			if (ret < 0) {
2686 				pm_runtime_put_noidle(wl->dev);
2687 				goto deinit;
2688 			}
2689 		} else {
2690 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2691 			if (ret < 0) {
2692 				pm_runtime_put_noidle(wl->dev);
2693 				goto deinit;
2694 			}
2695 		}
2696 
2697 		pm_runtime_mark_last_busy(wl->dev);
2698 		pm_runtime_put_autosuspend(wl->dev);
2699 	}
2700 deinit:
2701 	wl12xx_tx_reset_wlvif(wl, wlvif);
2702 
2703 	/* clear all hlids (except system_hlid) */
2704 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2705 
2706 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2707 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2708 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2709 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2710 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2711 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2712 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2713 	} else {
2714 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2715 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2716 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2717 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2718 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2719 			wl12xx_free_rate_policy(wl,
2720 						&wlvif->ap.ucast_rate_idx[i]);
2721 		wl1271_free_ap_keys(wl, wlvif);
2722 	}
2723 
2724 	dev_kfree_skb(wlvif->probereq);
2725 	wlvif->probereq = NULL;
2726 	if (wl->last_wlvif == wlvif)
2727 		wl->last_wlvif = NULL;
2728 	list_del(&wlvif->list);
2729 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2730 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2731 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2732 
2733 	if (is_ap)
2734 		wl->ap_count--;
2735 	else
2736 		wl->sta_count--;
2737 
2738 	/*
2739 	 * Last AP removed, but stations remain. Configure sleep auth according
2740 	 * to the STA setting. Don't do this on unintended recovery.
2741 	 */
2742 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2743 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2744 		goto unlock;
2745 
2746 	if (wl->ap_count == 0 && is_ap) {
2747 		/* mask ap events */
2748 		wl->event_mask &= ~wl->ap_event_mask;
2749 		wl1271_event_unmask(wl);
2750 	}
2751 
2752 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2753 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2754 		/* Configure for power according to debugfs */
2755 		if (sta_auth != WL1271_PSM_ILLEGAL)
2756 			wl1271_acx_sleep_auth(wl, sta_auth);
2757 		/* Configure for ELP power saving */
2758 		else
2759 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2760 	}
2761 
2762 unlock:
2763 	mutex_unlock(&wl->mutex);
2764 
2765 	del_timer_sync(&wlvif->rx_streaming_timer);
2766 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2767 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2768 	cancel_work_sync(&wlvif->rc_update_work);
2769 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2770 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2771 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2772 
2773 	mutex_lock(&wl->mutex);
2774 }
2775 
2776 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2777 				       struct ieee80211_vif *vif)
2778 {
2779 	struct wl1271 *wl = hw->priv;
2780 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2781 	struct wl12xx_vif *iter;
2782 	struct vif_counter_data vif_count;
2783 
2784 	wl12xx_get_vif_count(hw, vif, &vif_count);
2785 	mutex_lock(&wl->mutex);
2786 
2787 	if (wl->state == WLCORE_STATE_OFF ||
2788 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2789 		goto out;
2790 
2791 	/*
2792 	 * wl->vif can be null here if someone shuts down the interface
2793 	 * just when hardware recovery has been started.
2794 	 */
2795 	wl12xx_for_each_wlvif(wl, iter) {
2796 		if (iter != wlvif)
2797 			continue;
2798 
2799 		__wl1271_op_remove_interface(wl, vif, true);
2800 		break;
2801 	}
2802 	WARN_ON(iter != wlvif);
2803 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2804 		wl12xx_force_active_psm(wl);
2805 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2806 		wl12xx_queue_recovery_work(wl);
2807 	}
2808 out:
2809 	mutex_unlock(&wl->mutex);
2810 }
2811 
2812 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2813 				      struct ieee80211_vif *vif,
2814 				      enum nl80211_iftype new_type, bool p2p)
2815 {
2816 	struct wl1271 *wl = hw->priv;
2817 	int ret;
2818 
2819 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2820 	wl1271_op_remove_interface(hw, vif);
2821 
2822 	vif->type = new_type;
2823 	vif->p2p = p2p;
2824 	ret = wl1271_op_add_interface(hw, vif);
2825 
2826 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2827 	return ret;
2828 }
2829 
2830 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2831 {
2832 	int ret;
2833 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2834 
2835 	/*
2836 	 * One of the side effects of the JOIN command is that it clears
2837 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2838 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2839 	 * Currently the only valid scenario for JOIN during association
2840 	 * is on roaming, in which case we will also be given new keys.
2841 	 * Keep the below message for now, unless it starts bothering
2842 	 * users who really like to roam a lot :)
2843 	 */
2844 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2845 		wl1271_info("JOIN while associated.");
2846 
2847 	/* clear encryption type */
2848 	wlvif->encryption_type = KEY_NONE;
2849 
2850 	if (is_ibss)
2851 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2852 	else
2853 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2854 
2855 	return ret;
2856 }
2857 
2858 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2859 			    int offset)
2860 {
2861 	u8 ssid_len;
2862 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2863 					 skb->len - offset);
2864 
2865 	if (!ptr) {
2866 		wl1271_error("No SSID in IEs!");
2867 		return -ENOENT;
2868 	}
2869 
2870 	ssid_len = ptr[1];
2871 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2872 		wl1271_error("SSID is too long!");
2873 		return -EINVAL;
2874 	}
2875 
2876 	wlvif->ssid_len = ssid_len;
2877 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2878 	return 0;
2879 }
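
/*
 * For reference, 802.11 information elements are TLVs: byte 0 is the
 * element ID (WLAN_EID_SSID == 0 here), byte 1 the payload length, and the
 * payload starts at byte 2 -- hence the ptr[1] length read and the ptr + 2
 * copy above. Illustrative encoding of the SSID "wlan": 00 04 77 6c 61 6e.
 */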
2880 
2881 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2882 {
2883 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2884 	struct sk_buff *skb;
2885 	int ieoffset;
2886 
2887 	/* we currently only support setting the ssid from the ap probe req */
2888 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2889 		return -EINVAL;
2890 
2891 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2892 	if (!skb)
2893 		return -EINVAL;
2894 
2895 	ieoffset = offsetof(struct ieee80211_mgmt,
2896 			    u.probe_req.variable);
2897 	wl1271_ssid_set(wlvif, skb, ieoffset);
2898 	dev_kfree_skb(skb);
2899 
2900 	return 0;
2901 }
2902 
2903 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2904 			    struct ieee80211_bss_conf *bss_conf,
2905 			    u32 sta_rate_set)
2906 {
2907 	struct ieee80211_vif *vif = container_of(bss_conf, struct ieee80211_vif,
2908 						 bss_conf);
2909 	int ieoffset;
2910 	int ret;
2911 
2912 	wlvif->aid = vif->cfg.aid;
2913 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2914 	wlvif->beacon_int = bss_conf->beacon_int;
2915 	wlvif->wmm_enabled = bss_conf->qos;
2916 
2917 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2918 
2919 	/*
2920 	 * with wl1271, we don't need to update the
2921 	 * beacon_int and dtim_period, because the firmware
2922 	 * updates it by itself when the first beacon is
2923 	 * received after a join.
2924 	 */
2925 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2926 	if (ret < 0)
2927 		return ret;
2928 
2929 	/*
2930 	 * Get a template for hardware connection maintenance
2931 	 */
2932 	dev_kfree_skb(wlvif->probereq);
2933 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2934 							wlvif,
2935 							NULL);
2936 	ieoffset = offsetof(struct ieee80211_mgmt,
2937 			    u.probe_req.variable);
2938 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2939 
2940 	/* enable the connection monitoring feature */
2941 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2942 	if (ret < 0)
2943 		return ret;
2944 
2945 	/*
2946 	 * The join command disables the keep-alive mode, shuts down its process,
2947 	 * and also clears the template config, so we need to reset it all after
2948 	 * the join. The acx_aid starts the keep-alive process, and the order
2949 	 * of the commands below is relevant.
2950 	 */
2951 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2952 	if (ret < 0)
2953 		return ret;
2954 
2955 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2956 	if (ret < 0)
2957 		return ret;
2958 
2959 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2960 	if (ret < 0)
2961 		return ret;
2962 
2963 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2964 					   wlvif->sta.klv_template_id,
2965 					   ACX_KEEP_ALIVE_TPL_VALID);
2966 	if (ret < 0)
2967 		return ret;
2968 
2969 	/*
2970 	 * The default fw psm configuration is AUTO, while mac80211 default
2971 	 * setting is off (ACTIVE), so sync the fw with the correct value.
2972 	 */
2973 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2974 	if (ret < 0)
2975 		return ret;
2976 
2977 	if (sta_rate_set) {
2978 		wlvif->rate_set =
2979 			wl1271_tx_enabled_rates_get(wl,
2980 						    sta_rate_set,
2981 						    wlvif->band);
2982 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2983 		if (ret < 0)
2984 			return ret;
2985 	}
2986 
2987 	return ret;
2988 }
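
/*
 * Reading aid for the association sequence above: cache aid, beacon
 * interval and QoS from the bss_conf, build the ps-poll template, refresh
 * the AP probe-request template used for hardware connection maintenance,
 * enable connection monitoring, then re-arm keep-alive in order
 * (mode -> aid -> null-data KLV template -> template validation), sync the
 * fw PSM state back to ACTIVE to match mac80211's default, and finally
 * push the peer's supported rates as the new rate policy.
 */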
2989 
2990 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2991 {
2992 	int ret;
2993 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2994 
2995 	/* make sure we are associated (sta) */
2996 	if (sta &&
2997 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2998 		return false;
2999 
3000 	/* make sure we are joined (ibss) */
3001 	if (!sta &&
3002 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3003 		return false;
3004 
3005 	if (sta) {
3006 		/* use defaults when not associated */
3007 		wlvif->aid = 0;
3008 
3009 		/* free probe-request template */
3010 		dev_kfree_skb(wlvif->probereq);
3011 		wlvif->probereq = NULL;
3012 
3013 		/* disable connection monitor features */
3014 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3015 		if (ret < 0)
3016 			return ret;
3017 
3018 		/* Disable the keep-alive feature */
3019 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3020 		if (ret < 0)
3021 			return ret;
3022 
3023 		/* disable beacon filtering */
3024 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3025 		if (ret < 0)
3026 			return ret;
3027 	}
3028 
3029 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3030 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3031 
3032 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3033 		ieee80211_chswitch_done(vif, false);
3034 		cancel_delayed_work(&wlvif->channel_switch_work);
3035 	}
3036 
3037 	/* invalidate keep-alive template */
3038 	wl1271_acx_keep_alive_config(wl, wlvif,
3039 				     wlvif->sta.klv_template_id,
3040 				     ACX_KEEP_ALIVE_TPL_INVALID);
3041 
3042 	return 0;
3043 }
3044 
3045 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3046 {
3047 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3048 	wlvif->rate_set = wlvif->basic_rate_set;
3049 }
3050 
3051 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3052 				   bool idle)
3053 {
3054 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3055 
3056 	if (idle == cur_idle)
3057 		return;
3058 
3059 	if (idle) {
3060 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3061 	} else {
3062 		/* The current firmware only supports sched_scan in idle */
3063 		if (wl->sched_vif == wlvif)
3064 			wl->ops->sched_scan_stop(wl, wlvif);
3065 
3066 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3067 	}
3068 }
3069 
3070 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3071 			     struct ieee80211_conf *conf, u32 changed)
3072 {
3073 	int ret;
3074 
3075 	if (wlcore_is_p2p_mgmt(wlvif))
3076 		return 0;
3077 
3078 	if (conf->power_level != wlvif->power_level) {
3079 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3080 		if (ret < 0)
3081 			return ret;
3082 
3083 		wlvif->power_level = conf->power_level;
3084 	}
3085 
3086 	return 0;
3087 }
3088 
3089 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3090 {
3091 	struct wl1271 *wl = hw->priv;
3092 	struct wl12xx_vif *wlvif;
3093 	struct ieee80211_conf *conf = &hw->conf;
3094 	int ret = 0;
3095 
3096 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3097 		     " changed 0x%x",
3098 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3099 		     conf->power_level,
3100 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3101 			 changed);
3102 
3103 	mutex_lock(&wl->mutex);
3104 
3105 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3106 		wl->power_level = conf->power_level;
3107 
3108 	if (unlikely(wl->state != WLCORE_STATE_ON))
3109 		goto out;
3110 
3111 	ret = pm_runtime_resume_and_get(wl->dev);
3112 	if (ret < 0)
3113 		goto out;
3114 
3115 	/* configure each interface */
3116 	wl12xx_for_each_wlvif(wl, wlvif) {
3117 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3118 		if (ret < 0)
3119 			goto out_sleep;
3120 	}
3121 
3122 out_sleep:
3123 	pm_runtime_mark_last_busy(wl->dev);
3124 	pm_runtime_put_autosuspend(wl->dev);
3125 
3126 out:
3127 	mutex_unlock(&wl->mutex);
3128 
3129 	return ret;
3130 }
3131 
3132 struct wl1271_filter_params {
3133 	bool enabled;
3134 	int mc_list_length;
3135 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3136 };
3137 
3138 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3139 				       struct netdev_hw_addr_list *mc_list)
3140 {
3141 	struct wl1271_filter_params *fp;
3142 	struct netdev_hw_addr *ha;
3143 
3144 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3145 	if (!fp) {
3146 		wl1271_error("Out of memory setting filters.");
3147 		return 0;
3148 	}
3149 
3150 	/* update multicast filtering parameters */
3151 	fp->mc_list_length = 0;
3152 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3153 		fp->enabled = false;
3154 	} else {
3155 		fp->enabled = true;
3156 		netdev_hw_addr_list_for_each(ha, mc_list) {
3157 			memcpy(fp->mc_list[fp->mc_list_length],
3158 					ha->addr, ETH_ALEN);
3159 			fp->mc_list_length++;
3160 		}
3161 	}
3162 
3163 	return (u64)(unsigned long)fp;
3164 }
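
/*
 * mac80211 treats the u64 returned above as an opaque cookie and passes it
 * back as the 'multicast' argument of the configure_filter callback below,
 * which converts it back to the wl1271_filter_params pointer and frees it.
 * Sketch of the round trip (normally driven by mac80211, shown here only
 * to illustrate the ownership transfer):
 *
 *	u64 cookie = wl1271_op_prepare_multicast(hw, mc_list);
 *	wl1271_op_configure_filter(hw, changed, &total, cookie);
 *	// ...which ends with: fp = (void *)(unsigned long)cookie; ... kfree(fp);
 */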
3165 
3166 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3167 				  FIF_FCSFAIL | \
3168 				  FIF_BCN_PRBRESP_PROMISC | \
3169 				  FIF_CONTROL | \
3170 				  FIF_OTHER_BSS)
3171 
3172 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3173 				       unsigned int changed,
3174 				       unsigned int *total, u64 multicast)
3175 {
3176 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3177 	struct wl1271 *wl = hw->priv;
3178 	struct wl12xx_vif *wlvif;
3179 
3180 	int ret;
3181 
3182 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3183 		     " total %x", changed, *total);
3184 
3185 	mutex_lock(&wl->mutex);
3186 
3187 	*total &= WL1271_SUPPORTED_FILTERS;
3188 	changed &= WL1271_SUPPORTED_FILTERS;
3189 
3190 	if (unlikely(wl->state != WLCORE_STATE_ON))
3191 		goto out;
3192 
3193 	ret = pm_runtime_resume_and_get(wl->dev);
3194 	if (ret < 0)
3195 		goto out;
3196 
3197 	wl12xx_for_each_wlvif(wl, wlvif) {
3198 		if (wlcore_is_p2p_mgmt(wlvif))
3199 			continue;
3200 
3201 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3202 			if (*total & FIF_ALLMULTI)
3203 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3204 								   false,
3205 								   NULL, 0);
3206 			else if (fp)
3207 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3208 							fp->enabled,
3209 							fp->mc_list,
3210 							fp->mc_list_length);
3211 			if (ret < 0)
3212 				goto out_sleep;
3213 		}
3214 
3215 		/*
3216 		 * If the interface is in AP mode and was created with allmulticast,
3217 		 * disable the firmware filters so that all multicast packets are
3218 		 * passed. This is mandatory for mDNS-based discovery protocols.
3219 		 */
3220 		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3221 			if (*total & FIF_ALLMULTI) {
3222 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3223 							false,
3224 							NULL, 0);
3225 				if (ret < 0)
3226 					goto out_sleep;
3227 			}
3228 		}
3229 	}
3230 
3231 	/*
3232 	 * the fw doesn't provide an api to configure the filters. instead,
3233 	 * the filters configuration is based on the active roles / ROC
3234 	 * state.
3235 	 */
3236 
3237 out_sleep:
3238 	pm_runtime_mark_last_busy(wl->dev);
3239 	pm_runtime_put_autosuspend(wl->dev);
3240 
3241 out:
3242 	mutex_unlock(&wl->mutex);
3243 	kfree(fp);
3244 }
3245 
3246 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3247 				u8 id, u8 key_type, u8 key_size,
3248 				const u8 *key, u8 hlid, u32 tx_seq_32,
3249 				u16 tx_seq_16, bool is_pairwise)
3250 {
3251 	struct wl1271_ap_key *ap_key;
3252 	int i;
3253 
3254 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3255 
3256 	if (key_size > MAX_KEY_SIZE)
3257 		return -EINVAL;
3258 
3259 	/*
3260 	 * Find next free entry in ap_keys. Also check we are not replacing
3261 	 * an existing key.
3262 	 */
3263 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3264 		if (wlvif->ap.recorded_keys[i] == NULL)
3265 			break;
3266 
3267 		if (wlvif->ap.recorded_keys[i]->id == id) {
3268 			wl1271_warning("trying to record key replacement");
3269 			return -EINVAL;
3270 		}
3271 	}
3272 
3273 	if (i == MAX_NUM_KEYS)
3274 		return -EBUSY;
3275 
3276 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3277 	if (!ap_key)
3278 		return -ENOMEM;
3279 
3280 	ap_key->id = id;
3281 	ap_key->key_type = key_type;
3282 	ap_key->key_size = key_size;
3283 	memcpy(ap_key->key, key, key_size);
3284 	ap_key->hlid = hlid;
3285 	ap_key->tx_seq_32 = tx_seq_32;
3286 	ap_key->tx_seq_16 = tx_seq_16;
3287 	ap_key->is_pairwise = is_pairwise;
3288 
3289 	wlvif->ap.recorded_keys[i] = ap_key;
3290 	return 0;
3291 }
3292 
3293 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3294 {
3295 	int i;
3296 
3297 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3298 		kfree(wlvif->ap.recorded_keys[i]);
3299 		wlvif->ap.recorded_keys[i] = NULL;
3300 	}
3301 }
3302 
3303 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3304 {
3305 	int i, ret = 0;
3306 	struct wl1271_ap_key *key;
3307 	bool wep_key_added = false;
3308 
3309 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3310 		u8 hlid;
3311 		if (wlvif->ap.recorded_keys[i] == NULL)
3312 			break;
3313 
3314 		key = wlvif->ap.recorded_keys[i];
3315 		hlid = key->hlid;
3316 		if (hlid == WL12XX_INVALID_LINK_ID)
3317 			hlid = wlvif->ap.bcast_hlid;
3318 
3319 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3320 					    key->id, key->key_type,
3321 					    key->key_size, key->key,
3322 					    hlid, key->tx_seq_32,
3323 					    key->tx_seq_16, key->is_pairwise);
3324 		if (ret < 0)
3325 			goto out;
3326 
3327 		if (key->key_type == KEY_WEP)
3328 			wep_key_added = true;
3329 	}
3330 
3331 	if (wep_key_added) {
3332 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3333 						     wlvif->ap.bcast_hlid);
3334 		if (ret < 0)
3335 			goto out;
3336 	}
3337 
3338 out:
3339 	wl1271_free_ap_keys(wl, wlvif);
3340 	return ret;
3341 }
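
/*
 * Recap of the AP key flow implemented by the functions above: keys set by
 * mac80211 before the AP role has started cannot be programmed into the
 * firmware yet, so they are recorded via wl1271_record_ap_key(); once the
 * AP is up, wl1271_ap_init_hwenc() replays every recorded key through
 * wl1271_cmd_set_ap_key() (falling back to the broadcast hlid when no
 * station link is attached), re-asserts the default WEP key if any WEP key
 * was installed, and wl1271_free_ap_keys() drops the recorded copies.
 */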
3342 
3343 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3344 		       u16 action, u8 id, u8 key_type,
3345 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3346 		       u16 tx_seq_16, struct ieee80211_sta *sta,
3347 		       bool is_pairwise)
3348 {
3349 	int ret;
3350 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3351 
3352 	if (is_ap) {
3353 		struct wl1271_station *wl_sta;
3354 		u8 hlid;
3355 
3356 		if (sta) {
3357 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3358 			hlid = wl_sta->hlid;
3359 		} else {
3360 			hlid = wlvif->ap.bcast_hlid;
3361 		}
3362 
3363 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3364 			/*
3365 			 * We do not support removing keys after AP shutdown.
3366 			 * Pretend we do to make mac80211 happy.
3367 			 */
3368 			if (action != KEY_ADD_OR_REPLACE)
3369 				return 0;
3370 
3371 			ret = wl1271_record_ap_key(wl, wlvif, id,
3372 					     key_type, key_size,
3373 					     key, hlid, tx_seq_32,
3374 					     tx_seq_16, is_pairwise);
3375 		} else {
3376 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3377 					     id, key_type, key_size,
3378 					     key, hlid, tx_seq_32,
3379 					     tx_seq_16, is_pairwise);
3380 		}
3381 
3382 		if (ret < 0)
3383 			return ret;
3384 	} else {
3385 		const u8 *addr;
3386 		static const u8 bcast_addr[ETH_ALEN] = {
3387 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3388 		};
3389 
3390 		addr = sta ? sta->addr : bcast_addr;
3391 
3392 		if (is_zero_ether_addr(addr)) {
3393 			/* We don't support TX-only encryption */
3394 			return -EOPNOTSUPP;
3395 		}
3396 
3397 		/* The wl1271 does not allow removing unicast keys - they
3398 		   will be cleared automatically on the next CMD_JOIN. Ignore
3399 		   the request silently, as we don't want mac80211 to emit
3400 		   an error message. */
3401 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3402 			return 0;
3403 
3404 		/* don't remove key if hlid was already deleted */
3405 		if (action == KEY_REMOVE &&
3406 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3407 			return 0;
3408 
3409 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3410 					     id, key_type, key_size,
3411 					     key, addr, tx_seq_32,
3412 					     tx_seq_16);
3413 		if (ret < 0)
3414 			return ret;
3415 
3416 	}
3417 
3418 	return 0;
3419 }
3420 
3421 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3422 			     struct ieee80211_vif *vif,
3423 			     struct ieee80211_sta *sta,
3424 			     struct ieee80211_key_conf *key_conf)
3425 {
3426 	struct wl1271 *wl = hw->priv;
3427 	int ret;
3428 	bool might_change_spare =
3429 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3430 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3431 
3432 	if (might_change_spare) {
3433 		/*
3434 		 * stop the queues and flush to ensure the next packets are
3435 		 * in sync with FW spare block accounting
3436 		 */
3437 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3438 		wl1271_tx_flush(wl);
3439 	}
3440 
3441 	mutex_lock(&wl->mutex);
3442 
3443 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3444 		ret = -EAGAIN;
3445 		goto out_wake_queues;
3446 	}
3447 
3448 	ret = pm_runtime_resume_and_get(wl->dev);
3449 	if (ret < 0)
3450 		goto out_wake_queues;
3451 
3452 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3453 
3454 	pm_runtime_mark_last_busy(wl->dev);
3455 	pm_runtime_put_autosuspend(wl->dev);
3456 
3457 out_wake_queues:
3458 	if (might_change_spare)
3459 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3460 
3461 	mutex_unlock(&wl->mutex);
3462 
3463 	return ret;
3464 }
3465 
3466 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3467 		   struct ieee80211_vif *vif,
3468 		   struct ieee80211_sta *sta,
3469 		   struct ieee80211_key_conf *key_conf)
3470 {
3471 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3472 	int ret;
3473 	u32 tx_seq_32 = 0;
3474 	u16 tx_seq_16 = 0;
3475 	u8 key_type;
3476 	u8 hlid;
3477 	bool is_pairwise;
3478 
3479 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3480 
3481 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3482 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3483 		     key_conf->cipher, key_conf->keyidx,
3484 		     key_conf->keylen, key_conf->flags);
3485 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3486 
3487 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3488 		if (sta) {
3489 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3490 			hlid = wl_sta->hlid;
3491 		} else {
3492 			hlid = wlvif->ap.bcast_hlid;
3493 		}
3494 	else
3495 		hlid = wlvif->sta.hlid;
3496 
3497 	if (hlid != WL12XX_INVALID_LINK_ID) {
3498 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3499 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3500 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3501 	}
3502 
3503 	switch (key_conf->cipher) {
3504 	case WLAN_CIPHER_SUITE_WEP40:
3505 	case WLAN_CIPHER_SUITE_WEP104:
3506 		key_type = KEY_WEP;
3507 
3508 		key_conf->hw_key_idx = key_conf->keyidx;
3509 		break;
3510 	case WLAN_CIPHER_SUITE_TKIP:
3511 		key_type = KEY_TKIP;
3512 		key_conf->hw_key_idx = key_conf->keyidx;
3513 		break;
3514 	case WLAN_CIPHER_SUITE_CCMP:
3515 		key_type = KEY_AES;
3516 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3517 		break;
3518 	case WL1271_CIPHER_SUITE_GEM:
3519 		key_type = KEY_GEM;
3520 		break;
3521 	default:
3522 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3523 
3524 		return -EOPNOTSUPP;
3525 	}
3526 
3527 	is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
3528 
3529 	switch (cmd) {
3530 	case SET_KEY:
3531 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3532 				 key_conf->keyidx, key_type,
3533 				 key_conf->keylen, key_conf->key,
3534 				 tx_seq_32, tx_seq_16, sta, is_pairwise);
3535 		if (ret < 0) {
3536 			wl1271_error("Could not add or replace key");
3537 			return ret;
3538 		}
3539 
3540 		/*
3541 		 * reconfiguring arp response if the unicast (or common)
3542 		 * encryption key type was changed
3543 		 */
3544 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3545 		    (sta || key_type == KEY_WEP) &&
3546 		    wlvif->encryption_type != key_type) {
3547 			wlvif->encryption_type = key_type;
3548 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3549 			if (ret < 0) {
3550 				wl1271_warning("build arp rsp failed: %d", ret);
3551 				return ret;
3552 			}
3553 		}
3554 		break;
3555 
3556 	case DISABLE_KEY:
3557 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3558 				     key_conf->keyidx, key_type,
3559 				     key_conf->keylen, key_conf->key,
3560 				     0, 0, sta, is_pairwise);
3561 		if (ret < 0) {
3562 			wl1271_error("Could not remove key");
3563 			return ret;
3564 		}
3565 		break;
3566 
3567 	default:
3568 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3569 		return -EOPNOTSUPP;
3570 	}
3571 
3572 	return ret;
3573 }
3574 EXPORT_SYMBOL_GPL(wlcore_set_key);
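
/*
 * Cipher handling in wlcore_set_key() above, summarized: WEP40/WEP104 map
 * to KEY_WEP, TKIP to KEY_TKIP, CCMP to KEY_AES (with IV space reserved by
 * mac80211) and TI's vendor GEM suite to KEY_GEM. The per-link replay
 * counter is seeded from links[hlid].total_freed_pkts and split by the
 * WL1271_TX_SECURITY_HI32/LO16 macros; assuming the usual "low 16 bits /
 * upper bits" split, tx_seq = 0x123456789 would yield tx_seq_16 = 0x6789
 * and tx_seq_32 = 0x12345.
 */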
3575 
3576 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3577 					  struct ieee80211_vif *vif,
3578 					  int key_idx)
3579 {
3580 	struct wl1271 *wl = hw->priv;
3581 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3582 	int ret;
3583 
3584 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3585 		     key_idx);
3586 
3587 	/* we don't handle unsetting of default key */
3588 	if (key_idx == -1)
3589 		return;
3590 
3591 	mutex_lock(&wl->mutex);
3592 
3593 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3594 		ret = -EAGAIN;
3595 		goto out_unlock;
3596 	}
3597 
3598 	ret = pm_runtime_resume_and_get(wl->dev);
3599 	if (ret < 0)
3600 		goto out_unlock;
3601 
3602 	wlvif->default_key = key_idx;
3603 
3604 	/* the default WEP key needs to be configured at least once */
3605 	if (wlvif->encryption_type == KEY_WEP) {
3606 		ret = wl12xx_cmd_set_default_wep_key(wl,
3607 				key_idx,
3608 				wlvif->sta.hlid);
3609 		if (ret < 0)
3610 			goto out_sleep;
3611 	}
3612 
3613 out_sleep:
3614 	pm_runtime_mark_last_busy(wl->dev);
3615 	pm_runtime_put_autosuspend(wl->dev);
3616 
3617 out_unlock:
3618 	mutex_unlock(&wl->mutex);
3619 }
3620 
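/*
 * Push the active regulatory-domain configuration down to the firmware.
 * Only chips with the WLCORE_QUIRK_REGDOMAIN_CONF quirk need this; a
 * failure of the firmware command triggers recovery.
 */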
3621 void wlcore_regdomain_config(struct wl1271 *wl)
3622 {
3623 	int ret;
3624 
3625 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3626 		return;
3627 
3628 	mutex_lock(&wl->mutex);
3629 
3630 	if (unlikely(wl->state != WLCORE_STATE_ON))
3631 		goto out;
3632 
3633 	ret = pm_runtime_resume_and_get(wl->dev);
3634 	if (ret < 0)
3635 		goto out;
3636 
3637 	ret = wlcore_cmd_regdomain_config_locked(wl);
3638 	if (ret < 0) {
3639 		wl12xx_queue_recovery_work(wl);
3640 		goto out;
3641 	}
3642 
3643 	pm_runtime_mark_last_busy(wl->dev);
3644 	pm_runtime_put_autosuspend(wl->dev);
3645 out:
3646 	mutex_unlock(&wl->mutex);
3647 }
3648 
3649 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3650 			     struct ieee80211_vif *vif,
3651 			     struct ieee80211_scan_request *hw_req)
3652 {
3653 	struct cfg80211_scan_request *req = &hw_req->req;
3654 	struct wl1271 *wl = hw->priv;
3655 	int ret;
3656 	u8 *ssid = NULL;
3657 	size_t len = 0;
3658 
3659 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3660 
3661 	if (req->n_ssids) {
3662 		ssid = req->ssids[0].ssid;
3663 		len = req->ssids[0].ssid_len;
3664 	}
3665 
3666 	mutex_lock(&wl->mutex);
3667 
3668 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3669 		/*
3670 		 * We cannot return -EBUSY here because cfg80211 will expect
3671 		 * a call to ieee80211_scan_completed if we do - in this case
3672 		 * there won't be any call.
3673 		 */
3674 		ret = -EAGAIN;
3675 		goto out;
3676 	}
3677 
3678 	ret = pm_runtime_resume_and_get(wl->dev);
3679 	if (ret < 0)
3680 		goto out;
3681 
3682 	/* fail if there is any role in ROC */
3683 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3684 		/* don't allow scanning right now */
3685 		ret = -EBUSY;
3686 		goto out_sleep;
3687 	}
3688 
3689 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3690 out_sleep:
3691 	pm_runtime_mark_last_busy(wl->dev);
3692 	pm_runtime_put_autosuspend(wl->dev);
3693 out:
3694 	mutex_unlock(&wl->mutex);
3695 
3696 	return ret;
3697 }
3698 
3699 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3700 				     struct ieee80211_vif *vif)
3701 {
3702 	struct wl1271 *wl = hw->priv;
3703 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3704 	struct cfg80211_scan_info info = {
3705 		.aborted = true,
3706 	};
3707 	int ret;
3708 
3709 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3710 
3711 	mutex_lock(&wl->mutex);
3712 
3713 	if (unlikely(wl->state != WLCORE_STATE_ON))
3714 		goto out;
3715 
3716 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3717 		goto out;
3718 
3719 	ret = pm_runtime_resume_and_get(wl->dev);
3720 	if (ret < 0)
3721 		goto out;
3722 
3723 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3724 		ret = wl->ops->scan_stop(wl, wlvif);
3725 		if (ret < 0)
3726 			goto out_sleep;
3727 	}
3728 
3729 	/*
3730 	 * Rearm the tx watchdog just before idling scan. This
3731 	 * prevents just-finished scans from triggering the watchdog
3732 	 */
3733 	wl12xx_rearm_tx_watchdog_locked(wl);
3734 
3735 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3736 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3737 	wl->scan_wlvif = NULL;
3738 	wl->scan.req = NULL;
3739 	ieee80211_scan_completed(wl->hw, &info);
3740 
3741 out_sleep:
3742 	pm_runtime_mark_last_busy(wl->dev);
3743 	pm_runtime_put_autosuspend(wl->dev);
3744 out:
3745 	mutex_unlock(&wl->mutex);
3746 
3747 	cancel_delayed_work_sync(&wl->scan_complete_work);
3748 }
3749 
3750 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3751 				      struct ieee80211_vif *vif,
3752 				      struct cfg80211_sched_scan_request *req,
3753 				      struct ieee80211_scan_ies *ies)
3754 {
3755 	struct wl1271 *wl = hw->priv;
3756 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3757 	int ret;
3758 
3759 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3760 
3761 	mutex_lock(&wl->mutex);
3762 
3763 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3764 		ret = -EAGAIN;
3765 		goto out;
3766 	}
3767 
3768 	ret = pm_runtime_resume_and_get(wl->dev);
3769 	if (ret < 0)
3770 		goto out;
3771 
3772 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3773 	if (ret < 0)
3774 		goto out_sleep;
3775 
3776 	wl->sched_vif = wlvif;
3777 
3778 out_sleep:
3779 	pm_runtime_mark_last_busy(wl->dev);
3780 	pm_runtime_put_autosuspend(wl->dev);
3781 out:
3782 	mutex_unlock(&wl->mutex);
3783 	return ret;
3784 }
3785 
3786 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3787 				     struct ieee80211_vif *vif)
3788 {
3789 	struct wl1271 *wl = hw->priv;
3790 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3791 	int ret;
3792 
3793 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3794 
3795 	mutex_lock(&wl->mutex);
3796 
3797 	if (unlikely(wl->state != WLCORE_STATE_ON))
3798 		goto out;
3799 
3800 	ret = pm_runtime_resume_and_get(wl->dev);
3801 	if (ret < 0)
3802 		goto out;
3803 
3804 	wl->ops->sched_scan_stop(wl, wlvif);
3805 
3806 	pm_runtime_mark_last_busy(wl->dev);
3807 	pm_runtime_put_autosuspend(wl->dev);
3808 out:
3809 	mutex_unlock(&wl->mutex);
3810 
3811 	return 0;
3812 }
3813 
3814 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3815 {
3816 	struct wl1271 *wl = hw->priv;
3817 	int ret = 0;
3818 
3819 	mutex_lock(&wl->mutex);
3820 
3821 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3822 		ret = -EAGAIN;
3823 		goto out;
3824 	}
3825 
3826 	ret = pm_runtime_resume_and_get(wl->dev);
3827 	if (ret < 0)
3828 		goto out;
3829 
3830 	ret = wl1271_acx_frag_threshold(wl, value);
3831 	if (ret < 0)
3832 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3833 
3834 	pm_runtime_mark_last_busy(wl->dev);
3835 	pm_runtime_put_autosuspend(wl->dev);
3836 
3837 out:
3838 	mutex_unlock(&wl->mutex);
3839 
3840 	return ret;
3841 }
3842 
3843 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3844 {
3845 	struct wl1271 *wl = hw->priv;
3846 	struct wl12xx_vif *wlvif;
3847 	int ret = 0;
3848 
3849 	mutex_lock(&wl->mutex);
3850 
3851 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3852 		ret = -EAGAIN;
3853 		goto out;
3854 	}
3855 
3856 	ret = pm_runtime_resume_and_get(wl->dev);
3857 	if (ret < 0)
3858 		goto out;
3859 
3860 	wl12xx_for_each_wlvif(wl, wlvif) {
3861 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3862 		if (ret < 0)
3863 			wl1271_warning("set rts threshold failed: %d", ret);
3864 	}
3865 	pm_runtime_mark_last_busy(wl->dev);
3866 	pm_runtime_put_autosuspend(wl->dev);
3867 
3868 out:
3869 	mutex_unlock(&wl->mutex);
3870 
3871 	return ret;
3872 }
3873 
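/*
 * Strip a single information element from a management frame in place:
 * locate it, memmove the rest of the frame over it and trim the skb.
 */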
3874 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3875 {
3876 	int len;
3877 	const u8 *next, *end = skb->data + skb->len;
3878 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3879 					skb->len - ieoffset);
3880 	if (!ie)
3881 		return;
3882 	len = ie[1] + 2;
3883 	next = ie + len;
3884 	memmove(ie, next, end - next);
3885 	skb_trim(skb, skb->len - len);
3886 }
3887 
3888 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3889 					    unsigned int oui, u8 oui_type,
3890 					    int ieoffset)
3891 {
3892 	int len;
3893 	const u8 *next, *end = skb->data + skb->len;
3894 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3895 					       skb->data + ieoffset,
3896 					       skb->len - ieoffset);
3897 	if (!ie)
3898 		return;
3899 	len = ie[1] + 2;
3900 	next = ie + len;
3901 	memmove(ie, next, end - next);
3902 	skb_trim(skb, skb->len - len);
3903 }
3904 
3905 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3906 					 struct ieee80211_vif *vif)
3907 {
3908 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3909 	struct sk_buff *skb;
3910 	int ret;
3911 
3912 	skb = ieee80211_proberesp_get(wl->hw, vif);
3913 	if (!skb)
3914 		return -EOPNOTSUPP;
3915 
3916 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3917 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3918 				      skb->data,
3919 				      skb->len, 0,
3920 				      rates);
3921 	dev_kfree_skb(skb);
3922 
3923 	if (ret < 0)
3924 		goto out;
3925 
3926 	wl1271_debug(DEBUG_AP, "probe response updated");
3927 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3928 
3929 out:
3930 	return ret;
3931 }
3932 
3933 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3934 					     struct ieee80211_vif *vif,
3935 					     u8 *probe_rsp_data,
3936 					     size_t probe_rsp_len,
3937 					     u32 rates)
3938 {
3939 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3940 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3941 	int ssid_ie_offset, ie_offset, templ_len;
3942 	const u8 *ptr;
3943 
3944 	/* no need to change probe response if the SSID is set correctly */
3945 	if (wlvif->ssid_len > 0)
3946 		return wl1271_cmd_template_set(wl, wlvif->role_id,
3947 					       CMD_TEMPL_AP_PROBE_RESPONSE,
3948 					       probe_rsp_data,
3949 					       probe_rsp_len, 0,
3950 					       rates);
3951 
3952 	if (probe_rsp_len + vif->cfg.ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3953 		wl1271_error("probe_rsp template too big");
3954 		return -EINVAL;
3955 	}
3956 
3957 	/* start searching from IE offset */
3958 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3959 
3960 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3961 			       probe_rsp_len - ie_offset);
3962 	if (!ptr) {
3963 		wl1271_error("No SSID in beacon!");
3964 		return -EINVAL;
3965 	}
3966 
3967 	ssid_ie_offset = ptr - probe_rsp_data;
3968 	ptr += (ptr[1] + 2);
3969 
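	/*
	 * Rebuild the template: copy everything up to the SSID IE, splice
	 * in the SSID from bss_conf, then append the remaining IEs of the
	 * original frame.
	 */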
3970 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3971 
3972 	/* insert SSID from bss_conf */
3973 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3974 	probe_rsp_templ[ssid_ie_offset + 1] = vif->cfg.ssid_len;
3975 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3976 	       vif->cfg.ssid, vif->cfg.ssid_len);
3977 	templ_len = ssid_ie_offset + 2 + vif->cfg.ssid_len;
3978 
3979 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + vif->cfg.ssid_len,
3980 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
3981 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3982 
3983 	return wl1271_cmd_template_set(wl, wlvif->role_id,
3984 				       CMD_TEMPL_AP_PROBE_RESPONSE,
3985 				       probe_rsp_templ,
3986 				       templ_len, 0,
3987 				       rates);
3988 }
3989 
3990 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3991 				       struct ieee80211_vif *vif,
3992 				       struct ieee80211_bss_conf *bss_conf,
3993 				       u32 changed)
3994 {
3995 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3996 	int ret = 0;
3997 
3998 	if (changed & BSS_CHANGED_ERP_SLOT) {
3999 		if (bss_conf->use_short_slot)
4000 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4001 		else
4002 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4003 		if (ret < 0) {
4004 			wl1271_warning("Set slot time failed %d", ret);
4005 			goto out;
4006 		}
4007 	}
4008 
4009 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4010 		if (bss_conf->use_short_preamble)
4011 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4012 		else
4013 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4014 	}
4015 
4016 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4017 		if (bss_conf->use_cts_prot)
4018 			ret = wl1271_acx_cts_protect(wl, wlvif,
4019 						     CTSPROTECT_ENABLE);
4020 		else
4021 			ret = wl1271_acx_cts_protect(wl, wlvif,
4022 						     CTSPROTECT_DISABLE);
4023 		if (ret < 0) {
4024 			wl1271_warning("Set ctsprotect failed %d", ret);
4025 			goto out;
4026 		}
4027 	}
4028 
4029 out:
4030 	return ret;
4031 }
4032 
4033 static int wlcore_set_beacon_template(struct wl1271 *wl,
4034 				      struct ieee80211_vif *vif,
4035 				      bool is_ap)
4036 {
4037 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4038 	struct ieee80211_hdr *hdr;
4039 	u32 min_rate;
4040 	int ret;
4041 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4042 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif, 0);
4043 	u16 tmpl_id;
4044 
4045 	if (!beacon) {
4046 		ret = -EINVAL;
4047 		goto out;
4048 	}
4049 
4050 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4051 
4052 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4053 	if (ret < 0) {
4054 		dev_kfree_skb(beacon);
4055 		goto out;
4056 	}
4057 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4058 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4059 		CMD_TEMPL_BEACON;
4060 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4061 				      beacon->data,
4062 				      beacon->len, 0,
4063 				      min_rate);
4064 	if (ret < 0) {
4065 		dev_kfree_skb(beacon);
4066 		goto out;
4067 	}
4068 
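	/* remember whether the beacon carries the Microsoft WMM vendor IE */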
4069 	wlvif->wmm_enabled =
4070 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4071 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4072 					beacon->data + ieoffset,
4073 					beacon->len - ieoffset);
4074 
4075 	/*
4076 	 * In case a probe-resp template was already set explicitly by
4077 	 * userspace, don't derive one from the beacon data.
4078 	 */
4079 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4080 		goto end_bcn;
4081 
4082 	/* remove TIM ie from probe response */
4083 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4084 
4085 	/*
4086 	 * remove p2p ie from probe response.
4087 	 * the fw responds to probe requests that don't include
4088 	 * the p2p ie. probe requests with p2p ie will be passed,
4089 	 * and will be responded by the supplicant (the spec
4090 	 * forbids including the p2p ie when responding to probe
4091 	 * requests that didn't include it).
4092 	 */
4093 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4094 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4095 
4096 	hdr = (struct ieee80211_hdr *) beacon->data;
4097 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4098 					 IEEE80211_STYPE_PROBE_RESP);
4099 	if (is_ap)
4100 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4101 							   beacon->data,
4102 							   beacon->len,
4103 							   min_rate);
4104 	else
4105 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4106 					      CMD_TEMPL_PROBE_RESPONSE,
4107 					      beacon->data,
4108 					      beacon->len, 0,
4109 					      min_rate);
4110 end_bcn:
4111 	dev_kfree_skb(beacon);
4112 	if (ret < 0)
4113 		goto out;
4114 
4115 out:
4116 	return ret;
4117 }
4118 
4119 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4120 					  struct ieee80211_vif *vif,
4121 					  struct ieee80211_bss_conf *bss_conf,
4122 					  u32 changed)
4123 {
4124 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4125 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4126 	int ret = 0;
4127 
4128 	if (changed & BSS_CHANGED_BEACON_INT) {
4129 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4130 			bss_conf->beacon_int);
4131 
4132 		wlvif->beacon_int = bss_conf->beacon_int;
4133 	}
4134 
4135 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4136 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4137 
4138 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4139 	}
4140 
4141 	if (changed & BSS_CHANGED_BEACON) {
4142 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4143 		if (ret < 0)
4144 			goto out;
4145 
4146 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4147 				       &wlvif->flags)) {
4148 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4149 			if (ret < 0)
4150 				goto out;
4151 		}
4152 	}
4153 out:
4154 	if (ret != 0)
4155 		wl1271_error("beacon info change failed: %d", ret);
4156 	return ret;
4157 }
4158 
4159 /* AP mode changes */
4160 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4161 				       struct ieee80211_vif *vif,
4162 				       struct ieee80211_bss_conf *bss_conf,
4163 				       u32 changed)
4164 {
4165 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4166 	int ret = 0;
4167 
4168 	if (changed & BSS_CHANGED_BASIC_RATES) {
4169 		u32 rates = bss_conf->basic_rates;
4170 
4171 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4172 								 wlvif->band);
4173 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4174 							wlvif->basic_rate_set);
4175 
4176 		ret = wl1271_init_ap_rates(wl, wlvif);
4177 		if (ret < 0) {
4178 			wl1271_error("AP rate policy change failed %d", ret);
4179 			goto out;
4180 		}
4181 
4182 		ret = wl1271_ap_init_templates(wl, vif);
4183 		if (ret < 0)
4184 			goto out;
4185 
4186 		/* No need to set probe resp template for mesh */
4187 		if (!ieee80211_vif_is_mesh(vif)) {
4188 			ret = wl1271_ap_set_probe_resp_tmpl(wl,
4189 							    wlvif->basic_rate,
4190 							    vif);
4191 			if (ret < 0)
4192 				goto out;
4193 		}
4194 
4195 		ret = wlcore_set_beacon_template(wl, vif, true);
4196 		if (ret < 0)
4197 			goto out;
4198 	}
4199 
4200 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4201 	if (ret < 0)
4202 		goto out;
4203 
4204 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4205 		if (bss_conf->enable_beacon) {
4206 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4207 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4208 				if (ret < 0)
4209 					goto out;
4210 
4211 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4212 				if (ret < 0)
4213 					goto out;
4214 
4215 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4216 				wl1271_debug(DEBUG_AP, "started AP");
4217 			}
4218 		} else {
4219 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4220 				/*
4221 				 * AP might be in ROC in case we have just
4222 				 * sent an auth reply. Handle it.
4223 				 */
4224 				if (test_bit(wlvif->role_id, wl->roc_map))
4225 					wl12xx_croc(wl, wlvif->role_id);
4226 
4227 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4228 				if (ret < 0)
4229 					goto out;
4230 
4231 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4232 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4233 					  &wlvif->flags);
4234 				wl1271_debug(DEBUG_AP, "stopped AP");
4235 			}
4236 		}
4237 	}
4238 
4239 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4240 	if (ret < 0)
4241 		goto out;
4242 
4243 	/* Handle HT information change */
4244 	if ((changed & BSS_CHANGED_HT) &&
4245 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4246 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4247 					bss_conf->ht_operation_mode);
4248 		if (ret < 0) {
4249 			wl1271_warning("Set ht information failed %d", ret);
4250 			goto out;
4251 		}
4252 	}
4253 
4254 out:
4255 	return;
4256 }
4257 
4258 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4259 			    struct ieee80211_vif *vif, u32 sta_rate_set)
4260 {
4261 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
4262 	u32 rates;
4263 	int ret;
4264 
4265 	wl1271_debug(DEBUG_MAC80211,
4266 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4267 	     bss_conf->bssid, vif->cfg.aid,
4268 	     bss_conf->beacon_int,
4269 	     bss_conf->basic_rates, sta_rate_set);
4270 
4271 	wlvif->beacon_int = bss_conf->beacon_int;
4272 	rates = bss_conf->basic_rates;
4273 	wlvif->basic_rate_set =
4274 		wl1271_tx_enabled_rates_get(wl, rates,
4275 					    wlvif->band);
4276 	wlvif->basic_rate =
4277 		wl1271_tx_min_rate_get(wl,
4278 				       wlvif->basic_rate_set);
4279 
4280 	if (sta_rate_set)
4281 		wlvif->rate_set =
4282 			wl1271_tx_enabled_rates_get(wl,
4283 						sta_rate_set,
4284 						wlvif->band);
4285 
4286 	/* we only support sched_scan while not connected */
4287 	if (wl->sched_vif == wlvif)
4288 		wl->ops->sched_scan_stop(wl, wlvif);
4289 
4290 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4291 	if (ret < 0)
4292 		return ret;
4293 
4294 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4295 	if (ret < 0)
4296 		return ret;
4297 
4298 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4299 	if (ret < 0)
4300 		return ret;
4301 
4302 	wlcore_set_ssid(wl, wlvif);
4303 
4304 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4305 
4306 	return 0;
4307 }
4308 
4309 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4310 {
4311 	int ret;
4312 
4313 	/* revert back to minimum rates for the current band */
4314 	wl1271_set_band_rate(wl, wlvif);
4315 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4316 
4317 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4318 	if (ret < 0)
4319 		return ret;
4320 
4321 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4322 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4323 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4324 		if (ret < 0)
4325 			return ret;
4326 	}
4327 
4328 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4329 	return 0;
4330 }
4331 /* STA/IBSS mode changes */
4332 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4333 					struct ieee80211_vif *vif,
4334 					struct ieee80211_bss_conf *bss_conf,
4335 					u32 changed)
4336 {
4337 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4338 	bool do_join = false;
4339 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4340 	bool ibss_joined = false;
4341 	u32 sta_rate_set = 0;
4342 	int ret;
4343 	struct ieee80211_sta *sta;
4344 	bool sta_exists = false;
4345 	struct ieee80211_sta_ht_cap sta_ht_cap;
4346 
4347 	if (is_ibss) {
4348 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4349 						     changed);
4350 		if (ret < 0)
4351 			goto out;
4352 	}
4353 
4354 	if (changed & BSS_CHANGED_IBSS) {
4355 		if (vif->cfg.ibss_joined) {
4356 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4357 			ibss_joined = true;
4358 		} else {
4359 			wlcore_unset_assoc(wl, wlvif);
4360 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4361 		}
4362 	}
4363 
4364 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4365 		do_join = true;
4366 
4367 	/* Need to update the SSID (for filtering etc) */
4368 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4369 		do_join = true;
4370 
4371 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4372 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4373 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4374 
4375 		do_join = true;
4376 	}
4377 
4378 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4379 		wl1271_sta_handle_idle(wl, wlvif, vif->cfg.idle);
4380 
4381 	if (changed & BSS_CHANGED_CQM) {
4382 		bool enable = false;
4383 		if (bss_conf->cqm_rssi_thold)
4384 			enable = true;
4385 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4386 						  bss_conf->cqm_rssi_thold,
4387 						  bss_conf->cqm_rssi_hyst);
4388 		if (ret < 0)
4389 			goto out;
4390 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4391 	}
4392 
4393 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4394 		       BSS_CHANGED_ASSOC)) {
4395 		rcu_read_lock();
4396 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4397 		if (sta) {
4398 			u8 *rx_mask = sta->deflink.ht_cap.mcs.rx_mask;
4399 
4400 			/* save the supp_rates of the ap */
4401 			sta_rate_set = sta->deflink.supp_rates[wlvif->band];
4402 			if (sta->deflink.ht_cap.ht_supported)
4403 				sta_rate_set |=
4404 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4405 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4406 			sta_ht_cap = sta->deflink.ht_cap;
4407 			sta_exists = true;
4408 		}
4409 
4410 		rcu_read_unlock();
4411 	}
4412 
4413 	if (changed & BSS_CHANGED_BSSID) {
4414 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4415 			ret = wlcore_set_bssid(wl, wlvif, vif,
4416 					       sta_rate_set);
4417 			if (ret < 0)
4418 				goto out;
4419 
4420 			/* Need to update the BSSID (for filtering etc) */
4421 			do_join = true;
4422 		} else {
4423 			ret = wlcore_clear_bssid(wl, wlvif);
4424 			if (ret < 0)
4425 				goto out;
4426 		}
4427 	}
4428 
4429 	if (changed & BSS_CHANGED_IBSS) {
4430 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4431 			     vif->cfg.ibss_joined);
4432 
4433 		if (vif->cfg.ibss_joined) {
4434 			u32 rates = bss_conf->basic_rates;
4435 			wlvif->basic_rate_set =
4436 				wl1271_tx_enabled_rates_get(wl, rates,
4437 							    wlvif->band);
4438 			wlvif->basic_rate =
4439 				wl1271_tx_min_rate_get(wl,
4440 						       wlvif->basic_rate_set);
4441 
4442 			/* by default, use 11b + OFDM rates */
4443 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4444 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4445 			if (ret < 0)
4446 				goto out;
4447 		}
4448 	}
4449 
4450 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4451 		/* enable beacon filtering */
4452 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4453 		if (ret < 0)
4454 			goto out;
4455 	}
4456 
4457 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4458 	if (ret < 0)
4459 		goto out;
4460 
4461 	if (do_join) {
4462 		ret = wlcore_join(wl, wlvif);
4463 		if (ret < 0) {
4464 			wl1271_warning("cmd join failed %d", ret);
4465 			goto out;
4466 		}
4467 	}
4468 
4469 	if (changed & BSS_CHANGED_ASSOC) {
4470 		if (vif->cfg.assoc) {
4471 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4472 					       sta_rate_set);
4473 			if (ret < 0)
4474 				goto out;
4475 
4476 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4477 				wl12xx_set_authorized(wl, wlvif);
4478 		} else {
4479 			wlcore_unset_assoc(wl, wlvif);
4480 		}
4481 	}
4482 
4483 	if (changed & BSS_CHANGED_PS) {
4484 		if (vif->cfg.ps &&
4485 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4486 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4487 			int ps_mode;
4488 			char *ps_mode_str;
4489 
4490 			if (wl->conf.conn.forced_ps) {
4491 				ps_mode = STATION_POWER_SAVE_MODE;
4492 				ps_mode_str = "forced";
4493 			} else {
4494 				ps_mode = STATION_AUTO_PS_MODE;
4495 				ps_mode_str = "auto";
4496 			}
4497 
4498 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4499 
4500 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4501 			if (ret < 0)
4502 				wl1271_warning("enter %s ps failed %d",
4503 					       ps_mode_str, ret);
4504 		} else if (!vif->cfg.ps &&
4505 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4506 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4507 
4508 			ret = wl1271_ps_set_mode(wl, wlvif,
4509 						 STATION_ACTIVE_MODE);
4510 			if (ret < 0)
4511 				wl1271_warning("exit auto ps failed %d", ret);
4512 		}
4513 	}
4514 
4515 	/* Handle new association with HT. Do this after join. */
4516 	if (sta_exists) {
4517 		bool enabled =
4518 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4519 
4520 		ret = wlcore_hw_set_peer_cap(wl,
4521 					     &sta_ht_cap,
4522 					     enabled,
4523 					     wlvif->rate_set,
4524 					     wlvif->sta.hlid);
4525 		if (ret < 0) {
4526 			wl1271_warning("Set ht cap failed %d", ret);
4527 			goto out;
4528 
4529 		}
4530 
4531 		if (enabled) {
4532 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4533 						bss_conf->ht_operation_mode);
4534 			if (ret < 0) {
4535 				wl1271_warning("Set ht information failed %d",
4536 					       ret);
4537 				goto out;
4538 			}
4539 		}
4540 	}
4541 
4542 	/* Handle arp filtering. Done after join. */
4543 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4544 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4545 		__be32 addr = vif->cfg.arp_addr_list[0];
4546 		wlvif->sta.qos = bss_conf->qos;
4547 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4548 
4549 		if (vif->cfg.arp_addr_cnt == 1 && vif->cfg.assoc) {
4550 			wlvif->ip_addr = addr;
4551 			/*
4552 			 * The template should have been configured only upon
4553 			 * association. However, it seems that the correct IP
4554 			 * isn't being set (when sending), so we have to
4555 			 * reconfigure the template upon every IP change.
4556 			 */
4557 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4558 			if (ret < 0) {
4559 				wl1271_warning("build arp rsp failed: %d", ret);
4560 				goto out;
4561 			}
4562 
4563 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4564 				(ACX_ARP_FILTER_ARP_FILTERING |
4565 				 ACX_ARP_FILTER_AUTO_ARP),
4566 				addr);
4567 		} else {
4568 			wlvif->ip_addr = 0;
4569 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4570 		}
4571 
4572 		if (ret < 0)
4573 			goto out;
4574 	}
4575 
4576 out:
4577 	return;
4578 }
4579 
4580 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4581 				       struct ieee80211_vif *vif,
4582 				       struct ieee80211_bss_conf *bss_conf,
4583 				       u64 changed)
4584 {
4585 	struct wl1271 *wl = hw->priv;
4586 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4587 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4588 	int ret;
4589 
4590 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4591 		     wlvif->role_id, (int)changed);
4592 
4593 	/*
4594 	 * make sure to cancel pending disconnections if our association
4595 	 * state changed
4596 	 */
4597 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4598 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4599 
4600 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4601 	    !bss_conf->enable_beacon)
4602 		wl1271_tx_flush(wl);
4603 
4604 	mutex_lock(&wl->mutex);
4605 
4606 	if (unlikely(wl->state != WLCORE_STATE_ON))
4607 		goto out;
4608 
4609 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4610 		goto out;
4611 
4612 	ret = pm_runtime_resume_and_get(wl->dev);
4613 	if (ret < 0)
4614 		goto out;
4615 
4616 	if ((changed & BSS_CHANGED_TXPOWER) &&
4617 	    bss_conf->txpower != wlvif->power_level) {
4618 
4619 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4620 		if (ret < 0)
4621 			goto out;
4622 
4623 		wlvif->power_level = bss_conf->txpower;
4624 	}
4625 
4626 	if (is_ap)
4627 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4628 	else
4629 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4630 
4631 	pm_runtime_mark_last_busy(wl->dev);
4632 	pm_runtime_put_autosuspend(wl->dev);
4633 
4634 out:
4635 	mutex_unlock(&wl->mutex);
4636 }
4637 
4638 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4639 				 struct ieee80211_chanctx_conf *ctx)
4640 {
4641 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4642 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4643 		     cfg80211_get_chandef_type(&ctx->def));
4644 	return 0;
4645 }
4646 
4647 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4648 				     struct ieee80211_chanctx_conf *ctx)
4649 {
4650 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4651 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4652 		     cfg80211_get_chandef_type(&ctx->def));
4653 }
4654 
4655 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4656 				     struct ieee80211_chanctx_conf *ctx,
4657 				     u32 changed)
4658 {
4659 	struct wl1271 *wl = hw->priv;
4660 	struct wl12xx_vif *wlvif;
4661 	int ret;
4662 	int channel = ieee80211_frequency_to_channel(
4663 		ctx->def.chan->center_freq);
4664 
4665 	wl1271_debug(DEBUG_MAC80211,
4666 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4667 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4668 
4669 	mutex_lock(&wl->mutex);
4670 
4671 	ret = pm_runtime_resume_and_get(wl->dev);
4672 	if (ret < 0)
4673 		goto out;
4674 
4675 	wl12xx_for_each_wlvif(wl, wlvif) {
4676 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4677 
4678 		rcu_read_lock();
4679 		if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != ctx) {
4680 			rcu_read_unlock();
4681 			continue;
4682 		}
4683 		rcu_read_unlock();
4684 
4685 		/* start radar if needed */
4686 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4687 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4688 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4689 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4690 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4691 			wlcore_hw_set_cac(wl, wlvif, true);
4692 			wlvif->radar_enabled = true;
4693 		}
4694 	}
4695 
4696 	pm_runtime_mark_last_busy(wl->dev);
4697 	pm_runtime_put_autosuspend(wl->dev);
4698 out:
4699 	mutex_unlock(&wl->mutex);
4700 }
4701 
4702 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4703 					struct ieee80211_vif *vif,
4704 					struct ieee80211_bss_conf *link_conf,
4705 					struct ieee80211_chanctx_conf *ctx)
4706 {
4707 	struct wl1271 *wl = hw->priv;
4708 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4709 	int channel = ieee80211_frequency_to_channel(
4710 		ctx->def.chan->center_freq);
4711 	int ret = -EINVAL;
4712 
4713 	wl1271_debug(DEBUG_MAC80211,
4714 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4715 		     wlvif->role_id, channel,
4716 		     cfg80211_get_chandef_type(&ctx->def),
4717 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4718 
4719 	mutex_lock(&wl->mutex);
4720 
4721 	if (unlikely(wl->state != WLCORE_STATE_ON))
4722 		goto out;
4723 
4724 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4725 		goto out;
4726 
4727 	ret = pm_runtime_resume_and_get(wl->dev);
4728 	if (ret < 0)
4729 		goto out;
4730 
4731 	wlvif->band = ctx->def.chan->band;
4732 	wlvif->channel = channel;
4733 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4734 
4735 	/* update default rates according to the band */
4736 	wl1271_set_band_rate(wl, wlvif);
4737 
4738 	if (ctx->radar_enabled &&
4739 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4740 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4741 		wlcore_hw_set_cac(wl, wlvif, true);
4742 		wlvif->radar_enabled = true;
4743 	}
4744 
4745 	pm_runtime_mark_last_busy(wl->dev);
4746 	pm_runtime_put_autosuspend(wl->dev);
4747 out:
4748 	mutex_unlock(&wl->mutex);
4749 
4750 	return 0;
4751 }
4752 
4753 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4754 					   struct ieee80211_vif *vif,
4755 					   struct ieee80211_bss_conf *link_conf,
4756 					   struct ieee80211_chanctx_conf *ctx)
4757 {
4758 	struct wl1271 *wl = hw->priv;
4759 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4760 	int ret;
4761 
4762 	wl1271_debug(DEBUG_MAC80211,
4763 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4764 		     wlvif->role_id,
4765 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4766 		     cfg80211_get_chandef_type(&ctx->def));
4767 
4768 	wl1271_tx_flush(wl);
4769 
4770 	mutex_lock(&wl->mutex);
4771 
4772 	if (unlikely(wl->state != WLCORE_STATE_ON))
4773 		goto out;
4774 
4775 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4776 		goto out;
4777 
4778 	ret = pm_runtime_resume_and_get(wl->dev);
4779 	if (ret < 0)
4780 		goto out;
4781 
4782 	if (wlvif->radar_enabled) {
4783 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4784 		wlcore_hw_set_cac(wl, wlvif, false);
4785 		wlvif->radar_enabled = false;
4786 	}
4787 
4788 	pm_runtime_mark_last_busy(wl->dev);
4789 	pm_runtime_put_autosuspend(wl->dev);
4790 out:
4791 	mutex_unlock(&wl->mutex);
4792 }
4793 
4794 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4795 				    struct wl12xx_vif *wlvif,
4796 				    struct ieee80211_chanctx_conf *new_ctx)
4797 {
4798 	int channel = ieee80211_frequency_to_channel(
4799 		new_ctx->def.chan->center_freq);
4800 
4801 	wl1271_debug(DEBUG_MAC80211,
4802 		     "switch vif (role %d) %d -> %d chan_type: %d",
4803 		     wlvif->role_id, wlvif->channel, channel,
4804 		     cfg80211_get_chandef_type(&new_ctx->def));
4805 
4806 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4807 		return 0;
4808 
4809 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4810 
4811 	if (wlvif->radar_enabled) {
4812 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4813 		wlcore_hw_set_cac(wl, wlvif, false);
4814 		wlvif->radar_enabled = false;
4815 	}
4816 
4817 	wlvif->band = new_ctx->def.chan->band;
4818 	wlvif->channel = channel;
4819 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4820 
4821 	/* start radar if needed */
4822 	if (new_ctx->radar_enabled) {
4823 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4824 		wlcore_hw_set_cac(wl, wlvif, true);
4825 		wlvif->radar_enabled = true;
4826 	}
4827 
4828 	return 0;
4829 }
4830 
4831 static int
4832 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4833 			     struct ieee80211_vif_chanctx_switch *vifs,
4834 			     int n_vifs,
4835 			     enum ieee80211_chanctx_switch_mode mode)
4836 {
4837 	struct wl1271 *wl = hw->priv;
4838 	int i, ret;
4839 
4840 	wl1271_debug(DEBUG_MAC80211,
4841 		     "mac80211 switch chanctx n_vifs %d mode %d",
4842 		     n_vifs, mode);
4843 
4844 	mutex_lock(&wl->mutex);
4845 
4846 	ret = pm_runtime_resume_and_get(wl->dev);
4847 	if (ret < 0)
4848 		goto out;
4849 
4850 	for (i = 0; i < n_vifs; i++) {
4851 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4852 
4853 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4854 		if (ret)
4855 			goto out_sleep;
4856 	}
4857 out_sleep:
4858 	pm_runtime_mark_last_busy(wl->dev);
4859 	pm_runtime_put_autosuspend(wl->dev);
4860 out:
4861 	mutex_unlock(&wl->mutex);
4862 
4863 	return 0;
4864 }
4865 
4866 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4867 			     struct ieee80211_vif *vif,
4868 			     unsigned int link_id, u16 queue,
4869 			     const struct ieee80211_tx_queue_params *params)
4870 {
4871 	struct wl1271 *wl = hw->priv;
4872 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4873 	u8 ps_scheme;
4874 	int ret = 0;
4875 
4876 	if (wlcore_is_p2p_mgmt(wlvif))
4877 		return 0;
4878 
4879 	mutex_lock(&wl->mutex);
4880 
4881 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4882 
4883 	if (params->uapsd)
4884 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4885 	else
4886 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4887 
4888 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4889 		goto out;
4890 
4891 	ret = pm_runtime_resume_and_get(wl->dev);
4892 	if (ret < 0)
4893 		goto out;
4894 
4895 	/*
4896 	 * mac80211 configures the txop in units of 32us, while the
4897 	 * firmware expects microseconds, hence the << 5 below
4898 	 */
4899 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4900 				params->cw_min, params->cw_max,
4901 				params->aifs, params->txop << 5);
4902 	if (ret < 0)
4903 		goto out_sleep;
4904 
4905 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4906 				 CONF_CHANNEL_TYPE_EDCF,
4907 				 wl1271_tx_get_queue(queue),
4908 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4909 				 0, 0);
4910 
4911 out_sleep:
4912 	pm_runtime_mark_last_busy(wl->dev);
4913 	pm_runtime_put_autosuspend(wl->dev);
4914 
4915 out:
4916 	mutex_unlock(&wl->mutex);
4917 
4918 	return ret;
4919 }
4920 
4921 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4922 			     struct ieee80211_vif *vif)
4923 {
4924 
4925 	struct wl1271 *wl = hw->priv;
4926 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4927 	u64 mactime = ULLONG_MAX;
4928 	int ret;
4929 
4930 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4931 
4932 	mutex_lock(&wl->mutex);
4933 
4934 	if (unlikely(wl->state != WLCORE_STATE_ON))
4935 		goto out;
4936 
4937 	ret = pm_runtime_resume_and_get(wl->dev);
4938 	if (ret < 0)
4939 		goto out;
4940 
4941 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4942 	if (ret < 0)
4943 		goto out_sleep;
4944 
4945 out_sleep:
4946 	pm_runtime_mark_last_busy(wl->dev);
4947 	pm_runtime_put_autosuspend(wl->dev);
4948 
4949 out:
4950 	mutex_unlock(&wl->mutex);
4951 	return mactime;
4952 }
4953 
4954 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4955 				struct survey_info *survey)
4956 {
4957 	struct ieee80211_conf *conf = &hw->conf;
4958 
4959 	if (idx != 0)
4960 		return -ENOENT;
4961 
4962 	survey->channel = conf->chandef.chan;
4963 	survey->filled = 0;
4964 	return 0;
4965 }
4966 
4967 static int wl1271_allocate_sta(struct wl1271 *wl,
4968 			     struct wl12xx_vif *wlvif,
4969 			     struct ieee80211_sta *sta)
4970 {
4971 	struct wl1271_station *wl_sta;
4972 	int ret;
4973 
4974 
4975 	if (wl->active_sta_count >= wl->max_ap_stations) {
4976 		wl1271_warning("could not allocate HLID - too much stations");
4977 		return -EBUSY;
4978 	}
4979 
4980 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4981 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4982 	if (ret < 0) {
4983 		wl1271_warning("could not allocate HLID - too many links");
4984 		return -EBUSY;
4985 	}
4986 
4987 	/* use the previous security seq, if this is a recovery/resume */
4988 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4989 
4990 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4991 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4992 	wl->active_sta_count++;
4993 	return 0;
4994 }
4995 
4996 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4997 {
4998 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4999 		return;
5000 
5001 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
5002 	__clear_bit(hlid, &wl->ap_ps_map);
5003 	__clear_bit(hlid, &wl->ap_fw_ps_map);
5004 
5005 	/*
5006 	 * save the last used PN in the private part of ieee80211_sta,
5007 	 * in case of recovery/suspend
5008 	 */
5009 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5010 
5011 	wl12xx_free_link(wl, wlvif, &hlid);
5012 	wl->active_sta_count--;
5013 
5014 	/*
5015 	 * rearm the tx watchdog when the last STA is freed - give the FW a
5016 	 * chance to return STA-buffered packets before complaining.
5017 	 */
5018 	if (wl->active_sta_count == 0)
5019 		wl12xx_rearm_tx_watchdog_locked(wl);
5020 }
5021 
5022 static int wl12xx_sta_add(struct wl1271 *wl,
5023 			  struct wl12xx_vif *wlvif,
5024 			  struct ieee80211_sta *sta)
5025 {
5026 	struct wl1271_station *wl_sta;
5027 	int ret = 0;
5028 	u8 hlid;
5029 
5030 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5031 
5032 	ret = wl1271_allocate_sta(wl, wlvif, sta);
5033 	if (ret < 0)
5034 		return ret;
5035 
5036 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5037 	hlid = wl_sta->hlid;
5038 
5039 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5040 	if (ret < 0)
5041 		wl1271_free_sta(wl, wlvif, hlid);
5042 
5043 	return ret;
5044 }
5045 
5046 static int wl12xx_sta_remove(struct wl1271 *wl,
5047 			     struct wl12xx_vif *wlvif,
5048 			     struct ieee80211_sta *sta)
5049 {
5050 	struct wl1271_station *wl_sta;
5051 	int ret = 0, id;
5052 
5053 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5054 
5055 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5056 	id = wl_sta->hlid;
5057 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5058 		return -EINVAL;
5059 
5060 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5061 	if (ret < 0)
5062 		return ret;
5063 
5064 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5065 	return ret;
5066 }
5067 
5068 static void wlcore_roc_if_possible(struct wl1271 *wl,
5069 				   struct wl12xx_vif *wlvif)
5070 {
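	/* only a single ROC can be active; bail out if any role holds one */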
5071 	if (find_first_bit(wl->roc_map,
5072 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5073 		return;
5074 
5075 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5076 		return;
5077 
5078 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5079 }
5080 
5081 /*
5082  * when wl_sta is NULL, we treat this call as if coming from a
5083  * pending auth reply.
5084  * wl->mutex must be taken and the FW must be awake when the call
5085  * takes place.
5086  */
5087 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5088 			      struct wl1271_station *wl_sta, bool in_conn)
5089 {
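	/*
	 * Keep a ROC active while any station (or a pending auth reply) is
	 * mid-connection, and cancel it once the last one completes.
	 */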
5090 	if (in_conn) {
5091 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5092 			return;
5093 
5094 		if (!wlvif->ap_pending_auth_reply &&
5095 		    !wlvif->inconn_count)
5096 			wlcore_roc_if_possible(wl, wlvif);
5097 
5098 		if (wl_sta) {
5099 			wl_sta->in_connection = true;
5100 			wlvif->inconn_count++;
5101 		} else {
5102 			wlvif->ap_pending_auth_reply = true;
5103 		}
5104 	} else {
5105 		if (wl_sta && !wl_sta->in_connection)
5106 			return;
5107 
5108 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5109 			return;
5110 
5111 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5112 			return;
5113 
5114 		if (wl_sta) {
5115 			wl_sta->in_connection = false;
5116 			wlvif->inconn_count--;
5117 		} else {
5118 			wlvif->ap_pending_auth_reply = false;
5119 		}
5120 
5121 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5122 		    test_bit(wlvif->role_id, wl->roc_map))
5123 			wl12xx_croc(wl, wlvif->role_id);
5124 	}
5125 }
5126 
5127 static int wl12xx_update_sta_state(struct wl1271 *wl,
5128 				   struct wl12xx_vif *wlvif,
5129 				   struct ieee80211_sta *sta,
5130 				   enum ieee80211_sta_state old_state,
5131 				   enum ieee80211_sta_state new_state)
5132 {
5133 	struct wl1271_station *wl_sta;
5134 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5135 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5136 	int ret;
5137 
5138 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5139 
5140 	/* Add station (AP mode) */
5141 	if (is_ap &&
5142 	    old_state == IEEE80211_STA_NOTEXIST &&
5143 	    new_state == IEEE80211_STA_NONE) {
5144 		ret = wl12xx_sta_add(wl, wlvif, sta);
5145 		if (ret)
5146 			return ret;
5147 
5148 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5149 	}
5150 
5151 	/* Remove station (AP mode) */
5152 	if (is_ap &&
5153 	    old_state == IEEE80211_STA_NONE &&
5154 	    new_state == IEEE80211_STA_NOTEXIST) {
5155 		/* must not fail */
5156 		wl12xx_sta_remove(wl, wlvif, sta);
5157 
5158 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5159 	}
5160 
5161 	/* Authorize station (AP mode) */
5162 	if (is_ap &&
5163 	    new_state == IEEE80211_STA_AUTHORIZED) {
5164 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5165 		if (ret < 0)
5166 			return ret;
5167 
5168 		/* reconfigure rates */
5169 		ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5170 		if (ret < 0)
5171 			return ret;
5172 
5173 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->deflink.ht_cap,
5174 						     true,
5175 						     wl_sta->hlid);
5176 		if (ret)
5177 			return ret;
5178 
5179 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5180 	}
5181 
5182 	/* Authorize station */
5183 	if (is_sta &&
5184 	    new_state == IEEE80211_STA_AUTHORIZED) {
5185 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5186 		ret = wl12xx_set_authorized(wl, wlvif);
5187 		if (ret)
5188 			return ret;
5189 	}
5190 
5191 	if (is_sta &&
5192 	    old_state == IEEE80211_STA_AUTHORIZED &&
5193 	    new_state == IEEE80211_STA_ASSOC) {
5194 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5195 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5196 	}
5197 
5198 	/* save seq number on disassoc (suspend) */
5199 	if (is_sta &&
5200 	    old_state == IEEE80211_STA_ASSOC &&
5201 	    new_state == IEEE80211_STA_AUTH) {
5202 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5203 		wlvif->total_freed_pkts = 0;
5204 	}
5205 
5206 	/* restore seq number on assoc (resume) */
5207 	if (is_sta &&
5208 	    old_state == IEEE80211_STA_AUTH &&
5209 	    new_state == IEEE80211_STA_ASSOC) {
5210 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5211 	}
5212 
5213 	/* clear ROCs on failure or authorization */
5214 	if (is_sta &&
5215 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5216 	     new_state == IEEE80211_STA_NOTEXIST)) {
5217 		if (test_bit(wlvif->role_id, wl->roc_map))
5218 			wl12xx_croc(wl, wlvif->role_id);
5219 	}
5220 
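	/*
	 * STA mode: when a new connection attempt starts, ROC on the
	 * target channel unless some role is already on-channel.
	 */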
5221 	if (is_sta &&
5222 	    old_state == IEEE80211_STA_NOTEXIST &&
5223 	    new_state == IEEE80211_STA_NONE) {
5224 		if (find_first_bit(wl->roc_map,
5225 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5226 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5227 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5228 				   wlvif->band, wlvif->channel);
5229 		}
5230 	}
5231 	return 0;
5232 }
5233 
5234 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5235 			       struct ieee80211_vif *vif,
5236 			       struct ieee80211_sta *sta,
5237 			       enum ieee80211_sta_state old_state,
5238 			       enum ieee80211_sta_state new_state)
5239 {
5240 	struct wl1271 *wl = hw->priv;
5241 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5242 	int ret;
5243 
5244 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5245 		     sta->aid, old_state, new_state);
5246 
5247 	mutex_lock(&wl->mutex);
5248 
5249 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5250 		ret = -EBUSY;
5251 		goto out;
5252 	}
5253 
5254 	ret = pm_runtime_resume_and_get(wl->dev);
5255 	if (ret < 0)
5256 		goto out;
5257 
5258 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5259 
5260 	pm_runtime_mark_last_busy(wl->dev);
5261 	pm_runtime_put_autosuspend(wl->dev);
5262 out:
5263 	mutex_unlock(&wl->mutex);
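	/* tear-down (downgrade) transitions are never failed - presumably intentional */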
5264 	if (new_state < old_state)
5265 		return 0;
5266 	return ret;
5267 }
5268 
5269 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5270 				  struct ieee80211_vif *vif,
5271 				  struct ieee80211_ampdu_params *params)
5272 {
5273 	struct wl1271 *wl = hw->priv;
5274 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5275 	int ret;
5276 	u8 hlid, *ba_bitmap;
5277 	struct ieee80211_sta *sta = params->sta;
5278 	enum ieee80211_ampdu_mlme_action action = params->action;
5279 	u16 tid = params->tid;
5280 	u16 *ssn = &params->ssn;
5281 
5282 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5283 		     tid);
5284 
5285 	/* sanity check - the fields in FW are only 8 bits wide */
5286 	if (WARN_ON(tid > 0xFF))
5287 		return -ENOTSUPP;
5288 
5289 	mutex_lock(&wl->mutex);
5290 
5291 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5292 		ret = -EAGAIN;
5293 		goto out;
5294 	}
5295 
5296 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5297 		hlid = wlvif->sta.hlid;
5298 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5299 		struct wl1271_station *wl_sta;
5300 
5301 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5302 		hlid = wl_sta->hlid;
5303 	} else {
5304 		ret = -EINVAL;
5305 		goto out;
5306 	}
5307 
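	/* per-link bitmap of TIDs that have an active RX BA session */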
5308 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5309 
5310 	ret = pm_runtime_resume_and_get(wl->dev);
5311 	if (ret < 0)
5312 		goto out;
5313 
5314 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5315 		     tid, action);
5316 
5317 	switch (action) {
5318 	case IEEE80211_AMPDU_RX_START:
5319 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5320 			ret = -ENOTSUPP;
5321 			break;
5322 		}
5323 
5324 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5325 			ret = -EBUSY;
5326 			wl1271_debug(DEBUG_RX, "exceeded max RX BA sessions");
5327 			break;
5328 		}
5329 
5330 		if (*ba_bitmap & BIT(tid)) {
5331 			ret = -EINVAL;
5332 			wl1271_error("cannot enable RX BA session on active "
5333 				     "tid: %d", tid);
5334 			break;
5335 		}
5336 
5337 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5338 				hlid,
5339 				params->buf_size);
5340 
5341 		if (!ret) {
5342 			*ba_bitmap |= BIT(tid);
5343 			wl->ba_rx_session_count++;
5344 		}
5345 		break;
5346 
5347 	case IEEE80211_AMPDU_RX_STOP:
5348 		if (!(*ba_bitmap & BIT(tid))) {
5349 			/*
5350 			 * this happens on reconfig - so only output a debug
5351 			 * message for now, and don't fail the function.
5352 			 */
5353 			wl1271_debug(DEBUG_MAC80211,
5354 				     "no active RX BA session on tid: %d",
5355 				     tid);
5356 			ret = 0;
5357 			break;
5358 		}
5359 
5360 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5361 							 hlid, 0);
5362 		if (!ret) {
5363 			*ba_bitmap &= ~BIT(tid);
5364 			wl->ba_rx_session_count--;
5365 		}
5366 		break;
5367 
5368 	/*
5369 	 * The BA initiator session is managed by the FW independently.
5370 	 * Deliberately take the same path (break) for all TX AMPDU commands.
5371 	 */
5372 	case IEEE80211_AMPDU_TX_START:
5373 	case IEEE80211_AMPDU_TX_STOP_CONT:
5374 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5375 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5376 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5377 		ret = -EINVAL;
5378 		break;
5379 
5380 	default:
5381 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5382 		ret = -EINVAL;
5383 	}
5384 
5385 	pm_runtime_mark_last_busy(wl->dev);
5386 	pm_runtime_put_autosuspend(wl->dev);
5387 
5388 out:
5389 	mutex_unlock(&wl->mutex);
5390 
5391 	return ret;
5392 }
5393 
5394 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5395 				   struct ieee80211_vif *vif,
5396 				   const struct cfg80211_bitrate_mask *mask)
5397 {
5398 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5399 	struct wl1271 *wl = hw->priv;
5400 	int i, ret = 0;
5401 
5402 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5403 		mask->control[NL80211_BAND_2GHZ].legacy,
5404 		mask->control[NL80211_BAND_5GHZ].legacy);
5405 
5406 	mutex_lock(&wl->mutex);
5407 
5408 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5409 		wlvif->bitrate_masks[i] =
5410 			wl1271_tx_enabled_rates_get(wl,
5411 						    mask->control[i].legacy,
5412 						    i);
5413 
5414 	if (unlikely(wl->state != WLCORE_STATE_ON))
5415 		goto out;
5416 
5417 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5418 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5419 
5420 		ret = pm_runtime_resume_and_get(wl->dev);
5421 		if (ret < 0)
5422 			goto out;
5423 
5424 		wl1271_set_band_rate(wl, wlvif);
5425 		wlvif->basic_rate =
5426 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5427 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5428 
5429 		pm_runtime_mark_last_busy(wl->dev);
5430 		pm_runtime_put_autosuspend(wl->dev);
5431 	}
5432 out:
5433 	mutex_unlock(&wl->mutex);
5434 
5435 	return ret;
5436 }
5437 
5438 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5439 				     struct ieee80211_vif *vif,
5440 				     struct ieee80211_channel_switch *ch_switch)
5441 {
5442 	struct wl1271 *wl = hw->priv;
5443 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5444 	int ret;
5445 
5446 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5447 
5448 	wl1271_tx_flush(wl);
5449 
5450 	mutex_lock(&wl->mutex);
5451 
5452 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5453 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5454 			ieee80211_chswitch_done(vif, false);
5455 		goto out;
5456 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5457 		goto out;
5458 	}
5459 
5460 	ret = pm_runtime_resume_and_get(wl->dev);
5461 	if (ret < 0)
5462 		goto out;
5463 
5464 	/* TODO: change mac80211 to pass vif as param */
5465 
5466 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5467 		unsigned long delay_usec;
5468 
5469 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5470 		if (ret)
5471 			goto out_sleep;
5472 
5473 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5474 
5475 		/* indicate failure 5 seconds after channel switch time */
5476 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5477 			ch_switch->count;
5478 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5479 					     usecs_to_jiffies(delay_usec) +
5480 					     msecs_to_jiffies(5000));
5481 	}
5482 
5483 out_sleep:
5484 	pm_runtime_mark_last_busy(wl->dev);
5485 	pm_runtime_put_autosuspend(wl->dev);
5486 
5487 out:
5488 	mutex_unlock(&wl->mutex);
5489 }
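/*
 * Worked example for the delay computed above (illustrative values):
 * with beacon_int = 100 TU and ch_switch->count = 10,
 * delay_usec = ieee80211_tu_to_usec(100) * 10 = 102400 * 10 = 1024000 us,
 * i.e. ~1.02 s, so channel_switch_work runs roughly 6 seconds after the
 * switch command is issued.
 */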
5490 
5491 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5492 					struct wl12xx_vif *wlvif,
5493 					u8 eid)
5494 {
5495 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5496 	struct sk_buff *beacon =
5497 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif), 0);
5498 
5499 	if (!beacon)
5500 		return NULL;
5501 
5502 	return cfg80211_find_ie(eid,
5503 				beacon->data + ieoffset,
5504 				beacon->len - ieoffset);
5505 }
5506 
5507 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5508 				u8 *csa_count)
5509 {
5510 	const u8 *ie;
5511 	const struct ieee80211_channel_sw_ie *ie_csa;
5512 
5513 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5514 	if (!ie)
5515 		return -EINVAL;
5516 
5517 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5518 	*csa_count = ie_csa->count;
5519 
5520 	return 0;
5521 }
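/*
 * Editor's note: cfg80211_find_ie() returns a pointer to the element
 * header (element ID and length), so the Channel Switch Announcement
 * payload parsed above -- the ieee80211_channel_sw_ie body -- starts
 * at ie[2].
 */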
5522 
5523 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5524 					    struct ieee80211_vif *vif,
5525 					    struct cfg80211_chan_def *chandef)
5526 {
5527 	struct wl1271 *wl = hw->priv;
5528 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5529 	struct ieee80211_channel_switch ch_switch = {
5530 		.block_tx = true,
5531 		.chandef = *chandef,
5532 	};
5533 	int ret;
5534 
5535 	wl1271_debug(DEBUG_MAC80211,
5536 		     "mac80211 channel switch beacon (role %d)",
5537 		     wlvif->role_id);
5538 
5539 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5540 	if (ret < 0) {
5541 		wl1271_error("error getting beacon (for CSA counter)");
5542 		return;
5543 	}
5544 
5545 	mutex_lock(&wl->mutex);
5546 
5547 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5548 		ret = -EBUSY;
5549 		goto out;
5550 	}
5551 
5552 	ret = pm_runtime_resume_and_get(wl->dev);
5553 	if (ret < 0)
5554 		goto out;
5555 
5556 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5557 	if (ret)
5558 		goto out_sleep;
5559 
5560 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5561 
5562 out_sleep:
5563 	pm_runtime_mark_last_busy(wl->dev);
5564 	pm_runtime_put_autosuspend(wl->dev);
5565 out:
5566 	mutex_unlock(&wl->mutex);
5567 }
5568 
5569 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5570 			    u32 queues, bool drop)
5571 {
5572 	struct wl1271 *wl = hw->priv;
5573 
5574 	wl1271_tx_flush(wl);
5575 }
5576 
5577 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5578 				       struct ieee80211_vif *vif,
5579 				       struct ieee80211_channel *chan,
5580 				       int duration,
5581 				       enum ieee80211_roc_type type)
5582 {
5583 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5584 	struct wl1271 *wl = hw->priv;
5585 	int channel, active_roc, ret = 0;
5586 
5587 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5588 
5589 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5590 		     channel, wlvif->role_id);
5591 
5592 	mutex_lock(&wl->mutex);
5593 
5594 	if (unlikely(wl->state != WLCORE_STATE_ON))
5595 		goto out;
5596 
5597 	/* return EBUSY if we can't ROC right now */
5598 	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5599 	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5600 		wl1271_warning("active roc on role %d", active_roc);
5601 		ret = -EBUSY;
5602 		goto out;
5603 	}
5604 
5605 	ret = pm_runtime_resume_and_get(wl->dev);
5606 	if (ret < 0)
5607 		goto out;
5608 
5609 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5610 	if (ret < 0)
5611 		goto out_sleep;
5612 
5613 	wl->roc_vif = vif;
5614 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5615 				     msecs_to_jiffies(duration));
5616 out_sleep:
5617 	pm_runtime_mark_last_busy(wl->dev);
5618 	pm_runtime_put_autosuspend(wl->dev);
5619 out:
5620 	mutex_unlock(&wl->mutex);
5621 	return ret;
5622 }
5623 
5624 static int __wlcore_roc_completed(struct wl1271 *wl)
5625 {
5626 	struct wl12xx_vif *wlvif;
5627 	int ret;
5628 
5629 	/* already completed */
5630 	if (unlikely(!wl->roc_vif))
5631 		return 0;
5632 
5633 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5634 
5635 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5636 		return -EBUSY;
5637 
5638 	ret = wl12xx_stop_dev(wl, wlvif);
5639 	if (ret < 0)
5640 		return ret;
5641 
5642 	wl->roc_vif = NULL;
5643 
5644 	return 0;
5645 }
5646 
5647 static int wlcore_roc_completed(struct wl1271 *wl)
5648 {
5649 	int ret;
5650 
5651 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5652 
5653 	mutex_lock(&wl->mutex);
5654 
5655 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5656 		ret = -EBUSY;
5657 		goto out;
5658 	}
5659 
5660 	ret = pm_runtime_resume_and_get(wl->dev);
5661 	if (ret < 0)
5662 		goto out;
5663 
5664 	ret = __wlcore_roc_completed(wl);
5665 
5666 	pm_runtime_mark_last_busy(wl->dev);
5667 	pm_runtime_put_autosuspend(wl->dev);
5668 out:
5669 	mutex_unlock(&wl->mutex);
5670 
5671 	return ret;
5672 }
5673 
5674 static void wlcore_roc_complete_work(struct work_struct *work)
5675 {
5676 	struct delayed_work *dwork;
5677 	struct wl1271 *wl;
5678 	int ret;
5679 
5680 	dwork = to_delayed_work(work);
5681 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5682 
5683 	ret = wlcore_roc_completed(wl);
5684 	if (!ret)
5685 		ieee80211_remain_on_channel_expired(wl->hw);
5686 }
5687 
5688 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
5689 					      struct ieee80211_vif *vif)
5690 {
5691 	struct wl1271 *wl = hw->priv;
5692 
5693 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5694 
5695 	/* TODO: per-vif */
5696 	wl1271_tx_flush(wl);
5697 
5698 	/*
5699 	 * we can't just flush_work here, because it might deadlock
5700 	 * (as we might get called from the same workqueue)
5701 	 */
5702 	cancel_delayed_work_sync(&wl->roc_complete_work);
5703 	wlcore_roc_completed(wl);
5704 
5705 	return 0;
5706 }
5707 
5708 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5709 				    struct ieee80211_vif *vif,
5710 				    struct ieee80211_sta *sta,
5711 				    u32 changed)
5712 {
5713 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5714 
5715 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5716 
5717 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5718 		return;
5719 
5720 	/* this callback is atomic, so schedule a new work */
5721 	wlvif->rc_update_bw = sta->deflink.bandwidth;
5722 	memcpy(&wlvif->rc_ht_cap, &sta->deflink.ht_cap,
5723 	       sizeof(sta->deflink.ht_cap));
5724 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5725 }
5726 
5727 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5728 				     struct ieee80211_vif *vif,
5729 				     struct ieee80211_sta *sta,
5730 				     struct station_info *sinfo)
5731 {
5732 	struct wl1271 *wl = hw->priv;
5733 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5734 	s8 rssi_dbm;
5735 	int ret;
5736 
5737 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5738 
5739 	mutex_lock(&wl->mutex);
5740 
5741 	if (unlikely(wl->state != WLCORE_STATE_ON))
5742 		goto out;
5743 
5744 	ret = pm_runtime_resume_and_get(wl->dev);
5745 	if (ret < 0)
5746 		goto out;
5747 
5748 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5749 	if (ret < 0)
5750 		goto out_sleep;
5751 
5752 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5753 	sinfo->signal = rssi_dbm;
5754 
5755 out_sleep:
5756 	pm_runtime_mark_last_busy(wl->dev);
5757 	pm_runtime_put_autosuspend(wl->dev);
5758 
5759 out:
5760 	mutex_unlock(&wl->mutex);
5761 }
5762 
5763 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5764 					     struct ieee80211_sta *sta)
5765 {
5766 	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5767 	struct wl1271 *wl = hw->priv;
5768 	u8 hlid = wl_sta->hlid;
5769 
5770 	/* return in units of Kbps */
5771 	return (wl->links[hlid].fw_rate_mbps * 1000);
5772 }
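/*
 * Worked example (illustrative value): if the firmware reports
 * fw_rate_mbps = 65 for this link, the callback above returns
 * 65 * 1000 = 65000 kbps to mac80211.
 */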
5773 
5774 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5775 {
5776 	struct wl1271 *wl = hw->priv;
5777 	bool ret = false;
5778 
5779 	mutex_lock(&wl->mutex);
5780 
5781 	if (unlikely(wl->state != WLCORE_STATE_ON))
5782 		goto out;
5783 
5784 	/* packets are considered pending if in the TX queue or the FW */
5785 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5786 out:
5787 	mutex_unlock(&wl->mutex);
5788 
5789 	return ret;
5790 }
5791 
5792 /* can't be const, mac80211 writes to this */
5793 static struct ieee80211_rate wl1271_rates[] = {
5794 	{ .bitrate = 10,
5795 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5796 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5797 	{ .bitrate = 20,
5798 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5799 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5800 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5801 	{ .bitrate = 55,
5802 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5803 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5804 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5805 	{ .bitrate = 110,
5806 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5807 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5808 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5809 	{ .bitrate = 60,
5810 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5811 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5812 	{ .bitrate = 90,
5813 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5814 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5815 	{ .bitrate = 120,
5816 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5817 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5818 	{ .bitrate = 180,
5819 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5820 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5821 	{ .bitrate = 240,
5822 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5823 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5824 	{ .bitrate = 360,
5825 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5826 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5827 	{ .bitrate = 480,
5828 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5829 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5830 	{ .bitrate = 540,
5831 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5832 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5833 };
5834 
5835 /* can't be const, mac80211 writes to this */
5836 static struct ieee80211_channel wl1271_channels[] = {
5837 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5838 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5839 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5840 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5841 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5842 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5843 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5844 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5845 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5846 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5847 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5848 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5849 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5850 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5851 };
5852 
5853 /* can't be const, mac80211 writes to this */
5854 static struct ieee80211_supported_band wl1271_band_2ghz = {
5855 	.channels = wl1271_channels,
5856 	.n_channels = ARRAY_SIZE(wl1271_channels),
5857 	.bitrates = wl1271_rates,
5858 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5859 };
5860 
5861 /* 5 GHz data rates for WL1273 */
5862 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5863 	{ .bitrate = 60,
5864 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5865 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5866 	{ .bitrate = 90,
5867 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5868 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5869 	{ .bitrate = 120,
5870 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5871 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5872 	{ .bitrate = 180,
5873 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5874 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5875 	{ .bitrate = 240,
5876 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5877 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5878 	{ .bitrate = 360,
5879 	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5880 	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5881 	{ .bitrate = 480,
5882 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5883 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5884 	{ .bitrate = 540,
5885 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5886 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5887 };
5888 
5889 /* 5 GHz band channels for WL1273 */
5890 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5891 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5892 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5893 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5894 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5895 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5896 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5897 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5898 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5899 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5900 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5901 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5902 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5903 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5904 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5905 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5906 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5907 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5908 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5909 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5910 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5911 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5912 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5913 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5914 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5915 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5916 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5917 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5918 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5919 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5920 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5921 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5922 };
5923 
5924 static struct ieee80211_supported_band wl1271_band_5ghz = {
5925 	.channels = wl1271_channels_5ghz,
5926 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5927 	.bitrates = wl1271_rates_5ghz,
5928 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5929 };
5930 
5931 static const struct ieee80211_ops wl1271_ops = {
5932 	.start = wl1271_op_start,
5933 	.stop = wlcore_op_stop,
5934 	.add_interface = wl1271_op_add_interface,
5935 	.remove_interface = wl1271_op_remove_interface,
5936 	.change_interface = wl12xx_op_change_interface,
5937 #ifdef CONFIG_PM
5938 	.suspend = wl1271_op_suspend,
5939 	.resume = wl1271_op_resume,
5940 #endif
5941 	.config = wl1271_op_config,
5942 	.prepare_multicast = wl1271_op_prepare_multicast,
5943 	.configure_filter = wl1271_op_configure_filter,
5944 	.tx = wl1271_op_tx,
5945 	.wake_tx_queue = ieee80211_handle_wake_tx_queue,
5946 	.set_key = wlcore_op_set_key,
5947 	.hw_scan = wl1271_op_hw_scan,
5948 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
5949 	.sched_scan_start = wl1271_op_sched_scan_start,
5950 	.sched_scan_stop = wl1271_op_sched_scan_stop,
5951 	.bss_info_changed = wl1271_op_bss_info_changed,
5952 	.set_frag_threshold = wl1271_op_set_frag_threshold,
5953 	.set_rts_threshold = wl1271_op_set_rts_threshold,
5954 	.conf_tx = wl1271_op_conf_tx,
5955 	.get_tsf = wl1271_op_get_tsf,
5956 	.get_survey = wl1271_op_get_survey,
5957 	.sta_state = wl12xx_op_sta_state,
5958 	.ampdu_action = wl1271_op_ampdu_action,
5959 	.tx_frames_pending = wl1271_tx_frames_pending,
5960 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
5961 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
5962 	.channel_switch = wl12xx_op_channel_switch,
5963 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
5964 	.flush = wlcore_op_flush,
5965 	.remain_on_channel = wlcore_op_remain_on_channel,
5966 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5967 	.add_chanctx = wlcore_op_add_chanctx,
5968 	.remove_chanctx = wlcore_op_remove_chanctx,
5969 	.change_chanctx = wlcore_op_change_chanctx,
5970 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5971 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5972 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5973 	.sta_rc_update = wlcore_op_sta_rc_update,
5974 	.sta_statistics = wlcore_op_sta_statistics,
5975 	.get_expected_throughput = wlcore_op_get_expected_throughput,
5976 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5977 };
5978 
5979 
5980 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
5981 {
5982 	u8 idx;
5983 
5984 	BUG_ON(band >= 2);
5985 
5986 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5987 		wl1271_error("Illegal RX rate from HW: %d", rate);
5988 		return 0;
5989 	}
5990 
5991 	idx = wl->band_rate_to_idx[band][rate];
5992 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5993 		wl1271_error("Unsupported RX rate from HW: %d", rate);
5994 		return 0;
5995 	}
5996 
5997 	return idx;
5998 }
5999 
6000 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6001 {
6002 	int i;
6003 
6004 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6005 		     oui, nic);
6006 
6007 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6008 		wl1271_warning("NIC part of the MAC address wraps around!");
6009 
6010 	for (i = 0; i < wl->num_mac_addr; i++) {
6011 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
6012 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
6013 		wl->addresses[i].addr[2] = (u8) oui;
6014 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
6015 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
6016 		wl->addresses[i].addr[5] = (u8) nic;
6017 		nic++;
6018 	}
6019 
6020 	/* we may be one address short at the most */
6021 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6022 
6023 	/*
6024 	 * turn on the LAA bit in the first address and use it as
6025 	 * the last address.
6026 	 */
6027 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6028 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6029 		memcpy(&wl->addresses[idx], &wl->addresses[0],
6030 		       sizeof(wl->addresses[0]));
6031 		/* LAA bit */
6032 		wl->addresses[idx].addr[0] |= BIT(1);
6033 	}
6034 
6035 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6036 	wl->hw->wiphy->addresses = wl->addresses;
6037 }
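/*
 * Worked example (hypothetical values, assuming WLCORE_NUM_MAC_ADDRESSES
 * is 3): with oui = 0x080028, nic = 0x000001 and num_mac_addr = 2, the
 * loop above produces
 *   addresses[0] = 08:00:28:00:00:01
 *   addresses[1] = 08:00:28:00:00:02
 * and the locally-administered copy becomes
 *   addresses[2] = 0a:00:28:00:00:01  (addresses[0] with BIT(1) set in octet 0)
 */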
6038 
6039 static int wl12xx_get_hw_info(struct wl1271 *wl)
6040 {
6041 	int ret;
6042 
6043 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6044 	if (ret < 0)
6045 		goto out;
6046 
6047 	wl->fuse_oui_addr = 0;
6048 	wl->fuse_nic_addr = 0;
6049 
6050 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6051 	if (ret < 0)
6052 		goto out;
6053 
6054 	if (wl->ops->get_mac)
6055 		ret = wl->ops->get_mac(wl);
6056 
6057 out:
6058 	return ret;
6059 }
6060 
6061 static int wl1271_register_hw(struct wl1271 *wl)
6062 {
6063 	int ret;
6064 	u32 oui_addr = 0, nic_addr = 0;
6065 	struct platform_device *pdev = wl->pdev;
6066 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6067 
6068 	if (wl->mac80211_registered)
6069 		return 0;
6070 
6071 	if (wl->nvs_len >= 12) {
6072 		/* NOTE: To simplify the casting we assume that the
6073 		 * wl->nvs->nvs element comes first, i.e. at the very
6074 		 * beginning of the wl->nvs structure.
6075 		 */
6076 		u8 *nvs_ptr = (u8 *)wl->nvs;
6077 
6078 		oui_addr =
6079 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6080 		nic_addr =
6081 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6082 	}
6083 
6084 	/* if the MAC address is zeroed in the NVS derive from fuse */
6085 	if (oui_addr == 0 && nic_addr == 0) {
6086 		oui_addr = wl->fuse_oui_addr;
6087 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6088 		nic_addr = wl->fuse_nic_addr + 1;
6089 	}
6090 
6091 	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6092 		wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6093 		if (!strcmp(pdev_data->family->name, "wl18xx")) {
6094 			wl1271_warning("This default nvs file can be removed from the file system");
6095 		} else {
6096 			wl1271_warning("Your device performance is not optimized.");
6097 			wl1271_warning("Please use the calibrator tool to configure your device.");
6098 		}
6099 
6100 		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6101 			wl1271_warning("Fuse mac address is zero. using random mac");
6102 			/* Use TI oui and a random nic */
6103 			oui_addr = WLCORE_TI_OUI_ADDRESS;
6104 			nic_addr = get_random_u32();
6105 		} else {
6106 			oui_addr = wl->fuse_oui_addr;
6107 			/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6108 			nic_addr = wl->fuse_nic_addr + 1;
6109 		}
6110 	}
6111 
6112 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6113 
6114 	ret = ieee80211_register_hw(wl->hw);
6115 	if (ret < 0) {
6116 		wl1271_error("unable to register mac80211 hw: %d", ret);
6117 		goto out;
6118 	}
6119 
6120 	wl->mac80211_registered = true;
6121 
6122 	wl1271_debugfs_init(wl);
6123 
6124 	wl1271_notice("loaded");
6125 
6126 out:
6127 	return ret;
6128 }
6129 
6130 static void wl1271_unregister_hw(struct wl1271 *wl)
6131 {
6132 	if (wl->plt)
6133 		wl1271_plt_stop(wl);
6134 
6135 	ieee80211_unregister_hw(wl->hw);
6136 	wl->mac80211_registered = false;
6137 
6138 }
6139 
6140 static int wl1271_init_ieee80211(struct wl1271 *wl)
6141 {
6142 	int i;
6143 	static const u32 cipher_suites[] = {
6144 		WLAN_CIPHER_SUITE_WEP40,
6145 		WLAN_CIPHER_SUITE_WEP104,
6146 		WLAN_CIPHER_SUITE_TKIP,
6147 		WLAN_CIPHER_SUITE_CCMP,
6148 		WL1271_CIPHER_SUITE_GEM,
6149 	};
6150 
6151 	/* The tx descriptor buffer */
6152 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6153 
6154 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6155 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6156 
6157 	/* unit us */
6158 	/* FIXME: find a proper value */
6159 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6160 
6161 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6162 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6163 	ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
6164 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6165 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6166 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6167 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6168 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6169 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6170 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6171 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6172 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6173 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6174 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6175 	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6176 
6177 	wl->hw->wiphy->cipher_suites = cipher_suites;
6178 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6179 
6180 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6181 					 BIT(NL80211_IFTYPE_AP) |
6182 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6183 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6184 #ifdef CONFIG_MAC80211_MESH
6185 					 BIT(NL80211_IFTYPE_MESH_POINT) |
6186 #endif
6187 					 BIT(NL80211_IFTYPE_P2P_GO);
6188 
6189 	wl->hw->wiphy->max_scan_ssids = 1;
6190 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6191 	wl->hw->wiphy->max_match_sets = 16;
6192 	/*
6193 	 * The maximum length of IEs in a scan probe request template is
6194 	 * the maximum possible template length, minus the IEEE 802.11
6195 	 * header of the template.
6196 	 */
6197 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6198 			sizeof(struct ieee80211_header);
6199 
6200 	wl->hw->wiphy->max_sched_scan_reqs = 1;
6201 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6202 		sizeof(struct ieee80211_header);
6203 
6204 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6205 
6206 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6207 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6208 				WIPHY_FLAG_HAS_CHANNEL_SWITCH |
6209 				WIPHY_FLAG_IBSS_RSN;
6210 
6211 	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6212 
6213 	/* make sure all our channels fit in the scanned_ch bitmask */
6214 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6215 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6216 		     WL1271_MAX_CHANNELS);
6217 	/*
6218 	 * clear channel flags from the previous usage
6219 	 * and restore max_power & max_antenna_gain values.
6220 	 */
6221 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6222 		wl1271_band_2ghz.channels[i].flags = 0;
6223 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6224 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6225 	}
6226 
6227 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6228 		wl1271_band_5ghz.channels[i].flags = 0;
6229 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6230 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6231 	}
6232 
6233 	/*
6234 	 * We keep local copies of the band structs because we need to
6235 	 * modify them on a per-device basis.
6236 	 */
6237 	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6238 	       sizeof(wl1271_band_2ghz));
6239 	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6240 	       &wl->ht_cap[NL80211_BAND_2GHZ],
6241 	       sizeof(*wl->ht_cap));
6242 	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6243 	       sizeof(wl1271_band_5ghz));
6244 	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6245 	       &wl->ht_cap[NL80211_BAND_5GHZ],
6246 	       sizeof(*wl->ht_cap));
6247 
6248 	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6249 		&wl->bands[NL80211_BAND_2GHZ];
6250 	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6251 		&wl->bands[NL80211_BAND_5GHZ];
6252 
6253 	/*
6254 	 * allow 4 data queues per supported MAC address, plus
6255 	 * 1 CAB queue per MAC and one global off-channel Tx queue
6256 	 */
6257 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6258 
6259 	/* the last queue is the offchannel queue */
6260 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
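	/*
	 * Worked example (illustrative; assumes NUM_TX_QUEUES == 4 and
	 * WLCORE_NUM_MAC_ADDRESSES == 3 as defined elsewhere in wlcore):
	 * queues = (4 + 1) * 3 + 1 = 16, so hw queues 0..14 serve the
	 * per-MAC data/CAB traffic and queue 15 is the off-channel queue.
	 */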
6261 	wl->hw->max_rates = 1;
6262 
6263 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6264 
6265 	/* the FW answers probe-requests in AP-mode */
6266 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6267 	wl->hw->wiphy->probe_resp_offload =
6268 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6269 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6270 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6271 
6272 	/* allowed interface combinations */
6273 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6274 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6275 
6276 	/* register vendor commands */
6277 	wlcore_set_vendor_commands(wl->hw->wiphy);
6278 
6279 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6280 
6281 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6282 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6283 
6284 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6285 
6286 	return 0;
6287 }
6288 
6289 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6290 				     u32 mbox_size)
6291 {
6292 	struct ieee80211_hw *hw;
6293 	struct wl1271 *wl;
6294 	int i, j, ret;
6295 	unsigned int order;
6296 
6297 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6298 	if (!hw) {
6299 		wl1271_error("could not alloc ieee80211_hw");
6300 		ret = -ENOMEM;
6301 		goto err_hw_alloc;
6302 	}
6303 
6304 	wl = hw->priv;
6305 	memset(wl, 0, sizeof(*wl));
6306 
6307 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6308 	if (!wl->priv) {
6309 		wl1271_error("could not alloc wl priv");
6310 		ret = -ENOMEM;
6311 		goto err_priv_alloc;
6312 	}
6313 
6314 	INIT_LIST_HEAD(&wl->wlvif_list);
6315 
6316 	wl->hw = hw;
6317 
6318 	/*
6319 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6320 	 * we don't allocate any additional resource here, so that's fine.
6321 	 */
6322 	for (i = 0; i < NUM_TX_QUEUES; i++)
6323 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6324 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6325 
6326 	skb_queue_head_init(&wl->deferred_rx_queue);
6327 	skb_queue_head_init(&wl->deferred_tx_queue);
6328 
6329 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6330 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6331 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6332 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6333 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6334 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6335 
6336 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6337 	if (!wl->freezable_wq) {
6338 		ret = -ENOMEM;
6339 		goto err_hw;
6340 	}
6341 
6342 	wl->channel = 0;
6343 	wl->rx_counter = 0;
6344 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6345 	wl->band = NL80211_BAND_2GHZ;
6346 	wl->channel_type = NL80211_CHAN_NO_HT;
6347 	wl->flags = 0;
6348 	wl->sg_enabled = true;
6349 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6350 	wl->recovery_count = 0;
6351 	wl->hw_pg_ver = -1;
6352 	wl->ap_ps_map = 0;
6353 	wl->ap_fw_ps_map = 0;
6354 	wl->quirks = 0;
6355 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6356 	wl->active_sta_count = 0;
6357 	wl->active_link_count = 0;
6358 	wl->fwlog_size = 0;
6359 
6360 	/* The system link is always allocated */
6361 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6362 
6363 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6364 	for (i = 0; i < wl->num_tx_desc; i++)
6365 		wl->tx_frames[i] = NULL;
6366 
6367 	spin_lock_init(&wl->wl_lock);
6368 
6369 	wl->state = WLCORE_STATE_OFF;
6370 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6371 	mutex_init(&wl->mutex);
6372 	mutex_init(&wl->flush_mutex);
6373 	init_completion(&wl->nvs_loading_complete);
6374 
6375 	order = get_order(aggr_buf_size);
6376 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6377 	if (!wl->aggr_buf) {
6378 		ret = -ENOMEM;
6379 		goto err_wq;
6380 	}
6381 	wl->aggr_buf_size = aggr_buf_size;
6382 
6383 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6384 	if (!wl->dummy_packet) {
6385 		ret = -ENOMEM;
6386 		goto err_aggr;
6387 	}
6388 
6389 	/* Allocate one page for the FW log */
6390 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6391 	if (!wl->fwlog) {
6392 		ret = -ENOMEM;
6393 		goto err_dummy_packet;
6394 	}
6395 
6396 	wl->mbox_size = mbox_size;
6397 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6398 	if (!wl->mbox) {
6399 		ret = -ENOMEM;
6400 		goto err_fwlog;
6401 	}
6402 
6403 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6404 	if (!wl->buffer_32) {
6405 		ret = -ENOMEM;
6406 		goto err_mbox;
6407 	}
6408 
6409 	return hw;
6410 
6411 err_mbox:
6412 	kfree(wl->mbox);
6413 
6414 err_fwlog:
6415 	free_page((unsigned long)wl->fwlog);
6416 
6417 err_dummy_packet:
6418 	dev_kfree_skb(wl->dummy_packet);
6419 
6420 err_aggr:
6421 	free_pages((unsigned long)wl->aggr_buf, order);
6422 
6423 err_wq:
6424 	destroy_workqueue(wl->freezable_wq);
6425 
6426 err_hw:
6427 	wl1271_debugfs_exit(wl);
6428 	kfree(wl->priv);
6429 
6430 err_priv_alloc:
6431 	ieee80211_free_hw(hw);
6432 
6433 err_hw_alloc:
6434 
6435 	return ERR_PTR(ret);
6436 }
6437 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6438 
6439 int wlcore_free_hw(struct wl1271 *wl)
6440 {
6441 	/* Unblock any fwlog readers */
6442 	mutex_lock(&wl->mutex);
6443 	wl->fwlog_size = -1;
6444 	mutex_unlock(&wl->mutex);
6445 
6446 	wlcore_sysfs_free(wl);
6447 
6448 	kfree(wl->buffer_32);
6449 	kfree(wl->mbox);
6450 	free_page((unsigned long)wl->fwlog);
6451 	dev_kfree_skb(wl->dummy_packet);
6452 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6453 
6454 	wl1271_debugfs_exit(wl);
6455 
6456 	vfree(wl->fw);
6457 	wl->fw = NULL;
6458 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6459 	kfree(wl->nvs);
6460 	wl->nvs = NULL;
6461 
6462 	kfree(wl->raw_fw_status);
6463 	kfree(wl->fw_status);
6464 	kfree(wl->tx_res_if);
6465 	destroy_workqueue(wl->freezable_wq);
6466 
6467 	kfree(wl->priv);
6468 	ieee80211_free_hw(wl->hw);
6469 
6470 	return 0;
6471 }
6472 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6473 
6474 #ifdef CONFIG_PM
6475 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6476 	.flags = WIPHY_WOWLAN_ANY,
6477 	.n_patterns = WL1271_MAX_RX_FILTERS,
6478 	.pattern_min_len = 1,
6479 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6480 };
6481 #endif
6482 
6483 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6484 {
6485 	return IRQ_WAKE_THREAD;
6486 }
6487 
6488 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6489 {
6490 	struct wl1271 *wl = context;
6491 	struct platform_device *pdev = wl->pdev;
6492 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6493 	struct resource *res;
6494 
6495 	int ret;
6496 	irq_handler_t hardirq_fn = NULL;
6497 
6498 	if (fw) {
6499 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6500 		if (!wl->nvs) {
6501 			wl1271_error("Could not allocate nvs data");
6502 			goto out;
6503 		}
6504 		wl->nvs_len = fw->size;
6505 	} else if (pdev_data->family->nvs_name) {
6506 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6507 			     pdev_data->family->nvs_name);
6508 		wl->nvs = NULL;
6509 		wl->nvs_len = 0;
6510 	} else {
6511 		wl->nvs = NULL;
6512 		wl->nvs_len = 0;
6513 	}
6514 
6515 	ret = wl->ops->setup(wl);
6516 	if (ret < 0)
6517 		goto out_free_nvs;
6518 
6519 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6520 
6521 	/* adjust some runtime configuration parameters */
6522 	wlcore_adjust_conf(wl);
6523 
6524 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6525 	if (!res) {
6526 		wl1271_error("Could not get IRQ resource");
6527 		goto out_free_nvs;
6528 	}
6529 
6530 	wl->irq = res->start;
6531 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6532 	wl->if_ops = pdev_data->if_ops;
6533 
6534 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6535 		hardirq_fn = wlcore_hardirq;
6536 	else
6537 		wl->irq_flags |= IRQF_ONESHOT;
6538 
6539 	ret = wl12xx_set_power_on(wl);
6540 	if (ret < 0)
6541 		goto out_free_nvs;
6542 
6543 	ret = wl12xx_get_hw_info(wl);
6544 	if (ret < 0) {
6545 		wl1271_error("couldn't get hw info");
6546 		wl1271_power_off(wl);
6547 		goto out_free_nvs;
6548 	}
6549 
6550 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6551 				   wl->irq_flags, pdev->name, wl);
6552 	if (ret < 0) {
6553 		wl1271_error("interrupt configuration failed");
6554 		wl1271_power_off(wl);
6555 		goto out_free_nvs;
6556 	}
6557 
6558 #ifdef CONFIG_PM
6559 	device_init_wakeup(wl->dev, true);
6560 
6561 	ret = enable_irq_wake(wl->irq);
6562 	if (!ret) {
6563 		wl->irq_wake_enabled = true;
6564 		if (pdev_data->pwr_in_suspend)
6565 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6566 	}
6567 
6568 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6569 	if (res) {
6570 		wl->wakeirq = res->start;
6571 		wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6572 		ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6573 		if (ret)
6574 			wl->wakeirq = -ENODEV;
6575 	} else {
6576 		wl->wakeirq = -ENODEV;
6577 	}
6578 #endif
6579 	disable_irq(wl->irq);
6580 	wl1271_power_off(wl);
6581 
6582 	ret = wl->ops->identify_chip(wl);
6583 	if (ret < 0)
6584 		goto out_irq;
6585 
6586 	ret = wl1271_init_ieee80211(wl);
6587 	if (ret)
6588 		goto out_irq;
6589 
6590 	ret = wl1271_register_hw(wl);
6591 	if (ret)
6592 		goto out_irq;
6593 
6594 	ret = wlcore_sysfs_init(wl);
6595 	if (ret)
6596 		goto out_unreg;
6597 
6598 	wl->initialized = true;
6599 	goto out;
6600 
6601 out_unreg:
6602 	wl1271_unregister_hw(wl);
6603 
6604 out_irq:
6605 	if (wl->wakeirq >= 0)
6606 		dev_pm_clear_wake_irq(wl->dev);
6607 	device_init_wakeup(wl->dev, false);
6608 	free_irq(wl->irq, wl);
6609 
6610 out_free_nvs:
6611 	kfree(wl->nvs);
6612 
6613 out:
6614 	release_firmware(fw);
6615 	complete_all(&wl->nvs_loading_complete);
6616 }
6617 
6618 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6619 {
6620 	struct wl1271 *wl = dev_get_drvdata(dev);
6621 	struct wl12xx_vif *wlvif;
6622 	int error;
6623 
6624 	/* We do not enter elp sleep in PLT mode */
6625 	if (wl->plt)
6626 		return 0;
6627 
6628 	/* Nothing to do if no ELP mode requested */
6629 	if (wl->sleep_auth != WL1271_PSM_ELP)
6630 		return 0;
6631 
6632 	wl12xx_for_each_wlvif(wl, wlvif) {
6633 		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6634 		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6635 			return -EBUSY;
6636 	}
6637 
6638 	wl1271_debug(DEBUG_PSM, "chip to elp");
6639 	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6640 	if (error < 0) {
6641 		wl12xx_queue_recovery_work(wl);
6642 
6643 		return error;
6644 	}
6645 
6646 	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6647 
6648 	return 0;
6649 }
6650 
6651 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6652 {
6653 	struct wl1271 *wl = dev_get_drvdata(dev);
6654 	DECLARE_COMPLETION_ONSTACK(compl);
6655 	unsigned long flags;
6656 	int ret;
6657 	unsigned long start_time = jiffies;
6658 	bool recovery = false;
6659 
6660 	/* Nothing to do if no ELP mode requested */
6661 	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6662 		return 0;
6663 
6664 	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6665 
6666 	spin_lock_irqsave(&wl->wl_lock, flags);
6667 	wl->elp_compl = &compl;
6668 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6669 
6670 	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6671 	if (ret < 0) {
6672 		recovery = true;
6673 	} else if (!test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) {
6674 		ret = wait_for_completion_timeout(&compl,
6675 			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6676 		if (ret == 0) {
6677 			wl1271_warning("ELP wakeup timeout!");
6678 			recovery = true;
6679 		}
6680 	}
6681 
6682 	spin_lock_irqsave(&wl->wl_lock, flags);
6683 	wl->elp_compl = NULL;
6684 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6685 	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6686 
6687 	if (recovery) {
6688 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6689 		wl12xx_queue_recovery_work(wl);
6690 	} else {
6691 		wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6692 			     jiffies_to_msecs(jiffies - start_time));
6693 	}
6694 
6695 	return 0;
6696 }
6697 
6698 static const struct dev_pm_ops wlcore_pm_ops = {
6699 	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6700 			   wlcore_runtime_resume,
6701 			   NULL)
6702 };
6703 
6704 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6705 {
6706 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6707 	const char *nvs_name;
6708 	int ret = 0;
6709 
6710 	if (!wl->ops || !wl->ptable || !pdev_data)
6711 		return -EINVAL;
6712 
6713 	wl->dev = &pdev->dev;
6714 	wl->pdev = pdev;
6715 	platform_set_drvdata(pdev, wl);
6716 
6717 	if (pdev_data->family && pdev_data->family->nvs_name) {
6718 		nvs_name = pdev_data->family->nvs_name;
6719 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
6720 					      nvs_name, &pdev->dev, GFP_KERNEL,
6721 					      wl, wlcore_nvs_cb);
6722 		if (ret < 0) {
6723 			wl1271_error("request_firmware_nowait failed for %s: %d",
6724 				     nvs_name, ret);
6725 			complete_all(&wl->nvs_loading_complete);
6726 		}
6727 	} else {
6728 		wlcore_nvs_cb(NULL, wl);
6729 	}
6730 
6731 	wl->dev->driver->pm = &wlcore_pm_ops;
6732 	pm_runtime_set_autosuspend_delay(wl->dev, 50);
6733 	pm_runtime_use_autosuspend(wl->dev);
6734 	pm_runtime_enable(wl->dev);
6735 
6736 	return ret;
6737 }
6738 EXPORT_SYMBOL_GPL(wlcore_probe);
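/*
 * Usage sketch (editor's illustration, not part of wlcore): a chip-family
 * driver is expected to allocate the shared state with wlcore_alloc_hw(),
 * fill in its chip-specific ops and partition table, and then hand the
 * device over to wlcore_probe() from its own platform probe callback.
 * All wlxxxx_* names below are hypothetical placeholders.
 */
#if 0	/* illustrative only */
static int wlxxxx_probe(struct platform_device *pdev)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;

	hw = wlcore_alloc_hw(sizeof(struct wlxxxx_priv),
			     WLXXXX_AGGR_BUF_SIZE,
			     sizeof(struct wlxxxx_event_mailbox));
	if (IS_ERR(hw))
		return PTR_ERR(hw);

	wl = hw->priv;
	wl->ops = &wlxxxx_ops;		/* chip-specific wlcore_ops */
	wl->ptable = wlxxxx_ptable;	/* chip-specific partition table */

	return wlcore_probe(wl, pdev);
}
#endif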
6739 
6740 int wlcore_remove(struct platform_device *pdev)
6741 {
6742 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6743 	struct wl1271 *wl = platform_get_drvdata(pdev);
6744 	int error;
6745 
6746 	error = pm_runtime_get_sync(wl->dev);
6747 	if (error < 0)
6748 		dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6749 
6750 	wl->dev->driver->pm = NULL;
6751 
6752 	if (pdev_data->family && pdev_data->family->nvs_name)
6753 		wait_for_completion(&wl->nvs_loading_complete);
6754 	if (!wl->initialized)
6755 		return 0;
6756 
6757 	if (wl->wakeirq >= 0) {
6758 		dev_pm_clear_wake_irq(wl->dev);
6759 		wl->wakeirq = -ENODEV;
6760 	}
6761 
6762 	device_init_wakeup(wl->dev, false);
6763 
6764 	if (wl->irq_wake_enabled)
6765 		disable_irq_wake(wl->irq);
6766 
6767 	wl1271_unregister_hw(wl);
6768 
6769 	pm_runtime_put_sync(wl->dev);
6770 	pm_runtime_dont_use_autosuspend(wl->dev);
6771 	pm_runtime_disable(wl->dev);
6772 
6773 	free_irq(wl->irq, wl);
6774 	wlcore_free_hw(wl);
6775 
6776 	return 0;
6777 }
6778 EXPORT_SYMBOL_GPL(wlcore_remove);
6779 
6780 u32 wl12xx_debug_level = DEBUG_NONE;
6781 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6782 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6783 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6784 
6785 module_param_named(fwlog, fwlog_param, charp, 0);
6786 MODULE_PARM_DESC(fwlog,
6787 		 "FW logger options: continuous, dbgpins or disable");
6788 
6789 module_param(fwlog_mem_blocks, int, 0600);
6790 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6791 
6792 module_param(bug_on_recovery, int, 0600);
6793 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6794 
6795 module_param(no_recovery, int, 0600);
6796 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6797 
6798 MODULE_LICENSE("GPL");
6799 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6800 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6801