1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * This file is part of wlcore
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  * Copyright (C) 2011-2013 Texas Instruments Inc.
7  */
8 
9 #include <linux/module.h>
10 #include <linux/firmware.h>
11 #include <linux/etherdevice.h>
12 #include <linux/vmalloc.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/pm_runtime.h>
16 #include <linux/pm_wakeirq.h>
17 
18 #include "wlcore.h"
19 #include "debug.h"
20 #include "wl12xx_80211.h"
21 #include "io.h"
22 #include "tx.h"
23 #include "ps.h"
24 #include "init.h"
25 #include "debugfs.h"
26 #include "testmode.h"
27 #include "vendor_cmd.h"
28 #include "scan.h"
29 #include "hw_ops.h"
30 #include "sysfs.h"
31 
32 #define WL1271_BOOT_RETRIES 3
33 #define WL1271_WAKEUP_TIMEOUT 500
34 
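/*
 * Optional overrides applied in wlcore_adjust_conf(): a value of -1
 * (or a NULL fwlog_param) leaves the corresponding wl->conf default
 * untouched.
 */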
35 static char *fwlog_param;
36 static int fwlog_mem_blocks = -1;
37 static int bug_on_recovery = -1;
38 static int no_recovery     = -1;
39 
40 static void __wl1271_op_remove_interface(struct wl1271 *wl,
41 					 struct ieee80211_vif *vif,
42 					 bool reset_tx_queues);
43 static void wlcore_op_stop_locked(struct wl1271 *wl);
44 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
45 
46 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
47 {
48 	int ret;
49 
50 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
51 		return -EINVAL;
52 
53 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
54 		return 0;
55 
56 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
57 		return 0;
58 
59 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
60 	if (ret < 0)
61 		return ret;
62 
63 	wl1271_info("Association completed.");
64 	return 0;
65 }
66 
67 static void wl1271_reg_notify(struct wiphy *wiphy,
68 			      struct regulatory_request *request)
69 {
70 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
71 	struct wl1271 *wl = hw->priv;
72 
73 	/* copy the current dfs region */
74 	if (request)
75 		wl->dfs_region = request->dfs_region;
76 
77 	wlcore_regdomain_config(wl);
78 }
79 
80 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
81 				   bool enable)
82 {
83 	int ret = 0;
84 
85 	/* we should hold wl->mutex */
86 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
87 	if (ret < 0)
88 		goto out;
89 
90 	if (enable)
91 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
92 	else
93 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
94 out:
95 	return ret;
96 }
97 
98 /*
99  * this function is called when the rx_streaming interval
100  * has been changed or rx_streaming should be disabled
101  */
102 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
103 {
104 	int ret = 0;
105 	int period = wl->conf.rx_streaming.interval;
106 
107 	/* don't reconfigure if rx_streaming is disabled */
108 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
109 		goto out;
110 
111 	/* reconfigure/disable according to new streaming_period */
112 	if (period &&
113 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
114 	    (wl->conf.rx_streaming.always ||
115 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
116 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
117 	else {
118 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
119 		/* don't cancel_work_sync since we might deadlock */
120 		del_timer_sync(&wlvif->rx_streaming_timer);
121 	}
122 out:
123 	return ret;
124 }
125 
126 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
127 {
128 	int ret;
129 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
130 						rx_streaming_enable_work);
131 	struct wl1271 *wl = wlvif->wl;
132 
133 	mutex_lock(&wl->mutex);
134 
135 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
136 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
137 	    (!wl->conf.rx_streaming.always &&
138 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
139 		goto out;
140 
141 	if (!wl->conf.rx_streaming.interval)
142 		goto out;
143 
144 	ret = pm_runtime_get_sync(wl->dev);
145 	if (ret < 0) {
146 		pm_runtime_put_noidle(wl->dev);
147 		goto out;
148 	}
149 
150 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
151 	if (ret < 0)
152 		goto out_sleep;
153 
154 	/* stop it after some time of inactivity */
155 	mod_timer(&wlvif->rx_streaming_timer,
156 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
157 
158 out_sleep:
159 	pm_runtime_mark_last_busy(wl->dev);
160 	pm_runtime_put_autosuspend(wl->dev);
161 out:
162 	mutex_unlock(&wl->mutex);
163 }
164 
165 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
166 {
167 	int ret;
168 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
169 						rx_streaming_disable_work);
170 	struct wl1271 *wl = wlvif->wl;
171 
172 	mutex_lock(&wl->mutex);
173 
174 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
175 		goto out;
176 
177 	ret = pm_runtime_get_sync(wl->dev);
178 	if (ret < 0) {
179 		pm_runtime_put_noidle(wl->dev);
180 		goto out;
181 	}
182 
183 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
184 	if (ret)
185 		goto out_sleep;
186 
187 out_sleep:
188 	pm_runtime_mark_last_busy(wl->dev);
189 	pm_runtime_put_autosuspend(wl->dev);
190 out:
191 	mutex_unlock(&wl->mutex);
192 }
193 
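/*
 * rx_streaming duration timer: it runs in atomic context, so it only queues
 * the disable work (which takes wl->mutex) instead of touching the chip
 * directly.
 */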
194 static void wl1271_rx_streaming_timer(struct timer_list *t)
195 {
196 	struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
197 	struct wl1271 *wl = wlvif->wl;
198 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
199 }
200 
201 /* wl->mutex must be taken */
202 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
203 {
204 	/* if the watchdog is not armed, don't do anything */
205 	if (wl->tx_allocated_blocks == 0)
206 		return;
207 
208 	cancel_delayed_work(&wl->tx_watchdog_work);
209 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
210 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
211 }
212 
213 static void wlcore_rc_update_work(struct work_struct *work)
214 {
215 	int ret;
216 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
217 						rc_update_work);
218 	struct wl1271 *wl = wlvif->wl;
219 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
220 
221 	mutex_lock(&wl->mutex);
222 
223 	if (unlikely(wl->state != WLCORE_STATE_ON))
224 		goto out;
225 
226 	ret = pm_runtime_get_sync(wl->dev);
227 	if (ret < 0) {
228 		pm_runtime_put_noidle(wl->dev);
229 		goto out;
230 	}
231 
232 	if (ieee80211_vif_is_mesh(vif)) {
233 		ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
234 						     true, wlvif->sta.hlid);
235 		if (ret < 0)
236 			goto out_sleep;
237 	} else {
238 		wlcore_hw_sta_rc_update(wl, wlvif);
239 	}
240 
241 out_sleep:
242 	pm_runtime_mark_last_busy(wl->dev);
243 	pm_runtime_put_autosuspend(wl->dev);
244 out:
245 	mutex_unlock(&wl->mutex);
246 }
247 
248 static void wl12xx_tx_watchdog_work(struct work_struct *work)
249 {
250 	struct delayed_work *dwork;
251 	struct wl1271 *wl;
252 
253 	dwork = to_delayed_work(work);
254 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
255 
256 	mutex_lock(&wl->mutex);
257 
258 	if (unlikely(wl->state != WLCORE_STATE_ON))
259 		goto out;
260 
261 	/* Tx went out in the meantime - everything is ok */
262 	if (unlikely(wl->tx_allocated_blocks == 0))
263 		goto out;
264 
265 	/*
266 	 * if a ROC is in progress, we might not have any Tx for a long
267 	 * time (e.g. pending Tx on the non-ROC channels)
268 	 */
269 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
270 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
271 			     wl->conf.tx.tx_watchdog_timeout);
272 		wl12xx_rearm_tx_watchdog_locked(wl);
273 		goto out;
274 	}
275 
276 	/*
277 	 * if a scan is in progress, we might not have any Tx for a long
278 	 * time
279 	 */
280 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
281 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
282 			     wl->conf.tx.tx_watchdog_timeout);
283 		wl12xx_rearm_tx_watchdog_locked(wl);
284 		goto out;
285 	}
286 
287 	/*
288 	 * AP might cache a frame for a long time for a sleeping station,
289 	 * so rearm the timer if there's an AP interface with stations. If
290 	 * Tx is genuinely stuck, we will hopefully discover it when all
291 	 * stations are removed due to inactivity.
292 	 */
293 	if (wl->active_sta_count) {
294 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
295 			     " %d stations",
296 			      wl->conf.tx.tx_watchdog_timeout,
297 			      wl->active_sta_count);
298 		wl12xx_rearm_tx_watchdog_locked(wl);
299 		goto out;
300 	}
301 
302 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
303 		     wl->conf.tx.tx_watchdog_timeout);
304 	wl12xx_queue_recovery_work(wl);
305 
306 out:
307 	mutex_unlock(&wl->mutex);
308 }
309 
310 static void wlcore_adjust_conf(struct wl1271 *wl)
311 {
312 
313 	if (fwlog_param) {
314 		if (!strcmp(fwlog_param, "continuous")) {
315 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
316 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
317 		} else if (!strcmp(fwlog_param, "dbgpins")) {
318 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
319 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
320 		} else if (!strcmp(fwlog_param, "disable")) {
321 			wl->conf.fwlog.mem_blocks = 0;
322 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
323 		} else {
324 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
325 		}
326 	}
327 
328 	if (bug_on_recovery != -1)
329 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
330 
331 	if (no_recovery != -1)
332 		wl->conf.recovery.no_recovery = (u8) no_recovery;
333 }
334 
335 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
336 					struct wl12xx_vif *wlvif,
337 					u8 hlid, u8 tx_pkts)
338 {
339 	bool fw_ps;
340 
341 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
342 
343 	/*
344 	 * Wake up from high level PS if the STA is asleep with too few
345 	 * packets in FW or if the STA is awake.
346 	 */
347 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
348 		wl12xx_ps_link_end(wl, wlvif, hlid);
349 
350 	/*
351 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
352 	 * Make an exception if this is the only connected link. In this
353 	 * case FW-memory congestion is less of a problem.
354 	 * Note that a single connected STA means 2*ap_count + 1 active links,
355 	 * since we must account for the global and broadcast AP links
356 	 * for each AP. The "fw_ps" check assures us the other link is a STA
357 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
358 	 */
359 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
360 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
361 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
362 }
363 
364 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
365 					   struct wl12xx_vif *wlvif,
366 					   struct wl_fw_status *status)
367 {
368 	unsigned long cur_fw_ps_map;
369 	u8 hlid;
370 
371 	cur_fw_ps_map = status->link_ps_bitmap;
372 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
373 		wl1271_debug(DEBUG_PSM,
374 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
375 			     wl->ap_fw_ps_map, cur_fw_ps_map,
376 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
377 
378 		wl->ap_fw_ps_map = cur_fw_ps_map;
379 	}
380 
381 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
382 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
383 					    wl->links[hlid].allocated_pkts);
384 }
385 
386 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
387 {
388 	struct wl12xx_vif *wlvif;
389 	u32 old_tx_blk_count = wl->tx_blocks_available;
390 	int avail, freed_blocks;
391 	int i;
392 	int ret;
393 	struct wl1271_link *lnk;
394 
395 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
396 				   wl->raw_fw_status,
397 				   wl->fw_status_len, false);
398 	if (ret < 0)
399 		return ret;
400 
401 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
402 
403 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
404 		     "drv_rx_counter = %d, tx_results_counter = %d)",
405 		     status->intr,
406 		     status->fw_rx_counter,
407 		     status->drv_rx_counter,
408 		     status->tx_results_counter);
409 
410 	for (i = 0; i < NUM_TX_QUEUES; i++) {
411 		/* prevent wrap-around in freed-packets counter */
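		/* (the firmware counters wrap at 8 bits, hence the & 0xff) */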
412 		wl->tx_allocated_pkts[i] -=
413 				(status->counters.tx_released_pkts[i] -
414 				wl->tx_pkts_freed[i]) & 0xff;
415 
416 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
417 	}
418 
419 
420 	for_each_set_bit(i, wl->links_map, wl->num_links) {
421 		u8 diff;
422 		lnk = &wl->links[i];
423 
424 		/* prevent wrap-around in freed-packets counter */
425 		diff = (status->counters.tx_lnk_free_pkts[i] -
426 		       lnk->prev_freed_pkts) & 0xff;
427 
428 		if (diff == 0)
429 			continue;
430 
431 		lnk->allocated_pkts -= diff;
432 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
433 
434 		/* accumulate the prev_freed_pkts counter */
435 		lnk->total_freed_pkts += diff;
436 	}
437 
438 	/* prevent wrap-around in total blocks counter */
439 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
440 		freed_blocks = status->total_released_blks -
441 			       wl->tx_blocks_freed;
442 	else
443 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
444 			       status->total_released_blks;
445 
446 	wl->tx_blocks_freed = status->total_released_blks;
447 
448 	wl->tx_allocated_blocks -= freed_blocks;
449 
450 	/*
451 	 * If the FW freed some blocks:
452 	 * If we still have allocated blocks - re-arm the timer, Tx is
453 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
454 	 */
455 	if (freed_blocks) {
456 		if (wl->tx_allocated_blocks)
457 			wl12xx_rearm_tx_watchdog_locked(wl);
458 		else
459 			cancel_delayed_work(&wl->tx_watchdog_work);
460 	}
461 
462 	avail = status->tx_total - wl->tx_allocated_blocks;
463 
464 	/*
465 	 * The FW might change the total number of TX memblocks before
466 	 * we get a notification about blocks being released. Thus, the
467 	 * available blocks calculation might yield a temporary result
468 	 * which is lower than the actual available blocks. Keeping in
469 	 * mind that only blocks that were allocated can be moved from
470 	 * TX to RX, tx_blocks_available should never decrease here.
471 	 */
472 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
473 				      avail);
474 
475 	/* if more blocks are available now, tx work can be scheduled */
476 	if (wl->tx_blocks_available > old_tx_blk_count)
477 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
478 
479 	/* for AP update num of allocated TX blocks per link and ps status */
480 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
481 		wl12xx_irq_update_links_status(wl, wlvif, status);
482 	}
483 
484 	/* update the host-chipset time offset */
485 	wl->time_offset = (ktime_get_boottime_ns() >> 10) -
486 		(s64)(status->fw_localtime);
487 
488 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
489 
490 	return 0;
491 }
492 
493 static void wl1271_flush_deferred_work(struct wl1271 *wl)
494 {
495 	struct sk_buff *skb;
496 
497 	/* Pass all received frames to the network stack */
498 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
499 		ieee80211_rx_ni(wl->hw, skb);
500 
501 	/* Return sent skbs to the network stack */
502 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
503 		ieee80211_tx_status_ni(wl->hw, skb);
504 }
505 
506 static void wl1271_netstack_work(struct work_struct *work)
507 {
508 	struct wl1271 *wl =
509 		container_of(work, struct wl1271, netstack_work);
510 
511 	do {
512 		wl1271_flush_deferred_work(wl);
513 	} while (skb_queue_len(&wl->deferred_rx_queue));
514 }
515 
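/* upper bound on fw-status handling iterations per threaded-irq invocation */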
516 #define WL1271_IRQ_MAX_LOOPS 256
517 
518 static int wlcore_irq_locked(struct wl1271 *wl)
519 {
520 	int ret = 0;
521 	u32 intr;
522 	int loopcount = WL1271_IRQ_MAX_LOOPS;
523 	bool run_tx_queue = true;
524 	bool done = false;
525 	unsigned int defer_count;
526 	unsigned long flags;
527 
528 	/*
529 	 * In case edge triggered interrupt must be used, we cannot iterate
530 	 * more than once without introducing race conditions with the hardirq.
531 	 */
532 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
533 		loopcount = 1;
534 
535 	wl1271_debug(DEBUG_IRQ, "IRQ work");
536 
537 	if (unlikely(wl->state != WLCORE_STATE_ON))
538 		goto out;
539 
540 	ret = pm_runtime_get_sync(wl->dev);
541 	if (ret < 0) {
542 		pm_runtime_put_noidle(wl->dev);
543 		goto out;
544 	}
545 
546 	while (!done && loopcount--) {
547 		smp_mb__after_atomic();
548 
549 		ret = wlcore_fw_status(wl, wl->fw_status);
550 		if (ret < 0)
551 			goto err_ret;
552 
553 		wlcore_hw_tx_immediate_compl(wl);
554 
555 		intr = wl->fw_status->intr;
556 		intr &= WLCORE_ALL_INTR_MASK;
557 		if (!intr) {
558 			done = true;
559 			continue;
560 		}
561 
562 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
563 			wl1271_error("HW watchdog interrupt received! starting recovery.");
564 			wl->watchdog_recovery = true;
565 			ret = -EIO;
566 
567 			/* restarting the chip. ignore any other interrupt. */
568 			goto err_ret;
569 		}
570 
571 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
572 			wl1271_error("SW watchdog interrupt received! "
573 				     "starting recovery.");
574 			wl->watchdog_recovery = true;
575 			ret = -EIO;
576 
577 			/* restarting the chip. ignore any other interrupt. */
578 			goto err_ret;
579 		}
580 
581 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
582 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
583 
584 			ret = wlcore_rx(wl, wl->fw_status);
585 			if (ret < 0)
586 				goto err_ret;
587 
588 			/* Check if any tx blocks were freed */
589 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
590 				if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
591 					if (!wl1271_tx_total_queue_count(wl))
592 						run_tx_queue = false;
593 					spin_unlock_irqrestore(&wl->wl_lock, flags);
594 				}
595 
596 				/*
597 				 * In order to avoid starvation of the TX path,
598 				 * call the work function directly.
599 				 */
600 				if (run_tx_queue) {
601 					ret = wlcore_tx_work_locked(wl);
602 					if (ret < 0)
603 						goto err_ret;
604 				}
605 			}
606 
607 			/* check for tx results */
608 			ret = wlcore_hw_tx_delayed_compl(wl);
609 			if (ret < 0)
610 				goto err_ret;
611 
612 			/* Make sure the deferred queues don't get too long */
613 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
614 				      skb_queue_len(&wl->deferred_rx_queue);
615 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
616 				wl1271_flush_deferred_work(wl);
617 		}
618 
619 		if (intr & WL1271_ACX_INTR_EVENT_A) {
620 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
621 			ret = wl1271_event_handle(wl, 0);
622 			if (ret < 0)
623 				goto err_ret;
624 		}
625 
626 		if (intr & WL1271_ACX_INTR_EVENT_B) {
627 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
628 			ret = wl1271_event_handle(wl, 1);
629 			if (ret < 0)
630 				goto err_ret;
631 		}
632 
633 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
634 			wl1271_debug(DEBUG_IRQ,
635 				     "WL1271_ACX_INTR_INIT_COMPLETE");
636 
637 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
638 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
639 	}
640 
641 err_ret:
642 	pm_runtime_mark_last_busy(wl->dev);
643 	pm_runtime_put_autosuspend(wl->dev);
644 
645 out:
646 	return ret;
647 }
648 
649 static irqreturn_t wlcore_irq(int irq, void *cookie)
650 {
651 	int ret;
652 	unsigned long flags;
653 	struct wl1271 *wl = cookie;
654 	bool queue_tx_work = true;
655 
656 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
657 
658 	/* signal the ELP wakeup completion, if anyone is waiting for it */
659 	if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) {
660 		spin_lock_irqsave(&wl->wl_lock, flags);
661 		if (wl->elp_compl)
662 			complete(wl->elp_compl);
663 		spin_unlock_irqrestore(&wl->wl_lock, flags);
664 	}
665 
666 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
667 		/* don't enqueue a work right now. mark it as pending */
668 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
669 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
670 		spin_lock_irqsave(&wl->wl_lock, flags);
671 		disable_irq_nosync(wl->irq);
672 		pm_wakeup_event(wl->dev, 0);
673 		spin_unlock_irqrestore(&wl->wl_lock, flags);
674 		goto out_handled;
675 	}
676 
677 	/* TX might be handled here, avoid redundant work */
678 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
679 	cancel_work_sync(&wl->tx_work);
680 
681 	mutex_lock(&wl->mutex);
682 
683 	ret = wlcore_irq_locked(wl);
684 	if (ret)
685 		wl12xx_queue_recovery_work(wl);
686 
687 	/* In case TX was not handled in wlcore_irq_locked(), queue TX work */
688 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
689 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
690 		if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
691 			if (!wl1271_tx_total_queue_count(wl))
692 				queue_tx_work = false;
693 			spin_unlock_irqrestore(&wl->wl_lock, flags);
694 		}
695 		if (queue_tx_work)
696 			ieee80211_queue_work(wl->hw, &wl->tx_work);
697 	}
698 
699 	mutex_unlock(&wl->mutex);
700 
701 out_handled:
702 	clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
703 
704 	return IRQ_HANDLED;
705 }
706 
707 struct vif_counter_data {
708 	u8 counter;
709 
710 	struct ieee80211_vif *cur_vif;
711 	bool cur_vif_running;
712 };
713 
714 static void wl12xx_vif_count_iter(void *data, u8 *mac,
715 				  struct ieee80211_vif *vif)
716 {
717 	struct vif_counter_data *counter = data;
718 
719 	counter->counter++;
720 	if (counter->cur_vif == vif)
721 		counter->cur_vif_running = true;
722 }
723 
724 /* caller must not hold wl->mutex, as it might deadlock */
725 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
726 			       struct ieee80211_vif *cur_vif,
727 			       struct vif_counter_data *data)
728 {
729 	memset(data, 0, sizeof(*data));
730 	data->cur_vif = cur_vif;
731 
732 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
733 					    wl12xx_vif_count_iter, data);
734 }
735 
736 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
737 {
738 	const struct firmware *fw;
739 	const char *fw_name;
740 	enum wl12xx_fw_type fw_type;
741 	int ret;
742 
743 	if (plt) {
744 		fw_type = WL12XX_FW_TYPE_PLT;
745 		fw_name = wl->plt_fw_name;
746 	} else {
747 		/*
748 		 * we can't call wl12xx_get_vif_count() here because
749 		 * wl->mutex is taken, so use the cached last_vif_count value
750 		 */
751 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
752 			fw_type = WL12XX_FW_TYPE_MULTI;
753 			fw_name = wl->mr_fw_name;
754 		} else {
755 			fw_type = WL12XX_FW_TYPE_NORMAL;
756 			fw_name = wl->sr_fw_name;
757 		}
758 	}
759 
760 	if (wl->fw_type == fw_type)
761 		return 0;
762 
763 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
764 
765 	ret = request_firmware(&fw, fw_name, wl->dev);
766 
767 	if (ret < 0) {
768 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
769 		return ret;
770 	}
771 
772 	if (fw->size % 4) {
773 		wl1271_error("firmware size is not multiple of 32 bits: %zu",
774 			     fw->size);
775 		ret = -EILSEQ;
776 		goto out;
777 	}
778 
779 	vfree(wl->fw);
780 	wl->fw_type = WL12XX_FW_TYPE_NONE;
781 	wl->fw_len = fw->size;
782 	wl->fw = vmalloc(wl->fw_len);
783 
784 	if (!wl->fw) {
785 		wl1271_error("could not allocate memory for the firmware");
786 		ret = -ENOMEM;
787 		goto out;
788 	}
789 
790 	memcpy(wl->fw, fw->data, wl->fw_len);
791 	ret = 0;
792 	wl->fw_type = fw_type;
793 out:
794 	release_firmware(fw);
795 
796 	return ret;
797 }
798 
799 void wl12xx_queue_recovery_work(struct wl1271 *wl)
800 {
801 	/* Avoid a recursive recovery */
802 	if (wl->state == WLCORE_STATE_ON) {
803 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
804 				  &wl->flags));
805 
806 		wl->state = WLCORE_STATE_RESTARTING;
807 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
808 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
809 	}
810 }
811 
812 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
813 {
814 	size_t len;
815 
816 	/* Make sure we have enough room */
817 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
818 
819 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
820 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
821 	wl->fwlog_size += len;
822 
823 	return len;
824 }
825 
826 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
827 {
828 	u32 end_of_log = 0;
829 	int error;
830 
831 	if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
832 		return;
833 
834 	wl1271_info("Reading FW panic log");
835 
836 	/*
837 	 * Make sure the chip is awake and the logger isn't active.
839 	 * Do not send a stop fwlog command if the fw is hung or if
839 	 * dbgpins are used (due to some fw bug).
840 	 */
841 	error = pm_runtime_get_sync(wl->dev);
842 	if (error < 0) {
843 		pm_runtime_put_noidle(wl->dev);
844 		return;
845 	}
846 	if (!wl->watchdog_recovery &&
847 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
848 		wl12xx_cmd_stop_fwlog(wl);
849 
850 	/* Traverse the memory blocks linked list */
851 	do {
852 		end_of_log = wlcore_event_fw_logger(wl);
853 		if (end_of_log == 0) {
854 			msleep(100);
855 			end_of_log = wlcore_event_fw_logger(wl);
856 		}
857 	} while (end_of_log != 0);
858 }
859 
860 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
861 				   u8 hlid, struct ieee80211_sta *sta)
862 {
863 	struct wl1271_station *wl_sta;
864 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
865 
866 	wl_sta = (void *)sta->drv_priv;
867 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
868 
869 	/*
870 	 * increment the initial seq number on recovery to account for
871 	 * transmitted packets that we haven't yet got in the FW status
872 	 */
873 	if (wlvif->encryption_type == KEY_GEM)
874 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
875 
876 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
877 		wl_sta->total_freed_pkts += sqn_recovery_padding;
878 }
879 
880 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
881 					struct wl12xx_vif *wlvif,
882 					u8 hlid, const u8 *addr)
883 {
884 	struct ieee80211_sta *sta;
885 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
886 
887 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
888 		    is_zero_ether_addr(addr)))
889 		return;
890 
891 	rcu_read_lock();
892 	sta = ieee80211_find_sta(vif, addr);
893 	if (sta)
894 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
895 	rcu_read_unlock();
896 }
897 
898 static void wlcore_print_recovery(struct wl1271 *wl)
899 {
900 	u32 pc = 0;
901 	u32 hint_sts = 0;
902 	int ret;
903 
904 	wl1271_info("Hardware recovery in progress. FW ver: %s",
905 		    wl->chip.fw_ver_str);
906 
907 	/* change partitions momentarily so we can read the FW pc */
908 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
909 	if (ret < 0)
910 		return;
911 
912 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
913 	if (ret < 0)
914 		return;
915 
916 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
917 	if (ret < 0)
918 		return;
919 
920 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
921 				pc, hint_sts, ++wl->recovery_count);
922 
923 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
924 }
925 
926 
927 static void wl1271_recovery_work(struct work_struct *work)
928 {
929 	struct wl1271 *wl =
930 		container_of(work, struct wl1271, recovery_work);
931 	struct wl12xx_vif *wlvif;
932 	struct ieee80211_vif *vif;
933 	int error;
934 
935 	mutex_lock(&wl->mutex);
936 
937 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
938 		goto out_unlock;
939 
940 	error = pm_runtime_get_sync(wl->dev);
941 	if (error < 0) {
942 		wl1271_warning("Enable for recovery failed");
943 		pm_runtime_put_noidle(wl->dev);
944 	}
945 	wlcore_disable_interrupts_nosync(wl);
946 
947 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
948 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
949 			wl12xx_read_fwlog_panic(wl);
950 		wlcore_print_recovery(wl);
951 	}
952 
953 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
954 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
955 
956 	clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
957 
958 	if (wl->conf.recovery.no_recovery) {
959 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
960 		goto out_unlock;
961 	}
962 
963 	/* Prevent spurious TX during FW restart */
964 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
965 
966 	/* reboot the chipset */
967 	while (!list_empty(&wl->wlvif_list)) {
968 		wlvif = list_first_entry(&wl->wlvif_list,
969 				       struct wl12xx_vif, list);
970 		vif = wl12xx_wlvif_to_vif(wlvif);
971 
972 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
973 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
974 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
975 						    vif->bss_conf.bssid);
976 		}
977 
978 		__wl1271_op_remove_interface(wl, vif, false);
979 	}
980 
981 	wlcore_op_stop_locked(wl);
982 	pm_runtime_mark_last_busy(wl->dev);
983 	pm_runtime_put_autosuspend(wl->dev);
984 
985 	ieee80211_restart_hw(wl->hw);
986 
987 	/*
988 	 * It's safe to enable TX now - the queues are stopped after a request
989 	 * to restart the HW.
990 	 */
991 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
992 
993 out_unlock:
994 	wl->watchdog_recovery = false;
995 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
996 	mutex_unlock(&wl->mutex);
997 }
998 
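/* kick the ELP control register so the firmware exits low-power sleep */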
999 static int wlcore_fw_wakeup(struct wl1271 *wl)
1000 {
1001 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1002 }
1003 
1004 static int wl1271_setup(struct wl1271 *wl)
1005 {
1006 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1007 	if (!wl->raw_fw_status)
1008 		goto err;
1009 
1010 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1011 	if (!wl->fw_status)
1012 		goto err;
1013 
1014 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1015 	if (!wl->tx_res_if)
1016 		goto err;
1017 
1018 	return 0;
1019 err:
1020 	kfree(wl->fw_status);
1021 	kfree(wl->raw_fw_status);
1022 	return -ENOMEM;
1023 }
1024 
1025 static int wl12xx_set_power_on(struct wl1271 *wl)
1026 {
1027 	int ret;
1028 
1029 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1030 	ret = wl1271_power_on(wl);
1031 	if (ret < 0)
1032 		goto out;
1033 	msleep(WL1271_POWER_ON_SLEEP);
1034 	wl1271_io_reset(wl);
1035 	wl1271_io_init(wl);
1036 
1037 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1038 	if (ret < 0)
1039 		goto fail;
1040 
1041 	/* ELP module wake up */
1042 	ret = wlcore_fw_wakeup(wl);
1043 	if (ret < 0)
1044 		goto fail;
1045 
1046 out:
1047 	return ret;
1048 
1049 fail:
1050 	wl1271_power_off(wl);
1051 	return ret;
1052 }
1053 
1054 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1055 {
1056 	int ret = 0;
1057 
1058 	ret = wl12xx_set_power_on(wl);
1059 	if (ret < 0)
1060 		goto out;
1061 
1062 	/*
1063 	 * For wl127x based devices we could use the default block
1064 	 * size (512 bytes), but due to a bug in the sdio driver, we
1065 	 * need to set it explicitly after the chip is powered on.  To
1066 	 * simplify the code and since the performance impact is
1067 	 * negligible, we use the same block size for all different
1068 	 * chip types.
1069 	 *
1070 	 * Check if the bus supports blocksize alignment and, if it
1071 	 * doesn't, make sure we don't have the quirk.
1072 	 */
1073 	if (!wl1271_set_block_size(wl))
1074 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1075 
1076 	/* TODO: make sure the lower driver has set things up correctly */
1077 
1078 	ret = wl1271_setup(wl);
1079 	if (ret < 0)
1080 		goto out;
1081 
1082 	ret = wl12xx_fetch_firmware(wl, plt);
1083 	if (ret < 0) {
1084 		kfree(wl->fw_status);
1085 		kfree(wl->raw_fw_status);
1086 		kfree(wl->tx_res_if);
1087 	}
1088 
1089 out:
1090 	return ret;
1091 }
1092 
1093 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1094 {
1095 	int retries = WL1271_BOOT_RETRIES;
1096 	struct wiphy *wiphy = wl->hw->wiphy;
1097 
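	/* human-readable mode names, indexed by enum plt_mode */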
1098 	static const char* const PLT_MODE[] = {
1099 		"PLT_OFF",
1100 		"PLT_ON",
1101 		"PLT_FEM_DETECT",
1102 		"PLT_CHIP_AWAKE"
1103 	};
1104 
1105 	int ret;
1106 
1107 	mutex_lock(&wl->mutex);
1108 
1109 	wl1271_notice("power up");
1110 
1111 	if (wl->state != WLCORE_STATE_OFF) {
1112 		wl1271_error("cannot go into PLT state because not "
1113 			     "in off state: %d", wl->state);
1114 		ret = -EBUSY;
1115 		goto out;
1116 	}
1117 
1118 	/* Indicate to lower levels that we are now in PLT mode */
1119 	wl->plt = true;
1120 	wl->plt_mode = plt_mode;
1121 
1122 	while (retries) {
1123 		retries--;
1124 		ret = wl12xx_chip_wakeup(wl, true);
1125 		if (ret < 0)
1126 			goto power_off;
1127 
1128 		if (plt_mode != PLT_CHIP_AWAKE) {
1129 			ret = wl->ops->plt_init(wl);
1130 			if (ret < 0)
1131 				goto power_off;
1132 		}
1133 
1134 		wl->state = WLCORE_STATE_ON;
1135 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1136 			      PLT_MODE[plt_mode],
1137 			      wl->chip.fw_ver_str);
1138 
1139 		/* update hw/fw version info in wiphy struct */
1140 		wiphy->hw_version = wl->chip.id;
1141 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1142 			sizeof(wiphy->fw_version));
1143 
1144 		goto out;
1145 
1146 power_off:
1147 		wl1271_power_off(wl);
1148 	}
1149 
1150 	wl->plt = false;
1151 	wl->plt_mode = PLT_OFF;
1152 
1153 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1154 		     WL1271_BOOT_RETRIES);
1155 out:
1156 	mutex_unlock(&wl->mutex);
1157 
1158 	return ret;
1159 }
1160 
1161 int wl1271_plt_stop(struct wl1271 *wl)
1162 {
1163 	int ret = 0;
1164 
1165 	wl1271_notice("power down");
1166 
1167 	/*
1168 	 * Interrupts must be disabled before setting the state to OFF.
1169 	 * Otherwise, the interrupt handler might be called and exit without
1170 	 * reading the interrupt status.
1171 	 */
1172 	wlcore_disable_interrupts(wl);
1173 	mutex_lock(&wl->mutex);
1174 	if (!wl->plt) {
1175 		mutex_unlock(&wl->mutex);
1176 
1177 		/*
1178 		 * This will not necessarily enable interrupts as interrupts
1179 		 * may have been disabled when op_stop was called. It will,
1180 		 * however, balance the above call to disable_interrupts().
1181 		 */
1182 		wlcore_enable_interrupts(wl);
1183 
1184 		wl1271_error("cannot power down because not in PLT "
1185 			     "state: %d", wl->state);
1186 		ret = -EBUSY;
1187 		goto out;
1188 	}
1189 
1190 	mutex_unlock(&wl->mutex);
1191 
1192 	wl1271_flush_deferred_work(wl);
1193 	cancel_work_sync(&wl->netstack_work);
1194 	cancel_work_sync(&wl->recovery_work);
1195 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1196 
1197 	mutex_lock(&wl->mutex);
1198 	wl1271_power_off(wl);
1199 	wl->flags = 0;
1200 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1201 	wl->state = WLCORE_STATE_OFF;
1202 	wl->plt = false;
1203 	wl->plt_mode = PLT_OFF;
1204 	wl->rx_counter = 0;
1205 	mutex_unlock(&wl->mutex);
1206 
1207 out:
1208 	return ret;
1209 }
1210 
1211 static void wl1271_op_tx(struct ieee80211_hw *hw,
1212 			 struct ieee80211_tx_control *control,
1213 			 struct sk_buff *skb)
1214 {
1215 	struct wl1271 *wl = hw->priv;
1216 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1217 	struct ieee80211_vif *vif = info->control.vif;
1218 	struct wl12xx_vif *wlvif = NULL;
1219 	unsigned long flags;
1220 	int q, mapping;
1221 	u8 hlid;
1222 
1223 	if (!vif) {
1224 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1225 		ieee80211_free_txskb(hw, skb);
1226 		return;
1227 	}
1228 
1229 	wlvif = wl12xx_vif_to_data(vif);
1230 	mapping = skb_get_queue_mapping(skb);
1231 	q = wl1271_tx_get_queue(mapping);
1232 
1233 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1234 
1235 	spin_lock_irqsave(&wl->wl_lock, flags);
1236 
1237 	/*
1238 	 * drop the packet if the link is invalid or the queue is stopped
1239 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1240 	 * allow these packets through.
1241 	 */
1242 	if (hlid == WL12XX_INVALID_LINK_ID ||
1243 	    (!test_bit(hlid, wlvif->links_map)) ||
1244 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1245 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1246 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1247 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1248 		ieee80211_free_txskb(hw, skb);
1249 		goto out;
1250 	}
1251 
1252 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1253 		     hlid, q, skb->len);
1254 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1255 
1256 	wl->tx_queue_count[q]++;
1257 	wlvif->tx_queue_count[q]++;
1258 
1259 	/*
1260 	 * The workqueue is slow to process the tx_queue and we need to stop
1261 	 * the queue here, otherwise the queue will get too long.
1262 	 */
1263 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1264 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1265 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1266 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1267 		wlcore_stop_queue_locked(wl, wlvif, q,
1268 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1269 	}
1270 
1271 	/*
1272 	 * The chip specific setup must run before the first TX packet -
1273 	 * before that, the tx_work will not be initialized!
1274 	 */
1275 
1276 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1277 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1278 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1279 
1280 out:
1281 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1282 }
1283 
1284 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1285 {
1286 	unsigned long flags;
1287 	int q;
1288 
1289 	/* no need to queue a new dummy packet if one is already pending */
1290 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1291 		return 0;
1292 
1293 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1294 
1295 	spin_lock_irqsave(&wl->wl_lock, flags);
1296 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1297 	wl->tx_queue_count[q]++;
1298 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1299 
1300 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1301 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1302 		return wlcore_tx_work_locked(wl);
1303 
1304 	/*
1305 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1306 	 * interrupt handler function
1307 	 */
1308 	return 0;
1309 }
1310 
1311 /*
1312  * The size of the dummy packet should be at least 1400 bytes. However, in
1313  * order to minimize the number of bus transactions, aligning it to 512 bytes
1314  * boundaries could be beneficial, performance wise
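 * (ALIGN(1400, 512) rounds the 1400-byte minimum up to 1536 bytes.)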
1315  */
1316 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1317 
1318 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1319 {
1320 	struct sk_buff *skb;
1321 	struct ieee80211_hdr_3addr *hdr;
1322 	unsigned int dummy_packet_size;
1323 
1324 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1325 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1326 
1327 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1328 	if (!skb) {
1329 		wl1271_warning("Failed to allocate a dummy packet skb");
1330 		return NULL;
1331 	}
1332 
1333 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1334 
1335 	hdr = skb_put_zero(skb, sizeof(*hdr));
1336 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1337 					 IEEE80211_STYPE_NULLFUNC |
1338 					 IEEE80211_FCTL_TODS);
1339 
1340 	skb_put_zero(skb, dummy_packet_size);
1341 
1342 	/* Dummy packets require the TID to be management */
1343 	skb->priority = WL1271_TID_MGMT;
1344 
1345 	/* Initialize all fields that might be used */
1346 	skb_set_queue_mapping(skb, 0);
1347 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1348 
1349 	return skb;
1350 }
1351 
1352 
1353 static int
1354 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1355 {
1356 	int num_fields = 0, in_field = 0, fields_size = 0;
1357 	int i, pattern_len = 0;
1358 
1359 	if (!p->mask) {
1360 		wl1271_warning("No mask in WoWLAN pattern");
1361 		return -EINVAL;
1362 	}
1363 
1364 	/*
1365 	 * The pattern is broken up into segments of bytes at different offsets
1366 	 * that need to be checked by the FW filter. Each segment is called
1367 	 * a field in the FW API. We verify that the total number of fields
1368 	 * required for this pattern won't exceed FW limits (8)
1369  * and that the total fields buffer size won't exceed the FW limit.
1370 	 * Note that if there's a pattern which crosses Ethernet/IP header
1371 	 * boundary a new field is required.
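 * For example, a mask selecting bytes 0-2 and 16-19 of the packet needs
 * two fields: the gap between the two runs ends the first field, and a
 * run crossing the Ethernet header boundary would likewise be split.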
1372 	 */
1373 	for (i = 0; i < p->pattern_len; i++) {
1374 		if (test_bit(i, (unsigned long *)p->mask)) {
1375 			if (!in_field) {
1376 				in_field = 1;
1377 				pattern_len = 1;
1378 			} else {
1379 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1380 					num_fields++;
1381 					fields_size += pattern_len +
1382 						RX_FILTER_FIELD_OVERHEAD;
1383 					pattern_len = 1;
1384 				} else
1385 					pattern_len++;
1386 			}
1387 		} else {
1388 			if (in_field) {
1389 				in_field = 0;
1390 				fields_size += pattern_len +
1391 					RX_FILTER_FIELD_OVERHEAD;
1392 				num_fields++;
1393 			}
1394 		}
1395 	}
1396 
1397 	if (in_field) {
1398 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1399 		num_fields++;
1400 	}
1401 
1402 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1403 		wl1271_warning("RX Filter too complex. Too many segments");
1404 		return -EINVAL;
1405 	}
1406 
1407 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1408 		wl1271_warning("RX filter pattern is too big");
1409 		return -E2BIG;
1410 	}
1411 
1412 	return 0;
1413 }
1414 
1415 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1416 {
1417 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1418 }
1419 
1420 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1421 {
1422 	int i;
1423 
1424 	if (filter == NULL)
1425 		return;
1426 
1427 	for (i = 0; i < filter->num_fields; i++)
1428 		kfree(filter->fields[i].pattern);
1429 
1430 	kfree(filter);
1431 }
1432 
1433 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1434 				 u16 offset, u8 flags,
1435 				 const u8 *pattern, u8 len)
1436 {
1437 	struct wl12xx_rx_filter_field *field;
1438 
1439 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1440 		wl1271_warning("Max fields per RX filter. can't alloc another");
1441 		return -EINVAL;
1442 	}
1443 
1444 	field = &filter->fields[filter->num_fields];
1445 
1446 	field->pattern = kmemdup(pattern, len, GFP_KERNEL);
1447 	if (!field->pattern) {
1448 		wl1271_warning("Failed to allocate RX filter pattern");
1449 		return -ENOMEM;
1450 	}
1451 
1452 	filter->num_fields++;
1453 
1454 	field->offset = cpu_to_le16(offset);
1455 	field->flags = flags;
1456 	field->len = len;
1457 
1458 	return 0;
1459 }
1460 
1461 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1462 {
1463 	int i, fields_size = 0;
1464 
1465 	for (i = 0; i < filter->num_fields; i++)
1466 		fields_size += filter->fields[i].len +
1467 			sizeof(struct wl12xx_rx_filter_field) -
1468 			sizeof(u8 *);
1469 
1470 	return fields_size;
1471 }
1472 
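/*
 * Serialize the filter fields into buf: each field is written as the
 * wl12xx_rx_filter_field struct minus its trailing pattern pointer,
 * immediately followed by the pattern bytes (matching the size computed
 * by wl1271_rx_filter_get_fields_size() above).
 */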
1473 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1474 				    u8 *buf)
1475 {
1476 	int i;
1477 	struct wl12xx_rx_filter_field *field;
1478 
1479 	for (i = 0; i < filter->num_fields; i++) {
1480 		field = (struct wl12xx_rx_filter_field *)buf;
1481 
1482 		field->offset = filter->fields[i].offset;
1483 		field->flags = filter->fields[i].flags;
1484 		field->len = filter->fields[i].len;
1485 
1486 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1487 		buf += sizeof(struct wl12xx_rx_filter_field) -
1488 			sizeof(u8 *) + field->len;
1489 	}
1490 }
1491 
1492 /*
1493  * Allocates an RX filter returned through f
1494  * which needs to be freed using rx_filter_free()
1495  */
1496 static int
1497 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1498 					   struct wl12xx_rx_filter **f)
1499 {
1500 	int i, j, ret = 0;
1501 	struct wl12xx_rx_filter *filter;
1502 	u16 offset;
1503 	u8 flags, len;
1504 
1505 	filter = wl1271_rx_filter_alloc();
1506 	if (!filter) {
1507 		wl1271_warning("Failed to alloc rx filter");
1508 		ret = -ENOMEM;
1509 		goto err;
1510 	}
1511 
1512 	i = 0;
1513 	while (i < p->pattern_len) {
1514 		if (!test_bit(i, (unsigned long *)p->mask)) {
1515 			i++;
1516 			continue;
1517 		}
1518 
1519 		for (j = i; j < p->pattern_len; j++) {
1520 			if (!test_bit(j, (unsigned long *)p->mask))
1521 				break;
1522 
1523 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1524 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1525 				break;
1526 		}
1527 
1528 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1529 			offset = i;
1530 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1531 		} else {
1532 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1533 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1534 		}
1535 
1536 		len = j - i;
1537 
1538 		ret = wl1271_rx_filter_alloc_field(filter,
1539 						   offset,
1540 						   flags,
1541 						   &p->pattern[i], len);
1542 		if (ret)
1543 			goto err;
1544 
1545 		i = j;
1546 	}
1547 
1548 	filter->action = FILTER_SIGNAL;
1549 
1550 	*f = filter;
1551 	return 0;
1552 
1553 err:
1554 	wl1271_rx_filter_free(filter);
1555 	*f = NULL;
1556 
1557 	return ret;
1558 }
1559 
1560 static int wl1271_configure_wowlan(struct wl1271 *wl,
1561 				   struct cfg80211_wowlan *wow)
1562 {
1563 	int i, ret;
1564 
1565 	if (!wow || wow->any || !wow->n_patterns) {
1566 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1567 							  FILTER_SIGNAL);
1568 		if (ret)
1569 			goto out;
1570 
1571 		ret = wl1271_rx_filter_clear_all(wl);
1572 		if (ret)
1573 			goto out;
1574 
1575 		return 0;
1576 	}
1577 
1578 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1579 		return -EINVAL;
1580 
1581 	/* Validate all incoming patterns before clearing current FW state */
1582 	for (i = 0; i < wow->n_patterns; i++) {
1583 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1584 		if (ret) {
1585 			wl1271_warning("Bad wowlan pattern %d", i);
1586 			return ret;
1587 		}
1588 	}
1589 
1590 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1591 	if (ret)
1592 		goto out;
1593 
1594 	ret = wl1271_rx_filter_clear_all(wl);
1595 	if (ret)
1596 		goto out;
1597 
1598 	/* Translate WoWLAN patterns into filters */
1599 	for (i = 0; i < wow->n_patterns; i++) {
1600 		struct cfg80211_pkt_pattern *p;
1601 		struct wl12xx_rx_filter *filter = NULL;
1602 
1603 		p = &wow->patterns[i];
1604 
1605 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1606 		if (ret) {
1607 			wl1271_warning("Failed to create an RX filter from "
1608 				       "wowlan pattern %d", i);
1609 			goto out;
1610 		}
1611 
1612 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1613 
1614 		wl1271_rx_filter_free(filter);
1615 		if (ret)
1616 			goto out;
1617 	}
1618 
1619 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1620 
1621 out:
1622 	return ret;
1623 }
1624 
1625 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1626 					struct wl12xx_vif *wlvif,
1627 					struct cfg80211_wowlan *wow)
1628 {
1629 	int ret = 0;
1630 
1631 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1632 		goto out;
1633 
1634 	ret = wl1271_configure_wowlan(wl, wow);
1635 	if (ret < 0)
1636 		goto out;
1637 
1638 	if ((wl->conf.conn.suspend_wake_up_event ==
1639 	     wl->conf.conn.wake_up_event) &&
1640 	    (wl->conf.conn.suspend_listen_interval ==
1641 	     wl->conf.conn.listen_interval))
1642 		goto out;
1643 
1644 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1645 				    wl->conf.conn.suspend_wake_up_event,
1646 				    wl->conf.conn.suspend_listen_interval);
1647 
1648 	if (ret < 0)
1649 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1650 out:
1651 	return ret;
1652 
1653 }
1654 
1655 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1656 					struct wl12xx_vif *wlvif,
1657 					struct cfg80211_wowlan *wow)
1658 {
1659 	int ret = 0;
1660 
1661 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1662 		goto out;
1663 
1664 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1665 	if (ret < 0)
1666 		goto out;
1667 
1668 	ret = wl1271_configure_wowlan(wl, wow);
1669 	if (ret < 0)
1670 		goto out;
1671 
1672 out:
1673 	return ret;
1674 
1675 }
1676 
1677 static int wl1271_configure_suspend(struct wl1271 *wl,
1678 				    struct wl12xx_vif *wlvif,
1679 				    struct cfg80211_wowlan *wow)
1680 {
1681 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1682 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1683 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1684 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1685 	return 0;
1686 }
1687 
1688 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1689 {
1690 	int ret = 0;
1691 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1692 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1693 
1694 	if ((!is_ap) && (!is_sta))
1695 		return;
1696 
1697 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1698 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1699 		return;
1700 
1701 	wl1271_configure_wowlan(wl, NULL);
1702 
1703 	if (is_sta) {
1704 		if ((wl->conf.conn.suspend_wake_up_event ==
1705 		     wl->conf.conn.wake_up_event) &&
1706 		    (wl->conf.conn.suspend_listen_interval ==
1707 		     wl->conf.conn.listen_interval))
1708 			return;
1709 
1710 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1711 				    wl->conf.conn.wake_up_event,
1712 				    wl->conf.conn.listen_interval);
1713 
1714 		if (ret < 0)
1715 			wl1271_error("resume: wake up conditions failed: %d",
1716 				     ret);
1717 
1718 	} else if (is_ap) {
1719 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1720 	}
1721 }
1722 
1723 static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
1724 					    struct cfg80211_wowlan *wow)
1725 {
1726 	struct wl1271 *wl = hw->priv;
1727 	struct wl12xx_vif *wlvif;
1728 	unsigned long flags;
1729 	int ret;
1730 
1731 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1732 	WARN_ON(!wow);
1733 
1734 	/* we want to perform the recovery before suspending */
1735 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1736 		wl1271_warning("postponing suspend to perform recovery");
1737 		return -EBUSY;
1738 	}
1739 
1740 	wl1271_tx_flush(wl);
1741 
1742 	mutex_lock(&wl->mutex);
1743 
1744 	ret = pm_runtime_get_sync(wl->dev);
1745 	if (ret < 0) {
1746 		pm_runtime_put_noidle(wl->dev);
1747 		mutex_unlock(&wl->mutex);
1748 		return ret;
1749 	}
1750 
1751 	wl->wow_enabled = true;
1752 	wl12xx_for_each_wlvif(wl, wlvif) {
1753 		if (wlcore_is_p2p_mgmt(wlvif))
1754 			continue;
1755 
1756 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1757 		if (ret < 0) {
1758 			goto out_sleep;
1759 		}
1760 	}
1761 
1762 	/* disable fast link flow control notifications from FW */
1763 	ret = wlcore_hw_interrupt_notify(wl, false);
1764 	if (ret < 0)
1765 		goto out_sleep;
1766 
1767 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1768 	ret = wlcore_hw_rx_ba_filter(wl,
1769 				     !!wl->conf.conn.suspend_rx_ba_activity);
1770 	if (ret < 0)
1771 		goto out_sleep;
1772 
1773 out_sleep:
1774 	pm_runtime_put_noidle(wl->dev);
1775 	mutex_unlock(&wl->mutex);
1776 
1777 	if (ret < 0) {
1778 		wl1271_warning("couldn't prepare device to suspend");
1779 		return ret;
1780 	}
1781 
1782 	/* flush any remaining work */
1783 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1784 
1785 	flush_work(&wl->tx_work);
1786 
1787 	/*
1788 	 * Cancel the watchdog even if above tx_flush failed. We will detect
1789 	 * it on resume anyway.
1790 	 */
1791 	cancel_delayed_work(&wl->tx_watchdog_work);
1792 
1793 	/*
1794 	 * set suspended flag to avoid triggering a new threaded_irq
1795 	 * work.
1796 	 */
1797 	spin_lock_irqsave(&wl->wl_lock, flags);
1798 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1799 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1800 
1801 	return pm_runtime_force_suspend(wl->dev);
1802 }
1803 
1804 static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
1805 {
1806 	struct wl1271 *wl = hw->priv;
1807 	struct wl12xx_vif *wlvif;
1808 	unsigned long flags;
1809 	bool run_irq_work = false, pending_recovery;
1810 	int ret;
1811 
1812 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1813 		     wl->wow_enabled);
1814 	WARN_ON(!wl->wow_enabled);
1815 
1816 	ret = pm_runtime_force_resume(wl->dev);
1817 	if (ret < 0) {
1818 		wl1271_error("ELP wakeup failure!");
1819 		goto out_sleep;
1820 	}
1821 
1822 	/*
1823 	 * re-enable irq_work enqueuing, and call irq_work directly if
1824 	 * there is a pending work.
1825 	 */
1826 	spin_lock_irqsave(&wl->wl_lock, flags);
1827 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1828 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1829 		run_irq_work = true;
1830 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1831 
1832 	mutex_lock(&wl->mutex);
1833 
1834 	/* test the recovery flag before calling any SDIO functions */
1835 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1836 				    &wl->flags);
1837 
1838 	if (run_irq_work) {
1839 		wl1271_debug(DEBUG_MAC80211,
1840 			     "run postponed irq_work directly");
1841 
1842 		/* don't talk to the HW if recovery is pending */
1843 		if (!pending_recovery) {
1844 			ret = wlcore_irq_locked(wl);
1845 			if (ret)
1846 				wl12xx_queue_recovery_work(wl);
1847 		}
1848 
1849 		wlcore_enable_interrupts(wl);
1850 	}
1851 
1852 	if (pending_recovery) {
1853 		wl1271_warning("queuing forgotten recovery on resume");
1854 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1855 		goto out_sleep;
1856 	}
1857 
1858 	ret = pm_runtime_get_sync(wl->dev);
1859 	if (ret < 0) {
1860 		pm_runtime_put_noidle(wl->dev);
1861 		goto out;
1862 	}
1863 
1864 	wl12xx_for_each_wlvif(wl, wlvif) {
1865 		if (wlcore_is_p2p_mgmt(wlvif))
1866 			continue;
1867 
1868 		wl1271_configure_resume(wl, wlvif);
1869 	}
1870 
1871 	ret = wlcore_hw_interrupt_notify(wl, true);
1872 	if (ret < 0)
1873 		goto out_sleep;
1874 
1875 	/* disable the RX BA filtering that may have been enabled on suspend */
1876 	ret = wlcore_hw_rx_ba_filter(wl, false);
1877 	if (ret < 0)
1878 		goto out_sleep;
1879 
1880 out_sleep:
1881 	pm_runtime_mark_last_busy(wl->dev);
1882 	pm_runtime_put_autosuspend(wl->dev);
1883 
1884 out:
1885 	wl->wow_enabled = false;
1886 
1887 	/*
1888 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1889 	 * That way we avoid possible conditions where Tx-complete interrupts
1890 	 * fail to arrive and we perform a spurious recovery.
1891 	 */
1892 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1893 	mutex_unlock(&wl->mutex);
1894 
1895 	return 0;
1896 }
1897 
1898 static int wl1271_op_start(struct ieee80211_hw *hw)
1899 {
1900 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1901 
1902 	/*
1903 	 * We have to delay the booting of the hardware because
1904 	 * we need to know the local MAC address before downloading and
1905 	 * initializing the firmware. The MAC address cannot be changed
1906 	 * after boot, and without the proper MAC address, the firmware
1907 	 * will not function properly.
1908 	 *
1909 	 * The MAC address is first known when the corresponding interface
1910 	 * is added. That is where we will initialize the hardware.
1911 	 */
1912 
1913 	return 0;
1914 }
1915 
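/*
 * Stop the device with wl->mutex held: move the state to OFF, quiesce
 * interrupts and pending work (temporarily dropping the mutex for the
 * sync cancels), reset TX, power the chip off and reset the driver
 * state back to its defaults.
 */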
1916 static void wlcore_op_stop_locked(struct wl1271 *wl)
1917 {
1918 	int i;
1919 
1920 	if (wl->state == WLCORE_STATE_OFF) {
1921 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1922 					&wl->flags))
1923 			wlcore_enable_interrupts(wl);
1924 
1925 		return;
1926 	}
1927 
1928 	/*
1929 	 * this must be before the cancel_work calls below, so that the work
1930 	 * functions don't perform further work.
1931 	 */
1932 	wl->state = WLCORE_STATE_OFF;
1933 
1934 	/*
1935 	 * Use the nosync variant to disable interrupts, so the mutex could be
1936 	 * held while doing so without deadlocking.
1937 	 */
1938 	wlcore_disable_interrupts_nosync(wl);
1939 
1940 	mutex_unlock(&wl->mutex);
1941 
1942 	wlcore_synchronize_interrupts(wl);
1943 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1944 		cancel_work_sync(&wl->recovery_work);
1945 	wl1271_flush_deferred_work(wl);
1946 	cancel_delayed_work_sync(&wl->scan_complete_work);
1947 	cancel_work_sync(&wl->netstack_work);
1948 	cancel_work_sync(&wl->tx_work);
1949 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1950 
1951 	/* let's notify MAC80211 about the remaining pending TX frames */
1952 	mutex_lock(&wl->mutex);
1953 	wl12xx_tx_reset(wl);
1954 
1955 	wl1271_power_off(wl);
1956 	/*
1957 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1958 	 * an interrupt storm. Now that the power is down, it is safe to
1959 	 * re-enable interrupts to balance the disable depth
1960 	 */
1961 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1962 		wlcore_enable_interrupts(wl);
1963 
1964 	wl->band = NL80211_BAND_2GHZ;
1965 
1966 	wl->rx_counter = 0;
1967 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1968 	wl->channel_type = NL80211_CHAN_NO_HT;
1969 	wl->tx_blocks_available = 0;
1970 	wl->tx_allocated_blocks = 0;
1971 	wl->tx_results_count = 0;
1972 	wl->tx_packets_count = 0;
1973 	wl->time_offset = 0;
1974 	wl->ap_fw_ps_map = 0;
1975 	wl->ap_ps_map = 0;
1976 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1977 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1978 	memset(wl->links_map, 0, sizeof(wl->links_map));
1979 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1980 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1981 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1982 	wl->active_sta_count = 0;
1983 	wl->active_link_count = 0;
1984 
1985 	/* The system link is always allocated */
1986 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1987 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1988 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1989 
1990 	/*
1991 	 * this is performed after the cancel_work calls and the associated
1992 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1993 	 * get executed before all these vars have been reset.
1994 	 */
1995 	wl->flags = 0;
1996 
1997 	wl->tx_blocks_freed = 0;
1998 
1999 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2000 		wl->tx_pkts_freed[i] = 0;
2001 		wl->tx_allocated_pkts[i] = 0;
2002 	}
2003 
2004 	wl1271_debugfs_reset(wl);
2005 
2006 	kfree(wl->raw_fw_status);
2007 	wl->raw_fw_status = NULL;
2008 	kfree(wl->fw_status);
2009 	wl->fw_status = NULL;
2010 	kfree(wl->tx_res_if);
2011 	wl->tx_res_if = NULL;
2012 	kfree(wl->target_mem_map);
2013 	wl->target_mem_map = NULL;
2014 
2015 	/*
2016 	 * FW channels must be re-calibrated after recovery,
2017 	 * save current Reg-Domain channel configuration and clear it.
2018 	 */
2019 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2020 	       sizeof(wl->reg_ch_conf_pending));
2021 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2022 }
2023 
2024 static void wlcore_op_stop(struct ieee80211_hw *hw)
2025 {
2026 	struct wl1271 *wl = hw->priv;
2027 
2028 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2029 
2030 	mutex_lock(&wl->mutex);
2031 
2032 	wlcore_op_stop_locked(wl);
2033 
2034 	mutex_unlock(&wl->mutex);
2035 }
2036 
2037 static void wlcore_channel_switch_work(struct work_struct *work)
2038 {
2039 	struct delayed_work *dwork;
2040 	struct wl1271 *wl;
2041 	struct ieee80211_vif *vif;
2042 	struct wl12xx_vif *wlvif;
2043 	int ret;
2044 
2045 	dwork = to_delayed_work(work);
2046 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2047 	wl = wlvif->wl;
2048 
2049 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2050 
2051 	mutex_lock(&wl->mutex);
2052 
2053 	if (unlikely(wl->state != WLCORE_STATE_ON))
2054 		goto out;
2055 
2056 	/* check the channel switch is still ongoing */
2057 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2058 		goto out;
2059 
2060 	vif = wl12xx_wlvif_to_vif(wlvif);
2061 	ieee80211_chswitch_done(vif, false);
2062 
2063 	ret = pm_runtime_get_sync(wl->dev);
2064 	if (ret < 0) {
2065 		pm_runtime_put_noidle(wl->dev);
2066 		goto out;
2067 	}
2068 
2069 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2070 
2071 	pm_runtime_mark_last_busy(wl->dev);
2072 	pm_runtime_put_autosuspend(wl->dev);
2073 out:
2074 	mutex_unlock(&wl->mutex);
2075 }
2076 
2077 static void wlcore_connection_loss_work(struct work_struct *work)
2078 {
2079 	struct delayed_work *dwork;
2080 	struct wl1271 *wl;
2081 	struct ieee80211_vif *vif;
2082 	struct wl12xx_vif *wlvif;
2083 
2084 	dwork = to_delayed_work(work);
2085 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2086 	wl = wlvif->wl;
2087 
2088 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2089 
2090 	mutex_lock(&wl->mutex);
2091 
2092 	if (unlikely(wl->state != WLCORE_STATE_ON))
2093 		goto out;
2094 
2095 	/* Call mac80211 connection loss */
2096 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2097 		goto out;
2098 
2099 	vif = wl12xx_wlvif_to_vif(wlvif);
2100 	ieee80211_connection_loss(vif);
2101 out:
2102 	mutex_unlock(&wl->mutex);
2103 }
2104 
2105 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2106 {
2107 	struct delayed_work *dwork;
2108 	struct wl1271 *wl;
2109 	struct wl12xx_vif *wlvif;
2110 	unsigned long time_spare;
2111 	int ret;
2112 
2113 	dwork = to_delayed_work(work);
2114 	wlvif = container_of(dwork, struct wl12xx_vif,
2115 			     pending_auth_complete_work);
2116 	wl = wlvif->wl;
2117 
2118 	mutex_lock(&wl->mutex);
2119 
2120 	if (unlikely(wl->state != WLCORE_STATE_ON))
2121 		goto out;
2122 
2123 	/*
2124 	 * Make sure a second really passed since the last auth reply. Maybe
2125 	 * a second auth reply arrived while we were stuck on the mutex.
2126 	 * Check for a little less than the timeout to protect from scheduler
2127 	 * irregularities.
2128 	 */
2129 	time_spare = jiffies +
2130 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2131 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2132 		goto out;
2133 
2134 	ret = pm_runtime_get_sync(wl->dev);
2135 	if (ret < 0) {
2136 		pm_runtime_put_noidle(wl->dev);
2137 		goto out;
2138 	}
2139 
2140 	/* cancel the ROC if active */
2141 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2142 
2143 	pm_runtime_mark_last_busy(wl->dev);
2144 	pm_runtime_put_autosuspend(wl->dev);
2145 out:
2146 	mutex_unlock(&wl->mutex);
2147 }
2148 
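/* reserve a free entry in the rate-policy bitmap and return its index */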
2149 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2150 {
2151 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2152 					WL12XX_MAX_RATE_POLICIES);
2153 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2154 		return -EBUSY;
2155 
2156 	__set_bit(policy, wl->rate_policies_map);
2157 	*idx = policy;
2158 	return 0;
2159 }
2160 
2161 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2162 {
2163 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2164 		return;
2165 
2166 	__clear_bit(*idx, wl->rate_policies_map);
2167 	*idx = WL12XX_MAX_RATE_POLICIES;
2168 }
2169 
2170 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2171 {
2172 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2173 					WLCORE_MAX_KLV_TEMPLATES);
2174 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2175 		return -EBUSY;
2176 
2177 	__set_bit(policy, wl->klv_templates_map);
2178 	*idx = policy;
2179 	return 0;
2180 }
2181 
2182 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2183 {
2184 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2185 		return;
2186 
2187 	__clear_bit(*idx, wl->klv_templates_map);
2188 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2189 }
2190 
2191 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2192 {
2193 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2194 
2195 	switch (wlvif->bss_type) {
2196 	case BSS_TYPE_AP_BSS:
2197 		if (wlvif->p2p)
2198 			return WL1271_ROLE_P2P_GO;
2199 		else if (ieee80211_vif_is_mesh(vif))
2200 			return WL1271_ROLE_MESH_POINT;
2201 		else
2202 			return WL1271_ROLE_AP;
2203 
2204 	case BSS_TYPE_STA_BSS:
2205 		if (wlvif->p2p)
2206 			return WL1271_ROLE_P2P_CL;
2207 		else
2208 			return WL1271_ROLE_STA;
2209 
2210 	case BSS_TYPE_IBSS:
2211 		return WL1271_ROLE_IBSS;
2212 
2213 	default:
2214 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2215 	}
2216 	return WL12XX_INVALID_ROLE_TYPE;
2217 }
2218 
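/*
 * Initialize the per-vif driver state: derive the BSS type from the
 * mac80211 interface type, allocate rate policies (plus a keep-alive
 * template for STA/IBSS), copy the global band/channel/power settings
 * and set up the per-vif work items.
 */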
2219 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2220 {
2221 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2222 	int i;
2223 
2224 	/* clear everything but the persistent data */
2225 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2226 
2227 	switch (ieee80211_vif_type_p2p(vif)) {
2228 	case NL80211_IFTYPE_P2P_CLIENT:
2229 		wlvif->p2p = 1;
2230 		/* fall-through */
2231 	case NL80211_IFTYPE_STATION:
2232 	case NL80211_IFTYPE_P2P_DEVICE:
2233 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2234 		break;
2235 	case NL80211_IFTYPE_ADHOC:
2236 		wlvif->bss_type = BSS_TYPE_IBSS;
2237 		break;
2238 	case NL80211_IFTYPE_P2P_GO:
2239 		wlvif->p2p = 1;
2240 		/* fall-through */
2241 	case NL80211_IFTYPE_AP:
2242 	case NL80211_IFTYPE_MESH_POINT:
2243 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2244 		break;
2245 	default:
2246 		wlvif->bss_type = MAX_BSS_TYPE;
2247 		return -EOPNOTSUPP;
2248 	}
2249 
2250 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2251 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2252 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2253 
2254 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2255 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2256 		/* init sta/ibss data */
2257 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2258 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2259 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2260 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2261 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2262 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2263 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2264 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2265 	} else {
2266 		/* init ap data */
2267 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2268 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2269 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2270 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2271 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2272 			wl12xx_allocate_rate_policy(wl,
2273 						&wlvif->ap.ucast_rate_idx[i]);
2274 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2275 		/*
2276 		 * TODO: check if basic_rate shouldn't be
2277 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2278 		 * instead (the same thing for STA above).
2279 		 */
2280 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2281 		/* TODO: this seems to be used only for STA, check it */
2282 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2283 	}
2284 
2285 	wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2286 	wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2287 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2288 
2289 	/*
2290 	 * mac80211 configures some values globally, while we treat them
2291 	 * per-interface. thus, on init, we have to copy them from wl
2292 	 */
2293 	wlvif->band = wl->band;
2294 	wlvif->channel = wl->channel;
2295 	wlvif->power_level = wl->power_level;
2296 	wlvif->channel_type = wl->channel_type;
2297 
2298 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2299 		  wl1271_rx_streaming_enable_work);
2300 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2301 		  wl1271_rx_streaming_disable_work);
2302 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2303 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2304 			  wlcore_channel_switch_work);
2305 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2306 			  wlcore_connection_loss_work);
2307 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2308 			  wlcore_pending_auth_complete_work);
2309 	INIT_LIST_HEAD(&wlvif->list);
2310 
2311 	timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
2312 	return 0;
2313 }
2314 
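/*
 * Power up the chip and boot the firmware, retrying up to
 * WL1271_BOOT_RETRIES times. On success, publish the hw/fw version info
 * in the wiphy, trim the 5GHz channels if 11a is unsupported and move
 * the core to WLCORE_STATE_ON.
 */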
2315 static int wl12xx_init_fw(struct wl1271 *wl)
2316 {
2317 	int retries = WL1271_BOOT_RETRIES;
2318 	bool booted = false;
2319 	struct wiphy *wiphy = wl->hw->wiphy;
2320 	int ret;
2321 
2322 	while (retries) {
2323 		retries--;
2324 		ret = wl12xx_chip_wakeup(wl, false);
2325 		if (ret < 0)
2326 			goto power_off;
2327 
2328 		ret = wl->ops->boot(wl);
2329 		if (ret < 0)
2330 			goto power_off;
2331 
2332 		ret = wl1271_hw_init(wl);
2333 		if (ret < 0)
2334 			goto irq_disable;
2335 
2336 		booted = true;
2337 		break;
2338 
2339 irq_disable:
2340 		mutex_unlock(&wl->mutex);
2341 		/* Unlocking the mutex in the middle of handling is
2342 		   inherently unsafe. In this case we deem it safe to do,
2343 		   because we need to let any possibly pending IRQ out of
2344 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2345 		   work function will not do anything.) Also, any other
2346 		   possible concurrent operations will fail due to the
2347 		   current state, hence the wl1271 struct should be safe. */
2348 		wlcore_disable_interrupts(wl);
2349 		wl1271_flush_deferred_work(wl);
2350 		cancel_work_sync(&wl->netstack_work);
2351 		mutex_lock(&wl->mutex);
2352 power_off:
2353 		wl1271_power_off(wl);
2354 	}
2355 
2356 	if (!booted) {
2357 		wl1271_error("firmware boot failed despite %d retries",
2358 			     WL1271_BOOT_RETRIES);
2359 		goto out;
2360 	}
2361 
2362 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2363 
2364 	/* update hw/fw version info in wiphy struct */
2365 	wiphy->hw_version = wl->chip.id;
2366 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2367 		sizeof(wiphy->fw_version));
2368 
2369 	/*
2370 	 * Now we know if 11a is supported (info from the NVS), so disable
2371 	 * 11a channels if not supported
2372 	 */
2373 	if (!wl->enable_11a)
2374 		wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
2375 
2376 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2377 		     wl->enable_11a ? "" : "not ");
2378 
2379 	wl->state = WLCORE_STATE_ON;
2380 out:
2381 	return ret;
2382 }
2383 
2384 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2385 {
2386 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2387 }
2388 
2389 /*
2390  * Check whether a fw switch (i.e. moving from one loaded
2391  * fw to another) is needed. This function is also responsible
2392  * for updating wl->last_vif_count, so it must be called before
2393  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2394  * will be used).
2395  */
2396 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2397 				  struct vif_counter_data vif_counter_data,
2398 				  bool add)
2399 {
2400 	enum wl12xx_fw_type current_fw = wl->fw_type;
2401 	u8 vif_count = vif_counter_data.counter;
2402 
2403 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2404 		return false;
2405 
2406 	/* increase the vif count if this is a new vif */
2407 	if (add && !vif_counter_data.cur_vif_running)
2408 		vif_count++;
2409 
2410 	wl->last_vif_count = vif_count;
2411 
2412 	/* no need for fw change if the device is OFF */
2413 	if (wl->state == WLCORE_STATE_OFF)
2414 		return false;
2415 
2416 	/* no need for fw change if a single fw is used */
2417 	if (!wl->mr_fw_name)
2418 		return false;
2419 
2420 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2421 		return true;
2422 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2423 		return true;
2424 
2425 	return false;
2426 }
2427 
2428 /*
2429  * Enter "forced psm". Make sure the sta is in psm against the ap,
2430  * so that the connection is more likely to survive the fw switch.
2431  */
2432 static void wl12xx_force_active_psm(struct wl1271 *wl)
2433 {
2434 	struct wl12xx_vif *wlvif;
2435 
2436 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2437 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2438 	}
2439 }
2440 
2441 struct wlcore_hw_queue_iter_data {
2442 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2443 	/* current vif */
2444 	struct ieee80211_vif *vif;
2445 	/* is the current vif among those iterated */
2446 	bool cur_running;
2447 };
2448 
2449 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2450 				 struct ieee80211_vif *vif)
2451 {
2452 	struct wlcore_hw_queue_iter_data *iter_data = data;
2453 
2454 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2455 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2456 		return;
2457 
2458 	if (iter_data->cur_running || vif == iter_data->vif) {
2459 		iter_data->cur_running = true;
2460 		return;
2461 	}
2462 
2463 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2464 }
2465 
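/*
 * Allocate a block of NUM_TX_QUEUES hw queues for this vif, reusing the
 * previously assigned base when the vif is already running in mac80211
 * (resume/recovery), and register the queues (plus a cab queue for AP
 * vifs) with mac80211.
 */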
2466 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2467 					 struct wl12xx_vif *wlvif)
2468 {
2469 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2470 	struct wlcore_hw_queue_iter_data iter_data = {};
2471 	int i, q_base;
2472 
2473 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2474 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2475 		return 0;
2476 	}
2477 
2478 	iter_data.vif = vif;
2479 
2480 	/* mark all bits taken by active interfaces */
2481 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2482 					IEEE80211_IFACE_ITER_RESUME_ALL,
2483 					wlcore_hw_queue_iter, &iter_data);
2484 
2485 	/* the current vif is already running in mac80211 (resume/recovery) */
2486 	if (iter_data.cur_running) {
2487 		wlvif->hw_queue_base = vif->hw_queue[0];
2488 		wl1271_debug(DEBUG_MAC80211,
2489 			     "using pre-allocated hw queue base %d",
2490 			     wlvif->hw_queue_base);
2491 
2492 		/* the interface type might have changed */
2493 		goto adjust_cab_queue;
2494 	}
2495 
2496 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2497 				     WLCORE_NUM_MAC_ADDRESSES);
2498 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2499 		return -EBUSY;
2500 
2501 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2502 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2503 		     wlvif->hw_queue_base);
2504 
2505 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2506 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2507 		/* register hw queues in mac80211 */
2508 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2509 	}
2510 
2511 adjust_cab_queue:
2512 	/* the last places are reserved for cab queues per interface */
2513 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2514 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2515 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2516 	else
2517 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2518 
2519 	return 0;
2520 }
2521 
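/*
 * mac80211 add_interface handler: initialize the per-vif data, allocate
 * hw queues, boot the firmware if the device is still off, trigger an
 * intended recovery when a single-role/multi-role fw switch is needed,
 * and enable the corresponding FW role.
 */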
2522 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2523 				   struct ieee80211_vif *vif)
2524 {
2525 	struct wl1271 *wl = hw->priv;
2526 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2527 	struct vif_counter_data vif_count;
2528 	int ret = 0;
2529 	u8 role_type;
2530 
2531 	if (wl->plt) {
2532 		wl1271_error("Adding Interface not allowed while in PLT mode");
2533 		return -EBUSY;
2534 	}
2535 
2536 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2537 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2538 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2539 
2540 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2541 		     ieee80211_vif_type_p2p(vif), vif->addr);
2542 
2543 	wl12xx_get_vif_count(hw, vif, &vif_count);
2544 
2545 	mutex_lock(&wl->mutex);
2546 
2547 	/*
2548 	 * in some rare corner-case HW recovery scenarios it's possible to
2549 	 * get here before __wl1271_op_remove_interface is complete, so
2550 	 * opt out if that is the case.
2551 	 */
2552 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2553 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2554 		ret = -EBUSY;
2555 		goto out;
2556 	}
2557 
2558 
2559 	ret = wl12xx_init_vif_data(wl, vif);
2560 	if (ret < 0)
2561 		goto out;
2562 
2563 	wlvif->wl = wl;
2564 	role_type = wl12xx_get_role_type(wl, wlvif);
2565 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2566 		ret = -EINVAL;
2567 		goto out;
2568 	}
2569 
2570 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2571 	if (ret < 0)
2572 		goto out;
2573 
2574 	/*
2575 	 * TODO: once the nvs issue is solved, move this block
2576 	 * to start(), and make sure the driver is ON here.
2577 	 */
2578 	if (wl->state == WLCORE_STATE_OFF) {
2579 		/*
2580 		 * we still need this in order to configure the fw
2581 		 * while uploading the nvs
2582 		 */
2583 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2584 
2585 		ret = wl12xx_init_fw(wl);
2586 		if (ret < 0)
2587 			goto out;
2588 	}
2589 
2590 	/*
2591 	 * Call runtime PM only after possible wl12xx_init_fw() above
2592 	 * is done. Otherwise we do not have interrupts enabled.
2593 	 */
2594 	ret = pm_runtime_get_sync(wl->dev);
2595 	if (ret < 0) {
2596 		pm_runtime_put_noidle(wl->dev);
2597 		goto out_unlock;
2598 	}
2599 
2600 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2601 		wl12xx_force_active_psm(wl);
2602 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2603 		mutex_unlock(&wl->mutex);
2604 		wl1271_recovery_work(&wl->recovery_work);
2605 		return 0;
2606 	}
2607 
2608 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2609 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2610 					     role_type, &wlvif->role_id);
2611 		if (ret < 0)
2612 			goto out;
2613 
2614 		ret = wl1271_init_vif_specific(wl, vif);
2615 		if (ret < 0)
2616 			goto out;
2617 
2618 	} else {
2619 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2620 					     &wlvif->dev_role_id);
2621 		if (ret < 0)
2622 			goto out;
2623 
2624 		/* needed mainly for configuring rate policies */
2625 		ret = wl1271_sta_hw_init(wl, wlvif);
2626 		if (ret < 0)
2627 			goto out;
2628 	}
2629 
2630 	list_add(&wlvif->list, &wl->wlvif_list);
2631 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2632 
2633 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2634 		wl->ap_count++;
2635 	else
2636 		wl->sta_count++;
2637 out:
2638 	pm_runtime_mark_last_busy(wl->dev);
2639 	pm_runtime_put_autosuspend(wl->dev);
2640 out_unlock:
2641 	mutex_unlock(&wl->mutex);
2642 
2643 	return ret;
2644 }
2645 
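/*
 * Tear down a vif: abort any scan or ROC it owns, disable its FW roles
 * (unless a recovery is in progress), reset its TX queues, free its rate
 * policies, keys and templates, and cancel its pending work. Called with
 * wl->mutex held; the mutex is dropped while the works are cancelled.
 */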
2646 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2647 					 struct ieee80211_vif *vif,
2648 					 bool reset_tx_queues)
2649 {
2650 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2651 	int i, ret;
2652 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2653 
2654 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2655 
2656 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2657 		return;
2658 
2659 	/* because of hardware recovery, we may get here twice */
2660 	if (wl->state == WLCORE_STATE_OFF)
2661 		return;
2662 
2663 	wl1271_info("down");
2664 
2665 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2666 	    wl->scan_wlvif == wlvif) {
2667 		struct cfg80211_scan_info info = {
2668 			.aborted = true,
2669 		};
2670 
2671 		/*
2672 		 * Rearm the tx watchdog just before idling scan. This
2673 		 * prevents just-finished scans from triggering the watchdog
2674 		 */
2675 		wl12xx_rearm_tx_watchdog_locked(wl);
2676 
2677 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2678 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2679 		wl->scan_wlvif = NULL;
2680 		wl->scan.req = NULL;
2681 		ieee80211_scan_completed(wl->hw, &info);
2682 	}
2683 
2684 	if (wl->sched_vif == wlvif)
2685 		wl->sched_vif = NULL;
2686 
2687 	if (wl->roc_vif == vif) {
2688 		wl->roc_vif = NULL;
2689 		ieee80211_remain_on_channel_expired(wl->hw);
2690 	}
2691 
2692 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2693 		/* disable active roles */
2694 		ret = pm_runtime_get_sync(wl->dev);
2695 		if (ret < 0) {
2696 			pm_runtime_put_noidle(wl->dev);
2697 			goto deinit;
2698 		}
2699 
2700 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2701 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2702 			if (wl12xx_dev_role_started(wlvif))
2703 				wl12xx_stop_dev(wl, wlvif);
2704 		}
2705 
2706 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2707 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2708 			if (ret < 0) {
2709 				pm_runtime_put_noidle(wl->dev);
2710 				goto deinit;
2711 			}
2712 		} else {
2713 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2714 			if (ret < 0) {
2715 				pm_runtime_put_noidle(wl->dev);
2716 				goto deinit;
2717 			}
2718 		}
2719 
2720 		pm_runtime_mark_last_busy(wl->dev);
2721 		pm_runtime_put_autosuspend(wl->dev);
2722 	}
2723 deinit:
2724 	wl12xx_tx_reset_wlvif(wl, wlvif);
2725 
2726 	/* clear all hlids (except system_hlid) */
2727 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2728 
2729 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2730 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2731 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2732 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2733 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2734 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2735 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2736 	} else {
2737 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2738 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2739 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2740 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2741 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2742 			wl12xx_free_rate_policy(wl,
2743 						&wlvif->ap.ucast_rate_idx[i]);
2744 		wl1271_free_ap_keys(wl, wlvif);
2745 	}
2746 
2747 	dev_kfree_skb(wlvif->probereq);
2748 	wlvif->probereq = NULL;
2749 	if (wl->last_wlvif == wlvif)
2750 		wl->last_wlvif = NULL;
2751 	list_del(&wlvif->list);
2752 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2753 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2754 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2755 
2756 	if (is_ap)
2757 		wl->ap_count--;
2758 	else
2759 		wl->sta_count--;
2760 
2761 	/*
2762 	 * Last AP is gone but stations remain: configure sleep auth according
2763 	 * to the STA setting. Don't do this on an unintended recovery.
2764 	 */
2765 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2766 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2767 		goto unlock;
2768 
2769 	if (wl->ap_count == 0 && is_ap) {
2770 		/* mask ap events */
2771 		wl->event_mask &= ~wl->ap_event_mask;
2772 		wl1271_event_unmask(wl);
2773 	}
2774 
2775 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2776 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2777 		/* Configure for power according to debugfs */
2778 		if (sta_auth != WL1271_PSM_ILLEGAL)
2779 			wl1271_acx_sleep_auth(wl, sta_auth);
2780 		/* Configure for ELP power saving */
2781 		else
2782 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2783 	}
2784 
2785 unlock:
2786 	mutex_unlock(&wl->mutex);
2787 
2788 	del_timer_sync(&wlvif->rx_streaming_timer);
2789 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2790 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2791 	cancel_work_sync(&wlvif->rc_update_work);
2792 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2793 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2794 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2795 
2796 	mutex_lock(&wl->mutex);
2797 }
2798 
2799 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2800 				       struct ieee80211_vif *vif)
2801 {
2802 	struct wl1271 *wl = hw->priv;
2803 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2804 	struct wl12xx_vif *iter;
2805 	struct vif_counter_data vif_count;
2806 
2807 	wl12xx_get_vif_count(hw, vif, &vif_count);
2808 	mutex_lock(&wl->mutex);
2809 
2810 	if (wl->state == WLCORE_STATE_OFF ||
2811 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2812 		goto out;
2813 
2814 	/*
2815 	 * wl->vif can be null here if someone shuts down the interface
2816 	 * just when hardware recovery has been started.
2817 	 */
2818 	wl12xx_for_each_wlvif(wl, iter) {
2819 		if (iter != wlvif)
2820 			continue;
2821 
2822 		__wl1271_op_remove_interface(wl, vif, true);
2823 		break;
2824 	}
2825 	WARN_ON(iter != wlvif);
2826 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2827 		wl12xx_force_active_psm(wl);
2828 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2829 		wl12xx_queue_recovery_work(wl);
2830 	}
2831 out:
2832 	mutex_unlock(&wl->mutex);
2833 }
2834 
2835 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2836 				      struct ieee80211_vif *vif,
2837 				      enum nl80211_iftype new_type, bool p2p)
2838 {
2839 	struct wl1271 *wl = hw->priv;
2840 	int ret;
2841 
2842 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2843 	wl1271_op_remove_interface(hw, vif);
2844 
2845 	vif->type = new_type;
2846 	vif->p2p = p2p;
2847 	ret = wl1271_op_add_interface(hw, vif);
2848 
2849 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2850 	return ret;
2851 }
2852 
2853 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2854 {
2855 	int ret;
2856 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2857 
2858 	/*
2859 	 * One of the side effects of the JOIN command is that it clears
2860 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2861 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2862 	 * Currently the only valid scenario for JOIN during association
2863 	 * is on roaming, in which case we will also be given new keys.
2864 	 * Keep the below message for now, unless it starts bothering
2865 	 * users who really like to roam a lot :)
2866 	 */
2867 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2868 		wl1271_info("JOIN while associated.");
2869 
2870 	/* clear encryption type */
2871 	wlvif->encryption_type = KEY_NONE;
2872 
2873 	if (is_ibss)
2874 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2875 	else
2876 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2877 
2878 	return ret;
2879 }
2880 
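/* extract the SSID IE from the given frame and cache it in the vif */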
2881 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2882 			    int offset)
2883 {
2884 	u8 ssid_len;
2885 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2886 					 skb->len - offset);
2887 
2888 	if (!ptr) {
2889 		wl1271_error("No SSID in IEs!");
2890 		return -ENOENT;
2891 	}
2892 
2893 	ssid_len = ptr[1];
2894 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2895 		wl1271_error("SSID is too long!");
2896 		return -EINVAL;
2897 	}
2898 
2899 	wlvif->ssid_len = ssid_len;
2900 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2901 	return 0;
2902 }
2903 
2904 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2905 {
2906 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2907 	struct sk_buff *skb;
2908 	int ieoffset;
2909 
2910 	/* we currently only support setting the ssid from the ap probe req */
2911 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2912 		return -EINVAL;
2913 
2914 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2915 	if (!skb)
2916 		return -EINVAL;
2917 
2918 	ieoffset = offsetof(struct ieee80211_mgmt,
2919 			    u.probe_req.variable);
2920 	wl1271_ssid_set(wlvif, skb, ieoffset);
2921 	dev_kfree_skb(skb);
2922 
2923 	return 0;
2924 }
2925 
2926 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2927 			    struct ieee80211_bss_conf *bss_conf,
2928 			    u32 sta_rate_set)
2929 {
2930 	int ieoffset;
2931 	int ret;
2932 
2933 	wlvif->aid = bss_conf->aid;
2934 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2935 	wlvif->beacon_int = bss_conf->beacon_int;
2936 	wlvif->wmm_enabled = bss_conf->qos;
2937 
2938 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2939 
2940 	/*
2941 	 * with wl1271, we don't need to update the
2942 	 * beacon_int and dtim_period, because the firmware
2943 	 * updates them by itself when the first beacon is
2944 	 * received after a join.
2945 	 */
2946 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2947 	if (ret < 0)
2948 		return ret;
2949 
2950 	/*
2951 	 * Get a template for hardware connection maintenance
2952 	 */
2953 	dev_kfree_skb(wlvif->probereq);
2954 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2955 							wlvif,
2956 							NULL);
2957 	ieoffset = offsetof(struct ieee80211_mgmt,
2958 			    u.probe_req.variable);
2959 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2960 
2961 	/* enable the connection monitoring feature */
2962 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2963 	if (ret < 0)
2964 		return ret;
2965 
2966 	/*
2967 	 * The join command disables the keep-alive mode, shuts down its process,
2968 	 * and also clears the template config, so we need to reset it all after
2969 	 * the join. The acx_aid starts the keep-alive process, and the order
2970 	 * of the commands below is relevant.
2971 	 */
2972 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2973 	if (ret < 0)
2974 		return ret;
2975 
2976 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2977 	if (ret < 0)
2978 		return ret;
2979 
2980 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2981 	if (ret < 0)
2982 		return ret;
2983 
2984 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2985 					   wlvif->sta.klv_template_id,
2986 					   ACX_KEEP_ALIVE_TPL_VALID);
2987 	if (ret < 0)
2988 		return ret;
2989 
2990 	/*
2991 	 * The default fw psm configuration is AUTO, while mac80211 default
2992 	 * setting is off (ACTIVE), so sync the fw with the correct value.
2993 	 */
2994 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2995 	if (ret < 0)
2996 		return ret;
2997 
2998 	if (sta_rate_set) {
2999 		wlvif->rate_set =
3000 			wl1271_tx_enabled_rates_get(wl,
3001 						    sta_rate_set,
3002 						    wlvif->band);
3003 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3004 		if (ret < 0)
3005 			return ret;
3006 	}
3007 
3008 	return ret;
3009 }
3010 
3011 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3012 {
3013 	int ret;
3014 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3015 
3016 	/* make sure we were associated (sta) */
3017 	if (sta &&
3018 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3019 		return false;
3020 
3021 	/* make sure we are joined (ibss) */
3022 	if (!sta &&
3023 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3024 		return false;
3025 
3026 	if (sta) {
3027 		/* use defaults when not associated */
3028 		wlvif->aid = 0;
3029 
3030 		/* free probe-request template */
3031 		dev_kfree_skb(wlvif->probereq);
3032 		wlvif->probereq = NULL;
3033 
3034 		/* disable connection monitor features */
3035 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3036 		if (ret < 0)
3037 			return ret;
3038 
3039 		/* Disable the keep-alive feature */
3040 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3041 		if (ret < 0)
3042 			return ret;
3043 
3044 		/* disable beacon filtering */
3045 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3046 		if (ret < 0)
3047 			return ret;
3048 	}
3049 
3050 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3051 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3052 
3053 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3054 		ieee80211_chswitch_done(vif, false);
3055 		cancel_delayed_work(&wlvif->channel_switch_work);
3056 	}
3057 
3058 	/* invalidate keep-alive template */
3059 	wl1271_acx_keep_alive_config(wl, wlvif,
3060 				     wlvif->sta.klv_template_id,
3061 				     ACX_KEEP_ALIVE_TPL_INVALID);
3062 
3063 	return 0;
3064 }
3065 
3066 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3067 {
3068 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3069 	wlvif->rate_set = wlvif->basic_rate_set;
3070 }
3071 
3072 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3073 				   bool idle)
3074 {
3075 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3076 
3077 	if (idle == cur_idle)
3078 		return;
3079 
3080 	if (idle) {
3081 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3082 	} else {
3083 		/* The current firmware only supports sched_scan in idle */
3084 		if (wl->sched_vif == wlvif)
3085 			wl->ops->sched_scan_stop(wl, wlvif);
3086 
3087 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3088 	}
3089 }
3090 
3091 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3092 			     struct ieee80211_conf *conf, u32 changed)
3093 {
3094 	int ret;
3095 
3096 	if (wlcore_is_p2p_mgmt(wlvif))
3097 		return 0;
3098 
3099 	if (conf->power_level != wlvif->power_level) {
3100 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3101 		if (ret < 0)
3102 			return ret;
3103 
3104 		wlvif->power_level = conf->power_level;
3105 	}
3106 
3107 	return 0;
3108 }
3109 
3110 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3111 {
3112 	struct wl1271 *wl = hw->priv;
3113 	struct wl12xx_vif *wlvif;
3114 	struct ieee80211_conf *conf = &hw->conf;
3115 	int ret = 0;
3116 
3117 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3118 		     " changed 0x%x",
3119 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3120 		     conf->power_level,
3121 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3122 			 changed);
3123 
3124 	mutex_lock(&wl->mutex);
3125 
3126 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3127 		wl->power_level = conf->power_level;
3128 
3129 	if (unlikely(wl->state != WLCORE_STATE_ON))
3130 		goto out;
3131 
3132 	ret = pm_runtime_get_sync(wl->dev);
3133 	if (ret < 0) {
3134 		pm_runtime_put_noidle(wl->dev);
3135 		goto out;
3136 	}
3137 
3138 	/* configure each interface */
3139 	wl12xx_for_each_wlvif(wl, wlvif) {
3140 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3141 		if (ret < 0)
3142 			goto out_sleep;
3143 	}
3144 
3145 out_sleep:
3146 	pm_runtime_mark_last_busy(wl->dev);
3147 	pm_runtime_put_autosuspend(wl->dev);
3148 
3149 out:
3150 	mutex_unlock(&wl->mutex);
3151 
3152 	return ret;
3153 }
3154 
3155 struct wl1271_filter_params {
3156 	bool enabled;
3157 	int mc_list_length;
3158 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3159 };
3160 
3161 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3162 				       struct netdev_hw_addr_list *mc_list)
3163 {
3164 	struct wl1271_filter_params *fp;
3165 	struct netdev_hw_addr *ha;
3166 
3167 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3168 	if (!fp) {
3169 		wl1271_error("Out of memory setting filters.");
3170 		return 0;
3171 	}
3172 
3173 	/* update multicast filtering parameters */
3174 	fp->mc_list_length = 0;
3175 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3176 		fp->enabled = false;
3177 	} else {
3178 		fp->enabled = true;
3179 		netdev_hw_addr_list_for_each(ha, mc_list) {
3180 			memcpy(fp->mc_list[fp->mc_list_length],
3181 					ha->addr, ETH_ALEN);
3182 			fp->mc_list_length++;
3183 		}
3184 	}
3185 
3186 	return (u64)(unsigned long)fp;
3187 }
3188 
3189 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3190 				  FIF_FCSFAIL | \
3191 				  FIF_BCN_PRBRESP_PROMISC | \
3192 				  FIF_CONTROL | \
3193 				  FIF_OTHER_BSS)
3194 
3195 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3196 				       unsigned int changed,
3197 				       unsigned int *total, u64 multicast)
3198 {
3199 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3200 	struct wl1271 *wl = hw->priv;
3201 	struct wl12xx_vif *wlvif;
3202 
3203 	int ret;
3204 
3205 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3206 		     " total %x", changed, *total);
3207 
3208 	mutex_lock(&wl->mutex);
3209 
3210 	*total &= WL1271_SUPPORTED_FILTERS;
3211 	changed &= WL1271_SUPPORTED_FILTERS;
3212 
3213 	if (unlikely(wl->state != WLCORE_STATE_ON))
3214 		goto out;
3215 
3216 	ret = pm_runtime_get_sync(wl->dev);
3217 	if (ret < 0) {
3218 		pm_runtime_put_noidle(wl->dev);
3219 		goto out;
3220 	}
3221 
3222 	wl12xx_for_each_wlvif(wl, wlvif) {
3223 		if (wlcore_is_p2p_mgmt(wlvif))
3224 			continue;
3225 
3226 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3227 			if (*total & FIF_ALLMULTI)
3228 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3229 								   false,
3230 								   NULL, 0);
3231 			else if (fp)
3232 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3233 							fp->enabled,
3234 							fp->mc_list,
3235 							fp->mc_list_length);
3236 			if (ret < 0)
3237 				goto out_sleep;
3238 		}
3239 
3240 		/*
3241 		 * If the interface is in AP mode and was created with allmulticast,
3242 		 * disable the firmware filters so that all multicast packets are
3243 		 * passed. This is mandatory for mDNS-based discovery protocols.
3244 		 */
3245 		if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
3246 			if (*total & FIF_ALLMULTI) {
3247 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3248 							false,
3249 							NULL, 0);
3250 				if (ret < 0)
3251 					goto out_sleep;
3252 			}
3253 		}
3254 	}
3255 
3256 	/*
3257 	 * the fw doesn't provide an api to configure the filters. instead,
3258 	 * the filters configuration is based on the active roles / ROC
3259 	 * state.
3260 	 */
3261 
3262 out_sleep:
3263 	pm_runtime_mark_last_busy(wl->dev);
3264 	pm_runtime_put_autosuspend(wl->dev);
3265 
3266 out:
3267 	mutex_unlock(&wl->mutex);
3268 	kfree(fp);
3269 }
3270 
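/*
 * Record a key that was set before the AP was started; the recorded keys
 * are later programmed into the FW by wl1271_ap_init_hwenc().
 */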
3271 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3272 				u8 id, u8 key_type, u8 key_size,
3273 				const u8 *key, u8 hlid, u32 tx_seq_32,
3274 				u16 tx_seq_16, bool is_pairwise)
3275 {
3276 	struct wl1271_ap_key *ap_key;
3277 	int i;
3278 
3279 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3280 
3281 	if (key_size > MAX_KEY_SIZE)
3282 		return -EINVAL;
3283 
3284 	/*
3285 	 * Find next free entry in ap_keys. Also check we are not replacing
3286 	 * an existing key.
3287 	 */
3288 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3289 		if (wlvif->ap.recorded_keys[i] == NULL)
3290 			break;
3291 
3292 		if (wlvif->ap.recorded_keys[i]->id == id) {
3293 			wl1271_warning("trying to record key replacement");
3294 			return -EINVAL;
3295 		}
3296 	}
3297 
3298 	if (i == MAX_NUM_KEYS)
3299 		return -EBUSY;
3300 
3301 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3302 	if (!ap_key)
3303 		return -ENOMEM;
3304 
3305 	ap_key->id = id;
3306 	ap_key->key_type = key_type;
3307 	ap_key->key_size = key_size;
3308 	memcpy(ap_key->key, key, key_size);
3309 	ap_key->hlid = hlid;
3310 	ap_key->tx_seq_32 = tx_seq_32;
3311 	ap_key->tx_seq_16 = tx_seq_16;
3312 	ap_key->is_pairwise = is_pairwise;
3313 
3314 	wlvif->ap.recorded_keys[i] = ap_key;
3315 	return 0;
3316 }
3317 
3318 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3319 {
3320 	int i;
3321 
3322 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3323 		kfree(wlvif->ap.recorded_keys[i]);
3324 		wlvif->ap.recorded_keys[i] = NULL;
3325 	}
3326 }
3327 
3328 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3329 {
3330 	int i, ret = 0;
3331 	struct wl1271_ap_key *key;
3332 	bool wep_key_added = false;
3333 
3334 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3335 		u8 hlid;
3336 		if (wlvif->ap.recorded_keys[i] == NULL)
3337 			break;
3338 
3339 		key = wlvif->ap.recorded_keys[i];
3340 		hlid = key->hlid;
3341 		if (hlid == WL12XX_INVALID_LINK_ID)
3342 			hlid = wlvif->ap.bcast_hlid;
3343 
3344 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3345 					    key->id, key->key_type,
3346 					    key->key_size, key->key,
3347 					    hlid, key->tx_seq_32,
3348 					    key->tx_seq_16, key->is_pairwise);
3349 		if (ret < 0)
3350 			goto out;
3351 
3352 		if (key->key_type == KEY_WEP)
3353 			wep_key_added = true;
3354 	}
3355 
3356 	if (wep_key_added) {
3357 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3358 						     wlvif->ap.bcast_hlid);
3359 		if (ret < 0)
3360 			goto out;
3361 	}
3362 
3363 out:
3364 	wl1271_free_ap_keys(wl, wlvif);
3365 	return ret;
3366 }
3367 
3368 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3369 		       u16 action, u8 id, u8 key_type,
3370 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3371 		       u16 tx_seq_16, struct ieee80211_sta *sta,
3372 		       bool is_pairwise)
3373 {
3374 	int ret;
3375 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3376 
3377 	if (is_ap) {
3378 		struct wl1271_station *wl_sta;
3379 		u8 hlid;
3380 
3381 		if (sta) {
3382 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3383 			hlid = wl_sta->hlid;
3384 		} else {
3385 			hlid = wlvif->ap.bcast_hlid;
3386 		}
3387 
3388 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3389 			/*
3390 			 * We do not support removing keys after AP shutdown.
3391 			 * Pretend we do to make mac80211 happy.
3392 			 */
3393 			if (action != KEY_ADD_OR_REPLACE)
3394 				return 0;
3395 
3396 			ret = wl1271_record_ap_key(wl, wlvif, id,
3397 					     key_type, key_size,
3398 					     key, hlid, tx_seq_32,
3399 					     tx_seq_16, is_pairwise);
3400 		} else {
3401 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3402 					     id, key_type, key_size,
3403 					     key, hlid, tx_seq_32,
3404 					     tx_seq_16, is_pairwise);
3405 		}
3406 
3407 		if (ret < 0)
3408 			return ret;
3409 	} else {
3410 		const u8 *addr;
3411 		static const u8 bcast_addr[ETH_ALEN] = {
3412 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3413 		};
3414 
3415 		addr = sta ? sta->addr : bcast_addr;
3416 
3417 		if (is_zero_ether_addr(addr)) {
3418 			/* We don't support TX-only encryption */
3419 			return -EOPNOTSUPP;
3420 		}
3421 
3422 		/* The wl1271 does not allow removing unicast keys - they
3423 		   will be cleared automatically on the next CMD_JOIN. Ignore the
3424 		   request silently, as we don't want mac80211 to emit
3425 		   an error message. */
3426 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3427 			return 0;
3428 
3429 		/* don't remove key if hlid was already deleted */
3430 		if (action == KEY_REMOVE &&
3431 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3432 			return 0;
3433 
3434 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3435 					     id, key_type, key_size,
3436 					     key, addr, tx_seq_32,
3437 					     tx_seq_16);
3438 		if (ret < 0)
3439 			return ret;
3440 
3441 	}
3442 
3443 	return 0;
3444 }
3445 
3446 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3447 			     struct ieee80211_vif *vif,
3448 			     struct ieee80211_sta *sta,
3449 			     struct ieee80211_key_conf *key_conf)
3450 {
3451 	struct wl1271 *wl = hw->priv;
3452 	int ret;
3453 	bool might_change_spare =
3454 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3455 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3456 
3457 	if (might_change_spare) {
3458 		/*
3459 		 * stop the queues and flush to ensure the next packets are
3460 		 * in sync with FW spare block accounting
3461 		 */
3462 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3463 		wl1271_tx_flush(wl);
3464 	}
3465 
3466 	mutex_lock(&wl->mutex);
3467 
3468 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3469 		ret = -EAGAIN;
3470 		goto out_wake_queues;
3471 	}
3472 
3473 	ret = pm_runtime_get_sync(wl->dev);
3474 	if (ret < 0) {
3475 		pm_runtime_put_noidle(wl->dev);
3476 		goto out_wake_queues;
3477 	}
3478 
3479 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3480 
3481 	pm_runtime_mark_last_busy(wl->dev);
3482 	pm_runtime_put_autosuspend(wl->dev);
3483 
3484 out_wake_queues:
3485 	if (might_change_spare)
3486 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3487 
3488 	mutex_unlock(&wl->mutex);
3489 
3490 	return ret;
3491 }
3492 
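/*
 * Core set_key implementation: map the mac80211 cipher to a FW key type,
 * derive the hlid and TX sequence counters for the link, and add or
 * remove the key via wl1271_set_key().
 */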
3493 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3494 		   struct ieee80211_vif *vif,
3495 		   struct ieee80211_sta *sta,
3496 		   struct ieee80211_key_conf *key_conf)
3497 {
3498 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3499 	int ret;
3500 	u32 tx_seq_32 = 0;
3501 	u16 tx_seq_16 = 0;
3502 	u8 key_type;
3503 	u8 hlid;
3504 	bool is_pairwise;
3505 
3506 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3507 
3508 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3509 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3510 		     key_conf->cipher, key_conf->keyidx,
3511 		     key_conf->keylen, key_conf->flags);
3512 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3513 
3514 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3515 		if (sta) {
3516 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3517 			hlid = wl_sta->hlid;
3518 		} else {
3519 			hlid = wlvif->ap.bcast_hlid;
3520 		}
3521 	else
3522 		hlid = wlvif->sta.hlid;
3523 
3524 	if (hlid != WL12XX_INVALID_LINK_ID) {
3525 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3526 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3527 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3528 	}
3529 
3530 	switch (key_conf->cipher) {
3531 	case WLAN_CIPHER_SUITE_WEP40:
3532 	case WLAN_CIPHER_SUITE_WEP104:
3533 		key_type = KEY_WEP;
3534 
3535 		key_conf->hw_key_idx = key_conf->keyidx;
3536 		break;
3537 	case WLAN_CIPHER_SUITE_TKIP:
3538 		key_type = KEY_TKIP;
3539 		key_conf->hw_key_idx = key_conf->keyidx;
3540 		break;
3541 	case WLAN_CIPHER_SUITE_CCMP:
3542 		key_type = KEY_AES;
3543 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3544 		break;
3545 	case WL1271_CIPHER_SUITE_GEM:
3546 		key_type = KEY_GEM;
3547 		break;
3548 	default:
3549 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3550 
3551 		return -EOPNOTSUPP;
3552 	}
3553 
3554 	is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
3555 
3556 	switch (cmd) {
3557 	case SET_KEY:
3558 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3559 				 key_conf->keyidx, key_type,
3560 				 key_conf->keylen, key_conf->key,
3561 				 tx_seq_32, tx_seq_16, sta, is_pairwise);
3562 		if (ret < 0) {
3563 			wl1271_error("Could not add or replace key");
3564 			return ret;
3565 		}
3566 
3567 		/*
3568 		 * reconfiguring arp response if the unicast (or common)
3569 		 * encryption key type was changed
3570 		 */
3571 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3572 		    (sta || key_type == KEY_WEP) &&
3573 		    wlvif->encryption_type != key_type) {
3574 			wlvif->encryption_type = key_type;
3575 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3576 			if (ret < 0) {
3577 				wl1271_warning("build arp rsp failed: %d", ret);
3578 				return ret;
3579 			}
3580 		}
3581 		break;
3582 
3583 	case DISABLE_KEY:
3584 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3585 				     key_conf->keyidx, key_type,
3586 				     key_conf->keylen, key_conf->key,
3587 				     0, 0, sta, is_pairwise);
3588 		if (ret < 0) {
3589 			wl1271_error("Could not remove key");
3590 			return ret;
3591 		}
3592 		break;
3593 
3594 	default:
3595 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3596 		return -EOPNOTSUPP;
3597 	}
3598 
3599 	return ret;
3600 }
3601 EXPORT_SYMBOL_GPL(wlcore_set_key);
3602 
3603 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3604 					  struct ieee80211_vif *vif,
3605 					  int key_idx)
3606 {
3607 	struct wl1271 *wl = hw->priv;
3608 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3609 	int ret;
3610 
3611 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3612 		     key_idx);
3613 
3614 	/* we don't handle unsetting of default key */
3615 	if (key_idx == -1)
3616 		return;
3617 
3618 	mutex_lock(&wl->mutex);
3619 
3620 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3621 		ret = -EAGAIN;
3622 		goto out_unlock;
3623 	}
3624 
3625 	ret = pm_runtime_get_sync(wl->dev);
3626 	if (ret < 0) {
3627 		pm_runtime_put_noidle(wl->dev);
3628 		goto out_unlock;
3629 	}
3630 
3631 	wlvif->default_key = key_idx;
3632 
3633 	/* the default WEP key needs to be configured at least once */
3634 	if (wlvif->encryption_type == KEY_WEP) {
3635 		ret = wl12xx_cmd_set_default_wep_key(wl,
3636 				key_idx,
3637 				wlvif->sta.hlid);
3638 		if (ret < 0)
3639 			goto out_sleep;
3640 	}
3641 
3642 out_sleep:
3643 	pm_runtime_mark_last_busy(wl->dev);
3644 	pm_runtime_put_autosuspend(wl->dev);
3645 
3646 out_unlock:
3647 	mutex_unlock(&wl->mutex);
3648 }
3649 
3650 void wlcore_regdomain_config(struct wl1271 *wl)
3651 {
3652 	int ret;
3653 
3654 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3655 		return;
3656 
3657 	mutex_lock(&wl->mutex);
3658 
3659 	if (unlikely(wl->state != WLCORE_STATE_ON))
3660 		goto out;
3661 
3662 	ret = pm_runtime_get_sync(wl->dev);
3663 	if (ret < 0) {
3664 		pm_runtime_put_autosuspend(wl->dev);
3665 		goto out;
3666 	}
3667 
3668 	ret = wlcore_cmd_regdomain_config_locked(wl);
3669 	if (ret < 0) {
3670 		wl12xx_queue_recovery_work(wl);
3671 		goto out;
3672 	}
3673 
3674 	pm_runtime_mark_last_busy(wl->dev);
3675 	pm_runtime_put_autosuspend(wl->dev);
3676 out:
3677 	mutex_unlock(&wl->mutex);
3678 }
3679 
3680 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3681 			     struct ieee80211_vif *vif,
3682 			     struct ieee80211_scan_request *hw_req)
3683 {
3684 	struct cfg80211_scan_request *req = &hw_req->req;
3685 	struct wl1271 *wl = hw->priv;
3686 	int ret;
3687 	u8 *ssid = NULL;
3688 	size_t len = 0;
3689 
3690 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3691 
3692 	if (req->n_ssids) {
3693 		ssid = req->ssids[0].ssid;
3694 		len = req->ssids[0].ssid_len;
3695 	}
3696 
3697 	mutex_lock(&wl->mutex);
3698 
3699 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3700 		/*
3701 		 * We cannot return -EBUSY here because cfg80211 will expect
3702 		 * a call to ieee80211_scan_completed if we do - in this case
3703 		 * there won't be any call.
3704 		 */
3705 		ret = -EAGAIN;
3706 		goto out;
3707 	}
3708 
3709 	ret = pm_runtime_get_sync(wl->dev);
3710 	if (ret < 0) {
3711 		pm_runtime_put_noidle(wl->dev);
3712 		goto out;
3713 	}
3714 
3715 	/* fail if there is any role in ROC */
3716 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3717 		/* don't allow scanning right now */
3718 		ret = -EBUSY;
3719 		goto out_sleep;
3720 	}
3721 
3722 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3723 out_sleep:
3724 	pm_runtime_mark_last_busy(wl->dev);
3725 	pm_runtime_put_autosuspend(wl->dev);
3726 out:
3727 	mutex_unlock(&wl->mutex);
3728 
3729 	return ret;
3730 }
3731 
3732 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3733 				     struct ieee80211_vif *vif)
3734 {
3735 	struct wl1271 *wl = hw->priv;
3736 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3737 	struct cfg80211_scan_info info = {
3738 		.aborted = true,
3739 	};
3740 	int ret;
3741 
3742 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3743 
3744 	mutex_lock(&wl->mutex);
3745 
3746 	if (unlikely(wl->state != WLCORE_STATE_ON))
3747 		goto out;
3748 
3749 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3750 		goto out;
3751 
3752 	ret = pm_runtime_get_sync(wl->dev);
3753 	if (ret < 0) {
3754 		pm_runtime_put_noidle(wl->dev);
3755 		goto out;
3756 	}
3757 
3758 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3759 		ret = wl->ops->scan_stop(wl, wlvif);
3760 		if (ret < 0)
3761 			goto out_sleep;
3762 	}
3763 
3764 	/*
3765 	 * Rearm the tx watchdog just before idling the scan. This
3766 	 * prevents just-finished scans from triggering the watchdog.
3767 	 */
3768 	wl12xx_rearm_tx_watchdog_locked(wl);
3769 
3770 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3771 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3772 	wl->scan_wlvif = NULL;
3773 	wl->scan.req = NULL;
3774 	ieee80211_scan_completed(wl->hw, &info);
3775 
3776 out_sleep:
3777 	pm_runtime_mark_last_busy(wl->dev);
3778 	pm_runtime_put_autosuspend(wl->dev);
3779 out:
3780 	mutex_unlock(&wl->mutex);
3781 
3782 	cancel_delayed_work_sync(&wl->scan_complete_work);
3783 }
3784 
3785 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3786 				      struct ieee80211_vif *vif,
3787 				      struct cfg80211_sched_scan_request *req,
3788 				      struct ieee80211_scan_ies *ies)
3789 {
3790 	struct wl1271 *wl = hw->priv;
3791 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3792 	int ret;
3793 
3794 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3795 
3796 	mutex_lock(&wl->mutex);
3797 
3798 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3799 		ret = -EAGAIN;
3800 		goto out;
3801 	}
3802 
3803 	ret = pm_runtime_get_sync(wl->dev);
3804 	if (ret < 0) {
3805 		pm_runtime_put_noidle(wl->dev);
3806 		goto out;
3807 	}
3808 
3809 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3810 	if (ret < 0)
3811 		goto out_sleep;
3812 
3813 	wl->sched_vif = wlvif;
3814 
3815 out_sleep:
3816 	pm_runtime_mark_last_busy(wl->dev);
3817 	pm_runtime_put_autosuspend(wl->dev);
3818 out:
3819 	mutex_unlock(&wl->mutex);
3820 	return ret;
3821 }
3822 
3823 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3824 				     struct ieee80211_vif *vif)
3825 {
3826 	struct wl1271 *wl = hw->priv;
3827 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3828 	int ret;
3829 
3830 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3831 
3832 	mutex_lock(&wl->mutex);
3833 
3834 	if (unlikely(wl->state != WLCORE_STATE_ON))
3835 		goto out;
3836 
3837 	ret = pm_runtime_get_sync(wl->dev);
3838 	if (ret < 0) {
3839 		pm_runtime_put_noidle(wl->dev);
3840 		goto out;
3841 	}
3842 
3843 	wl->ops->sched_scan_stop(wl, wlvif);
3844 
3845 	pm_runtime_mark_last_busy(wl->dev);
3846 	pm_runtime_put_autosuspend(wl->dev);
3847 out:
3848 	mutex_unlock(&wl->mutex);
3849 
3850 	return 0;
3851 }
3852 
3853 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3854 {
3855 	struct wl1271 *wl = hw->priv;
3856 	int ret = 0;
3857 
3858 	mutex_lock(&wl->mutex);
3859 
3860 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3861 		ret = -EAGAIN;
3862 		goto out;
3863 	}
3864 
3865 	ret = pm_runtime_get_sync(wl->dev);
3866 	if (ret < 0) {
3867 		pm_runtime_put_noidle(wl->dev);
3868 		goto out;
3869 	}
3870 
3871 	ret = wl1271_acx_frag_threshold(wl, value);
3872 	if (ret < 0)
3873 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3874 
3875 	pm_runtime_mark_last_busy(wl->dev);
3876 	pm_runtime_put_autosuspend(wl->dev);
3877 
3878 out:
3879 	mutex_unlock(&wl->mutex);
3880 
3881 	return ret;
3882 }
3883 
3884 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3885 {
3886 	struct wl1271 *wl = hw->priv;
3887 	struct wl12xx_vif *wlvif;
3888 	int ret = 0;
3889 
3890 	mutex_lock(&wl->mutex);
3891 
3892 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3893 		ret = -EAGAIN;
3894 		goto out;
3895 	}
3896 
3897 	ret = pm_runtime_get_sync(wl->dev);
3898 	if (ret < 0) {
3899 		pm_runtime_put_noidle(wl->dev);
3900 		goto out;
3901 	}
3902 
3903 	wl12xx_for_each_wlvif(wl, wlvif) {
3904 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3905 		if (ret < 0)
3906 			wl1271_warning("set rts threshold failed: %d", ret);
3907 	}
3908 	pm_runtime_mark_last_busy(wl->dev);
3909 	pm_runtime_put_autosuspend(wl->dev);
3910 
3911 out:
3912 	mutex_unlock(&wl->mutex);
3913 
3914 	return ret;
3915 }
3916 
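/*
 * Strip a single IE from a management frame in place: locate it starting
 * at ieoffset, move the remaining tail over it and trim the skb length.
 * wl12xx_remove_vendor_ie() below does the same for vendor-specific IEs.
 */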
3917 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3918 {
3919 	int len;
3920 	const u8 *next, *end = skb->data + skb->len;
3921 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3922 					skb->len - ieoffset);
3923 	if (!ie)
3924 		return;
3925 	len = ie[1] + 2;
3926 	next = ie + len;
3927 	memmove(ie, next, end - next);
3928 	skb_trim(skb, skb->len - len);
3929 }
3930 
3931 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3932 					    unsigned int oui, u8 oui_type,
3933 					    int ieoffset)
3934 {
3935 	int len;
3936 	const u8 *next, *end = skb->data + skb->len;
3937 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3938 					       skb->data + ieoffset,
3939 					       skb->len - ieoffset);
3940 	if (!ie)
3941 		return;
3942 	len = ie[1] + 2;
3943 	next = ie + len;
3944 	memmove(ie, next, end - next);
3945 	skb_trim(skb, skb->len - len);
3946 }
3947 
3948 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3949 					 struct ieee80211_vif *vif)
3950 {
3951 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3952 	struct sk_buff *skb;
3953 	int ret;
3954 
3955 	skb = ieee80211_proberesp_get(wl->hw, vif);
3956 	if (!skb)
3957 		return -EOPNOTSUPP;
3958 
3959 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3960 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3961 				      skb->data,
3962 				      skb->len, 0,
3963 				      rates);
3964 	dev_kfree_skb(skb);
3965 
3966 	if (ret < 0)
3967 		goto out;
3968 
3969 	wl1271_debug(DEBUG_AP, "probe response updated");
3970 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3971 
3972 out:
3973 	return ret;
3974 }
3975 
3976 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3977 					     struct ieee80211_vif *vif,
3978 					     u8 *probe_rsp_data,
3979 					     size_t probe_rsp_len,
3980 					     u32 rates)
3981 {
3982 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3983 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3984 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3985 	int ssid_ie_offset, ie_offset, templ_len;
3986 	const u8 *ptr;
3987 
3988 	/* no need to change probe response if the SSID is set correctly */
3989 	if (wlvif->ssid_len > 0)
3990 		return wl1271_cmd_template_set(wl, wlvif->role_id,
3991 					       CMD_TEMPL_AP_PROBE_RESPONSE,
3992 					       probe_rsp_data,
3993 					       probe_rsp_len, 0,
3994 					       rates);
3995 
3996 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3997 		wl1271_error("probe_rsp template too big");
3998 		return -EINVAL;
3999 	}
4000 
4001 	/* start searching from IE offset */
4002 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
4003 
4004 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
4005 			       probe_rsp_len - ie_offset);
4006 	if (!ptr) {
4007 		wl1271_error("No SSID in beacon!");
4008 		return -EINVAL;
4009 	}
4010 
4011 	ssid_ie_offset = ptr - probe_rsp_data;
4012 	ptr += (ptr[1] + 2);
4013 
4014 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
4015 
4016 	/* insert SSID from bss_conf */
4017 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
4018 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
4019 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
4020 	       bss_conf->ssid, bss_conf->ssid_len);
4021 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
4022 
4023 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
4024 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
4025 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
4026 
4027 	return wl1271_cmd_template_set(wl, wlvif->role_id,
4028 				       CMD_TEMPL_AP_PROBE_RESPONSE,
4029 				       probe_rsp_templ,
4030 				       templ_len, 0,
4031 				       rates);
4032 }
4033 
4034 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
4035 				       struct ieee80211_vif *vif,
4036 				       struct ieee80211_bss_conf *bss_conf,
4037 				       u32 changed)
4038 {
4039 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4040 	int ret = 0;
4041 
4042 	if (changed & BSS_CHANGED_ERP_SLOT) {
4043 		if (bss_conf->use_short_slot)
4044 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4045 		else
4046 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4047 		if (ret < 0) {
4048 			wl1271_warning("Set slot time failed %d", ret);
4049 			goto out;
4050 		}
4051 	}
4052 
4053 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4054 		if (bss_conf->use_short_preamble)
4055 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4056 		else
4057 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4058 	}
4059 
4060 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4061 		if (bss_conf->use_cts_prot)
4062 			ret = wl1271_acx_cts_protect(wl, wlvif,
4063 						     CTSPROTECT_ENABLE);
4064 		else
4065 			ret = wl1271_acx_cts_protect(wl, wlvif,
4066 						     CTSPROTECT_DISABLE);
4067 		if (ret < 0) {
4068 			wl1271_warning("Set ctsprotect failed %d", ret);
4069 			goto out;
4070 		}
4071 	}
4072 
4073 out:
4074 	return ret;
4075 }
4076 
4077 static int wlcore_set_beacon_template(struct wl1271 *wl,
4078 				      struct ieee80211_vif *vif,
4079 				      bool is_ap)
4080 {
4081 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4082 	struct ieee80211_hdr *hdr;
4083 	u32 min_rate;
4084 	int ret;
4085 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4086 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4087 	u16 tmpl_id;
4088 
4089 	if (!beacon) {
4090 		ret = -EINVAL;
4091 		goto out;
4092 	}
4093 
4094 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4095 
4096 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4097 	if (ret < 0) {
4098 		dev_kfree_skb(beacon);
4099 		goto out;
4100 	}
4101 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4102 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4103 		CMD_TEMPL_BEACON;
4104 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4105 				      beacon->data,
4106 				      beacon->len, 0,
4107 				      min_rate);
4108 	if (ret < 0) {
4109 		dev_kfree_skb(beacon);
4110 		goto out;
4111 	}
4112 
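	/*
	 * WMM is considered enabled for this vif when the beacon carries
	 * the Microsoft WMM vendor IE.
	 */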
4113 	wlvif->wmm_enabled =
4114 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4115 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4116 					beacon->data + ieoffset,
4117 					beacon->len - ieoffset);
4118 
4119 	/*
4120 	 * If a probe-resp template was already set explicitly by
4121 	 * userspace, don't derive one from the beacon data.
4122 	 */
4123 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4124 		goto end_bcn;
4125 
4126 	/* remove TIM ie from probe response */
4127 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4128 
4129 	/*
4130 	 * remove the p2p ie from the probe response.
4131 	 * the fw responds to probe requests that don't include
4132 	 * the p2p ie. probe requests with a p2p ie will be passed up,
4133 	 * and will be answered by the supplicant (the spec
4134 	 * forbids including the p2p ie when responding to probe
4135 	 * requests that didn't include it).
4136 	 */
4137 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4138 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4139 
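	/*
	 * Reuse the (stripped) beacon as a probe response template by
	 * rewriting the frame control field to PROBE_RESP.
	 */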
4140 	hdr = (struct ieee80211_hdr *) beacon->data;
4141 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4142 					 IEEE80211_STYPE_PROBE_RESP);
4143 	if (is_ap)
4144 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4145 							   beacon->data,
4146 							   beacon->len,
4147 							   min_rate);
4148 	else
4149 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4150 					      CMD_TEMPL_PROBE_RESPONSE,
4151 					      beacon->data,
4152 					      beacon->len, 0,
4153 					      min_rate);
4154 end_bcn:
4155 	dev_kfree_skb(beacon);
4156 	if (ret < 0)
4157 		goto out;
4158 
4159 out:
4160 	return ret;
4161 }
4162 
4163 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4164 					  struct ieee80211_vif *vif,
4165 					  struct ieee80211_bss_conf *bss_conf,
4166 					  u32 changed)
4167 {
4168 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4169 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4170 	int ret = 0;
4171 
4172 	if (changed & BSS_CHANGED_BEACON_INT) {
4173 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4174 			bss_conf->beacon_int);
4175 
4176 		wlvif->beacon_int = bss_conf->beacon_int;
4177 	}
4178 
4179 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4180 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4181 
4182 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4183 	}
4184 
4185 	if (changed & BSS_CHANGED_BEACON) {
4186 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4187 		if (ret < 0)
4188 			goto out;
4189 
4190 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4191 				       &wlvif->flags)) {
4192 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4193 			if (ret < 0)
4194 				goto out;
4195 		}
4196 	}
4197 out:
4198 	if (ret != 0)
4199 		wl1271_error("beacon info change failed: %d", ret);
4200 	return ret;
4201 }
4202 
4203 /* AP mode changes */
4204 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4205 				       struct ieee80211_vif *vif,
4206 				       struct ieee80211_bss_conf *bss_conf,
4207 				       u32 changed)
4208 {
4209 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4210 	int ret = 0;
4211 
4212 	if (changed & BSS_CHANGED_BASIC_RATES) {
4213 		u32 rates = bss_conf->basic_rates;
4214 
4215 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4216 								 wlvif->band);
4217 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4218 							wlvif->basic_rate_set);
4219 
4220 		ret = wl1271_init_ap_rates(wl, wlvif);
4221 		if (ret < 0) {
4222 			wl1271_error("AP rate policy change failed %d", ret);
4223 			goto out;
4224 		}
4225 
4226 		ret = wl1271_ap_init_templates(wl, vif);
4227 		if (ret < 0)
4228 			goto out;
4229 
4230 		/* No need to set probe resp template for mesh */
4231 		if (!ieee80211_vif_is_mesh(vif)) {
4232 			ret = wl1271_ap_set_probe_resp_tmpl(wl,
4233 							    wlvif->basic_rate,
4234 							    vif);
4235 			if (ret < 0)
4236 				goto out;
4237 		}
4238 
4239 		ret = wlcore_set_beacon_template(wl, vif, true);
4240 		if (ret < 0)
4241 			goto out;
4242 	}
4243 
4244 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4245 	if (ret < 0)
4246 		goto out;
4247 
4248 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4249 		if (bss_conf->enable_beacon) {
4250 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4251 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4252 				if (ret < 0)
4253 					goto out;
4254 
4255 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4256 				if (ret < 0)
4257 					goto out;
4258 
4259 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4260 				wl1271_debug(DEBUG_AP, "started AP");
4261 			}
4262 		} else {
4263 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4264 				/*
4265 				 * The AP might still be in ROC if we have
4266 				 * just sent an auth reply. Handle it.
4267 				 */
4268 				if (test_bit(wlvif->role_id, wl->roc_map))
4269 					wl12xx_croc(wl, wlvif->role_id);
4270 
4271 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4272 				if (ret < 0)
4273 					goto out;
4274 
4275 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4276 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4277 					  &wlvif->flags);
4278 				wl1271_debug(DEBUG_AP, "stopped AP");
4279 			}
4280 		}
4281 	}
4282 
4283 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4284 	if (ret < 0)
4285 		goto out;
4286 
4287 	/* Handle HT information change */
4288 	if ((changed & BSS_CHANGED_HT) &&
4289 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4290 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4291 					bss_conf->ht_operation_mode);
4292 		if (ret < 0) {
4293 			wl1271_warning("Set ht information failed %d", ret);
4294 			goto out;
4295 		}
4296 	}
4297 
4298 out:
4299 	return;
4300 }
4301 
4302 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4303 			    struct ieee80211_bss_conf *bss_conf,
4304 			    u32 sta_rate_set)
4305 {
4306 	u32 rates;
4307 	int ret;
4308 
4309 	wl1271_debug(DEBUG_MAC80211,
4310 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4311 	     bss_conf->bssid, bss_conf->aid,
4312 	     bss_conf->beacon_int,
4313 	     bss_conf->basic_rates, sta_rate_set);
4314 
4315 	wlvif->beacon_int = bss_conf->beacon_int;
4316 	rates = bss_conf->basic_rates;
4317 	wlvif->basic_rate_set =
4318 		wl1271_tx_enabled_rates_get(wl, rates,
4319 					    wlvif->band);
4320 	wlvif->basic_rate =
4321 		wl1271_tx_min_rate_get(wl,
4322 				       wlvif->basic_rate_set);
4323 
4324 	if (sta_rate_set)
4325 		wlvif->rate_set =
4326 			wl1271_tx_enabled_rates_get(wl,
4327 						sta_rate_set,
4328 						wlvif->band);
4329 
4330 	/* we only support sched_scan while not connected */
4331 	if (wl->sched_vif == wlvif)
4332 		wl->ops->sched_scan_stop(wl, wlvif);
4333 
4334 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4335 	if (ret < 0)
4336 		return ret;
4337 
4338 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4339 	if (ret < 0)
4340 		return ret;
4341 
4342 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4343 	if (ret < 0)
4344 		return ret;
4345 
4346 	wlcore_set_ssid(wl, wlvif);
4347 
4348 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4349 
4350 	return 0;
4351 }
4352 
4353 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4354 {
4355 	int ret;
4356 
4357 	/* revert to the minimum rates for the current band */
4358 	wl1271_set_band_rate(wl, wlvif);
4359 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4360 
4361 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4362 	if (ret < 0)
4363 		return ret;
4364 
4365 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4366 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4367 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4368 		if (ret < 0)
4369 			return ret;
4370 	}
4371 
4372 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4373 	return 0;
4374 }
4375 /* STA/IBSS mode changes */
4376 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4377 					struct ieee80211_vif *vif,
4378 					struct ieee80211_bss_conf *bss_conf,
4379 					u32 changed)
4380 {
4381 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4382 	bool do_join = false;
4383 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4384 	bool ibss_joined = false;
4385 	u32 sta_rate_set = 0;
4386 	int ret;
4387 	struct ieee80211_sta *sta;
4388 	bool sta_exists = false;
4389 	struct ieee80211_sta_ht_cap sta_ht_cap;
4390 
4391 	if (is_ibss) {
4392 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4393 						     changed);
4394 		if (ret < 0)
4395 			goto out;
4396 	}
4397 
4398 	if (changed & BSS_CHANGED_IBSS) {
4399 		if (bss_conf->ibss_joined) {
4400 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4401 			ibss_joined = true;
4402 		} else {
4403 			wlcore_unset_assoc(wl, wlvif);
4404 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4405 		}
4406 	}
4407 
4408 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4409 		do_join = true;
4410 
4411 	/* Need to update the SSID (for filtering etc) */
4412 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4413 		do_join = true;
4414 
4415 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4416 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4417 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4418 
4419 		do_join = true;
4420 	}
4421 
4422 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4423 		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4424 
4425 	if (changed & BSS_CHANGED_CQM) {
4426 		bool enable = false;
4427 		if (bss_conf->cqm_rssi_thold)
4428 			enable = true;
4429 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4430 						  bss_conf->cqm_rssi_thold,
4431 						  bss_conf->cqm_rssi_hyst);
4432 		if (ret < 0)
4433 			goto out;
4434 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4435 	}
4436 
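	/*
	 * Snapshot the AP's supported rates and HT capabilities under RCU;
	 * when HT is supported, the first two MCS rx_mask bytes are folded
	 * into sta_rate_set at the HW_HT/HW_MIMO rate offsets.
	 */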
4437 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4438 		       BSS_CHANGED_ASSOC)) {
4439 		rcu_read_lock();
4440 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4441 		if (sta) {
4442 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4443 
4444 			/* save the supp_rates of the ap */
4445 			sta_rate_set = sta->supp_rates[wlvif->band];
4446 			if (sta->ht_cap.ht_supported)
4447 				sta_rate_set |=
4448 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4449 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4450 			sta_ht_cap = sta->ht_cap;
4451 			sta_exists = true;
4452 		}
4453 
4454 		rcu_read_unlock();
4455 	}
4456 
4457 	if (changed & BSS_CHANGED_BSSID) {
4458 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4459 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4460 					       sta_rate_set);
4461 			if (ret < 0)
4462 				goto out;
4463 
4464 			/* Need to update the BSSID (for filtering etc) */
4465 			do_join = true;
4466 		} else {
4467 			ret = wlcore_clear_bssid(wl, wlvif);
4468 			if (ret < 0)
4469 				goto out;
4470 		}
4471 	}
4472 
4473 	if (changed & BSS_CHANGED_IBSS) {
4474 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4475 			     bss_conf->ibss_joined);
4476 
4477 		if (bss_conf->ibss_joined) {
4478 			u32 rates = bss_conf->basic_rates;
4479 			wlvif->basic_rate_set =
4480 				wl1271_tx_enabled_rates_get(wl, rates,
4481 							    wlvif->band);
4482 			wlvif->basic_rate =
4483 				wl1271_tx_min_rate_get(wl,
4484 						       wlvif->basic_rate_set);
4485 
4486 			/* by default, use 11b + OFDM rates */
4487 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4488 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4489 			if (ret < 0)
4490 				goto out;
4491 		}
4492 	}
4493 
4494 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4495 		/* enable beacon filtering */
4496 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4497 		if (ret < 0)
4498 			goto out;
4499 	}
4500 
4501 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4502 	if (ret < 0)
4503 		goto out;
4504 
4505 	if (do_join) {
4506 		ret = wlcore_join(wl, wlvif);
4507 		if (ret < 0) {
4508 			wl1271_warning("cmd join failed %d", ret);
4509 			goto out;
4510 		}
4511 	}
4512 
4513 	if (changed & BSS_CHANGED_ASSOC) {
4514 		if (bss_conf->assoc) {
4515 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4516 					       sta_rate_set);
4517 			if (ret < 0)
4518 				goto out;
4519 
4520 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4521 				wl12xx_set_authorized(wl, wlvif);
4522 		} else {
4523 			wlcore_unset_assoc(wl, wlvif);
4524 		}
4525 	}
4526 
4527 	if (changed & BSS_CHANGED_PS) {
4528 		if ((bss_conf->ps) &&
4529 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4530 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4531 			int ps_mode;
4532 			char *ps_mode_str;
4533 
4534 			if (wl->conf.conn.forced_ps) {
4535 				ps_mode = STATION_POWER_SAVE_MODE;
4536 				ps_mode_str = "forced";
4537 			} else {
4538 				ps_mode = STATION_AUTO_PS_MODE;
4539 				ps_mode_str = "auto";
4540 			}
4541 
4542 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4543 
4544 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4545 			if (ret < 0)
4546 				wl1271_warning("enter %s ps failed %d",
4547 					       ps_mode_str, ret);
4548 		} else if (!bss_conf->ps &&
4549 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4550 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4551 
4552 			ret = wl1271_ps_set_mode(wl, wlvif,
4553 						 STATION_ACTIVE_MODE);
4554 			if (ret < 0)
4555 				wl1271_warning("exit auto ps failed %d", ret);
4556 		}
4557 	}
4558 
4559 	/* Handle new association with HT. Do this after join. */
4560 	if (sta_exists) {
4561 		bool enabled =
4562 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4563 
4564 		ret = wlcore_hw_set_peer_cap(wl,
4565 					     &sta_ht_cap,
4566 					     enabled,
4567 					     wlvif->rate_set,
4568 					     wlvif->sta.hlid);
4569 		if (ret < 0) {
4570 			wl1271_warning("Set ht cap failed %d", ret);
4571 			goto out;
4573 		}
4574 
4575 		if (enabled) {
4576 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4577 						bss_conf->ht_operation_mode);
4578 			if (ret < 0) {
4579 				wl1271_warning("Set ht information failed %d",
4580 					       ret);
4581 				goto out;
4582 			}
4583 		}
4584 	}
4585 
4586 	/* Handle arp filtering. Done after join. */
4587 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4588 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4589 		__be32 addr = bss_conf->arp_addr_list[0];
4590 		wlvif->sta.qos = bss_conf->qos;
4591 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4592 
4593 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4594 			wlvif->ip_addr = addr;
4595 			/*
4596 			 * The template should have been configured only upon
4597 			 * association. However, it seems that the correct IP
4598 			 * isn't being set (when sending), so we have to
4599 			 * reconfigure the template upon every IP change.
4600 			 */
4601 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4602 			if (ret < 0) {
4603 				wl1271_warning("build arp rsp failed: %d", ret);
4604 				goto out;
4605 			}
4606 
4607 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4608 				(ACX_ARP_FILTER_ARP_FILTERING |
4609 				 ACX_ARP_FILTER_AUTO_ARP),
4610 				addr);
4611 		} else {
4612 			wlvif->ip_addr = 0;
4613 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4614 		}
4615 
4616 		if (ret < 0)
4617 			goto out;
4618 	}
4619 
4620 out:
4621 	return;
4622 }
4623 
4624 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4625 				       struct ieee80211_vif *vif,
4626 				       struct ieee80211_bss_conf *bss_conf,
4627 				       u32 changed)
4628 {
4629 	struct wl1271 *wl = hw->priv;
4630 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4631 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4632 	int ret;
4633 
4634 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4635 		     wlvif->role_id, (int)changed);
4636 
4637 	/*
4638 	 * make sure to cancel pending disconnections if our association
4639 	 * state changed
4640 	 */
4641 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4642 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4643 
4644 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4645 	    !bss_conf->enable_beacon)
4646 		wl1271_tx_flush(wl);
4647 
4648 	mutex_lock(&wl->mutex);
4649 
4650 	if (unlikely(wl->state != WLCORE_STATE_ON))
4651 		goto out;
4652 
4653 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4654 		goto out;
4655 
4656 	ret = pm_runtime_get_sync(wl->dev);
4657 	if (ret < 0) {
4658 		pm_runtime_put_noidle(wl->dev);
4659 		goto out;
4660 	}
4661 
4662 	if ((changed & BSS_CHANGED_TXPOWER) &&
4663 	    bss_conf->txpower != wlvif->power_level) {
4664 
4665 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4666 		if (ret < 0)
4667 			goto out;
4668 
4669 		wlvif->power_level = bss_conf->txpower;
4670 	}
4671 
4672 	if (is_ap)
4673 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4674 	else
4675 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4676 
4677 	pm_runtime_mark_last_busy(wl->dev);
4678 	pm_runtime_put_autosuspend(wl->dev);
4679 
4680 out:
4681 	mutex_unlock(&wl->mutex);
4682 }
4683 
4684 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4685 				 struct ieee80211_chanctx_conf *ctx)
4686 {
4687 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4688 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4689 		     cfg80211_get_chandef_type(&ctx->def));
4690 	return 0;
4691 }
4692 
4693 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4694 				     struct ieee80211_chanctx_conf *ctx)
4695 {
4696 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4697 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4698 		     cfg80211_get_chandef_type(&ctx->def));
4699 }
4700 
4701 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4702 				     struct ieee80211_chanctx_conf *ctx,
4703 				     u32 changed)
4704 {
4705 	struct wl1271 *wl = hw->priv;
4706 	struct wl12xx_vif *wlvif;
4707 	int ret;
4708 	int channel = ieee80211_frequency_to_channel(
4709 		ctx->def.chan->center_freq);
4710 
4711 	wl1271_debug(DEBUG_MAC80211,
4712 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4713 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4714 
4715 	mutex_lock(&wl->mutex);
4716 
4717 	ret = pm_runtime_get_sync(wl->dev);
4718 	if (ret < 0) {
4719 		pm_runtime_put_noidle(wl->dev);
4720 		goto out;
4721 	}
4722 
4723 	wl12xx_for_each_wlvif(wl, wlvif) {
4724 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4725 
4726 		rcu_read_lock();
4727 		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4728 			rcu_read_unlock();
4729 			continue;
4730 		}
4731 		rcu_read_unlock();
4732 
4733 		/* start radar if needed */
4734 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4735 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4736 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4737 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4738 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4739 			wlcore_hw_set_cac(wl, wlvif, true);
4740 			wlvif->radar_enabled = true;
4741 		}
4742 	}
4743 
4744 	pm_runtime_mark_last_busy(wl->dev);
4745 	pm_runtime_put_autosuspend(wl->dev);
4746 out:
4747 	mutex_unlock(&wl->mutex);
4748 }
4749 
4750 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4751 					struct ieee80211_vif *vif,
4752 					struct ieee80211_chanctx_conf *ctx)
4753 {
4754 	struct wl1271 *wl = hw->priv;
4755 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4756 	int channel = ieee80211_frequency_to_channel(
4757 		ctx->def.chan->center_freq);
4758 	int ret = -EINVAL;
4759 
4760 	wl1271_debug(DEBUG_MAC80211,
4761 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4762 		     wlvif->role_id, channel,
4763 		     cfg80211_get_chandef_type(&ctx->def),
4764 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4765 
4766 	mutex_lock(&wl->mutex);
4767 
4768 	if (unlikely(wl->state != WLCORE_STATE_ON))
4769 		goto out;
4770 
4771 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4772 		goto out;
4773 
4774 	ret = pm_runtime_get_sync(wl->dev);
4775 	if (ret < 0) {
4776 		pm_runtime_put_noidle(wl->dev);
4777 		goto out;
4778 	}
4779 
4780 	wlvif->band = ctx->def.chan->band;
4781 	wlvif->channel = channel;
4782 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4783 
4784 	/* update default rates according to the band */
4785 	wl1271_set_band_rate(wl, wlvif);
4786 
4787 	if (ctx->radar_enabled &&
4788 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4789 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4790 		wlcore_hw_set_cac(wl, wlvif, true);
4791 		wlvif->radar_enabled = true;
4792 	}
4793 
4794 	pm_runtime_mark_last_busy(wl->dev);
4795 	pm_runtime_put_autosuspend(wl->dev);
4796 out:
4797 	mutex_unlock(&wl->mutex);
4798 
4799 	return 0;
4800 }
4801 
4802 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4803 					   struct ieee80211_vif *vif,
4804 					   struct ieee80211_chanctx_conf *ctx)
4805 {
4806 	struct wl1271 *wl = hw->priv;
4807 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4808 	int ret;
4809 
4810 	wl1271_debug(DEBUG_MAC80211,
4811 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4812 		     wlvif->role_id,
4813 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4814 		     cfg80211_get_chandef_type(&ctx->def));
4815 
4816 	wl1271_tx_flush(wl);
4817 
4818 	mutex_lock(&wl->mutex);
4819 
4820 	if (unlikely(wl->state != WLCORE_STATE_ON))
4821 		goto out;
4822 
4823 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4824 		goto out;
4825 
4826 	ret = pm_runtime_get_sync(wl->dev);
4827 	if (ret < 0) {
4828 		pm_runtime_put_noidle(wl->dev);
4829 		goto out;
4830 	}
4831 
4832 	if (wlvif->radar_enabled) {
4833 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4834 		wlcore_hw_set_cac(wl, wlvif, false);
4835 		wlvif->radar_enabled = false;
4836 	}
4837 
4838 	pm_runtime_mark_last_busy(wl->dev);
4839 	pm_runtime_put_autosuspend(wl->dev);
4840 out:
4841 	mutex_unlock(&wl->mutex);
4842 }
4843 
4844 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4845 				    struct wl12xx_vif *wlvif,
4846 				    struct ieee80211_chanctx_conf *new_ctx)
4847 {
4848 	int channel = ieee80211_frequency_to_channel(
4849 		new_ctx->def.chan->center_freq);
4850 
4851 	wl1271_debug(DEBUG_MAC80211,
4852 		     "switch vif (role %d) %d -> %d chan_type: %d",
4853 		     wlvif->role_id, wlvif->channel, channel,
4854 		     cfg80211_get_chandef_type(&new_ctx->def));
4855 
4856 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4857 		return 0;
4858 
4859 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4860 
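	/*
	 * Stop any running CAC on the old channel; it is restarted below
	 * if the new context still has radar detection enabled.
	 */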
4861 	if (wlvif->radar_enabled) {
4862 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4863 		wlcore_hw_set_cac(wl, wlvif, false);
4864 		wlvif->radar_enabled = false;
4865 	}
4866 
4867 	wlvif->band = new_ctx->def.chan->band;
4868 	wlvif->channel = channel;
4869 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4870 
4871 	/* start radar if needed */
4872 	if (new_ctx->radar_enabled) {
4873 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4874 		wlcore_hw_set_cac(wl, wlvif, true);
4875 		wlvif->radar_enabled = true;
4876 	}
4877 
4878 	return 0;
4879 }
4880 
4881 static int
4882 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4883 			     struct ieee80211_vif_chanctx_switch *vifs,
4884 			     int n_vifs,
4885 			     enum ieee80211_chanctx_switch_mode mode)
4886 {
4887 	struct wl1271 *wl = hw->priv;
4888 	int i, ret;
4889 
4890 	wl1271_debug(DEBUG_MAC80211,
4891 		     "mac80211 switch chanctx n_vifs %d mode %d",
4892 		     n_vifs, mode);
4893 
4894 	mutex_lock(&wl->mutex);
4895 
4896 	ret = pm_runtime_get_sync(wl->dev);
4897 	if (ret < 0) {
4898 		pm_runtime_put_noidle(wl->dev);
4899 		goto out;
4900 	}
4901 
4902 	for (i = 0; i < n_vifs; i++) {
4903 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4904 
4905 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4906 		if (ret)
4907 			goto out_sleep;
4908 	}
4909 out_sleep:
4910 	pm_runtime_mark_last_busy(wl->dev);
4911 	pm_runtime_put_autosuspend(wl->dev);
4912 out:
4913 	mutex_unlock(&wl->mutex);
4914 
4915 	return 0;
4916 }
4917 
4918 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4919 			     struct ieee80211_vif *vif, u16 queue,
4920 			     const struct ieee80211_tx_queue_params *params)
4921 {
4922 	struct wl1271 *wl = hw->priv;
4923 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4924 	u8 ps_scheme;
4925 	int ret = 0;
4926 
4927 	if (wlcore_is_p2p_mgmt(wlvif))
4928 		return 0;
4929 
4930 	mutex_lock(&wl->mutex);
4931 
4932 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4933 
4934 	if (params->uapsd)
4935 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4936 	else
4937 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4938 
4939 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4940 		goto out;
4941 
4942 	ret = pm_runtime_get_sync(wl->dev);
4943 	if (ret < 0) {
4944 		pm_runtime_put_noidle(wl->dev);
4945 		goto out;
4946 	}
4947 
4948 	/*
4949 	 * mac80211 passes the txop in units of 32us,
4950 	 * but the FW expects microseconds
4951 	 */
4952 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4953 				params->cw_min, params->cw_max,
4954 				params->aifs, params->txop << 5);
4955 	if (ret < 0)
4956 		goto out_sleep;
4957 
4958 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4959 				 CONF_CHANNEL_TYPE_EDCF,
4960 				 wl1271_tx_get_queue(queue),
4961 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4962 				 0, 0);
4963 
4964 out_sleep:
4965 	pm_runtime_mark_last_busy(wl->dev);
4966 	pm_runtime_put_autosuspend(wl->dev);
4967 
4968 out:
4969 	mutex_unlock(&wl->mutex);
4970 
4971 	return ret;
4972 }
4973 
4974 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4975 			     struct ieee80211_vif *vif)
4976 {
4978 	struct wl1271 *wl = hw->priv;
4979 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4980 	u64 mactime = ULLONG_MAX;
4981 	int ret;
4982 
4983 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4984 
4985 	mutex_lock(&wl->mutex);
4986 
4987 	if (unlikely(wl->state != WLCORE_STATE_ON))
4988 		goto out;
4989 
4990 	ret = pm_runtime_get_sync(wl->dev);
4991 	if (ret < 0) {
4992 		pm_runtime_put_noidle(wl->dev);
4993 		goto out;
4994 	}
4995 
4996 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4997 	if (ret < 0)
4998 		goto out_sleep;
4999 
5000 out_sleep:
5001 	pm_runtime_mark_last_busy(wl->dev);
5002 	pm_runtime_put_autosuspend(wl->dev);
5003 
5004 out:
5005 	mutex_unlock(&wl->mutex);
5006 	return mactime;
5007 }
5008 
5009 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
5010 				struct survey_info *survey)
5011 {
5012 	struct ieee80211_conf *conf = &hw->conf;
5013 
5014 	if (idx != 0)
5015 		return -ENOENT;
5016 
5017 	survey->channel = conf->chandef.chan;
5018 	survey->filled = 0;
5019 	return 0;
5020 }
5021 
5022 static int wl1271_allocate_sta(struct wl1271 *wl,
5023 			     struct wl12xx_vif *wlvif,
5024 			     struct ieee80211_sta *sta)
5025 {
5026 	struct wl1271_station *wl_sta;
5027 	int ret;
5028 
5030 	if (wl->active_sta_count >= wl->max_ap_stations) {
5031 		wl1271_warning("could not allocate HLID - too many stations");
5032 		return -EBUSY;
5033 	}
5034 
5035 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5036 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
5037 	if (ret < 0) {
5038 		wl1271_warning("could not allocate HLID - too many links");
5039 		return -EBUSY;
5040 	}
5041 
5042 	/* use the previous security seq, if this is a recovery/resume */
5043 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
5044 
5045 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
5046 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
5047 	wl->active_sta_count++;
5048 	return 0;
5049 }
5050 
5051 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
5052 {
5053 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
5054 		return;
5055 
5056 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
5057 	__clear_bit(hlid, &wl->ap_ps_map);
5058 	__clear_bit(hlid, &wl->ap_fw_ps_map);
5059 
5060 	/*
5061 	 * save the last used PN in the private part of ieee80211_sta,
5062 	 * in case of recovery/suspend
5063 	 */
5064 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5065 
5066 	wl12xx_free_link(wl, wlvif, &hlid);
5067 	wl->active_sta_count--;
5068 
5069 	/*
5070 	 * rearm the tx watchdog when the last STA is freed - give the FW a
5071 	 * chance to return STA-buffered packets before complaining.
5072 	 */
5073 	if (wl->active_sta_count == 0)
5074 		wl12xx_rearm_tx_watchdog_locked(wl);
5075 }
5076 
5077 static int wl12xx_sta_add(struct wl1271 *wl,
5078 			  struct wl12xx_vif *wlvif,
5079 			  struct ieee80211_sta *sta)
5080 {
5081 	struct wl1271_station *wl_sta;
5082 	int ret = 0;
5083 	u8 hlid;
5084 
5085 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5086 
5087 	ret = wl1271_allocate_sta(wl, wlvif, sta);
5088 	if (ret < 0)
5089 		return ret;
5090 
5091 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5092 	hlid = wl_sta->hlid;
5093 
5094 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5095 	if (ret < 0)
5096 		wl1271_free_sta(wl, wlvif, hlid);
5097 
5098 	return ret;
5099 }
5100 
5101 static int wl12xx_sta_remove(struct wl1271 *wl,
5102 			     struct wl12xx_vif *wlvif,
5103 			     struct ieee80211_sta *sta)
5104 {
5105 	struct wl1271_station *wl_sta;
5106 	int ret = 0, id;
5107 
5108 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5109 
5110 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5111 	id = wl_sta->hlid;
5112 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5113 		return -EINVAL;
5114 
5115 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5116 	if (ret < 0)
5117 		return ret;
5118 
5119 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5120 	return ret;
5121 }
5122 
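/*
 * Start a ROC on this role's channel, but only if no role currently
 * holds a ROC and this role's id is valid.
 */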
5123 static void wlcore_roc_if_possible(struct wl1271 *wl,
5124 				   struct wl12xx_vif *wlvif)
5125 {
5126 	if (find_first_bit(wl->roc_map,
5127 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5128 		return;
5129 
5130 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5131 		return;
5132 
5133 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5134 }
5135 
5136 /*
5137  * when wl_sta is NULL, we treat this call as if coming from a
5138  * pending auth reply.
5139  * wl->mutex must be taken and the FW must be awake when the call
5140  * takes place.
5141  */
5142 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5143 			      struct wl1271_station *wl_sta, bool in_conn)
5144 {
5145 	if (in_conn) {
5146 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5147 			return;
5148 
5149 		if (!wlvif->ap_pending_auth_reply &&
5150 		    !wlvif->inconn_count)
5151 			wlcore_roc_if_possible(wl, wlvif);
5152 
5153 		if (wl_sta) {
5154 			wl_sta->in_connection = true;
5155 			wlvif->inconn_count++;
5156 		} else {
5157 			wlvif->ap_pending_auth_reply = true;
5158 		}
5159 	} else {
5160 		if (wl_sta && !wl_sta->in_connection)
5161 			return;
5162 
5163 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5164 			return;
5165 
5166 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5167 			return;
5168 
5169 		if (wl_sta) {
5170 			wl_sta->in_connection = false;
5171 			wlvif->inconn_count--;
5172 		} else {
5173 			wlvif->ap_pending_auth_reply = false;
5174 		}
5175 
5176 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5177 		    test_bit(wlvif->role_id, wl->roc_map))
5178 			wl12xx_croc(wl, wlvif->role_id);
5179 	}
5180 }
5181 
5182 static int wl12xx_update_sta_state(struct wl1271 *wl,
5183 				   struct wl12xx_vif *wlvif,
5184 				   struct ieee80211_sta *sta,
5185 				   enum ieee80211_sta_state old_state,
5186 				   enum ieee80211_sta_state new_state)
5187 {
5188 	struct wl1271_station *wl_sta;
5189 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5190 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5191 	int ret;
5192 
5193 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5194 
5195 	/* Add station (AP mode) */
5196 	if (is_ap &&
5197 	    old_state == IEEE80211_STA_NOTEXIST &&
5198 	    new_state == IEEE80211_STA_NONE) {
5199 		ret = wl12xx_sta_add(wl, wlvif, sta);
5200 		if (ret)
5201 			return ret;
5202 
5203 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5204 	}
5205 
5206 	/* Remove station (AP mode) */
5207 	if (is_ap &&
5208 	    old_state == IEEE80211_STA_NONE &&
5209 	    new_state == IEEE80211_STA_NOTEXIST) {
5210 		/* must not fail */
5211 		wl12xx_sta_remove(wl, wlvif, sta);
5212 
5213 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5214 	}
5215 
5216 	/* Authorize station (AP mode) */
5217 	if (is_ap &&
5218 	    new_state == IEEE80211_STA_AUTHORIZED) {
5219 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5220 		if (ret < 0)
5221 			return ret;
5222 
5223 		/* reconfigure rates */
5224 		ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
5225 		if (ret < 0)
5226 			return ret;
5227 
5228 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5229 						     wl_sta->hlid);
5230 		if (ret)
5231 			return ret;
5232 
5233 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5234 	}
5235 
5236 	/* Authorize station */
5237 	if (is_sta &&
5238 	    new_state == IEEE80211_STA_AUTHORIZED) {
5239 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5240 		ret = wl12xx_set_authorized(wl, wlvif);
5241 		if (ret)
5242 			return ret;
5243 	}
5244 
5245 	if (is_sta &&
5246 	    old_state == IEEE80211_STA_AUTHORIZED &&
5247 	    new_state == IEEE80211_STA_ASSOC) {
5248 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5249 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5250 	}
5251 
5252 	/* save seq number on disassoc (suspend) */
5253 	if (is_sta &&
5254 	    old_state == IEEE80211_STA_ASSOC &&
5255 	    new_state == IEEE80211_STA_AUTH) {
5256 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5257 		wlvif->total_freed_pkts = 0;
5258 	}
5259 
5260 	/* restore seq number on assoc (resume) */
5261 	if (is_sta &&
5262 	    old_state == IEEE80211_STA_AUTH &&
5263 	    new_state == IEEE80211_STA_ASSOC) {
5264 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5265 	}
5266 
5267 	/* clear ROCs on failure or authorization */
5268 	if (is_sta &&
5269 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5270 	     new_state == IEEE80211_STA_NOTEXIST)) {
5271 		if (test_bit(wlvif->role_id, wl->roc_map))
5272 			wl12xx_croc(wl, wlvif->role_id);
5273 	}
5274 
5275 	if (is_sta &&
5276 	    old_state == IEEE80211_STA_NOTEXIST &&
5277 	    new_state == IEEE80211_STA_NONE) {
5278 		if (find_first_bit(wl->roc_map,
5279 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5280 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5281 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5282 				   wlvif->band, wlvif->channel);
5283 		}
5284 	}
5285 	return 0;
5286 }
5287 
5288 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5289 			       struct ieee80211_vif *vif,
5290 			       struct ieee80211_sta *sta,
5291 			       enum ieee80211_sta_state old_state,
5292 			       enum ieee80211_sta_state new_state)
5293 {
5294 	struct wl1271 *wl = hw->priv;
5295 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5296 	int ret;
5297 
5298 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5299 		     sta->aid, old_state, new_state);
5300 
5301 	mutex_lock(&wl->mutex);
5302 
5303 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5304 		ret = -EBUSY;
5305 		goto out;
5306 	}
5307 
5308 	ret = pm_runtime_get_sync(wl->dev);
5309 	if (ret < 0) {
5310 		pm_runtime_put_noidle(wl->dev);
5311 		goto out;
5312 	}
5313 
5314 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5315 
5316 	pm_runtime_mark_last_busy(wl->dev);
5317 	pm_runtime_put_autosuspend(wl->dev);
5318 out:
5319 	mutex_unlock(&wl->mutex);
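	/* transitions towards a lower state (teardown) are never reported as failed */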
5320 	if (new_state < old_state)
5321 		return 0;
5322 	return ret;
5323 }
5324 
5325 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5326 				  struct ieee80211_vif *vif,
5327 				  struct ieee80211_ampdu_params *params)
5328 {
5329 	struct wl1271 *wl = hw->priv;
5330 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5331 	int ret;
5332 	u8 hlid, *ba_bitmap;
5333 	struct ieee80211_sta *sta = params->sta;
5334 	enum ieee80211_ampdu_mlme_action action = params->action;
5335 	u16 tid = params->tid;
5336 	u16 *ssn = &params->ssn;
5337 
5338 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5339 		     tid);
5340 
5341 	/* sanity check - the fields in FW are only 8 bits wide */
5342 	if (WARN_ON(tid > 0xFF))
5343 		return -ENOTSUPP;
5344 
5345 	mutex_lock(&wl->mutex);
5346 
5347 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5348 		ret = -EAGAIN;
5349 		goto out;
5350 	}
5351 
5352 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5353 		hlid = wlvif->sta.hlid;
5354 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5355 		struct wl1271_station *wl_sta;
5356 
5357 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5358 		hlid = wl_sta->hlid;
5359 	} else {
5360 		ret = -EINVAL;
5361 		goto out;
5362 	}
5363 
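	/* per-link bitmap of TIDs that currently have an RX BA session */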
5364 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5365 
5366 	ret = pm_runtime_get_sync(wl->dev);
5367 	if (ret < 0) {
5368 		pm_runtime_put_noidle(wl->dev);
5369 		goto out;
5370 	}
5371 
5372 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5373 		     tid, action);
5374 
5375 	switch (action) {
5376 	case IEEE80211_AMPDU_RX_START:
5377 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5378 			ret = -ENOTSUPP;
5379 			break;
5380 		}
5381 
5382 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5383 			ret = -EBUSY;
5384 			wl1271_error("exceeded max RX BA sessions");
5385 			break;
5386 		}
5387 
5388 		if (*ba_bitmap & BIT(tid)) {
5389 			ret = -EINVAL;
5390 			wl1271_error("cannot enable RX BA session on active "
5391 				     "tid: %d", tid);
5392 			break;
5393 		}
5394 
5395 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5396 				hlid,
5397 				params->buf_size);
5398 
5399 		if (!ret) {
5400 			*ba_bitmap |= BIT(tid);
5401 			wl->ba_rx_session_count++;
5402 		}
5403 		break;
5404 
5405 	case IEEE80211_AMPDU_RX_STOP:
5406 		if (!(*ba_bitmap & BIT(tid))) {
5407 			/*
5408 			 * this happens on reconfig - so only output a debug
5409 			 * message for now, and don't fail the function.
5410 			 */
5411 			wl1271_debug(DEBUG_MAC80211,
5412 				     "no active RX BA session on tid: %d",
5413 				     tid);
5414 			ret = 0;
5415 			break;
5416 		}
5417 
5418 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5419 							 hlid, 0);
5420 		if (!ret) {
5421 			*ba_bitmap &= ~BIT(tid);
5422 			wl->ba_rx_session_count--;
5423 		}
5424 		break;
5425 
5426 	/*
5427 	 * BA initiator sessions are managed by the FW independently,
5428 	 * so all TX AMPDU actions are deliberately grouped here.
5429 	 */
5430 	case IEEE80211_AMPDU_TX_START:
5431 	case IEEE80211_AMPDU_TX_STOP_CONT:
5432 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5433 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5434 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5435 		ret = -EINVAL;
5436 		break;
5437 
5438 	default:
5439 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5440 		ret = -EINVAL;
5441 	}
5442 
5443 	pm_runtime_mark_last_busy(wl->dev);
5444 	pm_runtime_put_autosuspend(wl->dev);
5445 
5446 out:
5447 	mutex_unlock(&wl->mutex);
5448 
5449 	return ret;
5450 }
5451 
5452 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5453 				   struct ieee80211_vif *vif,
5454 				   const struct cfg80211_bitrate_mask *mask)
5455 {
5456 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5457 	struct wl1271 *wl = hw->priv;
5458 	int i, ret = 0;
5459 
5460 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5461 		mask->control[NL80211_BAND_2GHZ].legacy,
5462 		mask->control[NL80211_BAND_5GHZ].legacy);
5463 
5464 	mutex_lock(&wl->mutex);
5465 
5466 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5467 		wlvif->bitrate_masks[i] =
5468 			wl1271_tx_enabled_rates_get(wl,
5469 						    mask->control[i].legacy,
5470 						    i);
5471 
5472 	if (unlikely(wl->state != WLCORE_STATE_ON))
5473 		goto out;
5474 
5475 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5476 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5477 
5478 		ret = pm_runtime_get_sync(wl->dev);
5479 		if (ret < 0) {
5480 			pm_runtime_put_noidle(wl->dev);
5481 			goto out;
5482 		}
5483 
5484 		wl1271_set_band_rate(wl, wlvif);
5485 		wlvif->basic_rate =
5486 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5487 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5488 
5489 		pm_runtime_mark_last_busy(wl->dev);
5490 		pm_runtime_put_autosuspend(wl->dev);
5491 	}
5492 out:
5493 	mutex_unlock(&wl->mutex);
5494 
5495 	return ret;
5496 }
5497 
5498 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5499 				     struct ieee80211_vif *vif,
5500 				     struct ieee80211_channel_switch *ch_switch)
5501 {
5502 	struct wl1271 *wl = hw->priv;
5503 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5504 	int ret;
5505 
5506 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5507 
5508 	wl1271_tx_flush(wl);
5509 
5510 	mutex_lock(&wl->mutex);
5511 
5512 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5513 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5514 			ieee80211_chswitch_done(vif, false);
5515 		goto out;
5516 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5517 		goto out;
5518 	}
5519 
5520 	ret = pm_runtime_get_sync(wl->dev);
5521 	if (ret < 0) {
5522 		pm_runtime_put_noidle(wl->dev);
5523 		goto out;
5524 	}
5525 
5526 	/* TODO: change mac80211 to pass vif as param */
5527 
5528 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5529 		unsigned long delay_usec;
5530 
5531 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5532 		if (ret)
5533 			goto out_sleep;
5534 
5535 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5536 
5537 		/* indicate failure 5 seconds after channel switch time */
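		/*
		 * Worked example (hypothetical values): a 100 TU beacon
		 * interval (1 TU = 1024 us) and a CSA count of 10 give a
		 * delay of ~1.02 s, so the failure check runs ~6.02 s
		 * from now.
		 */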
5538 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5539 			ch_switch->count;
5540 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5541 					     usecs_to_jiffies(delay_usec) +
5542 					     msecs_to_jiffies(5000));
5543 	}
5544 
5545 out_sleep:
5546 	pm_runtime_mark_last_busy(wl->dev);
5547 	pm_runtime_put_autosuspend(wl->dev);
5548 
5549 out:
5550 	mutex_unlock(&wl->mutex);
5551 }
5552 
5553 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5554 					struct wl12xx_vif *wlvif,
5555 					u8 eid)
5556 {
5557 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5558 	struct sk_buff *beacon =
5559 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5560 
5561 	if (!beacon)
5562 		return NULL;
5563 
5564 	return cfg80211_find_ie(eid,
5565 				beacon->data + ieoffset,
5566 				beacon->len - ieoffset);
5567 }
5568 
5569 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5570 				u8 *csa_count)
5571 {
5572 	const u8 *ie;
5573 	const struct ieee80211_channel_sw_ie *ie_csa;
5574 
5575 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5576 	if (!ie)
5577 		return -EINVAL;
5578 
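	/* skip the two-byte element header (EID + length) to reach the CSA body */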
5579 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5580 	*csa_count = ie_csa->count;
5581 
5582 	return 0;
5583 }
5584 
5585 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5586 					    struct ieee80211_vif *vif,
5587 					    struct cfg80211_chan_def *chandef)
5588 {
5589 	struct wl1271 *wl = hw->priv;
5590 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5591 	struct ieee80211_channel_switch ch_switch = {
5592 		.block_tx = true,
5593 		.chandef = *chandef,
5594 	};
5595 	int ret;
5596 
5597 	wl1271_debug(DEBUG_MAC80211,
5598 		     "mac80211 channel switch beacon (role %d)",
5599 		     wlvif->role_id);
5600 
5601 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5602 	if (ret < 0) {
5603 		wl1271_error("error getting beacon (for CSA counter)");
5604 		return;
5605 	}
5606 
5607 	mutex_lock(&wl->mutex);
5608 
5609 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5610 		ret = -EBUSY;
5611 		goto out;
5612 	}
5613 
5614 	ret = pm_runtime_get_sync(wl->dev);
5615 	if (ret < 0) {
5616 		pm_runtime_put_noidle(wl->dev);
5617 		goto out;
5618 	}
5619 
5620 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5621 	if (ret)
5622 		goto out_sleep;
5623 
5624 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5625 
5626 out_sleep:
5627 	pm_runtime_mark_last_busy(wl->dev);
5628 	pm_runtime_put_autosuspend(wl->dev);
5629 out:
5630 	mutex_unlock(&wl->mutex);
5631 }
5632 
5633 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5634 			    u32 queues, bool drop)
5635 {
5636 	struct wl1271 *wl = hw->priv;
5637 
5638 	wl1271_tx_flush(wl);
5639 }
5640 
5641 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5642 				       struct ieee80211_vif *vif,
5643 				       struct ieee80211_channel *chan,
5644 				       int duration,
5645 				       enum ieee80211_roc_type type)
5646 {
5647 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5648 	struct wl1271 *wl = hw->priv;
5649 	int channel, active_roc, ret = 0;
5650 
5651 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5652 
5653 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5654 		     channel, wlvif->role_id);
5655 
5656 	mutex_lock(&wl->mutex);
5657 
5658 	if (unlikely(wl->state != WLCORE_STATE_ON))
5659 		goto out;
5660 
5661 	/* return EBUSY if we can't ROC right now */
5662 	active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
5663 	if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
5664 		wl1271_warning("active roc on role %d", active_roc);
5665 		ret = -EBUSY;
5666 		goto out;
5667 	}
5668 
5669 	ret = pm_runtime_get_sync(wl->dev);
5670 	if (ret < 0) {
5671 		pm_runtime_put_noidle(wl->dev);
5672 		goto out;
5673 	}
5674 
5675 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5676 	if (ret < 0)
5677 		goto out_sleep;
5678 
5679 	wl->roc_vif = vif;
5680 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5681 				     msecs_to_jiffies(duration));
5682 out_sleep:
5683 	pm_runtime_mark_last_busy(wl->dev);
5684 	pm_runtime_put_autosuspend(wl->dev);
5685 out:
5686 	mutex_unlock(&wl->mutex);
5687 	return ret;
5688 }
5689 
5690 static int __wlcore_roc_completed(struct wl1271 *wl)
5691 {
5692 	struct wl12xx_vif *wlvif;
5693 	int ret;
5694 
5695 	/* already completed */
5696 	if (unlikely(!wl->roc_vif))
5697 		return 0;
5698 
5699 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5700 
5701 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5702 		return -EBUSY;
5703 
5704 	ret = wl12xx_stop_dev(wl, wlvif);
5705 	if (ret < 0)
5706 		return ret;
5707 
5708 	wl->roc_vif = NULL;
5709 
5710 	return 0;
5711 }
5712 
5713 static int wlcore_roc_completed(struct wl1271 *wl)
5714 {
5715 	int ret;
5716 
5717 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5718 
5719 	mutex_lock(&wl->mutex);
5720 
5721 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5722 		ret = -EBUSY;
5723 		goto out;
5724 	}
5725 
5726 	ret = pm_runtime_get_sync(wl->dev);
5727 	if (ret < 0) {
5728 		pm_runtime_put_noidle(wl->dev);
5729 		goto out;
5730 	}
5731 
5732 	ret = __wlcore_roc_completed(wl);
5733 
5734 	pm_runtime_mark_last_busy(wl->dev);
5735 	pm_runtime_put_autosuspend(wl->dev);
5736 out:
5737 	mutex_unlock(&wl->mutex);
5738 
5739 	return ret;
5740 }
5741 
5742 static void wlcore_roc_complete_work(struct work_struct *work)
5743 {
5744 	struct delayed_work *dwork;
5745 	struct wl1271 *wl;
5746 	int ret;
5747 
5748 	dwork = to_delayed_work(work);
5749 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5750 
5751 	ret = wlcore_roc_completed(wl);
5752 	if (!ret)
5753 		ieee80211_remain_on_channel_expired(wl->hw);
5754 }
5755 
5756 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
5757 					      struct ieee80211_vif *vif)
5758 {
5759 	struct wl1271 *wl = hw->priv;
5760 
5761 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5762 
5763 	/* TODO: per-vif */
5764 	wl1271_tx_flush(wl);
5765 
5766 	/*
5767 	 * we can't just flush_work here, because it might deadlock
5768 	 * (as we might get called from the same workqueue)
5769 	 */
5770 	cancel_delayed_work_sync(&wl->roc_complete_work);
5771 	wlcore_roc_completed(wl);
5772 
5773 	return 0;
5774 }
5775 
5776 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5777 				    struct ieee80211_vif *vif,
5778 				    struct ieee80211_sta *sta,
5779 				    u32 changed)
5780 {
5781 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5782 
5783 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5784 
5785 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5786 		return;
5787 
5788 	/* this callback is atomic, so schedule a new work */
5789 	wlvif->rc_update_bw = sta->bandwidth;
5790 	memcpy(&wlvif->rc_ht_cap, &sta->ht_cap, sizeof(sta->ht_cap));
5791 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5792 }
5793 
5794 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5795 				     struct ieee80211_vif *vif,
5796 				     struct ieee80211_sta *sta,
5797 				     struct station_info *sinfo)
5798 {
5799 	struct wl1271 *wl = hw->priv;
5800 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5801 	s8 rssi_dbm;
5802 	int ret;
5803 
5804 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5805 
5806 	mutex_lock(&wl->mutex);
5807 
5808 	if (unlikely(wl->state != WLCORE_STATE_ON))
5809 		goto out;
5810 
5811 	ret = pm_runtime_get_sync(wl->dev);
5812 	if (ret < 0) {
5813 		pm_runtime_put_noidle(wl->dev);
5814 		goto out_sleep;
5815 	}
5816 
5817 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5818 	if (ret < 0)
5819 		goto out_sleep;
5820 
5821 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
5822 	sinfo->signal = rssi_dbm;
5823 
5824 out_sleep:
5825 	pm_runtime_mark_last_busy(wl->dev);
5826 	pm_runtime_put_autosuspend(wl->dev);
5827 
5828 out:
5829 	mutex_unlock(&wl->mutex);
5830 }
5831 
5832 static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
5833 					     struct ieee80211_sta *sta)
5834 {
5835 	struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
5836 	struct wl1271 *wl = hw->priv;
5837 	u8 hlid = wl_sta->hlid;
5838 
5839 	/* return in units of Kbps */
5840 	return (wl->links[hlid].fw_rate_mbps * 1000);
5841 }
5842 
5843 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5844 {
5845 	struct wl1271 *wl = hw->priv;
5846 	bool ret = false;
5847 
5848 	mutex_lock(&wl->mutex);
5849 
5850 	if (unlikely(wl->state != WLCORE_STATE_ON))
5851 		goto out;
5852 
5853 	/* packets are considered pending if in the TX queue or the FW */
5854 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5855 out:
5856 	mutex_unlock(&wl->mutex);
5857 
5858 	return ret;
5859 }
5860 
5861 /* can't be const, mac80211 writes to this */
5862 static struct ieee80211_rate wl1271_rates[] = {
5863 	{ .bitrate = 10,
5864 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5865 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5866 	{ .bitrate = 20,
5867 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5868 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5869 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5870 	{ .bitrate = 55,
5871 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5872 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5873 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5874 	{ .bitrate = 110,
5875 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5876 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5877 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5878 	{ .bitrate = 60,
5879 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5880 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5881 	{ .bitrate = 90,
5882 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5883 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5884 	{ .bitrate = 120,
5885 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5886 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5887 	{ .bitrate = 180,
5888 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5889 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5890 	{ .bitrate = 240,
5891 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5892 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5893 	{ .bitrate = 360,
5894 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5895 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5896 	{ .bitrate = 480,
5897 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5898 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5899 	{ .bitrate = 540,
5900 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5901 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5902 };
5903 
5904 /* can't be const, mac80211 writes to this */
5905 static struct ieee80211_channel wl1271_channels[] = {
5906 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5907 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5908 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5909 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5910 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5911 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5912 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5913 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5914 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5915 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5916 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5917 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5918 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5919 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5920 };
5921 
5922 /* can't be const, mac80211 writes to this */
5923 static struct ieee80211_supported_band wl1271_band_2ghz = {
5924 	.channels = wl1271_channels,
5925 	.n_channels = ARRAY_SIZE(wl1271_channels),
5926 	.bitrates = wl1271_rates,
5927 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5928 };
5929 
5930 /* 5 GHz data rates for WL1273 */
5931 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5932 	{ .bitrate = 60,
5933 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5934 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5935 	{ .bitrate = 90,
5936 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5937 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5938 	{ .bitrate = 120,
5939 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5940 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5941 	{ .bitrate = 180,
5942 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5943 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5944 	{ .bitrate = 240,
5945 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5946 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5947 	{ .bitrate = 360,
5948 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5949 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5950 	{ .bitrate = 480,
5951 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5952 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5953 	{ .bitrate = 540,
5954 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5955 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5956 };
5957 
5958 /* 5 GHz band channels for WL1273 */
5959 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5960 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5961 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5962 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5963 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5964 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5965 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5966 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5967 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5968 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5969 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5970 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5971 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5972 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5973 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5974 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5975 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5976 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5977 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5978 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5979 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5980 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5981 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5982 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5983 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5984 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5985 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5986 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5987 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5988 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5989 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5990 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5991 };
5992 
5993 static struct ieee80211_supported_band wl1271_band_5ghz = {
5994 	.channels = wl1271_channels_5ghz,
5995 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5996 	.bitrates = wl1271_rates_5ghz,
5997 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5998 };
5999 
6000 static const struct ieee80211_ops wl1271_ops = {
6001 	.start = wl1271_op_start,
6002 	.stop = wlcore_op_stop,
6003 	.add_interface = wl1271_op_add_interface,
6004 	.remove_interface = wl1271_op_remove_interface,
6005 	.change_interface = wl12xx_op_change_interface,
6006 #ifdef CONFIG_PM
6007 	.suspend = wl1271_op_suspend,
6008 	.resume = wl1271_op_resume,
6009 #endif
6010 	.config = wl1271_op_config,
6011 	.prepare_multicast = wl1271_op_prepare_multicast,
6012 	.configure_filter = wl1271_op_configure_filter,
6013 	.tx = wl1271_op_tx,
6014 	.set_key = wlcore_op_set_key,
6015 	.hw_scan = wl1271_op_hw_scan,
6016 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
6017 	.sched_scan_start = wl1271_op_sched_scan_start,
6018 	.sched_scan_stop = wl1271_op_sched_scan_stop,
6019 	.bss_info_changed = wl1271_op_bss_info_changed,
6020 	.set_frag_threshold = wl1271_op_set_frag_threshold,
6021 	.set_rts_threshold = wl1271_op_set_rts_threshold,
6022 	.conf_tx = wl1271_op_conf_tx,
6023 	.get_tsf = wl1271_op_get_tsf,
6024 	.get_survey = wl1271_op_get_survey,
6025 	.sta_state = wl12xx_op_sta_state,
6026 	.ampdu_action = wl1271_op_ampdu_action,
6027 	.tx_frames_pending = wl1271_tx_frames_pending,
6028 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
6029 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
6030 	.channel_switch = wl12xx_op_channel_switch,
6031 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
6032 	.flush = wlcore_op_flush,
6033 	.remain_on_channel = wlcore_op_remain_on_channel,
6034 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
6035 	.add_chanctx = wlcore_op_add_chanctx,
6036 	.remove_chanctx = wlcore_op_remove_chanctx,
6037 	.change_chanctx = wlcore_op_change_chanctx,
6038 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
6039 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
6040 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
6041 	.sta_rc_update = wlcore_op_sta_rc_update,
6042 	.sta_statistics = wlcore_op_sta_statistics,
6043 	.get_expected_throughput = wlcore_op_get_expected_throughput,
6044 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
6045 };
6046 
6047 
6048 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
6049 {
6050 	u8 idx;
6051 
6052 	BUG_ON(band >= 2);
6053 
6054 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
6055 		wl1271_error("Illegal RX rate from HW: %d", rate);
6056 		return 0;
6057 	}
6058 
6059 	idx = wl->band_rate_to_idx[band][rate];
6060 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
6061 		wl1271_error("Unsupported RX rate from HW: %d", rate);
6062 		return 0;
6063 	}
6064 
6065 	return idx;
6066 }
6067 
6068 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
6069 {
6070 	int i;
6071 
6072 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
6073 		     oui, nic);
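	/*
	 * Example (hypothetical values): oui 0x0017f2 and nic 0x000001 give
	 * 00:17:f2:00:00:01 for the first address, with the NIC part
	 * incremented for each further one.  If fewer addresses are
	 * provisioned than wiphy slots, the last slot reuses the first
	 * address with the locally administered bit set (02:17:f2:00:00:01).
	 */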
6074 
6075 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
6076 		wl1271_warning("NIC part of the MAC address wraps around!");
6077 
6078 	for (i = 0; i < wl->num_mac_addr; i++) {
6079 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
6080 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
6081 		wl->addresses[i].addr[2] = (u8) oui;
6082 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
6083 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
6084 		wl->addresses[i].addr[5] = (u8) nic;
6085 		nic++;
6086 	}
6087 
6088 	/* we may be at most one address short */
6089 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
6090 
6091 	/*
6092 	 * turn on the LAA bit in the first address and use it as
6093 	 * the last address.
6094 	 */
6095 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
6096 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
6097 		memcpy(&wl->addresses[idx], &wl->addresses[0],
6098 		       sizeof(wl->addresses[0]));
6099 		/* LAA bit */
6100 		wl->addresses[idx].addr[0] |= BIT(1);
6101 	}
6102 
6103 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
6104 	wl->hw->wiphy->addresses = wl->addresses;
6105 }
6106 
6107 static int wl12xx_get_hw_info(struct wl1271 *wl)
6108 {
6109 	int ret;
6110 
6111 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6112 	if (ret < 0)
6113 		goto out;
6114 
6115 	wl->fuse_oui_addr = 0;
6116 	wl->fuse_nic_addr = 0;
6117 
6118 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6119 	if (ret < 0)
6120 		goto out;
6121 
6122 	if (wl->ops->get_mac)
6123 		ret = wl->ops->get_mac(wl);
6124 
6125 out:
6126 	return ret;
6127 }
6128 
6129 static int wl1271_register_hw(struct wl1271 *wl)
6130 {
6131 	int ret;
6132 	u32 oui_addr = 0, nic_addr = 0;
6133 	struct platform_device *pdev = wl->pdev;
6134 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6135 
6136 	if (wl->mac80211_registered)
6137 		return 0;
6138 
6139 	if (wl->nvs_len >= 12) {
6140 		/* NOTE: the wl->nvs->nvs element must come first; to
6141 		 * simplify the casting we assume it is at the
6142 		 * beginning of the wl->nvs structure.
6143 		 */
6144 		u8 *nvs_ptr = (u8 *)wl->nvs;
6145 
6146 		oui_addr =
6147 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6148 		nic_addr =
6149 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
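		/*
		 * Illustration (hypothetical NVS contents): bytes [3..5] =
		 * 0x56 0x34 0x12 and bytes [6], [10], [11] = 0xf2, 0x17, 0x00
		 * decode to nic_addr = 0x123456 and oui_addr = 0x0017f2.
		 */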
6150 	}
6151 
6152 	/* if the MAC address is zeroed in the NVS derive from fuse */
6153 	if (oui_addr == 0 && nic_addr == 0) {
6154 		oui_addr = wl->fuse_oui_addr;
6155 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6156 		nic_addr = wl->fuse_nic_addr + 1;
6157 	}
6158 
6159 	if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
6160 		wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
6161 		if (!strcmp(pdev_data->family->name, "wl18xx")) {
6162 			wl1271_warning("This default nvs file can be removed from the file system");
6163 		} else {
6164 			wl1271_warning("Your device performance is not optimized.");
6165 			wl1271_warning("Please use the calibrator tool to configure your device.");
6166 		}
6167 
6168 		if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
6169 			wl1271_warning("Fuse mac address is zero, using random mac");
6170 			/* Use TI oui and a random nic */
6171 			oui_addr = WLCORE_TI_OUI_ADDRESS;
6172 			nic_addr = get_random_int();
6173 		} else {
6174 			oui_addr = wl->fuse_oui_addr;
6175 			/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6176 			nic_addr = wl->fuse_nic_addr + 1;
6177 		}
6178 	}
6179 
6180 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6181 
6182 	ret = ieee80211_register_hw(wl->hw);
6183 	if (ret < 0) {
6184 		wl1271_error("unable to register mac80211 hw: %d", ret);
6185 		goto out;
6186 	}
6187 
6188 	wl->mac80211_registered = true;
6189 
6190 	wl1271_debugfs_init(wl);
6191 
6192 	wl1271_notice("loaded");
6193 
6194 out:
6195 	return ret;
6196 }
6197 
6198 static void wl1271_unregister_hw(struct wl1271 *wl)
6199 {
6200 	if (wl->plt)
6201 		wl1271_plt_stop(wl);
6202 
6203 	ieee80211_unregister_hw(wl->hw);
6204 	wl->mac80211_registered = false;
6205 
6206 }
6207 
6208 static int wl1271_init_ieee80211(struct wl1271 *wl)
6209 {
6210 	int i;
6211 	static const u32 cipher_suites[] = {
6212 		WLAN_CIPHER_SUITE_WEP40,
6213 		WLAN_CIPHER_SUITE_WEP104,
6214 		WLAN_CIPHER_SUITE_TKIP,
6215 		WLAN_CIPHER_SUITE_CCMP,
6216 		WL1271_CIPHER_SUITE_GEM,
6217 	};
6218 
6219 	/* The tx descriptor buffer */
6220 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6221 
6222 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6223 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6224 
6225 	/* unit us */
6226 	/* FIXME: find a proper value */
6227 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6228 
6229 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6230 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6231 	ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
6232 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6233 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6234 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6235 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6236 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6237 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6238 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6239 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6240 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6241 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6242 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6243 	ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
6244 
6245 	wl->hw->wiphy->cipher_suites = cipher_suites;
6246 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6247 
6248 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6249 					 BIT(NL80211_IFTYPE_AP) |
6250 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6251 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6252 #ifdef CONFIG_MAC80211_MESH
6253 					 BIT(NL80211_IFTYPE_MESH_POINT) |
6254 #endif
6255 					 BIT(NL80211_IFTYPE_P2P_GO);
6256 
6257 	wl->hw->wiphy->max_scan_ssids = 1;
6258 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6259 	wl->hw->wiphy->max_match_sets = 16;
6260 	/*
6261 	 * Maximum length of elements in scanning probe request templates
6262 	 * should be the maximum length possible for a template, without
6263 	 * the IEEE80211 header of the template
6264 	 */
6265 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6266 			sizeof(struct ieee80211_header);
6267 
6268 	wl->hw->wiphy->max_sched_scan_reqs = 1;
6269 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6270 		sizeof(struct ieee80211_header);
6271 
6272 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6273 
6274 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6275 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6276 				WIPHY_FLAG_HAS_CHANNEL_SWITCH |
6277 				WIPHY_FLAG_IBSS_RSN;
6278 
6279 	wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
6280 
6281 	/* make sure all our channels fit in the scanned_ch bitmask */
6282 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6283 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6284 		     WL1271_MAX_CHANNELS);
6285 	/*
6286 	 * clear channel flags from the previous usage
6287 	 * and restore max_power & max_antenna_gain values.
6288 	 */
6289 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6290 		wl1271_band_2ghz.channels[i].flags = 0;
6291 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6292 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6293 	}
6294 
6295 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6296 		wl1271_band_5ghz.channels[i].flags = 0;
6297 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6298 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6299 	}
6300 
6301 	/*
6302 	 * We keep local copies of the band structs because we need to
6303 	 * modify them on a per-device basis.
6304 	 */
6305 	memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
6306 	       sizeof(wl1271_band_2ghz));
6307 	memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
6308 	       &wl->ht_cap[NL80211_BAND_2GHZ],
6309 	       sizeof(*wl->ht_cap));
6310 	memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
6311 	       sizeof(wl1271_band_5ghz));
6312 	memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
6313 	       &wl->ht_cap[NL80211_BAND_5GHZ],
6314 	       sizeof(*wl->ht_cap));
6315 
6316 	wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
6317 		&wl->bands[NL80211_BAND_2GHZ];
6318 	wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
6319 		&wl->bands[NL80211_BAND_5GHZ];
6320 
6321 	/*
6322 	 * allow 4 queues per mac address we support +
6323 	 * 1 cab queue per mac + one global offchannel Tx queue
6324 	 */
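	/*
	 * e.g. assuming 4 data queues per interface and 3 wiphy addresses,
	 * this works out to (4 + 1) * 3 + 1 = 16 hardware queues.
	 */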
6325 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6326 
6327 	/* the last queue is the offchannel queue */
6328 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6329 	wl->hw->max_rates = 1;
6330 
6331 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6332 
6333 	/* the FW answers probe-requests in AP-mode */
6334 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6335 	wl->hw->wiphy->probe_resp_offload =
6336 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6337 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6338 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6339 
6340 	/* allowed interface combinations */
6341 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6342 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6343 
6344 	/* register vendor commands */
6345 	wlcore_set_vendor_commands(wl->hw->wiphy);
6346 
6347 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6348 
6349 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6350 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6351 
6352 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6353 
6354 	return 0;
6355 }
6356 
6357 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6358 				     u32 mbox_size)
6359 {
6360 	struct ieee80211_hw *hw;
6361 	struct wl1271 *wl;
6362 	int i, j, ret;
6363 	unsigned int order;
6364 
6365 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6366 	if (!hw) {
6367 		wl1271_error("could not alloc ieee80211_hw");
6368 		ret = -ENOMEM;
6369 		goto err_hw_alloc;
6370 	}
6371 
6372 	wl = hw->priv;
6373 	memset(wl, 0, sizeof(*wl));
6374 
6375 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6376 	if (!wl->priv) {
6377 		wl1271_error("could not alloc wl priv");
6378 		ret = -ENOMEM;
6379 		goto err_priv_alloc;
6380 	}
6381 
6382 	INIT_LIST_HEAD(&wl->wlvif_list);
6383 
6384 	wl->hw = hw;
6385 
6386 	/*
6387 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6388 	 * we don't allocate any additional resource here, so that's fine.
6389 	 */
6390 	for (i = 0; i < NUM_TX_QUEUES; i++)
6391 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6392 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6393 
6394 	skb_queue_head_init(&wl->deferred_rx_queue);
6395 	skb_queue_head_init(&wl->deferred_tx_queue);
6396 
6397 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6398 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6399 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6400 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6401 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6402 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6403 
6404 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6405 	if (!wl->freezable_wq) {
6406 		ret = -ENOMEM;
6407 		goto err_hw;
6408 	}
6409 
6410 	wl->channel = 0;
6411 	wl->rx_counter = 0;
6412 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6413 	wl->band = NL80211_BAND_2GHZ;
6414 	wl->channel_type = NL80211_CHAN_NO_HT;
6415 	wl->flags = 0;
6416 	wl->sg_enabled = true;
6417 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6418 	wl->recovery_count = 0;
6419 	wl->hw_pg_ver = -1;
6420 	wl->ap_ps_map = 0;
6421 	wl->ap_fw_ps_map = 0;
6422 	wl->quirks = 0;
6423 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6424 	wl->active_sta_count = 0;
6425 	wl->active_link_count = 0;
6426 	wl->fwlog_size = 0;
6427 
6428 	/* The system link is always allocated */
6429 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6430 
6431 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6432 	for (i = 0; i < wl->num_tx_desc; i++)
6433 		wl->tx_frames[i] = NULL;
6434 
6435 	spin_lock_init(&wl->wl_lock);
6436 
6437 	wl->state = WLCORE_STATE_OFF;
6438 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6439 	mutex_init(&wl->mutex);
6440 	mutex_init(&wl->flush_mutex);
6441 	init_completion(&wl->nvs_loading_complete);
6442 
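	/* the aggregation buffer is rounded up to a power-of-two number of pages */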
6443 	order = get_order(aggr_buf_size);
6444 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6445 	if (!wl->aggr_buf) {
6446 		ret = -ENOMEM;
6447 		goto err_wq;
6448 	}
6449 	wl->aggr_buf_size = aggr_buf_size;
6450 
6451 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6452 	if (!wl->dummy_packet) {
6453 		ret = -ENOMEM;
6454 		goto err_aggr;
6455 	}
6456 
6457 	/* Allocate one page for the FW log */
6458 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6459 	if (!wl->fwlog) {
6460 		ret = -ENOMEM;
6461 		goto err_dummy_packet;
6462 	}
6463 
6464 	wl->mbox_size = mbox_size;
6465 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6466 	if (!wl->mbox) {
6467 		ret = -ENOMEM;
6468 		goto err_fwlog;
6469 	}
6470 
6471 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6472 	if (!wl->buffer_32) {
6473 		ret = -ENOMEM;
6474 		goto err_mbox;
6475 	}
6476 
6477 	return hw;
6478 
6479 err_mbox:
6480 	kfree(wl->mbox);
6481 
6482 err_fwlog:
6483 	free_page((unsigned long)wl->fwlog);
6484 
6485 err_dummy_packet:
6486 	dev_kfree_skb(wl->dummy_packet);
6487 
6488 err_aggr:
6489 	free_pages((unsigned long)wl->aggr_buf, order);
6490 
6491 err_wq:
6492 	destroy_workqueue(wl->freezable_wq);
6493 
6494 err_hw:
6495 	wl1271_debugfs_exit(wl);
6496 	kfree(wl->priv);
6497 
6498 err_priv_alloc:
6499 	ieee80211_free_hw(hw);
6500 
6501 err_hw_alloc:
6502 
6503 	return ERR_PTR(ret);
6504 }
6505 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6506 
6507 int wlcore_free_hw(struct wl1271 *wl)
6508 {
6509 	/* Unblock any fwlog readers */
6510 	mutex_lock(&wl->mutex);
6511 	wl->fwlog_size = -1;
6512 	mutex_unlock(&wl->mutex);
6513 
6514 	wlcore_sysfs_free(wl);
6515 
6516 	kfree(wl->buffer_32);
6517 	kfree(wl->mbox);
6518 	free_page((unsigned long)wl->fwlog);
6519 	dev_kfree_skb(wl->dummy_packet);
6520 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6521 
6522 	wl1271_debugfs_exit(wl);
6523 
6524 	vfree(wl->fw);
6525 	wl->fw = NULL;
6526 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6527 	kfree(wl->nvs);
6528 	wl->nvs = NULL;
6529 
6530 	kfree(wl->raw_fw_status);
6531 	kfree(wl->fw_status);
6532 	kfree(wl->tx_res_if);
6533 	destroy_workqueue(wl->freezable_wq);
6534 
6535 	kfree(wl->priv);
6536 	ieee80211_free_hw(wl->hw);
6537 
6538 	return 0;
6539 }
6540 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6541 
6542 #ifdef CONFIG_PM
6543 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6544 	.flags = WIPHY_WOWLAN_ANY,
6545 	.n_patterns = WL1271_MAX_RX_FILTERS,
6546 	.pattern_min_len = 1,
6547 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6548 };
6549 #endif
6550 
6551 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6552 {
6553 	return IRQ_WAKE_THREAD;
6554 }
6555 
6556 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6557 {
6558 	struct wl1271 *wl = context;
6559 	struct platform_device *pdev = wl->pdev;
6560 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6561 	struct resource *res;
6562 
6563 	int ret;
6564 	irq_handler_t hardirq_fn = NULL;
6565 
6566 	if (fw) {
6567 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6568 		if (!wl->nvs) {
6569 			wl1271_error("Could not allocate nvs data");
6570 			goto out;
6571 		}
6572 		wl->nvs_len = fw->size;
6573 	} else if (pdev_data->family->nvs_name) {
6574 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6575 			     pdev_data->family->nvs_name);
6576 		wl->nvs = NULL;
6577 		wl->nvs_len = 0;
6578 	} else {
6579 		wl->nvs = NULL;
6580 		wl->nvs_len = 0;
6581 	}
6582 
6583 	ret = wl->ops->setup(wl);
6584 	if (ret < 0)
6585 		goto out_free_nvs;
6586 
6587 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6588 
6589 	/* adjust some runtime configuration parameters */
6590 	wlcore_adjust_conf(wl);
6591 
6592 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6593 	if (!res) {
6594 		wl1271_error("Could not get IRQ resource");
6595 		goto out_free_nvs;
6596 	}
6597 
6598 	wl->irq = res->start;
6599 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6600 	wl->if_ops = pdev_data->if_ops;
6601 
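	/*
	 * For edge-triggered interrupts a minimal primary handler wakes the
	 * IRQ thread immediately so edges are not missed; otherwise the line
	 * is kept masked via IRQF_ONESHOT until the threaded handler is done.
	 */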
6602 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6603 		hardirq_fn = wlcore_hardirq;
6604 	else
6605 		wl->irq_flags |= IRQF_ONESHOT;
6606 
6607 	ret = wl12xx_set_power_on(wl);
6608 	if (ret < 0)
6609 		goto out_free_nvs;
6610 
6611 	ret = wl12xx_get_hw_info(wl);
6612 	if (ret < 0) {
6613 		wl1271_error("couldn't get hw info");
6614 		wl1271_power_off(wl);
6615 		goto out_free_nvs;
6616 	}
6617 
6618 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6619 				   wl->irq_flags, pdev->name, wl);
6620 	if (ret < 0) {
6621 		wl1271_error("interrupt configuration failed");
6622 		wl1271_power_off(wl);
6623 		goto out_free_nvs;
6624 	}
6625 
6626 #ifdef CONFIG_PM
6627 	device_init_wakeup(wl->dev, true);
6628 
6629 	ret = enable_irq_wake(wl->irq);
6630 	if (!ret) {
6631 		wl->irq_wake_enabled = true;
6632 		if (pdev_data->pwr_in_suspend)
6633 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6634 	}
6635 
6636 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
6637 	if (res) {
6638 		wl->wakeirq = res->start;
6639 		wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
6640 		ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
6641 		if (ret)
6642 			wl->wakeirq = -ENODEV;
6643 	} else {
6644 		wl->wakeirq = -ENODEV;
6645 	}
6646 #endif
6647 	disable_irq(wl->irq);
6648 	wl1271_power_off(wl);
6649 
6650 	ret = wl->ops->identify_chip(wl);
6651 	if (ret < 0)
6652 		goto out_irq;
6653 
6654 	ret = wl1271_init_ieee80211(wl);
6655 	if (ret)
6656 		goto out_irq;
6657 
6658 	ret = wl1271_register_hw(wl);
6659 	if (ret)
6660 		goto out_irq;
6661 
6662 	ret = wlcore_sysfs_init(wl);
6663 	if (ret)
6664 		goto out_unreg;
6665 
6666 	wl->initialized = true;
6667 	goto out;
6668 
6669 out_unreg:
6670 	wl1271_unregister_hw(wl);
6671 
6672 out_irq:
6673 	if (wl->wakeirq >= 0)
6674 		dev_pm_clear_wake_irq(wl->dev);
6675 	device_init_wakeup(wl->dev, false);
6676 	free_irq(wl->irq, wl);
6677 
6678 out_free_nvs:
6679 	kfree(wl->nvs);
6680 
6681 out:
6682 	release_firmware(fw);
6683 	complete_all(&wl->nvs_loading_complete);
6684 }
6685 
6686 static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
6687 {
6688 	struct wl1271 *wl = dev_get_drvdata(dev);
6689 	struct wl12xx_vif *wlvif;
6690 	int error;
6691 
6692 	/* We do not enter elp sleep in PLT mode */
6693 	if (wl->plt)
6694 		return 0;
6695 
6696 	/* Nothing to do if no ELP mode requested */
6697 	if (wl->sleep_auth != WL1271_PSM_ELP)
6698 		return 0;
6699 
6700 	wl12xx_for_each_wlvif(wl, wlvif) {
6701 		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
6702 		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
6703 			return -EBUSY;
6704 	}
6705 
6706 	wl1271_debug(DEBUG_PSM, "chip to elp");
6707 	error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
6708 	if (error < 0) {
6709 		wl12xx_queue_recovery_work(wl);
6710 
6711 		return error;
6712 	}
6713 
6714 	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6715 
6716 	return 0;
6717 }
6718 
6719 static int __maybe_unused wlcore_runtime_resume(struct device *dev)
6720 {
6721 	struct wl1271 *wl = dev_get_drvdata(dev);
6722 	DECLARE_COMPLETION_ONSTACK(compl);
6723 	unsigned long flags;
6724 	int ret;
6725 	unsigned long start_time = jiffies;
6726 	bool recovery = false;
6727 
6728 	/* Nothing to do if no ELP mode requested */
6729 	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
6730 		return 0;
6731 
6732 	wl1271_debug(DEBUG_PSM, "waking up chip from elp");
6733 
6734 	spin_lock_irqsave(&wl->wl_lock, flags);
6735 	wl->elp_compl = &compl;
6736 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6737 
6738 	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
6739 	if (ret < 0) {
6740 		recovery = true;
6741 	} else if (!test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) {
6742 		ret = wait_for_completion_timeout(&compl,
6743 			msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
6744 		if (ret == 0) {
6745 			wl1271_warning("ELP wakeup timeout!");
6746 			recovery = true;
6747 		}
6748 	}
6749 
6750 	spin_lock_irqsave(&wl->wl_lock, flags);
6751 	wl->elp_compl = NULL;
6752 	spin_unlock_irqrestore(&wl->wl_lock, flags);
6753 	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
6754 
6755 	if (recovery) {
6756 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
6757 		wl12xx_queue_recovery_work(wl);
6758 	} else {
6759 		wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
6760 			     jiffies_to_msecs(jiffies - start_time));
6761 	}
6762 
6763 	return 0;
6764 }
6765 
6766 static const struct dev_pm_ops wlcore_pm_ops = {
6767 	SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
6768 			   wlcore_runtime_resume,
6769 			   NULL)
6770 };
6771 
6772 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6773 {
6774 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6775 	const char *nvs_name;
6776 	int ret = 0;
6777 
6778 	if (!wl->ops || !wl->ptable || !pdev_data)
6779 		return -EINVAL;
6780 
6781 	wl->dev = &pdev->dev;
6782 	wl->pdev = pdev;
6783 	platform_set_drvdata(pdev, wl);
6784 
6785 	if (pdev_data->family && pdev_data->family->nvs_name) {
6786 		nvs_name = pdev_data->family->nvs_name;
6787 		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6788 					      nvs_name, &pdev->dev, GFP_KERNEL,
6789 					      wl, wlcore_nvs_cb);
6790 		if (ret < 0) {
6791 			wl1271_error("request_firmware_nowait failed for %s: %d",
6792 				     nvs_name, ret);
6793 			complete_all(&wl->nvs_loading_complete);
6794 		}
6795 	} else {
6796 		wlcore_nvs_cb(NULL, wl);
6797 	}
6798 
6799 	wl->dev->driver->pm = &wlcore_pm_ops;
6800 	pm_runtime_set_autosuspend_delay(wl->dev, 50);
6801 	pm_runtime_use_autosuspend(wl->dev);
6802 	pm_runtime_enable(wl->dev);
6803 
6804 	return ret;
6805 }
6806 EXPORT_SYMBOL_GPL(wlcore_probe);
6807 
6808 int wlcore_remove(struct platform_device *pdev)
6809 {
6810 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6811 	struct wl1271 *wl = platform_get_drvdata(pdev);
6812 	int error;
6813 
6814 	error = pm_runtime_get_sync(wl->dev);
6815 	if (error < 0)
6816 		dev_warn(wl->dev, "PM runtime failed: %i\n", error);
6817 
6818 	wl->dev->driver->pm = NULL;
6819 
6820 	if (pdev_data->family && pdev_data->family->nvs_name)
6821 		wait_for_completion(&wl->nvs_loading_complete);
6822 	if (!wl->initialized)
6823 		return 0;
6824 
6825 	if (wl->wakeirq >= 0) {
6826 		dev_pm_clear_wake_irq(wl->dev);
6827 		wl->wakeirq = -ENODEV;
6828 	}
6829 
6830 	device_init_wakeup(wl->dev, false);
6831 
6832 	if (wl->irq_wake_enabled)
6833 		disable_irq_wake(wl->irq);
6834 
6835 	wl1271_unregister_hw(wl);
6836 
6837 	pm_runtime_put_sync(wl->dev);
6838 	pm_runtime_dont_use_autosuspend(wl->dev);
6839 	pm_runtime_disable(wl->dev);
6840 
6841 	free_irq(wl->irq, wl);
6842 	wlcore_free_hw(wl);
6843 
6844 	return 0;
6845 }
6846 EXPORT_SYMBOL_GPL(wlcore_remove);
6847 
6848 u32 wl12xx_debug_level = DEBUG_NONE;
6849 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6850 module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
6851 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6852 
6853 module_param_named(fwlog, fwlog_param, charp, 0);
6854 MODULE_PARM_DESC(fwlog,
6855 		 "FW logger options: continuous, dbgpins or disable");
6856 
6857 module_param(fwlog_mem_blocks, int, 0600);
6858 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6859 
6860 module_param(bug_on_recovery, int, 0600);
6861 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6862 
6863 module_param(no_recovery, int, 0600);
6864 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
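/*
 * Example usage (hypothetical): the parameters above can be set at module
 * load time, e.g.:
 *
 *   modprobe wlcore fwlog=continuous bug_on_recovery=1
 */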
6865 
6866 MODULE_LICENSE("GPL");
6867 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6868 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6869