1 
2 /*
3  * This file is part of wlcore
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  * Copyright (C) 2011-2013 Texas Instruments Inc.
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License
10  * version 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20  * 02110-1301 USA
21  *
22  */
23 
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/interrupt.h>
29 #include <linux/irq.h>
30 
31 #include "wlcore.h"
32 #include "debug.h"
33 #include "wl12xx_80211.h"
34 #include "io.h"
35 #include "tx.h"
36 #include "ps.h"
37 #include "init.h"
38 #include "debugfs.h"
39 #include "testmode.h"
40 #include "vendor_cmd.h"
41 #include "scan.h"
42 #include "hw_ops.h"
43 #include "sysfs.h"
44 
45 #define WL1271_BOOT_RETRIES 3
46 
47 static char *fwlog_param;
48 static int fwlog_mem_blocks = -1;
49 static int bug_on_recovery = -1;
50 static int no_recovery     = -1;
51 
52 static void __wl1271_op_remove_interface(struct wl1271 *wl,
53 					 struct ieee80211_vif *vif,
54 					 bool reset_tx_queues);
55 static void wlcore_op_stop_locked(struct wl1271 *wl);
56 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
57 
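/*
 * Tell the FW that the station link is fully connected. Only valid for
 * STA interfaces and sent at most once per association, guarded by the
 * WLVIF_FLAG_STA_STATE_SENT flag.
 */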
58 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
59 {
60 	int ret;
61 
62 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
63 		return -EINVAL;
64 
65 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
66 		return 0;
67 
68 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
69 		return 0;
70 
71 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
72 	if (ret < 0)
73 		return ret;
74 
75 	wl1271_info("Association completed.");
76 	return 0;
77 }
78 
79 static void wl1271_reg_notify(struct wiphy *wiphy,
80 			      struct regulatory_request *request)
81 {
82 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
83 	struct wl1271 *wl = hw->priv;
84 
85 	/* copy the current dfs region */
86 	if (request)
87 		wl->dfs_region = request->dfs_region;
88 
89 	wlcore_regdomain_config(wl);
90 }
91 
92 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
93 				   bool enable)
94 {
95 	int ret = 0;
96 
97 	/* we should hold wl->mutex */
98 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
99 	if (ret < 0)
100 		goto out;
101 
102 	if (enable)
103 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
104 	else
105 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
106 out:
107 	return ret;
108 }
109 
110 /*
111  * This function is called when the rx_streaming interval
112  * has been changed or rx_streaming should be disabled.
113  */
114 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
115 {
116 	int ret = 0;
117 	int period = wl->conf.rx_streaming.interval;
118 
119 	/* don't reconfigure if rx_streaming is disabled */
120 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
121 		goto out;
122 
123 	/* reconfigure/disable according to new streaming_period */
124 	if (period &&
125 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
126 	    (wl->conf.rx_streaming.always ||
127 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
128 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
129 	else {
130 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
131 		/* don't cancel_work_sync since we might deadlock */
132 		del_timer_sync(&wlvif->rx_streaming_timer);
133 	}
134 out:
135 	return ret;
136 }
137 
138 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
139 {
140 	int ret;
141 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
142 						rx_streaming_enable_work);
143 	struct wl1271 *wl = wlvif->wl;
144 
145 	mutex_lock(&wl->mutex);
146 
147 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
148 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
149 	    (!wl->conf.rx_streaming.always &&
150 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
151 		goto out;
152 
153 	if (!wl->conf.rx_streaming.interval)
154 		goto out;
155 
156 	ret = wl1271_ps_elp_wakeup(wl);
157 	if (ret < 0)
158 		goto out;
159 
160 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
161 	if (ret < 0)
162 		goto out_sleep;
163 
164 	/* stop it after some time of inactivity */
165 	mod_timer(&wlvif->rx_streaming_timer,
166 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
167 
168 out_sleep:
169 	wl1271_ps_elp_sleep(wl);
170 out:
171 	mutex_unlock(&wl->mutex);
172 }
173 
174 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
175 {
176 	int ret;
177 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
178 						rx_streaming_disable_work);
179 	struct wl1271 *wl = wlvif->wl;
180 
181 	mutex_lock(&wl->mutex);
182 
183 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
184 		goto out;
185 
186 	ret = wl1271_ps_elp_wakeup(wl);
187 	if (ret < 0)
188 		goto out;
189 
190 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
191 	if (ret)
192 		goto out_sleep;
193 
194 out_sleep:
195 	wl1271_ps_elp_sleep(wl);
196 out:
197 	mutex_unlock(&wl->mutex);
198 }
199 
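/*
 * rx_streaming inactivity timer: runs in atomic (timer) context, so it only
 * queues the disable work; the FW command itself is sent from the work item.
 */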
200 static void wl1271_rx_streaming_timer(unsigned long data)
201 {
202 	struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
203 	struct wl1271 *wl = wlvif->wl;
204 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
205 }
206 
207 /* wl->mutex must be taken */
208 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
209 {
210 	/* if the watchdog is not armed, don't do anything */
211 	if (wl->tx_allocated_blocks == 0)
212 		return;
213 
214 	cancel_delayed_work(&wl->tx_watchdog_work);
215 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
216 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
217 }
218 
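/*
 * Propagate a station rate-control update to the chip-specific driver from
 * process context, waking the chip from ELP first.
 */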
219 static void wlcore_rc_update_work(struct work_struct *work)
220 {
221 	int ret;
222 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
223 						rc_update_work);
224 	struct wl1271 *wl = wlvif->wl;
225 
226 	mutex_lock(&wl->mutex);
227 
228 	if (unlikely(wl->state != WLCORE_STATE_ON))
229 		goto out;
230 
231 	ret = wl1271_ps_elp_wakeup(wl);
232 	if (ret < 0)
233 		goto out;
234 
235 	wlcore_hw_sta_rc_update(wl, wlvif);
236 
237 	wl1271_ps_elp_sleep(wl);
238 out:
239 	mutex_unlock(&wl->mutex);
240 }
241 
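/*
 * Tx watchdog: if the FW has not freed any Tx blocks for tx_watchdog_timeout
 * ms and there is no benign explanation (ROC, a scan in progress, or an AP
 * buffering frames for sleeping stations), assume the FW is stuck and start
 * recovery.
 */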
242 static void wl12xx_tx_watchdog_work(struct work_struct *work)
243 {
244 	struct delayed_work *dwork;
245 	struct wl1271 *wl;
246 
247 	dwork = container_of(work, struct delayed_work, work);
248 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
249 
250 	mutex_lock(&wl->mutex);
251 
252 	if (unlikely(wl->state != WLCORE_STATE_ON))
253 		goto out;
254 
255 	/* Tx went out in the meantime - everything is ok */
256 	if (unlikely(wl->tx_allocated_blocks == 0))
257 		goto out;
258 
259 	/*
260 	 * if a ROC is in progress, we might not have any Tx for a long
261 	 * time (e.g. pending Tx on the non-ROC channels)
262 	 */
263 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
264 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
265 			     wl->conf.tx.tx_watchdog_timeout);
266 		wl12xx_rearm_tx_watchdog_locked(wl);
267 		goto out;
268 	}
269 
270 	/*
271 	 * if a scan is in progress, we might not have any Tx for a long
272 	 * time
273 	 */
274 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
275 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
276 			     wl->conf.tx.tx_watchdog_timeout);
277 		wl12xx_rearm_tx_watchdog_locked(wl);
278 		goto out;
279 	}
280 
281 	/*
282 	* AP might cache a frame for a long time for a sleeping station,
283 	* so rearm the timer if there's an AP interface with stations. If
284 	* Tx is genuinely stuck we will hopefully discover it when all
285 	* stations are removed due to inactivity.
286 	*/
287 	if (wl->active_sta_count) {
288 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
289 			     " %d stations",
290 			      wl->conf.tx.tx_watchdog_timeout,
291 			      wl->active_sta_count);
292 		wl12xx_rearm_tx_watchdog_locked(wl);
293 		goto out;
294 	}
295 
296 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
297 		     wl->conf.tx.tx_watchdog_timeout);
298 	wl12xx_queue_recovery_work(wl);
299 
300 out:
301 	mutex_unlock(&wl->mutex);
302 }
303 
304 static void wlcore_adjust_conf(struct wl1271 *wl)
305 {
306 	/* Adjust settings according to optional module parameters */
307 
308 	/* Firmware Logger params */
309 	if (fwlog_mem_blocks != -1) {
310 		if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
311 		    fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
312 			wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
313 		} else {
314 			wl1271_error(
315 				"Illegal fwlog_mem_blocks=%d using default %d",
316 				fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
317 		}
318 	}
319 
320 	if (fwlog_param) {
321 		if (!strcmp(fwlog_param, "continuous")) {
322 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
323 		} else if (!strcmp(fwlog_param, "ondemand")) {
324 			wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
325 		} else if (!strcmp(fwlog_param, "dbgpins")) {
326 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
327 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
328 		} else if (!strcmp(fwlog_param, "disable")) {
329 			wl->conf.fwlog.mem_blocks = 0;
330 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
331 		} else {
332 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
333 		}
334 	}
335 
336 	if (bug_on_recovery != -1)
337 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
338 
339 	if (no_recovery != -1)
340 		wl->conf.recovery.no_recovery = (u8) no_recovery;
341 }
342 
343 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
344 					struct wl12xx_vif *wlvif,
345 					u8 hlid, u8 tx_pkts)
346 {
347 	bool fw_ps;
348 
349 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
350 
351 	/*
352 	 * Wake up from high-level PS if the STA is asleep with too few
353 	 * packets in FW or if the STA is awake.
354 	 */
355 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
356 		wl12xx_ps_link_end(wl, wlvif, hlid);
357 
358 	/*
359 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
360 	 * Make an exception if this is the only connected link. In this
361 	 * case FW-memory congestion is less of a problem.
362 	 * Note that a single connected STA means 2*ap_count + 1 active links,
363 	 * since we must account for the global and broadcast AP links
364 	 * for each AP. The "fw_ps" check assures us the other link is a STA
365 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
366 	 */
367 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
368 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
369 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
370 }
371 
372 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
373 					   struct wl12xx_vif *wlvif,
374 					   struct wl_fw_status *status)
375 {
376 	unsigned long cur_fw_ps_map;
377 	u8 hlid;
378 
379 	cur_fw_ps_map = status->link_ps_bitmap;
380 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
381 		wl1271_debug(DEBUG_PSM,
382 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
383 			     wl->ap_fw_ps_map, cur_fw_ps_map,
384 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
385 
386 		wl->ap_fw_ps_map = cur_fw_ps_map;
387 	}
388 
389 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
390 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
391 					    wl->links[hlid].allocated_pkts);
392 }
393 
394 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
395 {
396 	struct wl12xx_vif *wlvif;
397 	struct timespec ts;
398 	u32 old_tx_blk_count = wl->tx_blocks_available;
399 	int avail, freed_blocks;
400 	int i;
401 	int ret;
402 	struct wl1271_link *lnk;
403 
404 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
405 				   wl->raw_fw_status,
406 				   wl->fw_status_len, false);
407 	if (ret < 0)
408 		return ret;
409 
410 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
411 
412 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
413 		     "drv_rx_counter = %d, tx_results_counter = %d)",
414 		     status->intr,
415 		     status->fw_rx_counter,
416 		     status->drv_rx_counter,
417 		     status->tx_results_counter);
418 
419 	for (i = 0; i < NUM_TX_QUEUES; i++) {
420 		/* prevent wrap-around in freed-packets counter */
421 		wl->tx_allocated_pkts[i] -=
422 				(status->counters.tx_released_pkts[i] -
423 				wl->tx_pkts_freed[i]) & 0xff;
424 
425 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
426 	}
427 
428 
429 	for_each_set_bit(i, wl->links_map, wl->num_links) {
430 		u8 diff;
431 		lnk = &wl->links[i];
432 
433 		/* prevent wrap-around in freed-packets counter */
434 		diff = (status->counters.tx_lnk_free_pkts[i] -
435 		       lnk->prev_freed_pkts) & 0xff;
436 
437 		if (diff == 0)
438 			continue;
439 
440 		lnk->allocated_pkts -= diff;
441 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
442 
443 		/* accumulate the prev_freed_pkts counter */
444 		lnk->total_freed_pkts += diff;
445 	}
446 
447 	/* prevent wrap-around in total blocks counter */
448 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
449 		freed_blocks = status->total_released_blks -
450 			       wl->tx_blocks_freed;
451 	else
452 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
453 			       status->total_released_blks;
454 
455 	wl->tx_blocks_freed = status->total_released_blks;
456 
457 	wl->tx_allocated_blocks -= freed_blocks;
458 
459 	/*
460 	 * If the FW freed some blocks:
461 	 * If we still have allocated blocks - re-arm the timer, Tx is
462 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
463 	 */
464 	if (freed_blocks) {
465 		if (wl->tx_allocated_blocks)
466 			wl12xx_rearm_tx_watchdog_locked(wl);
467 		else
468 			cancel_delayed_work(&wl->tx_watchdog_work);
469 	}
470 
471 	avail = status->tx_total - wl->tx_allocated_blocks;
472 
473 	/*
474 	 * The FW might change the total number of TX memblocks before
475 	 * we get a notification about blocks being released. Thus, the
476 	 * available blocks calculation might yield a temporary result
477 	 * which is lower than the actual available blocks. Keeping in
478 	 * mind that only blocks that were allocated can be moved from
479 	 * TX to RX, tx_blocks_available should never decrease here.
480 	 */
481 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
482 				      avail);
483 
484 	/* if more blocks are available now, tx work can be scheduled */
485 	if (wl->tx_blocks_available > old_tx_blk_count)
486 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
487 
488 	/* for AP update num of allocated TX blocks per link and ps status */
489 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
490 		wl12xx_irq_update_links_status(wl, wlvif, status);
491 	}
492 
493 	/* update the host-chipset time offset */
494 	getnstimeofday(&ts);
495 	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
496 		(s64)(status->fw_localtime);
497 
498 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
499 
500 	return 0;
501 }
502 
503 static void wl1271_flush_deferred_work(struct wl1271 *wl)
504 {
505 	struct sk_buff *skb;
506 
507 	/* Pass all received frames to the network stack */
508 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
509 		ieee80211_rx_ni(wl->hw, skb);
510 
511 	/* Return sent skbs to the network stack */
512 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
513 		ieee80211_tx_status_ni(wl->hw, skb);
514 }
515 
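/*
 * Hand deferred Rx/Tx skbs to mac80211 from process context; loop in case
 * more frames were deferred while we were flushing.
 */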
516 static void wl1271_netstack_work(struct work_struct *work)
517 {
518 	struct wl1271 *wl =
519 		container_of(work, struct wl1271, netstack_work);
520 
521 	do {
522 		wl1271_flush_deferred_work(wl);
523 	} while (skb_queue_len(&wl->deferred_rx_queue));
524 }
525 
526 #define WL1271_IRQ_MAX_LOOPS 256
527 
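/*
 * Main interrupt bottom half. Called with wl->mutex held; keeps polling the
 * FW status until no interrupt cause remains (a single pass only when the
 * IRQ is edge-triggered).
 */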
528 static int wlcore_irq_locked(struct wl1271 *wl)
529 {
530 	int ret = 0;
531 	u32 intr;
532 	int loopcount = WL1271_IRQ_MAX_LOOPS;
533 	bool done = false;
534 	unsigned int defer_count;
535 	unsigned long flags;
536 
537 	/*
538 	 * In case an edge-triggered interrupt must be used, we cannot iterate
539 	 * more than once without introducing race conditions with the hardirq.
540 	 */
541 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
542 		loopcount = 1;
543 
544 	wl1271_debug(DEBUG_IRQ, "IRQ work");
545 
546 	if (unlikely(wl->state != WLCORE_STATE_ON))
547 		goto out;
548 
549 	ret = wl1271_ps_elp_wakeup(wl);
550 	if (ret < 0)
551 		goto out;
552 
553 	while (!done && loopcount--) {
554 		/*
555 		 * In order to avoid a race with the hardirq, clear the flag
556 		 * before acknowledging the chip. Since the mutex is held,
557 		 * wl1271_ps_elp_wakeup cannot be called concurrently.
558 		 */
559 		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
560 		smp_mb__after_atomic();
561 
562 		ret = wlcore_fw_status(wl, wl->fw_status);
563 		if (ret < 0)
564 			goto out;
565 
566 		wlcore_hw_tx_immediate_compl(wl);
567 
568 		intr = wl->fw_status->intr;
569 		intr &= WLCORE_ALL_INTR_MASK;
570 		if (!intr) {
571 			done = true;
572 			continue;
573 		}
574 
575 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
576 			wl1271_error("HW watchdog interrupt received! starting recovery.");
577 			wl->watchdog_recovery = true;
578 			ret = -EIO;
579 
580 			/* restarting the chip. ignore any other interrupt. */
581 			goto out;
582 		}
583 
584 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
585 			wl1271_error("SW watchdog interrupt received! "
586 				     "starting recovery.");
587 			wl->watchdog_recovery = true;
588 			ret = -EIO;
589 
590 			/* restarting the chip. ignore any other interrupt. */
591 			goto out;
592 		}
593 
594 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
595 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
596 
597 			ret = wlcore_rx(wl, wl->fw_status);
598 			if (ret < 0)
599 				goto out;
600 
601 			/* Check if any tx blocks were freed */
602 			spin_lock_irqsave(&wl->wl_lock, flags);
603 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
604 			    wl1271_tx_total_queue_count(wl) > 0) {
605 				spin_unlock_irqrestore(&wl->wl_lock, flags);
606 				/*
607 				 * In order to avoid starvation of the TX path,
608 				 * call the work function directly.
609 				 */
610 				ret = wlcore_tx_work_locked(wl);
611 				if (ret < 0)
612 					goto out;
613 			} else {
614 				spin_unlock_irqrestore(&wl->wl_lock, flags);
615 			}
616 
617 			/* check for tx results */
618 			ret = wlcore_hw_tx_delayed_compl(wl);
619 			if (ret < 0)
620 				goto out;
621 
622 			/* Make sure the deferred queues don't get too long */
623 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
624 				      skb_queue_len(&wl->deferred_rx_queue);
625 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
626 				wl1271_flush_deferred_work(wl);
627 		}
628 
629 		if (intr & WL1271_ACX_INTR_EVENT_A) {
630 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
631 			ret = wl1271_event_handle(wl, 0);
632 			if (ret < 0)
633 				goto out;
634 		}
635 
636 		if (intr & WL1271_ACX_INTR_EVENT_B) {
637 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
638 			ret = wl1271_event_handle(wl, 1);
639 			if (ret < 0)
640 				goto out;
641 		}
642 
643 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
644 			wl1271_debug(DEBUG_IRQ,
645 				     "WL1271_ACX_INTR_INIT_COMPLETE");
646 
647 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
648 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
649 	}
650 
651 	wl1271_ps_elp_sleep(wl);
652 
653 out:
654 	return ret;
655 }
656 
657 static irqreturn_t wlcore_irq(int irq, void *cookie)
658 {
659 	int ret;
660 	unsigned long flags;
661 	struct wl1271 *wl = cookie;
662 
663 	/* complete the pending ELP wakeup completion */
664 	spin_lock_irqsave(&wl->wl_lock, flags);
665 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
666 	if (wl->elp_compl) {
667 		complete(wl->elp_compl);
668 		wl->elp_compl = NULL;
669 	}
670 
671 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
672 		/* don't enqueue a work right now. mark it as pending */
673 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
674 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
675 		disable_irq_nosync(wl->irq);
676 		pm_wakeup_event(wl->dev, 0);
677 		spin_unlock_irqrestore(&wl->wl_lock, flags);
678 		return IRQ_HANDLED;
679 	}
680 	spin_unlock_irqrestore(&wl->wl_lock, flags);
681 
682 	/* TX might be handled here, avoid redundant work */
683 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
684 	cancel_work_sync(&wl->tx_work);
685 
686 	mutex_lock(&wl->mutex);
687 
688 	ret = wlcore_irq_locked(wl);
689 	if (ret)
690 		wl12xx_queue_recovery_work(wl);
691 
692 	spin_lock_irqsave(&wl->wl_lock, flags);
693 	/* In case TX was not handled here, queue TX work */
694 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
695 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
696 	    wl1271_tx_total_queue_count(wl) > 0)
697 		ieee80211_queue_work(wl->hw, &wl->tx_work);
698 	spin_unlock_irqrestore(&wl->wl_lock, flags);
699 
700 	mutex_unlock(&wl->mutex);
701 
702 	return IRQ_HANDLED;
703 }
704 
705 struct vif_counter_data {
706 	u8 counter;
707 
708 	struct ieee80211_vif *cur_vif;
709 	bool cur_vif_running;
710 };
711 
712 static void wl12xx_vif_count_iter(void *data, u8 *mac,
713 				  struct ieee80211_vif *vif)
714 {
715 	struct vif_counter_data *counter = data;
716 
717 	counter->counter++;
718 	if (counter->cur_vif == vif)
719 		counter->cur_vif_running = true;
720 }
721 
722 /* caller must not hold wl->mutex, as it might deadlock */
723 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
724 			       struct ieee80211_vif *cur_vif,
725 			       struct vif_counter_data *data)
726 {
727 	memset(data, 0, sizeof(*data));
728 	data->cur_vif = cur_vif;
729 
730 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
731 					    wl12xx_vif_count_iter, data);
732 }
733 
734 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
735 {
736 	const struct firmware *fw;
737 	const char *fw_name;
738 	enum wl12xx_fw_type fw_type;
739 	int ret;
740 
741 	if (plt) {
742 		fw_type = WL12XX_FW_TYPE_PLT;
743 		fw_name = wl->plt_fw_name;
744 	} else {
745 		/*
746 		 * we can't call wl12xx_get_vif_count() here because
747 		 * wl->mutex is taken, so use the cached last_vif_count value
748 		 */
749 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
750 			fw_type = WL12XX_FW_TYPE_MULTI;
751 			fw_name = wl->mr_fw_name;
752 		} else {
753 			fw_type = WL12XX_FW_TYPE_NORMAL;
754 			fw_name = wl->sr_fw_name;
755 		}
756 	}
757 
758 	if (wl->fw_type == fw_type)
759 		return 0;
760 
761 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
762 
763 	ret = request_firmware(&fw, fw_name, wl->dev);
764 
765 	if (ret < 0) {
766 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
767 		return ret;
768 	}
769 
770 	if (fw->size % 4) {
771 		wl1271_error("firmware size is not multiple of 32 bits: %zu",
772 			     fw->size);
773 		ret = -EILSEQ;
774 		goto out;
775 	}
776 
777 	vfree(wl->fw);
778 	wl->fw_type = WL12XX_FW_TYPE_NONE;
779 	wl->fw_len = fw->size;
780 	wl->fw = vmalloc(wl->fw_len);
781 
782 	if (!wl->fw) {
783 		wl1271_error("could not allocate memory for the firmware");
784 		ret = -ENOMEM;
785 		goto out;
786 	}
787 
788 	memcpy(wl->fw, fw->data, wl->fw_len);
789 	ret = 0;
790 	wl->fw_type = fw_type;
791 out:
792 	release_firmware(fw);
793 
794 	return ret;
795 }
796 
797 void wl12xx_queue_recovery_work(struct wl1271 *wl)
798 {
799 	/* Avoid a recursive recovery */
800 	if (wl->state == WLCORE_STATE_ON) {
801 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
802 				  &wl->flags));
803 
804 		wl->state = WLCORE_STATE_RESTARTING;
805 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
806 		wl1271_ps_elp_wakeup(wl);
807 		wlcore_disable_interrupts_nosync(wl);
808 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
809 	}
810 }
811 
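/* Append a chunk of FW log to the host buffer exposed via sysfs (capped at PAGE_SIZE). */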
812 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
813 {
814 	size_t len;
815 
816 	/* Make sure we have enough room */
817 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
818 
819 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
820 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
821 	wl->fwlog_size += len;
822 
823 	return len;
824 }
825 
826 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
827 {
828 	struct wlcore_partition_set part, old_part;
829 	u32 addr;
830 	u32 offset;
831 	u32 end_of_log;
832 	u8 *block;
833 	int ret;
834 
835 	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
836 	    (wl->conf.fwlog.mem_blocks == 0))
837 		return;
838 
839 	wl1271_info("Reading FW panic log");
840 
841 	block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
842 	if (!block)
843 		return;
844 
845 	/*
846 	 * Make sure the chip is awake and the logger isn't active.
847 	 * Do not send a stop fwlog command if the fw has hung or if
848 	 * dbgpins are used (due to some fw bug).
849 	 */
850 	if (wl1271_ps_elp_wakeup(wl))
851 		goto out;
852 	if (!wl->watchdog_recovery &&
853 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
854 		wl12xx_cmd_stop_fwlog(wl);
855 
856 	/* Read the first memory block address */
857 	ret = wlcore_fw_status(wl, wl->fw_status);
858 	if (ret < 0)
859 		goto out;
860 
861 	addr = wl->fw_status->log_start_addr;
862 	if (!addr)
863 		goto out;
864 
865 	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
866 		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
867 		end_of_log = wl->fwlog_end;
868 	} else {
869 		offset = sizeof(addr);
870 		end_of_log = addr;
871 	}
872 
873 	old_part = wl->curr_part;
874 	memset(&part, 0, sizeof(part));
875 
876 	/* Traverse the memory blocks linked list */
877 	do {
878 		part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
879 		part.mem.size  = PAGE_SIZE;
880 
881 		ret = wlcore_set_partition(wl, &part);
882 		if (ret < 0) {
883 			wl1271_error("%s: set_partition start=0x%X size=%d",
884 				__func__, part.mem.start, part.mem.size);
885 			goto out;
886 		}
887 
888 		memset(block, 0, wl->fw_mem_block_size);
889 		ret = wlcore_read_hwaddr(wl, addr, block,
890 					wl->fw_mem_block_size, false);
891 
892 		if (ret < 0)
893 			goto out;
894 
895 		/*
896 		 * Memory blocks are linked to one another. The first 4 bytes
897 		 * of each memory block hold the hardware address of the next
898 		 * one. The last memory block points to the first one in
899 		 * on demand mode and is equal to 0x2000000 in continuous mode.
900 		 */
901 		addr = le32_to_cpup((__le32 *)block);
902 
903 		if (!wl12xx_copy_fwlog(wl, block + offset,
904 					wl->fw_mem_block_size - offset))
905 			break;
906 	} while (addr && (addr != end_of_log));
907 
908 	wake_up_interruptible(&wl->fwlog_waitq);
909 
910 out:
911 	kfree(block);
912 	wlcore_set_partition(wl, &old_part);
913 }
914 
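/*
 * Preserve the link's freed-packets counter in the station's private data so
 * Tx sequence numbers stay monotonic across a FW recovery; pad it to cover
 * packets whose completions were lost during the recovery.
 */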
915 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
916 				   u8 hlid, struct ieee80211_sta *sta)
917 {
918 	struct wl1271_station *wl_sta;
919 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
920 
921 	wl_sta = (void *)sta->drv_priv;
922 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
923 
924 	/*
925 	 * increment the initial seq number on recovery to account for
926 	 * transmitted packets that we haven't yet got in the FW status
927 	 */
928 	if (wlvif->encryption_type == KEY_GEM)
929 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
930 
931 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
932 		wl_sta->total_freed_pkts += sqn_recovery_padding;
933 }
934 
935 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
936 					struct wl12xx_vif *wlvif,
937 					u8 hlid, const u8 *addr)
938 {
939 	struct ieee80211_sta *sta;
940 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
941 
942 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
943 		    is_zero_ether_addr(addr)))
944 		return;
945 
946 	rcu_read_lock();
947 	sta = ieee80211_find_sta(vif, addr);
948 	if (sta)
949 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
950 	rcu_read_unlock();
951 }
952 
953 static void wlcore_print_recovery(struct wl1271 *wl)
954 {
955 	u32 pc = 0;
956 	u32 hint_sts = 0;
957 	int ret;
958 
959 	wl1271_info("Hardware recovery in progress. FW ver: %s",
960 		    wl->chip.fw_ver_str);
961 
962 	/* change partitions momentarily so we can read the FW pc */
963 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
964 	if (ret < 0)
965 		return;
966 
967 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
968 	if (ret < 0)
969 		return;
970 
971 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
972 	if (ret < 0)
973 		return;
974 
975 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
976 				pc, hint_sts, ++wl->recovery_count);
977 
978 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
979 }
980 
981 
982 static void wl1271_recovery_work(struct work_struct *work)
983 {
984 	struct wl1271 *wl =
985 		container_of(work, struct wl1271, recovery_work);
986 	struct wl12xx_vif *wlvif;
987 	struct ieee80211_vif *vif;
988 
989 	mutex_lock(&wl->mutex);
990 
991 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
992 		goto out_unlock;
993 
994 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
995 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
996 			wl12xx_read_fwlog_panic(wl);
997 		wlcore_print_recovery(wl);
998 	}
999 
1000 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
1001 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
1002 
1003 	if (wl->conf.recovery.no_recovery) {
1004 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
1005 		goto out_unlock;
1006 	}
1007 
1008 	/* Prevent spurious TX during FW restart */
1009 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1010 
1011 	/* reboot the chipset */
1012 	while (!list_empty(&wl->wlvif_list)) {
1013 		wlvif = list_first_entry(&wl->wlvif_list,
1014 				       struct wl12xx_vif, list);
1015 		vif = wl12xx_wlvif_to_vif(wlvif);
1016 
1017 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
1018 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
1019 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
1020 						    vif->bss_conf.bssid);
1021 		}
1022 
1023 		__wl1271_op_remove_interface(wl, vif, false);
1024 	}
1025 
1026 	wlcore_op_stop_locked(wl);
1027 
1028 	ieee80211_restart_hw(wl->hw);
1029 
1030 	/*
1031 	 * It's safe to enable TX now - the queues are stopped after a request
1032 	 * to restart the HW.
1033 	 */
1034 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1035 
1036 out_unlock:
1037 	wl->watchdog_recovery = false;
1038 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1039 	mutex_unlock(&wl->mutex);
1040 }
1041 
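/* Ask the chip to exit ELP by writing the wake-up bit to the ELP control register. */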
1042 static int wlcore_fw_wakeup(struct wl1271 *wl)
1043 {
1044 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1045 }
1046 
1047 static int wl1271_setup(struct wl1271 *wl)
1048 {
1049 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1050 	if (!wl->raw_fw_status)
1051 		goto err;
1052 
1053 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1054 	if (!wl->fw_status)
1055 		goto err;
1056 
1057 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1058 	if (!wl->tx_res_if)
1059 		goto err;
1060 
1061 	return 0;
1062 err:
1063 	kfree(wl->fw_status);
1064 	kfree(wl->raw_fw_status);
1065 	return -ENOMEM;
1066 }
1067 
1068 static int wl12xx_set_power_on(struct wl1271 *wl)
1069 {
1070 	int ret;
1071 
1072 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1073 	ret = wl1271_power_on(wl);
1074 	if (ret < 0)
1075 		goto out;
1076 	msleep(WL1271_POWER_ON_SLEEP);
1077 	wl1271_io_reset(wl);
1078 	wl1271_io_init(wl);
1079 
1080 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1081 	if (ret < 0)
1082 		goto fail;
1083 
1084 	/* ELP module wake up */
1085 	ret = wlcore_fw_wakeup(wl);
1086 	if (ret < 0)
1087 		goto fail;
1088 
1089 out:
1090 	return ret;
1091 
1092 fail:
1093 	wl1271_power_off(wl);
1094 	return ret;
1095 }
1096 
1097 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1098 {
1099 	int ret = 0;
1100 
1101 	ret = wl12xx_set_power_on(wl);
1102 	if (ret < 0)
1103 		goto out;
1104 
1105 	/*
1106 	 * For wl127x based devices we could use the default block
1107 	 * size (512 bytes), but due to a bug in the sdio driver, we
1108 	 * need to set it explicitly after the chip is powered on.  To
1109 	 * simplify the code and since the performance impact is
1110 	 * negligible, we use the same block size for all different
1111 	 * chip types.
1112 	 *
1113 	 * Check if the bus supports blocksize alignment and, if it
1114 	 * doesn't, make sure we don't have the quirk.
1115 	 */
1116 	if (!wl1271_set_block_size(wl))
1117 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1118 
1119 	/* TODO: make sure the lower driver has set things up correctly */
1120 
1121 	ret = wl1271_setup(wl);
1122 	if (ret < 0)
1123 		goto out;
1124 
1125 	ret = wl12xx_fetch_firmware(wl, plt);
1126 	if (ret < 0) {
1127 		kfree(wl->fw_status);
1128 		kfree(wl->raw_fw_status);
1129 		kfree(wl->tx_res_if);
1130 	}
1131 
1132 out:
1133 	return ret;
1134 }
1135 
1136 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1137 {
1138 	int retries = WL1271_BOOT_RETRIES;
1139 	struct wiphy *wiphy = wl->hw->wiphy;
1140 
1141 	static const char* const PLT_MODE[] = {
1142 		"PLT_OFF",
1143 		"PLT_ON",
1144 		"PLT_FEM_DETECT",
1145 		"PLT_CHIP_AWAKE"
1146 	};
1147 
1148 	int ret;
1149 
1150 	mutex_lock(&wl->mutex);
1151 
1152 	wl1271_notice("power up");
1153 
1154 	if (wl->state != WLCORE_STATE_OFF) {
1155 		wl1271_error("cannot go into PLT state because not "
1156 			     "in off state: %d", wl->state);
1157 		ret = -EBUSY;
1158 		goto out;
1159 	}
1160 
1161 	/* Indicate to lower levels that we are now in PLT mode */
1162 	wl->plt = true;
1163 	wl->plt_mode = plt_mode;
1164 
1165 	while (retries) {
1166 		retries--;
1167 		ret = wl12xx_chip_wakeup(wl, true);
1168 		if (ret < 0)
1169 			goto power_off;
1170 
1171 		if (plt_mode != PLT_CHIP_AWAKE) {
1172 			ret = wl->ops->plt_init(wl);
1173 			if (ret < 0)
1174 				goto power_off;
1175 		}
1176 
1177 		wl->state = WLCORE_STATE_ON;
1178 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1179 			      PLT_MODE[plt_mode],
1180 			      wl->chip.fw_ver_str);
1181 
1182 		/* update hw/fw version info in wiphy struct */
1183 		wiphy->hw_version = wl->chip.id;
1184 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1185 			sizeof(wiphy->fw_version));
1186 
1187 		goto out;
1188 
1189 power_off:
1190 		wl1271_power_off(wl);
1191 	}
1192 
1193 	wl->plt = false;
1194 	wl->plt_mode = PLT_OFF;
1195 
1196 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1197 		     WL1271_BOOT_RETRIES);
1198 out:
1199 	mutex_unlock(&wl->mutex);
1200 
1201 	return ret;
1202 }
1203 
1204 int wl1271_plt_stop(struct wl1271 *wl)
1205 {
1206 	int ret = 0;
1207 
1208 	wl1271_notice("power down");
1209 
1210 	/*
1211 	 * Interrupts must be disabled before setting the state to OFF.
1212 	 * Otherwise, the interrupt handler might be called and exit without
1213 	 * reading the interrupt status.
1214 	 */
1215 	wlcore_disable_interrupts(wl);
1216 	mutex_lock(&wl->mutex);
1217 	if (!wl->plt) {
1218 		mutex_unlock(&wl->mutex);
1219 
1220 		/*
1221 		 * This will not necessarily enable interrupts as interrupts
1222 		 * may have been disabled when op_stop was called. It will,
1223 		 * however, balance the above call to disable_interrupts().
1224 		 */
1225 		wlcore_enable_interrupts(wl);
1226 
1227 		wl1271_error("cannot power down because not in PLT "
1228 			     "state: %d", wl->state);
1229 		ret = -EBUSY;
1230 		goto out;
1231 	}
1232 
1233 	mutex_unlock(&wl->mutex);
1234 
1235 	wl1271_flush_deferred_work(wl);
1236 	cancel_work_sync(&wl->netstack_work);
1237 	cancel_work_sync(&wl->recovery_work);
1238 	cancel_delayed_work_sync(&wl->elp_work);
1239 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1240 
1241 	mutex_lock(&wl->mutex);
1242 	wl1271_power_off(wl);
1243 	wl->flags = 0;
1244 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1245 	wl->state = WLCORE_STATE_OFF;
1246 	wl->plt = false;
1247 	wl->plt_mode = PLT_OFF;
1248 	wl->rx_counter = 0;
1249 	mutex_unlock(&wl->mutex);
1250 
1251 out:
1252 	return ret;
1253 }
1254 
1255 static void wl1271_op_tx(struct ieee80211_hw *hw,
1256 			 struct ieee80211_tx_control *control,
1257 			 struct sk_buff *skb)
1258 {
1259 	struct wl1271 *wl = hw->priv;
1260 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1261 	struct ieee80211_vif *vif = info->control.vif;
1262 	struct wl12xx_vif *wlvif = NULL;
1263 	unsigned long flags;
1264 	int q, mapping;
1265 	u8 hlid;
1266 
1267 	if (!vif) {
1268 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1269 		ieee80211_free_txskb(hw, skb);
1270 		return;
1271 	}
1272 
1273 	wlvif = wl12xx_vif_to_data(vif);
1274 	mapping = skb_get_queue_mapping(skb);
1275 	q = wl1271_tx_get_queue(mapping);
1276 
1277 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1278 
1279 	spin_lock_irqsave(&wl->wl_lock, flags);
1280 
1281 	/*
1282 	 * drop the packet if the link is invalid or the queue is stopped
1283 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1284 	 * allow these packets through.
1285 	 */
1286 	if (hlid == WL12XX_INVALID_LINK_ID ||
1287 	    (!test_bit(hlid, wlvif->links_map)) ||
1288 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1289 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1290 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1291 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1292 		ieee80211_free_txskb(hw, skb);
1293 		goto out;
1294 	}
1295 
1296 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1297 		     hlid, q, skb->len);
1298 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1299 
1300 	wl->tx_queue_count[q]++;
1301 	wlvif->tx_queue_count[q]++;
1302 
1303 	/*
1304 	 * The workqueue is slow to process the tx_queue and we need to stop
1305 	 * the queue here, otherwise the queue will get too long.
1306 	 */
1307 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1308 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1309 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1310 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1311 		wlcore_stop_queue_locked(wl, wlvif, q,
1312 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1313 	}
1314 
1315 	/*
1316 	 * The chip specific setup must run before the first TX packet -
1317 	 * before that, the tx_work will not be initialized!
1318 	 */
1319 
1320 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1321 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1322 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1323 
1324 out:
1325 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1326 }
1327 
1328 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1329 {
1330 	unsigned long flags;
1331 	int q;
1332 
1333 	/* no need to queue a new dummy packet if one is already pending */
1334 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1335 		return 0;
1336 
1337 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1338 
1339 	spin_lock_irqsave(&wl->wl_lock, flags);
1340 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1341 	wl->tx_queue_count[q]++;
1342 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1343 
1344 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1345 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1346 		return wlcore_tx_work_locked(wl);
1347 
1348 	/*
1349 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1350 	 * interrupt handler function
1351 	 */
1352 	return 0;
1353 }
1354 
1355 /*
1356  * The size of the dummy packet should be at least 1400 bytes. However, in
1357  * order to minimize the number of bus transactions, aligning it to 512-byte
1358  * boundaries could be beneficial, performance-wise.
1359  */
1360 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1361 
1362 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1363 {
1364 	struct sk_buff *skb;
1365 	struct ieee80211_hdr_3addr *hdr;
1366 	unsigned int dummy_packet_size;
1367 
1368 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1369 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1370 
1371 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1372 	if (!skb) {
1373 		wl1271_warning("Failed to allocate a dummy packet skb");
1374 		return NULL;
1375 	}
1376 
1377 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1378 
1379 	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1380 	memset(hdr, 0, sizeof(*hdr));
1381 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1382 					 IEEE80211_STYPE_NULLFUNC |
1383 					 IEEE80211_FCTL_TODS);
1384 
1385 	memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1386 
1387 	/* Dummy packets require the TID to be management */
1388 	skb->priority = WL1271_TID_MGMT;
1389 
1390 	/* Initialize all fields that might be used */
1391 	skb_set_queue_mapping(skb, 0);
1392 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1393 
1394 	return skb;
1395 }
1396 
1397 
1398 #ifdef CONFIG_PM
1399 static int
1400 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1401 {
1402 	int num_fields = 0, in_field = 0, fields_size = 0;
1403 	int i, pattern_len = 0;
1404 
1405 	if (!p->mask) {
1406 		wl1271_warning("No mask in WoWLAN pattern");
1407 		return -EINVAL;
1408 	}
1409 
1410 	/*
1411 	 * The pattern is broken up into segments of bytes at different offsets
1412 	 * that need to be checked by the FW filter. Each segment is called
1413 	 * a field in the FW API. We verify that the total number of fields
1414 	 * required for this pattern won't exceed FW limits (8)
1415  * and that the total fields buffer won't exceed the FW limit.
1416 	 * Note that if there's a pattern which crosses Ethernet/IP header
1417 	 * boundary a new field is required.
1418 	 */
1419 	for (i = 0; i < p->pattern_len; i++) {
1420 		if (test_bit(i, (unsigned long *)p->mask)) {
1421 			if (!in_field) {
1422 				in_field = 1;
1423 				pattern_len = 1;
1424 			} else {
1425 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1426 					num_fields++;
1427 					fields_size += pattern_len +
1428 						RX_FILTER_FIELD_OVERHEAD;
1429 					pattern_len = 1;
1430 				} else
1431 					pattern_len++;
1432 			}
1433 		} else {
1434 			if (in_field) {
1435 				in_field = 0;
1436 				fields_size += pattern_len +
1437 					RX_FILTER_FIELD_OVERHEAD;
1438 				num_fields++;
1439 			}
1440 		}
1441 	}
1442 
1443 	if (in_field) {
1444 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1445 		num_fields++;
1446 	}
1447 
1448 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1449 		wl1271_warning("RX Filter too complex. Too many segments");
1450 		return -EINVAL;
1451 	}
1452 
1453 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1454 		wl1271_warning("RX filter pattern is too big");
1455 		return -E2BIG;
1456 	}
1457 
1458 	return 0;
1459 }
1460 
1461 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1462 {
1463 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1464 }
1465 
1466 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1467 {
1468 	int i;
1469 
1470 	if (filter == NULL)
1471 		return;
1472 
1473 	for (i = 0; i < filter->num_fields; i++)
1474 		kfree(filter->fields[i].pattern);
1475 
1476 	kfree(filter);
1477 }
1478 
1479 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1480 				 u16 offset, u8 flags,
1481 				 const u8 *pattern, u8 len)
1482 {
1483 	struct wl12xx_rx_filter_field *field;
1484 
1485 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1486 		wl1271_warning("Max fields per RX filter. can't alloc another");
1487 		return -EINVAL;
1488 	}
1489 
1490 	field = &filter->fields[filter->num_fields];
1491 
1492 	field->pattern = kzalloc(len, GFP_KERNEL);
1493 	if (!field->pattern) {
1494 		wl1271_warning("Failed to allocate RX filter pattern");
1495 		return -ENOMEM;
1496 	}
1497 
1498 	filter->num_fields++;
1499 
1500 	field->offset = cpu_to_le16(offset);
1501 	field->flags = flags;
1502 	field->len = len;
1503 	memcpy(field->pattern, pattern, len);
1504 
1505 	return 0;
1506 }
1507 
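/*
 * Size of the filter fields once flattened for the FW: each field is
 * serialized as its header (without the pattern pointer) immediately
 * followed by the pattern bytes; see wl1271_rx_filter_flatten_fields().
 */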
1508 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1509 {
1510 	int i, fields_size = 0;
1511 
1512 	for (i = 0; i < filter->num_fields; i++)
1513 		fields_size += filter->fields[i].len +
1514 			sizeof(struct wl12xx_rx_filter_field) -
1515 			sizeof(u8 *);
1516 
1517 	return fields_size;
1518 }
1519 
1520 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1521 				    u8 *buf)
1522 {
1523 	int i;
1524 	struct wl12xx_rx_filter_field *field;
1525 
1526 	for (i = 0; i < filter->num_fields; i++) {
1527 		field = (struct wl12xx_rx_filter_field *)buf;
1528 
1529 		field->offset = filter->fields[i].offset;
1530 		field->flags = filter->fields[i].flags;
1531 		field->len = filter->fields[i].len;
1532 
1533 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1534 		buf += sizeof(struct wl12xx_rx_filter_field) -
1535 			sizeof(u8 *) + field->len;
1536 	}
1537 }
1538 
1539 /*
1540  * Allocates an RX filter returned through f
1541  * which needs to be freed using rx_filter_free()
1542  */
1543 static int
1544 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1545 					   struct wl12xx_rx_filter **f)
1546 {
1547 	int i, j, ret = 0;
1548 	struct wl12xx_rx_filter *filter;
1549 	u16 offset;
1550 	u8 flags, len;
1551 
1552 	filter = wl1271_rx_filter_alloc();
1553 	if (!filter) {
1554 		wl1271_warning("Failed to alloc rx filter");
1555 		ret = -ENOMEM;
1556 		goto err;
1557 	}
1558 
1559 	i = 0;
1560 	while (i < p->pattern_len) {
1561 		if (!test_bit(i, (unsigned long *)p->mask)) {
1562 			i++;
1563 			continue;
1564 		}
1565 
1566 		for (j = i; j < p->pattern_len; j++) {
1567 			if (!test_bit(j, (unsigned long *)p->mask))
1568 				break;
1569 
1570 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1571 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1572 				break;
1573 		}
1574 
1575 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1576 			offset = i;
1577 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1578 		} else {
1579 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1580 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1581 		}
1582 
1583 		len = j - i;
1584 
1585 		ret = wl1271_rx_filter_alloc_field(filter,
1586 						   offset,
1587 						   flags,
1588 						   &p->pattern[i], len);
1589 		if (ret)
1590 			goto err;
1591 
1592 		i = j;
1593 	}
1594 
1595 	filter->action = FILTER_SIGNAL;
1596 
1597 	*f = filter;
1598 	return 0;
1599 
1600 err:
1601 	wl1271_rx_filter_free(filter);
1602 	*f = NULL;
1603 
1604 	return ret;
1605 }
1606 
1607 static int wl1271_configure_wowlan(struct wl1271 *wl,
1608 				   struct cfg80211_wowlan *wow)
1609 {
1610 	int i, ret;
1611 
1612 	if (!wow || wow->any || !wow->n_patterns) {
1613 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1614 							  FILTER_SIGNAL);
1615 		if (ret)
1616 			goto out;
1617 
1618 		ret = wl1271_rx_filter_clear_all(wl);
1619 		if (ret)
1620 			goto out;
1621 
1622 		return 0;
1623 	}
1624 
1625 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1626 		return -EINVAL;
1627 
1628 	/* Validate all incoming patterns before clearing current FW state */
1629 	for (i = 0; i < wow->n_patterns; i++) {
1630 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1631 		if (ret) {
1632 			wl1271_warning("Bad wowlan pattern %d", i);
1633 			return ret;
1634 		}
1635 	}
1636 
1637 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1638 	if (ret)
1639 		goto out;
1640 
1641 	ret = wl1271_rx_filter_clear_all(wl);
1642 	if (ret)
1643 		goto out;
1644 
1645 	/* Translate WoWLAN patterns into filters */
1646 	for (i = 0; i < wow->n_patterns; i++) {
1647 		struct cfg80211_pkt_pattern *p;
1648 		struct wl12xx_rx_filter *filter = NULL;
1649 
1650 		p = &wow->patterns[i];
1651 
1652 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1653 		if (ret) {
1654 			wl1271_warning("Failed to create an RX filter from "
1655 				       "wowlan pattern %d", i);
1656 			goto out;
1657 		}
1658 
1659 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1660 
1661 		wl1271_rx_filter_free(filter);
1662 		if (ret)
1663 			goto out;
1664 	}
1665 
1666 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1667 
1668 out:
1669 	return ret;
1670 }
1671 
1672 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1673 					struct wl12xx_vif *wlvif,
1674 					struct cfg80211_wowlan *wow)
1675 {
1676 	int ret = 0;
1677 
1678 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1679 		goto out;
1680 
1681 	ret = wl1271_configure_wowlan(wl, wow);
1682 	if (ret < 0)
1683 		goto out;
1684 
1685 	if ((wl->conf.conn.suspend_wake_up_event ==
1686 	     wl->conf.conn.wake_up_event) &&
1687 	    (wl->conf.conn.suspend_listen_interval ==
1688 	     wl->conf.conn.listen_interval))
1689 		goto out;
1690 
1691 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1692 				    wl->conf.conn.suspend_wake_up_event,
1693 				    wl->conf.conn.suspend_listen_interval);
1694 
1695 	if (ret < 0)
1696 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1697 out:
1698 	return ret;
1699 
1700 }
1701 
1702 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1703 					struct wl12xx_vif *wlvif,
1704 					struct cfg80211_wowlan *wow)
1705 {
1706 	int ret = 0;
1707 
1708 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1709 		goto out;
1710 
1711 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1712 	if (ret < 0)
1713 		goto out;
1714 
1715 	ret = wl1271_configure_wowlan(wl, wow);
1716 	if (ret < 0)
1717 		goto out;
1718 
1719 out:
1720 	return ret;
1721 
1722 }
1723 
1724 static int wl1271_configure_suspend(struct wl1271 *wl,
1725 				    struct wl12xx_vif *wlvif,
1726 				    struct cfg80211_wowlan *wow)
1727 {
1728 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1729 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1730 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1731 		return wl1271_configure_suspend_ap(wl, wlvif, wow);
1732 	return 0;
1733 }
1734 
1735 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1736 {
1737 	int ret = 0;
1738 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1739 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1740 
1741 	if ((!is_ap) && (!is_sta))
1742 		return;
1743 
1744 	if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
1745 	    (is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
1746 		return;
1747 
1748 	wl1271_configure_wowlan(wl, NULL);
1749 
1750 	if (is_sta) {
1751 		if ((wl->conf.conn.suspend_wake_up_event ==
1752 		     wl->conf.conn.wake_up_event) &&
1753 		    (wl->conf.conn.suspend_listen_interval ==
1754 		     wl->conf.conn.listen_interval))
1755 			return;
1756 
1757 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1758 				    wl->conf.conn.wake_up_event,
1759 				    wl->conf.conn.listen_interval);
1760 
1761 		if (ret < 0)
1762 			wl1271_error("resume: wake up conditions failed: %d",
1763 				     ret);
1764 
1765 	} else if (is_ap) {
1766 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1767 	}
1768 }
1769 
1770 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1771 			    struct cfg80211_wowlan *wow)
1772 {
1773 	struct wl1271 *wl = hw->priv;
1774 	struct wl12xx_vif *wlvif;
1775 	int ret;
1776 
1777 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1778 	WARN_ON(!wow);
1779 
1780 	/* we want to perform the recovery before suspending */
1781 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1782 		wl1271_warning("postponing suspend to perform recovery");
1783 		return -EBUSY;
1784 	}
1785 
1786 	wl1271_tx_flush(wl);
1787 
1788 	mutex_lock(&wl->mutex);
1789 
1790 	ret = wl1271_ps_elp_wakeup(wl);
1791 	if (ret < 0) {
1792 		mutex_unlock(&wl->mutex);
1793 		return ret;
1794 	}
1795 
1796 	wl->wow_enabled = true;
1797 	wl12xx_for_each_wlvif(wl, wlvif) {
1798 		if (wlcore_is_p2p_mgmt(wlvif))
1799 			continue;
1800 
1801 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1802 		if (ret < 0) {
1803 			mutex_unlock(&wl->mutex);
1804 			wl1271_warning("couldn't prepare device to suspend");
1805 			return ret;
1806 		}
1807 	}
1808 
1809 	/* disable fast link flow control notifications from FW */
1810 	ret = wlcore_hw_interrupt_notify(wl, false);
1811 	if (ret < 0)
1812 		goto out_sleep;
1813 
1814 	/* if filtering is enabled, configure the FW to drop all RX BA frames */
1815 	ret = wlcore_hw_rx_ba_filter(wl,
1816 				     !!wl->conf.conn.suspend_rx_ba_activity);
1817 	if (ret < 0)
1818 		goto out_sleep;
1819 
1820 out_sleep:
1821 	wl1271_ps_elp_sleep(wl);
1822 	mutex_unlock(&wl->mutex);
1823 
1824 	if (ret < 0) {
1825 		wl1271_warning("couldn't prepare device to suspend");
1826 		return ret;
1827 	}
1828 
1829 	/* flush any remaining work */
1830 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1831 
1832 	/*
1833 	 * disable and re-enable interrupts in order to flush
1834 	 * the threaded_irq
1835 	 */
1836 	wlcore_disable_interrupts(wl);
1837 
1838 	/*
1839 	 * set suspended flag to avoid triggering a new threaded_irq
1840 	 * work. no need for spinlock as interrupts are disabled.
1841 	 */
1842 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1843 
1844 	wlcore_enable_interrupts(wl);
1845 	flush_work(&wl->tx_work);
1846 	flush_delayed_work(&wl->elp_work);
1847 
1848 	/*
1849 	 * Cancel the watchdog even if above tx_flush failed. We will detect
1850 	 * it on resume anyway.
1851 	 */
1852 	cancel_delayed_work(&wl->tx_watchdog_work);
1853 
1854 	return 0;
1855 }
1856 
1857 static int wl1271_op_resume(struct ieee80211_hw *hw)
1858 {
1859 	struct wl1271 *wl = hw->priv;
1860 	struct wl12xx_vif *wlvif;
1861 	unsigned long flags;
1862 	bool run_irq_work = false, pending_recovery;
1863 	int ret;
1864 
1865 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1866 		     wl->wow_enabled);
1867 	WARN_ON(!wl->wow_enabled);
1868 
1869 	/*
1870 	 * re-enable irq_work enqueuing, and call irq_work directly if
1871 	 * there is pending work.
1872 	 */
1873 	spin_lock_irqsave(&wl->wl_lock, flags);
1874 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1875 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1876 		run_irq_work = true;
1877 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1878 
1879 	mutex_lock(&wl->mutex);
1880 
1881 	/* test the recovery flag before calling any SDIO functions */
1882 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1883 				    &wl->flags);
1884 
1885 	if (run_irq_work) {
1886 		wl1271_debug(DEBUG_MAC80211,
1887 			     "run postponed irq_work directly");
1888 
1889 		/* don't talk to the HW if recovery is pending */
1890 		if (!pending_recovery) {
1891 			ret = wlcore_irq_locked(wl);
1892 			if (ret)
1893 				wl12xx_queue_recovery_work(wl);
1894 		}
1895 
1896 		wlcore_enable_interrupts(wl);
1897 	}
1898 
1899 	if (pending_recovery) {
1900 		wl1271_warning("queuing forgotten recovery on resume");
1901 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1902 		goto out_sleep;
1903 	}
1904 
1905 	ret = wl1271_ps_elp_wakeup(wl);
1906 	if (ret < 0)
1907 		goto out;
1908 
1909 	wl12xx_for_each_wlvif(wl, wlvif) {
1910 		if (wlcore_is_p2p_mgmt(wlvif))
1911 			continue;
1912 
1913 		wl1271_configure_resume(wl, wlvif);
1914 	}
1915 
1916 	ret = wlcore_hw_interrupt_notify(wl, true);
1917 	if (ret < 0)
1918 		goto out_sleep;
1919 
1920 	/* disable any RX BA frame filtering that was configured at suspend time */
1921 	ret = wlcore_hw_rx_ba_filter(wl, false);
1922 	if (ret < 0)
1923 		goto out_sleep;
1924 
1925 out_sleep:
1926 	wl1271_ps_elp_sleep(wl);
1927 
1928 out:
1929 	wl->wow_enabled = false;
1930 
1931 	/*
1932 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1933 	 * That way we avoid possible conditions where Tx-complete interrupts
1934 	 * fail to arrive and we perform a spurious recovery.
1935 	 */
1936 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1937 	mutex_unlock(&wl->mutex);
1938 
1939 	return 0;
1940 }
1941 #endif
1942 
1943 static int wl1271_op_start(struct ieee80211_hw *hw)
1944 {
1945 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1946 
1947 	/*
1948 	 * We have to delay the booting of the hardware because
1949 	 * we need to know the local MAC address before downloading and
1950 	 * initializing the firmware. The MAC address cannot be changed
1951 	 * after boot, and without the proper MAC address, the firmware
1952 	 * will not function properly.
1953 	 *
1954 	 * The MAC address is first known when the corresponding interface
1955 	 * is added. That is where we will initialize the hardware.
1956 	 */
1957 
1958 	return 0;
1959 }
1960 
1961 static void wlcore_op_stop_locked(struct wl1271 *wl)
1962 {
1963 	int i;
1964 
1965 	if (wl->state == WLCORE_STATE_OFF) {
1966 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1967 					&wl->flags))
1968 			wlcore_enable_interrupts(wl);
1969 
1970 		return;
1971 	}
1972 
1973 	/*
1974 	 * this must be before the cancel_work calls below, so that the work
1975 	 * functions don't perform further work.
1976 	 */
1977 	wl->state = WLCORE_STATE_OFF;
1978 
1979 	/*
1980 	 * Use the nosync variant to disable interrupts, so the mutex can be
1981 	 * held while doing so without deadlocking.
1982 	 */
1983 	wlcore_disable_interrupts_nosync(wl);
1984 
1985 	mutex_unlock(&wl->mutex);
1986 
1987 	wlcore_synchronize_interrupts(wl);
1988 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1989 		cancel_work_sync(&wl->recovery_work);
1990 	wl1271_flush_deferred_work(wl);
1991 	cancel_delayed_work_sync(&wl->scan_complete_work);
1992 	cancel_work_sync(&wl->netstack_work);
1993 	cancel_work_sync(&wl->tx_work);
1994 	cancel_delayed_work_sync(&wl->elp_work);
1995 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1996 
1997 	/* let's notify MAC80211 about the remaining pending TX frames */
1998 	mutex_lock(&wl->mutex);
1999 	wl12xx_tx_reset(wl);
2000 
2001 	wl1271_power_off(wl);
2002 	/*
2003 	 * In case a recovery was scheduled, interrupts were disabled to avoid
2004 	 * an interrupt storm. Now that the power is down, it is safe to
2005 	 * re-enable interrupts to balance the disable depth
2006 	 */
2007 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
2008 		wlcore_enable_interrupts(wl);
2009 
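	/* reset driver state back to defaults, ready for the next firmware boot */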
2010 	wl->band = IEEE80211_BAND_2GHZ;
2011 
2012 	wl->rx_counter = 0;
2013 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2014 	wl->channel_type = NL80211_CHAN_NO_HT;
2015 	wl->tx_blocks_available = 0;
2016 	wl->tx_allocated_blocks = 0;
2017 	wl->tx_results_count = 0;
2018 	wl->tx_packets_count = 0;
2019 	wl->time_offset = 0;
2020 	wl->ap_fw_ps_map = 0;
2021 	wl->ap_ps_map = 0;
2022 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
2023 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
2024 	memset(wl->links_map, 0, sizeof(wl->links_map));
2025 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
2026 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
2027 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
2028 	wl->active_sta_count = 0;
2029 	wl->active_link_count = 0;
2030 
2031 	/* The system link is always allocated */
2032 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
2033 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
2034 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
2035 
2036 	/*
2037 	 * this is performed after the cancel_work calls and the associated
2038 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
2039 	 * get executed before all these vars have been reset.
2040 	 */
2041 	wl->flags = 0;
2042 
2043 	wl->tx_blocks_freed = 0;
2044 
2045 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2046 		wl->tx_pkts_freed[i] = 0;
2047 		wl->tx_allocated_pkts[i] = 0;
2048 	}
2049 
2050 	wl1271_debugfs_reset(wl);
2051 
2052 	kfree(wl->raw_fw_status);
2053 	wl->raw_fw_status = NULL;
2054 	kfree(wl->fw_status);
2055 	wl->fw_status = NULL;
2056 	kfree(wl->tx_res_if);
2057 	wl->tx_res_if = NULL;
2058 	kfree(wl->target_mem_map);
2059 	wl->target_mem_map = NULL;
2060 
2061 	/*
2062 	 * FW channels must be re-calibrated after recovery, so
2063 	 * save the current Reg-Domain channel configuration and clear it.
2064 	 */
2065 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2066 	       sizeof(wl->reg_ch_conf_pending));
2067 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2068 }
2069 
2070 static void wlcore_op_stop(struct ieee80211_hw *hw)
2071 {
2072 	struct wl1271 *wl = hw->priv;
2073 
2074 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2075 
2076 	mutex_lock(&wl->mutex);
2077 
2078 	wlcore_op_stop_locked(wl);
2079 
2080 	mutex_unlock(&wl->mutex);
2081 }
2082 
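/*
 * Delayed work that fires when a channel switch did not complete in time:
 * notify mac80211 of the failure and tell the fw to stop the switch.
 */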
2083 static void wlcore_channel_switch_work(struct work_struct *work)
2084 {
2085 	struct delayed_work *dwork;
2086 	struct wl1271 *wl;
2087 	struct ieee80211_vif *vif;
2088 	struct wl12xx_vif *wlvif;
2089 	int ret;
2090 
2091 	dwork = container_of(work, struct delayed_work, work);
2092 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2093 	wl = wlvif->wl;
2094 
2095 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2096 
2097 	mutex_lock(&wl->mutex);
2098 
2099 	if (unlikely(wl->state != WLCORE_STATE_ON))
2100 		goto out;
2101 
2102 	/* check the channel switch is still ongoing */
2103 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2104 		goto out;
2105 
2106 	vif = wl12xx_wlvif_to_vif(wlvif);
2107 	ieee80211_chswitch_done(vif, false);
2108 
2109 	ret = wl1271_ps_elp_wakeup(wl);
2110 	if (ret < 0)
2111 		goto out;
2112 
2113 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2114 
2115 	wl1271_ps_elp_sleep(wl);
2116 out:
2117 	mutex_unlock(&wl->mutex);
2118 }
2119 
2120 static void wlcore_connection_loss_work(struct work_struct *work)
2121 {
2122 	struct delayed_work *dwork;
2123 	struct wl1271 *wl;
2124 	struct ieee80211_vif *vif;
2125 	struct wl12xx_vif *wlvif;
2126 
2127 	dwork = container_of(work, struct delayed_work, work);
2128 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2129 	wl = wlvif->wl;
2130 
2131 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2132 
2133 	mutex_lock(&wl->mutex);
2134 
2135 	if (unlikely(wl->state != WLCORE_STATE_ON))
2136 		goto out;
2137 
2138 	/* Call mac80211 connection loss */
2139 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2140 		goto out;
2141 
2142 	vif = wl12xx_wlvif_to_vif(wlvif);
2143 	ieee80211_connection_loss(vif);
2144 out:
2145 	mutex_unlock(&wl->mutex);
2146 }
2147 
2148 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2149 {
2150 	struct delayed_work *dwork;
2151 	struct wl1271 *wl;
2152 	struct wl12xx_vif *wlvif;
2153 	unsigned long time_spare;
2154 	int ret;
2155 
2156 	dwork = container_of(work, struct delayed_work, work);
2157 	wlvif = container_of(dwork, struct wl12xx_vif,
2158 			     pending_auth_complete_work);
2159 	wl = wlvif->wl;
2160 
2161 	mutex_lock(&wl->mutex);
2162 
2163 	if (unlikely(wl->state != WLCORE_STATE_ON))
2164 		goto out;
2165 
2166 	/*
2167 	 * Make sure a second really passed since the last auth reply. Maybe
2168 	 * a second auth reply arrived while we were stuck on the mutex.
2169 	 * Check for a little less than the timeout to protect from scheduler
2170 	 * irregularities.
2171 	 */
2172 	time_spare = jiffies +
2173 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2174 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2175 		goto out;
2176 
2177 	ret = wl1271_ps_elp_wakeup(wl);
2178 	if (ret < 0)
2179 		goto out;
2180 
2181 	/* cancel the ROC if active */
2182 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2183 
2184 	wl1271_ps_elp_sleep(wl);
2185 out:
2186 	mutex_unlock(&wl->mutex);
2187 }
2188 
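/*
 * Grab the first free rate-policy index from the bitmap, or -EBUSY when all
 * WL12XX_MAX_RATE_POLICIES entries are already in use.
 */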
2189 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2190 {
2191 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2192 					WL12XX_MAX_RATE_POLICIES);
2193 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2194 		return -EBUSY;
2195 
2196 	__set_bit(policy, wl->rate_policies_map);
2197 	*idx = policy;
2198 	return 0;
2199 }
2200 
2201 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2202 {
2203 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2204 		return;
2205 
2206 	__clear_bit(*idx, wl->rate_policies_map);
2207 	*idx = WL12XX_MAX_RATE_POLICIES;
2208 }
2209 
2210 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2211 {
2212 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2213 					WLCORE_MAX_KLV_TEMPLATES);
2214 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2215 		return -EBUSY;
2216 
2217 	__set_bit(policy, wl->klv_templates_map);
2218 	*idx = policy;
2219 	return 0;
2220 }
2221 
2222 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2223 {
2224 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2225 		return;
2226 
2227 	__clear_bit(*idx, wl->klv_templates_map);
2228 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2229 }
2230 
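/* map the vif's bss_type (and p2p flag) to the corresponding fw role type */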
2231 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2232 {
2233 	switch (wlvif->bss_type) {
2234 	case BSS_TYPE_AP_BSS:
2235 		if (wlvif->p2p)
2236 			return WL1271_ROLE_P2P_GO;
2237 		else
2238 			return WL1271_ROLE_AP;
2239 
2240 	case BSS_TYPE_STA_BSS:
2241 		if (wlvif->p2p)
2242 			return WL1271_ROLE_P2P_CL;
2243 		else
2244 			return WL1271_ROLE_STA;
2245 
2246 	case BSS_TYPE_IBSS:
2247 		return WL1271_ROLE_IBSS;
2248 
2249 	default:
2250 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2251 	}
2252 	return WL12XX_INVALID_ROLE_TYPE;
2253 }
2254 
2255 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2256 {
2257 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2258 	int i;
2259 
2260 	/* clear everything but the persistent data */
2261 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2262 
2263 	switch (ieee80211_vif_type_p2p(vif)) {
2264 	case NL80211_IFTYPE_P2P_CLIENT:
2265 		wlvif->p2p = 1;
2266 		/* fall-through */
2267 	case NL80211_IFTYPE_STATION:
2268 	case NL80211_IFTYPE_P2P_DEVICE:
2269 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2270 		break;
2271 	case NL80211_IFTYPE_ADHOC:
2272 		wlvif->bss_type = BSS_TYPE_IBSS;
2273 		break;
2274 	case NL80211_IFTYPE_P2P_GO:
2275 		wlvif->p2p = 1;
2276 		/* fall-through */
2277 	case NL80211_IFTYPE_AP:
2278 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2279 		break;
2280 	default:
2281 		wlvif->bss_type = MAX_BSS_TYPE;
2282 		return -EOPNOTSUPP;
2283 	}
2284 
2285 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2286 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2287 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2288 
2289 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2290 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2291 		/* init sta/ibss data */
2292 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2293 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2294 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2295 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2296 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2297 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2298 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2299 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2300 	} else {
2301 		/* init ap data */
2302 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2303 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2304 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2305 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2306 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2307 			wl12xx_allocate_rate_policy(wl,
2308 						&wlvif->ap.ucast_rate_idx[i]);
2309 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2310 		/*
2311 		 * TODO: check if basic_rate shouldn't be
2312 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2313 		 * instead (the same thing for STA above).
2314 		*/
2315 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2316 		/* TODO: this seems to be used only for STA, check it */
2317 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2318 	}
2319 
2320 	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2321 	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2322 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2323 
2324 	/*
2325 	 * mac80211 configures some values globally, while we treat them
2326 	 * per-interface. thus, on init, we have to copy them from wl
2327 	 */
2328 	wlvif->band = wl->band;
2329 	wlvif->channel = wl->channel;
2330 	wlvif->power_level = wl->power_level;
2331 	wlvif->channel_type = wl->channel_type;
2332 
2333 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2334 		  wl1271_rx_streaming_enable_work);
2335 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2336 		  wl1271_rx_streaming_disable_work);
2337 	INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
2338 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2339 			  wlcore_channel_switch_work);
2340 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2341 			  wlcore_connection_loss_work);
2342 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2343 			  wlcore_pending_auth_complete_work);
2344 	INIT_LIST_HEAD(&wlvif->list);
2345 
2346 	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2347 		    (unsigned long) wlvif);
2348 	return 0;
2349 }
2350 
2351 static int wl12xx_init_fw(struct wl1271 *wl)
2352 {
2353 	int retries = WL1271_BOOT_RETRIES;
2354 	bool booted = false;
2355 	struct wiphy *wiphy = wl->hw->wiphy;
2356 	int ret;
2357 
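	/* try to boot the fw up to WL1271_BOOT_RETRIES times, powering off between attempts */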
2358 	while (retries) {
2359 		retries--;
2360 		ret = wl12xx_chip_wakeup(wl, false);
2361 		if (ret < 0)
2362 			goto power_off;
2363 
2364 		ret = wl->ops->boot(wl);
2365 		if (ret < 0)
2366 			goto power_off;
2367 
2368 		ret = wl1271_hw_init(wl);
2369 		if (ret < 0)
2370 			goto irq_disable;
2371 
2372 		booted = true;
2373 		break;
2374 
2375 irq_disable:
2376 		mutex_unlock(&wl->mutex);
2377 		/* Unlocking the mutex in the middle of handling is
2378 		   inherently unsafe. In this case we deem it safe to do,
2379 		   because we need to let any possibly pending IRQ out of
2380 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2381 		   work function will not do anything.) Also, any other
2382 		   possible concurrent operations will fail due to the
2383 		   current state, hence the wl1271 struct should be safe. */
2384 		wlcore_disable_interrupts(wl);
2385 		wl1271_flush_deferred_work(wl);
2386 		cancel_work_sync(&wl->netstack_work);
2387 		mutex_lock(&wl->mutex);
2388 power_off:
2389 		wl1271_power_off(wl);
2390 	}
2391 
2392 	if (!booted) {
2393 		wl1271_error("firmware boot failed despite %d retries",
2394 			     WL1271_BOOT_RETRIES);
2395 		goto out;
2396 	}
2397 
2398 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2399 
2400 	/* update hw/fw version info in wiphy struct */
2401 	wiphy->hw_version = wl->chip.id;
2402 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2403 		sizeof(wiphy->fw_version));
2404 
2405 	/*
2406 	 * Now we know if 11a is supported (info from the NVS), so disable
2407 	 * 11a channels if not supported
2408 	 */
2409 	if (!wl->enable_11a)
2410 		wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2411 
2412 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2413 		     wl->enable_11a ? "" : "not ");
2414 
2415 	wl->state = WLCORE_STATE_ON;
2416 out:
2417 	return ret;
2418 }
2419 
2420 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2421 {
2422 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2423 }
2424 
2425 /*
2426  * Check whether a fw switch (i.e. moving from one loaded
2427  * fw to another) is needed. This function is also responsible
2428  * for updating wl->last_vif_count, so it must be called before
2429  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2430  * will be used).
2431  */
2432 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2433 				  struct vif_counter_data vif_counter_data,
2434 				  bool add)
2435 {
2436 	enum wl12xx_fw_type current_fw = wl->fw_type;
2437 	u8 vif_count = vif_counter_data.counter;
2438 
2439 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2440 		return false;
2441 
2442 	/* increase the vif count if this is a new vif */
2443 	if (add && !vif_counter_data.cur_vif_running)
2444 		vif_count++;
2445 
2446 	wl->last_vif_count = vif_count;
2447 
2448 	/* no need for fw change if the device is OFF */
2449 	if (wl->state == WLCORE_STATE_OFF)
2450 		return false;
2451 
2452 	/* no need for fw change if a single fw is used */
2453 	if (!wl->mr_fw_name)
2454 		return false;
2455 
2456 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2457 		return true;
2458 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2459 		return true;
2460 
2461 	return false;
2462 }
2463 
2464 /*
2465  * Enter "forced psm". Make sure the sta is in psm against the ap,
2466  * to make the fw switch a bit more disconnection-persistent.
2467  */
2468 static void wl12xx_force_active_psm(struct wl1271 *wl)
2469 {
2470 	struct wl12xx_vif *wlvif;
2471 
2472 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2473 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2474 	}
2475 }
2476 
2477 struct wlcore_hw_queue_iter_data {
2478 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2479 	/* current vif */
2480 	struct ieee80211_vif *vif;
2481 	/* is the current vif among those iterated */
2482 	bool cur_running;
2483 };
2484 
2485 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2486 				 struct ieee80211_vif *vif)
2487 {
2488 	struct wlcore_hw_queue_iter_data *iter_data = data;
2489 
2490 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2491 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2492 		return;
2493 
2494 	if (iter_data->cur_running || vif == iter_data->vif) {
2495 		iter_data->cur_running = true;
2496 		return;
2497 	}
2498 
2499 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2500 }
2501 
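/*
 * Reserve a block of NUM_TX_QUEUES mac80211 hw queues for this vif, taking
 * the first block not used by another active interface. AP vifs also get a
 * cab (content-after-beacon) queue from the range past the regular blocks.
 */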
2502 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2503 					 struct wl12xx_vif *wlvif)
2504 {
2505 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2506 	struct wlcore_hw_queue_iter_data iter_data = {};
2507 	int i, q_base;
2508 
2509 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2510 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2511 		return 0;
2512 	}
2513 
2514 	iter_data.vif = vif;
2515 
2516 	/* mark all bits taken by active interfaces */
2517 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2518 					IEEE80211_IFACE_ITER_RESUME_ALL,
2519 					wlcore_hw_queue_iter, &iter_data);
2520 
2521 	/* the current vif is already running in mac80211 (resume/recovery) */
2522 	if (iter_data.cur_running) {
2523 		wlvif->hw_queue_base = vif->hw_queue[0];
2524 		wl1271_debug(DEBUG_MAC80211,
2525 			     "using pre-allocated hw queue base %d",
2526 			     wlvif->hw_queue_base);
2527 
2528 		/* the interface type might have changed */
2529 		goto adjust_cab_queue;
2530 	}
2531 
2532 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2533 				     WLCORE_NUM_MAC_ADDRESSES);
2534 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2535 		return -EBUSY;
2536 
2537 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2538 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2539 		     wlvif->hw_queue_base);
2540 
2541 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2542 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2543 		/* register hw queues in mac80211 */
2544 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2545 	}
2546 
2547 adjust_cab_queue:
2548 	/* the last places are reserved for cab queues per interface */
2549 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2550 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2551 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2552 	else
2553 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2554 
2555 	return 0;
2556 }
2557 
2558 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2559 				   struct ieee80211_vif *vif)
2560 {
2561 	struct wl1271 *wl = hw->priv;
2562 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2563 	struct vif_counter_data vif_count;
2564 	int ret = 0;
2565 	u8 role_type;
2566 
2567 	if (wl->plt) {
2568 		wl1271_error("Adding Interface not allowed while in PLT mode");
2569 		return -EBUSY;
2570 	}
2571 
2572 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2573 			     IEEE80211_VIF_SUPPORTS_UAPSD |
2574 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2575 
2576 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2577 		     ieee80211_vif_type_p2p(vif), vif->addr);
2578 
2579 	wl12xx_get_vif_count(hw, vif, &vif_count);
2580 
2581 	mutex_lock(&wl->mutex);
2582 	ret = wl1271_ps_elp_wakeup(wl);
2583 	if (ret < 0)
2584 		goto out_unlock;
2585 
2586 	/*
2587 	 * in some corner-case HW recovery scenarios it's possible to
2588 	 * get here before __wl1271_op_remove_interface is complete, so
2589 	 * opt out if that is the case.
2590 	 */
2591 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2592 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2593 		ret = -EBUSY;
2594 		goto out;
2595 	}
2596 
2597 
2598 	ret = wl12xx_init_vif_data(wl, vif);
2599 	if (ret < 0)
2600 		goto out;
2601 
2602 	wlvif->wl = wl;
2603 	role_type = wl12xx_get_role_type(wl, wlvif);
2604 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2605 		ret = -EINVAL;
2606 		goto out;
2607 	}
2608 
2609 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2610 	if (ret < 0)
2611 		goto out;
2612 
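	/*
	 * Switching between the single-role and multi-role fw requires a full
	 * restart, so trigger an intended recovery and bail out here.
	 */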
2613 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2614 		wl12xx_force_active_psm(wl);
2615 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2616 		mutex_unlock(&wl->mutex);
2617 		wl1271_recovery_work(&wl->recovery_work);
2618 		return 0;
2619 	}
2620 
2621 	/*
2622 	 * TODO: once the nvs issue is solved, move this block
2623 	 * to start(), and make sure the driver is ON here.
2624 	 */
2625 	if (wl->state == WLCORE_STATE_OFF) {
2626 		/*
2627 		 * we still need this in order to configure the fw
2628 		 * while uploading the nvs
2629 		 */
2630 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2631 
2632 		ret = wl12xx_init_fw(wl);
2633 		if (ret < 0)
2634 			goto out;
2635 	}
2636 
2637 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2638 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2639 					     role_type, &wlvif->role_id);
2640 		if (ret < 0)
2641 			goto out;
2642 
2643 		ret = wl1271_init_vif_specific(wl, vif);
2644 		if (ret < 0)
2645 			goto out;
2646 
2647 	} else {
2648 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2649 					     &wlvif->dev_role_id);
2650 		if (ret < 0)
2651 			goto out;
2652 
2653 		/* needed mainly for configuring rate policies */
2654 		ret = wl1271_sta_hw_init(wl, wlvif);
2655 		if (ret < 0)
2656 			goto out;
2657 	}
2658 
2659 	list_add(&wlvif->list, &wl->wlvif_list);
2660 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2661 
2662 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2663 		wl->ap_count++;
2664 	else
2665 		wl->sta_count++;
2666 out:
2667 	wl1271_ps_elp_sleep(wl);
2668 out_unlock:
2669 	mutex_unlock(&wl->mutex);
2670 
2671 	return ret;
2672 }
2673 
2674 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2675 					 struct ieee80211_vif *vif,
2676 					 bool reset_tx_queues)
2677 {
2678 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2679 	int i, ret;
2680 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2681 
2682 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2683 
2684 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2685 		return;
2686 
2687 	/* because of hardware recovery, we may get here twice */
2688 	if (wl->state == WLCORE_STATE_OFF)
2689 		return;
2690 
2691 	wl1271_info("down");
2692 
2693 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2694 	    wl->scan_wlvif == wlvif) {
2695 		/*
2696 		 * Rearm the tx watchdog just before idling scan. This
2697 		 * prevents just-finished scans from triggering the watchdog
2698 		 */
2699 		wl12xx_rearm_tx_watchdog_locked(wl);
2700 
2701 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2702 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2703 		wl->scan_wlvif = NULL;
2704 		wl->scan.req = NULL;
2705 		ieee80211_scan_completed(wl->hw, true);
2706 	}
2707 
2708 	if (wl->sched_vif == wlvif)
2709 		wl->sched_vif = NULL;
2710 
2711 	if (wl->roc_vif == vif) {
2712 		wl->roc_vif = NULL;
2713 		ieee80211_remain_on_channel_expired(wl->hw);
2714 	}
2715 
2716 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2717 		/* disable active roles */
2718 		ret = wl1271_ps_elp_wakeup(wl);
2719 		if (ret < 0)
2720 			goto deinit;
2721 
2722 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2723 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2724 			if (wl12xx_dev_role_started(wlvif))
2725 				wl12xx_stop_dev(wl, wlvif);
2726 		}
2727 
2728 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2729 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2730 			if (ret < 0)
2731 				goto deinit;
2732 		} else {
2733 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2734 			if (ret < 0)
2735 				goto deinit;
2736 		}
2737 
2738 		wl1271_ps_elp_sleep(wl);
2739 	}
2740 deinit:
2741 	wl12xx_tx_reset_wlvif(wl, wlvif);
2742 
2743 	/* clear all hlids (except system_hlid) */
2744 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2745 
2746 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2747 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2748 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2749 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2750 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2751 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2752 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2753 	} else {
2754 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2755 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2756 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2757 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2758 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2759 			wl12xx_free_rate_policy(wl,
2760 						&wlvif->ap.ucast_rate_idx[i]);
2761 		wl1271_free_ap_keys(wl, wlvif);
2762 	}
2763 
2764 	dev_kfree_skb(wlvif->probereq);
2765 	wlvif->probereq = NULL;
2766 	if (wl->last_wlvif == wlvif)
2767 		wl->last_wlvif = NULL;
2768 	list_del(&wlvif->list);
2769 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2770 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2771 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2772 
2773 	if (is_ap)
2774 		wl->ap_count--;
2775 	else
2776 		wl->sta_count--;
2777 
2778 	/*
2779 	 * Last AP removed but stations remain: configure sleep auth according to STA.
2780 	 * Don't do this on unintended recovery.
2781 	 */
2782 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2783 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2784 		goto unlock;
2785 
2786 	if (wl->ap_count == 0 && is_ap) {
2787 		/* mask ap events */
2788 		wl->event_mask &= ~wl->ap_event_mask;
2789 		wl1271_event_unmask(wl);
2790 	}
2791 
2792 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2793 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2794 		/* Configure for power according to debugfs */
2795 		if (sta_auth != WL1271_PSM_ILLEGAL)
2796 			wl1271_acx_sleep_auth(wl, sta_auth);
2797 		/* Configure for ELP power saving */
2798 		else
2799 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2800 	}
2801 
2802 unlock:
2803 	mutex_unlock(&wl->mutex);
2804 
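	/* cancel the per-vif works with the mutex released, since they may take it themselves */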
2805 	del_timer_sync(&wlvif->rx_streaming_timer);
2806 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2807 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2808 	cancel_work_sync(&wlvif->rc_update_work);
2809 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2810 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2811 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2812 
2813 	mutex_lock(&wl->mutex);
2814 }
2815 
2816 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2817 				       struct ieee80211_vif *vif)
2818 {
2819 	struct wl1271 *wl = hw->priv;
2820 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2821 	struct wl12xx_vif *iter;
2822 	struct vif_counter_data vif_count;
2823 
2824 	wl12xx_get_vif_count(hw, vif, &vif_count);
2825 	mutex_lock(&wl->mutex);
2826 
2827 	if (wl->state == WLCORE_STATE_OFF ||
2828 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2829 		goto out;
2830 
2831 	/*
2832 	 * wl->vif can be null here if someone shuts down the interface
2833 	 * just when hardware recovery has been started.
2834 	 */
2835 	wl12xx_for_each_wlvif(wl, iter) {
2836 		if (iter != wlvif)
2837 			continue;
2838 
2839 		__wl1271_op_remove_interface(wl, vif, true);
2840 		break;
2841 	}
2842 	WARN_ON(iter != wlvif);
2843 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2844 		wl12xx_force_active_psm(wl);
2845 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2846 		wl12xx_queue_recovery_work(wl);
2847 	}
2848 out:
2849 	mutex_unlock(&wl->mutex);
2850 }
2851 
2852 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2853 				      struct ieee80211_vif *vif,
2854 				      enum nl80211_iftype new_type, bool p2p)
2855 {
2856 	struct wl1271 *wl = hw->priv;
2857 	int ret;
2858 
2859 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2860 	wl1271_op_remove_interface(hw, vif);
2861 
2862 	vif->type = new_type;
2863 	vif->p2p = p2p;
2864 	ret = wl1271_op_add_interface(hw, vif);
2865 
2866 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2867 	return ret;
2868 }
2869 
2870 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2871 {
2872 	int ret;
2873 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2874 
2875 	/*
2876 	 * One of the side effects of the JOIN command is that it clears
2877 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2878 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2879 	 * Currently the only valid scenario for JOIN during association
2880 	 * is on roaming, in which case we will also be given new keys.
2881 	 * Keep the below message for now, unless it starts bothering
2882 	 * users who really like to roam a lot :)
2883 	 */
2884 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2885 		wl1271_info("JOIN while associated.");
2886 
2887 	/* clear encryption type */
2888 	wlvif->encryption_type = KEY_NONE;
2889 
2890 	if (is_ibss)
2891 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2892 	else
2893 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2894 
2895 	return ret;
2896 }
2897 
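/* copy the SSID IE found at the given offset in the frame into wlvif->ssid */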
2898 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2899 			    int offset)
2900 {
2901 	u8 ssid_len;
2902 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2903 					 skb->len - offset);
2904 
2905 	if (!ptr) {
2906 		wl1271_error("No SSID in IEs!");
2907 		return -ENOENT;
2908 	}
2909 
2910 	ssid_len = ptr[1];
2911 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2912 		wl1271_error("SSID is too long!");
2913 		return -EINVAL;
2914 	}
2915 
2916 	wlvif->ssid_len = ssid_len;
2917 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2918 	return 0;
2919 }
2920 
2921 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2922 {
2923 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2924 	struct sk_buff *skb;
2925 	int ieoffset;
2926 
2927 	/* we currently only support setting the ssid from the ap probe req */
2928 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2929 		return -EINVAL;
2930 
2931 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2932 	if (!skb)
2933 		return -EINVAL;
2934 
2935 	ieoffset = offsetof(struct ieee80211_mgmt,
2936 			    u.probe_req.variable);
2937 	wl1271_ssid_set(wlvif, skb, ieoffset);
2938 	dev_kfree_skb(skb);
2939 
2940 	return 0;
2941 }
2942 
2943 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2944 			    struct ieee80211_bss_conf *bss_conf,
2945 			    u32 sta_rate_set)
2946 {
2947 	int ieoffset;
2948 	int ret;
2949 
2950 	wlvif->aid = bss_conf->aid;
2951 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2952 	wlvif->beacon_int = bss_conf->beacon_int;
2953 	wlvif->wmm_enabled = bss_conf->qos;
2954 
2955 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2956 
2957 	/*
2958 	 * with wl1271, we don't need to update the
2959 	 * beacon_int and dtim_period, because the firmware
2960 	 * updates them by itself when the first beacon is
2961 	 * received after a join.
2962 	 */
2963 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2964 	if (ret < 0)
2965 		return ret;
2966 
2967 	/*
2968 	 * Get a template for hardware connection maintenance
2969 	 */
2970 	dev_kfree_skb(wlvif->probereq);
2971 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2972 							wlvif,
2973 							NULL);
2974 	ieoffset = offsetof(struct ieee80211_mgmt,
2975 			    u.probe_req.variable);
2976 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2977 
2978 	/* enable the connection monitoring feature */
2979 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2980 	if (ret < 0)
2981 		return ret;
2982 
2983 	/*
2984 	 * The join command disables the keep-alive mode, shuts down its process,
2985 	 * and also clears the template config, so we need to reset it all after
2986 	 * the join. The acx_aid starts the keep-alive process, and the order
2987 	 * of the commands below is relevant.
2988 	 */
2989 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2990 	if (ret < 0)
2991 		return ret;
2992 
2993 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2994 	if (ret < 0)
2995 		return ret;
2996 
2997 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2998 	if (ret < 0)
2999 		return ret;
3000 
3001 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
3002 					   wlvif->sta.klv_template_id,
3003 					   ACX_KEEP_ALIVE_TPL_VALID);
3004 	if (ret < 0)
3005 		return ret;
3006 
3007 	/*
3008 	 * The default fw psm configuration is AUTO, while mac80211 default
3009 	 * setting is off (ACTIVE), so sync the fw with the correct value.
3010 	 */
3011 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
3012 	if (ret < 0)
3013 		return ret;
3014 
3015 	if (sta_rate_set) {
3016 		wlvif->rate_set =
3017 			wl1271_tx_enabled_rates_get(wl,
3018 						    sta_rate_set,
3019 						    wlvif->band);
3020 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3021 		if (ret < 0)
3022 			return ret;
3023 	}
3024 
3025 	return ret;
3026 }
3027 
3028 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3029 {
3030 	int ret;
3031 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
3032 
3033 	/* make sure we are associated (sta) */
3034 	if (sta &&
3035 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
3036 		return false;
3037 
3038 	/* make sure we are joined (ibss) */
3039 	if (!sta &&
3040 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3041 		return false;
3042 
3043 	if (sta) {
3044 		/* use defaults when not associated */
3045 		wlvif->aid = 0;
3046 
3047 		/* free probe-request template */
3048 		dev_kfree_skb(wlvif->probereq);
3049 		wlvif->probereq = NULL;
3050 
3051 		/* disable connection monitor features */
3052 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3053 		if (ret < 0)
3054 			return ret;
3055 
3056 		/* Disable the keep-alive feature */
3057 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3058 		if (ret < 0)
3059 			return ret;
3060 
3061 		/* disable beacon filtering */
3062 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3063 		if (ret < 0)
3064 			return ret;
3065 	}
3066 
3067 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3068 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3069 
3070 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3071 		ieee80211_chswitch_done(vif, false);
3072 		cancel_delayed_work(&wlvif->channel_switch_work);
3073 	}
3074 
3075 	/* invalidate keep-alive template */
3076 	wl1271_acx_keep_alive_config(wl, wlvif,
3077 				     wlvif->sta.klv_template_id,
3078 				     ACX_KEEP_ALIVE_TPL_INVALID);
3079 
3080 	return 0;
3081 }
3082 
3083 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3084 {
3085 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3086 	wlvif->rate_set = wlvif->basic_rate_set;
3087 }
3088 
3089 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3090 				   bool idle)
3091 {
3092 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3093 
3094 	if (idle == cur_idle)
3095 		return;
3096 
3097 	if (idle) {
3098 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3099 	} else {
3100 		/* The current firmware only supports sched_scan in idle */
3101 		if (wl->sched_vif == wlvif)
3102 			wl->ops->sched_scan_stop(wl, wlvif);
3103 
3104 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3105 	}
3106 }
3107 
3108 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3109 			     struct ieee80211_conf *conf, u32 changed)
3110 {
3111 	int ret;
3112 
3113 	if (wlcore_is_p2p_mgmt(wlvif))
3114 		return 0;
3115 
3116 	if (conf->power_level != wlvif->power_level) {
3117 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3118 		if (ret < 0)
3119 			return ret;
3120 
3121 		wlvif->power_level = conf->power_level;
3122 	}
3123 
3124 	return 0;
3125 }
3126 
3127 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3128 {
3129 	struct wl1271 *wl = hw->priv;
3130 	struct wl12xx_vif *wlvif;
3131 	struct ieee80211_conf *conf = &hw->conf;
3132 	int ret = 0;
3133 
3134 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3135 		     " changed 0x%x",
3136 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3137 		     conf->power_level,
3138 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3139 			 changed);
3140 
3141 	mutex_lock(&wl->mutex);
3142 
3143 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3144 		wl->power_level = conf->power_level;
3145 
3146 	if (unlikely(wl->state != WLCORE_STATE_ON))
3147 		goto out;
3148 
3149 	ret = wl1271_ps_elp_wakeup(wl);
3150 	if (ret < 0)
3151 		goto out;
3152 
3153 	/* configure each interface */
3154 	wl12xx_for_each_wlvif(wl, wlvif) {
3155 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3156 		if (ret < 0)
3157 			goto out_sleep;
3158 	}
3159 
3160 out_sleep:
3161 	wl1271_ps_elp_sleep(wl);
3162 
3163 out:
3164 	mutex_unlock(&wl->mutex);
3165 
3166 	return ret;
3167 }
3168 
3169 struct wl1271_filter_params {
3170 	bool enabled;
3171 	int mc_list_length;
3172 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3173 };
3174 
3175 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3176 				       struct netdev_hw_addr_list *mc_list)
3177 {
3178 	struct wl1271_filter_params *fp;
3179 	struct netdev_hw_addr *ha;
3180 
3181 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3182 	if (!fp) {
3183 		wl1271_error("Out of memory setting filters.");
3184 		return 0;
3185 	}
3186 
3187 	/* update multicast filtering parameters */
3188 	fp->mc_list_length = 0;
3189 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3190 		fp->enabled = false;
3191 	} else {
3192 		fp->enabled = true;
3193 		netdev_hw_addr_list_for_each(ha, mc_list) {
3194 			memcpy(fp->mc_list[fp->mc_list_length],
3195 					ha->addr, ETH_ALEN);
3196 			fp->mc_list_length++;
3197 		}
3198 	}
3199 
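	/* this pointer is handed back to us as the 'multicast' cookie in configure_filter() */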
3200 	return (u64)(unsigned long)fp;
3201 }
3202 
3203 #define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
3204 				  FIF_FCSFAIL | \
3205 				  FIF_BCN_PRBRESP_PROMISC | \
3206 				  FIF_CONTROL | \
3207 				  FIF_OTHER_BSS)
3208 
3209 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3210 				       unsigned int changed,
3211 				       unsigned int *total, u64 multicast)
3212 {
3213 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3214 	struct wl1271 *wl = hw->priv;
3215 	struct wl12xx_vif *wlvif;
3216 
3217 	int ret;
3218 
3219 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3220 		     " total %x", changed, *total);
3221 
3222 	mutex_lock(&wl->mutex);
3223 
3224 	*total &= WL1271_SUPPORTED_FILTERS;
3225 	changed &= WL1271_SUPPORTED_FILTERS;
3226 
3227 	if (unlikely(wl->state != WLCORE_STATE_ON))
3228 		goto out;
3229 
3230 	ret = wl1271_ps_elp_wakeup(wl);
3231 	if (ret < 0)
3232 		goto out;
3233 
3234 	wl12xx_for_each_wlvif(wl, wlvif) {
3235 		if (wlcore_is_p2p_mgmt(wlvif))
3236 			continue;
3237 
3238 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3239 			if (*total & FIF_ALLMULTI)
3240 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3241 								   false,
3242 								   NULL, 0);
3243 			else if (fp)
3244 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3245 							fp->enabled,
3246 							fp->mc_list,
3247 							fp->mc_list_length);
3248 			if (ret < 0)
3249 				goto out_sleep;
3250 		}
3251 	}
3252 
3253 	/*
3254 	 * the fw doesn't provide an API to configure the filters. instead,
3255 	 * the filter configuration is based on the active roles / ROC
3256 	 * state.
3257 	 */
3258 
3259 out_sleep:
3260 	wl1271_ps_elp_sleep(wl);
3261 
3262 out:
3263 	mutex_unlock(&wl->mutex);
3264 	kfree(fp);
3265 }
3266 
3267 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3268 				u8 id, u8 key_type, u8 key_size,
3269 				const u8 *key, u8 hlid, u32 tx_seq_32,
3270 				u16 tx_seq_16)
3271 {
3272 	struct wl1271_ap_key *ap_key;
3273 	int i;
3274 
3275 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3276 
3277 	if (key_size > MAX_KEY_SIZE)
3278 		return -EINVAL;
3279 
3280 	/*
3281 	 * Find next free entry in ap_keys. Also check we are not replacing
3282 	 * an existing key.
3283 	 */
3284 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3285 		if (wlvif->ap.recorded_keys[i] == NULL)
3286 			break;
3287 
3288 		if (wlvif->ap.recorded_keys[i]->id == id) {
3289 			wl1271_warning("trying to record key replacement");
3290 			return -EINVAL;
3291 		}
3292 	}
3293 
3294 	if (i == MAX_NUM_KEYS)
3295 		return -EBUSY;
3296 
3297 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3298 	if (!ap_key)
3299 		return -ENOMEM;
3300 
3301 	ap_key->id = id;
3302 	ap_key->key_type = key_type;
3303 	ap_key->key_size = key_size;
3304 	memcpy(ap_key->key, key, key_size);
3305 	ap_key->hlid = hlid;
3306 	ap_key->tx_seq_32 = tx_seq_32;
3307 	ap_key->tx_seq_16 = tx_seq_16;
3308 
3309 	wlvif->ap.recorded_keys[i] = ap_key;
3310 	return 0;
3311 }
3312 
3313 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3314 {
3315 	int i;
3316 
3317 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3318 		kfree(wlvif->ap.recorded_keys[i]);
3319 		wlvif->ap.recorded_keys[i] = NULL;
3320 	}
3321 }
3322 
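/*
 * Push the keys recorded before the AP was started down to the fw, and set
 * the default WEP key if a WEP key was among them. The recorded keys are
 * freed either way.
 */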
3323 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3324 {
3325 	int i, ret = 0;
3326 	struct wl1271_ap_key *key;
3327 	bool wep_key_added = false;
3328 
3329 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3330 		u8 hlid;
3331 		if (wlvif->ap.recorded_keys[i] == NULL)
3332 			break;
3333 
3334 		key = wlvif->ap.recorded_keys[i];
3335 		hlid = key->hlid;
3336 		if (hlid == WL12XX_INVALID_LINK_ID)
3337 			hlid = wlvif->ap.bcast_hlid;
3338 
3339 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3340 					    key->id, key->key_type,
3341 					    key->key_size, key->key,
3342 					    hlid, key->tx_seq_32,
3343 					    key->tx_seq_16);
3344 		if (ret < 0)
3345 			goto out;
3346 
3347 		if (key->key_type == KEY_WEP)
3348 			wep_key_added = true;
3349 	}
3350 
3351 	if (wep_key_added) {
3352 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3353 						     wlvif->ap.bcast_hlid);
3354 		if (ret < 0)
3355 			goto out;
3356 	}
3357 
3358 out:
3359 	wl1271_free_ap_keys(wl, wlvif);
3360 	return ret;
3361 }
3362 
3363 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3364 		       u16 action, u8 id, u8 key_type,
3365 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3366 		       u16 tx_seq_16, struct ieee80211_sta *sta)
3367 {
3368 	int ret;
3369 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3370 
3371 	if (is_ap) {
3372 		struct wl1271_station *wl_sta;
3373 		u8 hlid;
3374 
3375 		if (sta) {
3376 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3377 			hlid = wl_sta->hlid;
3378 		} else {
3379 			hlid = wlvif->ap.bcast_hlid;
3380 		}
3381 
3382 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3383 			/*
3384 			 * We do not support removing keys after AP shutdown.
3385 			 * Pretend we do to make mac80211 happy.
3386 			 */
3387 			if (action != KEY_ADD_OR_REPLACE)
3388 				return 0;
3389 
3390 			ret = wl1271_record_ap_key(wl, wlvif, id,
3391 					     key_type, key_size,
3392 					     key, hlid, tx_seq_32,
3393 					     tx_seq_16);
3394 		} else {
3395 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3396 					     id, key_type, key_size,
3397 					     key, hlid, tx_seq_32,
3398 					     tx_seq_16);
3399 		}
3400 
3401 		if (ret < 0)
3402 			return ret;
3403 	} else {
3404 		const u8 *addr;
3405 		static const u8 bcast_addr[ETH_ALEN] = {
3406 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3407 		};
3408 
3409 		addr = sta ? sta->addr : bcast_addr;
3410 
3411 		if (is_zero_ether_addr(addr)) {
3412 			/* We don't support TX-only encryption */
3413 			return -EOPNOTSUPP;
3414 		}
3415 
3416 		/* The wl1271 does not allow removing unicast keys - they
3417 		   will be cleared automatically on the next CMD_JOIN. Ignore the
3418 		   request silently, as we don't want mac80211 to emit
3419 		   an error message. */
3420 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3421 			return 0;
3422 
3423 		/* don't remove key if hlid was already deleted */
3424 		if (action == KEY_REMOVE &&
3425 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3426 			return 0;
3427 
3428 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3429 					     id, key_type, key_size,
3430 					     key, addr, tx_seq_32,
3431 					     tx_seq_16);
3432 		if (ret < 0)
3433 			return ret;
3434 
3435 	}
3436 
3437 	return 0;
3438 }
3439 
3440 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3441 			     struct ieee80211_vif *vif,
3442 			     struct ieee80211_sta *sta,
3443 			     struct ieee80211_key_conf *key_conf)
3444 {
3445 	struct wl1271 *wl = hw->priv;
3446 	int ret;
3447 	bool might_change_spare =
3448 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3449 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3450 
3451 	if (might_change_spare) {
3452 		/*
3453 		 * stop the queues and flush to ensure the next packets are
3454 		 * in sync with FW spare block accounting
3455 		 */
3456 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3457 		wl1271_tx_flush(wl);
3458 	}
3459 
3460 	mutex_lock(&wl->mutex);
3461 
3462 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3463 		ret = -EAGAIN;
3464 		goto out_wake_queues;
3465 	}
3466 
3467 	ret = wl1271_ps_elp_wakeup(wl);
3468 	if (ret < 0)
3469 		goto out_wake_queues;
3470 
3471 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3472 
3473 	wl1271_ps_elp_sleep(wl);
3474 
3475 out_wake_queues:
3476 	if (might_change_spare)
3477 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3478 
3479 	mutex_unlock(&wl->mutex);
3480 
3481 	return ret;
3482 }
3483 
3484 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3485 		   struct ieee80211_vif *vif,
3486 		   struct ieee80211_sta *sta,
3487 		   struct ieee80211_key_conf *key_conf)
3488 {
3489 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3490 	int ret;
3491 	u32 tx_seq_32 = 0;
3492 	u16 tx_seq_16 = 0;
3493 	u8 key_type;
3494 	u8 hlid;
3495 
3496 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3497 
3498 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3499 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3500 		     key_conf->cipher, key_conf->keyidx,
3501 		     key_conf->keylen, key_conf->flags);
3502 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3503 
3504 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3505 		if (sta) {
3506 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3507 			hlid = wl_sta->hlid;
3508 		} else {
3509 			hlid = wlvif->ap.bcast_hlid;
3510 		}
3511 	else
3512 		hlid = wlvif->sta.hlid;
3513 
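	/* derive the initial TX security sequence number for this link from its freed-packets counter */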
3514 	if (hlid != WL12XX_INVALID_LINK_ID) {
3515 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3516 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3517 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3518 	}
3519 
3520 	switch (key_conf->cipher) {
3521 	case WLAN_CIPHER_SUITE_WEP40:
3522 	case WLAN_CIPHER_SUITE_WEP104:
3523 		key_type = KEY_WEP;
3524 
3525 		key_conf->hw_key_idx = key_conf->keyidx;
3526 		break;
3527 	case WLAN_CIPHER_SUITE_TKIP:
3528 		key_type = KEY_TKIP;
3529 		key_conf->hw_key_idx = key_conf->keyidx;
3530 		break;
3531 	case WLAN_CIPHER_SUITE_CCMP:
3532 		key_type = KEY_AES;
3533 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3534 		break;
3535 	case WL1271_CIPHER_SUITE_GEM:
3536 		key_type = KEY_GEM;
3537 		break;
3538 	default:
3539 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3540 
3541 		return -EOPNOTSUPP;
3542 	}
3543 
3544 	switch (cmd) {
3545 	case SET_KEY:
3546 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3547 				 key_conf->keyidx, key_type,
3548 				 key_conf->keylen, key_conf->key,
3549 				 tx_seq_32, tx_seq_16, sta);
3550 		if (ret < 0) {
3551 			wl1271_error("Could not add or replace key");
3552 			return ret;
3553 		}
3554 
3555 		/*
3556 		 * reconfiguring arp response if the unicast (or common)
3557 		 * encryption key type was changed
3558 		 */
3559 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3560 		    (sta || key_type == KEY_WEP) &&
3561 		    wlvif->encryption_type != key_type) {
3562 			wlvif->encryption_type = key_type;
3563 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3564 			if (ret < 0) {
3565 				wl1271_warning("build arp rsp failed: %d", ret);
3566 				return ret;
3567 			}
3568 		}
3569 		break;
3570 
3571 	case DISABLE_KEY:
3572 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3573 				     key_conf->keyidx, key_type,
3574 				     key_conf->keylen, key_conf->key,
3575 				     0, 0, sta);
3576 		if (ret < 0) {
3577 			wl1271_error("Could not remove key");
3578 			return ret;
3579 		}
3580 		break;
3581 
3582 	default:
3583 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3584 		return -EOPNOTSUPP;
3585 	}
3586 
3587 	return ret;
3588 }
3589 EXPORT_SYMBOL_GPL(wlcore_set_key);
3590 
3591 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3592 					  struct ieee80211_vif *vif,
3593 					  int key_idx)
3594 {
3595 	struct wl1271 *wl = hw->priv;
3596 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3597 	int ret;
3598 
3599 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3600 		     key_idx);
3601 
3602 	/* we don't handle unsetting of default key */
3603 	if (key_idx == -1)
3604 		return;
3605 
3606 	mutex_lock(&wl->mutex);
3607 
3608 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3609 		ret = -EAGAIN;
3610 		goto out_unlock;
3611 	}
3612 
3613 	ret = wl1271_ps_elp_wakeup(wl);
3614 	if (ret < 0)
3615 		goto out_unlock;
3616 
3617 	wlvif->default_key = key_idx;
3618 
3619 	/* the default WEP key needs to be configured at least once */
3620 	if (wlvif->encryption_type == KEY_WEP) {
3621 		ret = wl12xx_cmd_set_default_wep_key(wl,
3622 				key_idx,
3623 				wlvif->sta.hlid);
3624 		if (ret < 0)
3625 			goto out_sleep;
3626 	}
3627 
3628 out_sleep:
3629 	wl1271_ps_elp_sleep(wl);
3630 
3631 out_unlock:
3632 	mutex_unlock(&wl->mutex);
3633 }
3634 
3635 void wlcore_regdomain_config(struct wl1271 *wl)
3636 {
3637 	int ret;
3638 
3639 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3640 		return;
3641 
3642 	mutex_lock(&wl->mutex);
3643 
3644 	if (unlikely(wl->state != WLCORE_STATE_ON))
3645 		goto out;
3646 
3647 	ret = wl1271_ps_elp_wakeup(wl);
3648 	if (ret < 0)
3649 		goto out;
3650 
3651 	ret = wlcore_cmd_regdomain_config_locked(wl);
3652 	if (ret < 0) {
3653 		wl12xx_queue_recovery_work(wl);
3654 		goto out;
3655 	}
3656 
3657 	wl1271_ps_elp_sleep(wl);
3658 out:
3659 	mutex_unlock(&wl->mutex);
3660 }
3661 
3662 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3663 			     struct ieee80211_vif *vif,
3664 			     struct ieee80211_scan_request *hw_req)
3665 {
3666 	struct cfg80211_scan_request *req = &hw_req->req;
3667 	struct wl1271 *wl = hw->priv;
3668 	int ret;
3669 	u8 *ssid = NULL;
3670 	size_t len = 0;
3671 
3672 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3673 
3674 	if (req->n_ssids) {
3675 		ssid = req->ssids[0].ssid;
3676 		len = req->ssids[0].ssid_len;
3677 	}
3678 
3679 	mutex_lock(&wl->mutex);
3680 
3681 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3682 		/*
3683 		 * We cannot return -EBUSY here because cfg80211 would then
3684 		 * expect a call to ieee80211_scan_completed, which in this
3685 		 * case will never come.
3686 		 */
3687 		ret = -EAGAIN;
3688 		goto out;
3689 	}
3690 
3691 	ret = wl1271_ps_elp_wakeup(wl);
3692 	if (ret < 0)
3693 		goto out;
3694 
3695 	/* fail if there is any role in ROC */
3696 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3697 		/* don't allow scanning right now */
3698 		ret = -EBUSY;
3699 		goto out_sleep;
3700 	}
3701 
3702 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3703 out_sleep:
3704 	wl1271_ps_elp_sleep(wl);
3705 out:
3706 	mutex_unlock(&wl->mutex);
3707 
3708 	return ret;
3709 }
3710 
3711 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3712 				     struct ieee80211_vif *vif)
3713 {
3714 	struct wl1271 *wl = hw->priv;
3715 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3716 	int ret;
3717 
3718 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3719 
3720 	mutex_lock(&wl->mutex);
3721 
3722 	if (unlikely(wl->state != WLCORE_STATE_ON))
3723 		goto out;
3724 
3725 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3726 		goto out;
3727 
3728 	ret = wl1271_ps_elp_wakeup(wl);
3729 	if (ret < 0)
3730 		goto out;
3731 
3732 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3733 		ret = wl->ops->scan_stop(wl, wlvif);
3734 		if (ret < 0)
3735 			goto out_sleep;
3736 	}
3737 
3738 	/*
3739 	 * Rearm the tx watchdog just before idling scan. This
3740 	 * prevents just-finished scans from triggering the watchdog
3741 	 */
3742 	wl12xx_rearm_tx_watchdog_locked(wl);
3743 
3744 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3745 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3746 	wl->scan_wlvif = NULL;
3747 	wl->scan.req = NULL;
3748 	ieee80211_scan_completed(wl->hw, true);
3749 
3750 out_sleep:
3751 	wl1271_ps_elp_sleep(wl);
3752 out:
3753 	mutex_unlock(&wl->mutex);
3754 
3755 	cancel_delayed_work_sync(&wl->scan_complete_work);
3756 }
3757 
3758 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3759 				      struct ieee80211_vif *vif,
3760 				      struct cfg80211_sched_scan_request *req,
3761 				      struct ieee80211_scan_ies *ies)
3762 {
3763 	struct wl1271 *wl = hw->priv;
3764 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3765 	int ret;
3766 
3767 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3768 
3769 	mutex_lock(&wl->mutex);
3770 
3771 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3772 		ret = -EAGAIN;
3773 		goto out;
3774 	}
3775 
3776 	ret = wl1271_ps_elp_wakeup(wl);
3777 	if (ret < 0)
3778 		goto out;
3779 
3780 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3781 	if (ret < 0)
3782 		goto out_sleep;
3783 
3784 	wl->sched_vif = wlvif;
3785 
3786 out_sleep:
3787 	wl1271_ps_elp_sleep(wl);
3788 out:
3789 	mutex_unlock(&wl->mutex);
3790 	return ret;
3791 }
3792 
3793 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3794 				     struct ieee80211_vif *vif)
3795 {
3796 	struct wl1271 *wl = hw->priv;
3797 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3798 	int ret;
3799 
3800 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3801 
3802 	mutex_lock(&wl->mutex);
3803 
3804 	if (unlikely(wl->state != WLCORE_STATE_ON))
3805 		goto out;
3806 
3807 	ret = wl1271_ps_elp_wakeup(wl);
3808 	if (ret < 0)
3809 		goto out;
3810 
3811 	wl->ops->sched_scan_stop(wl, wlvif);
3812 
3813 	wl1271_ps_elp_sleep(wl);
3814 out:
3815 	mutex_unlock(&wl->mutex);
3816 
3817 	return 0;
3818 }
3819 
3820 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3821 {
3822 	struct wl1271 *wl = hw->priv;
3823 	int ret = 0;
3824 
3825 	mutex_lock(&wl->mutex);
3826 
3827 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3828 		ret = -EAGAIN;
3829 		goto out;
3830 	}
3831 
3832 	ret = wl1271_ps_elp_wakeup(wl);
3833 	if (ret < 0)
3834 		goto out;
3835 
3836 	ret = wl1271_acx_frag_threshold(wl, value);
3837 	if (ret < 0)
3838 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3839 
3840 	wl1271_ps_elp_sleep(wl);
3841 
3842 out:
3843 	mutex_unlock(&wl->mutex);
3844 
3845 	return ret;
3846 }
3847 
3848 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3849 {
3850 	struct wl1271 *wl = hw->priv;
3851 	struct wl12xx_vif *wlvif;
3852 	int ret = 0;
3853 
3854 	mutex_lock(&wl->mutex);
3855 
3856 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3857 		ret = -EAGAIN;
3858 		goto out;
3859 	}
3860 
3861 	ret = wl1271_ps_elp_wakeup(wl);
3862 	if (ret < 0)
3863 		goto out;
3864 
3865 	wl12xx_for_each_wlvif(wl, wlvif) {
3866 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3867 		if (ret < 0)
3868 			wl1271_warning("set rts threshold failed: %d", ret);
3869 	}
3870 	wl1271_ps_elp_sleep(wl);
3871 
3872 out:
3873 	mutex_unlock(&wl->mutex);
3874 
3875 	return ret;
3876 }
3877 
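/*
 * Strip the first IE matching @eid from the frame in @skb, starting the
 * search at @ieoffset, by shifting the remainder of the frame down and
 * trimming the skb accordingly.
 */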
3878 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3879 {
3880 	int len;
3881 	const u8 *next, *end = skb->data + skb->len;
3882 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3883 					skb->len - ieoffset);
3884 	if (!ie)
3885 		return;
3886 	len = ie[1] + 2;
3887 	next = ie + len;
3888 	memmove(ie, next, end - next);
3889 	skb_trim(skb, skb->len - len);
3890 }
3891 
3892 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3893 					    unsigned int oui, u8 oui_type,
3894 					    int ieoffset)
3895 {
3896 	int len;
3897 	const u8 *next, *end = skb->data + skb->len;
3898 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3899 					       skb->data + ieoffset,
3900 					       skb->len - ieoffset);
3901 	if (!ie)
3902 		return;
3903 	len = ie[1] + 2;
3904 	next = ie + len;
3905 	memmove(ie, next, end - next);
3906 	skb_trim(skb, skb->len - len);
3907 }
3908 
3909 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3910 					 struct ieee80211_vif *vif)
3911 {
3912 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3913 	struct sk_buff *skb;
3914 	int ret;
3915 
3916 	skb = ieee80211_proberesp_get(wl->hw, vif);
3917 	if (!skb)
3918 		return -EOPNOTSUPP;
3919 
3920 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3921 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3922 				      skb->data,
3923 				      skb->len, 0,
3924 				      rates);
3925 	dev_kfree_skb(skb);
3926 
3927 	if (ret < 0)
3928 		goto out;
3929 
3930 	wl1271_debug(DEBUG_AP, "probe response updated");
3931 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3932 
3933 out:
3934 	return ret;
3935 }
3936 
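/*
 * Build an AP probe response template from beacon-derived data when
 * wlvif->ssid_len is zero: the SSID from bss_conf is spliced in place of
 * the SSID IE before the template is uploaded.
 */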
3937 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3938 					     struct ieee80211_vif *vif,
3939 					     u8 *probe_rsp_data,
3940 					     size_t probe_rsp_len,
3941 					     u32 rates)
3942 {
3943 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3944 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3945 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3946 	int ssid_ie_offset, ie_offset, templ_len;
3947 	const u8 *ptr;
3948 
3949 	/* no need to change probe response if the SSID is set correctly */
3950 	if (wlvif->ssid_len > 0)
3951 		return wl1271_cmd_template_set(wl, wlvif->role_id,
3952 					       CMD_TEMPL_AP_PROBE_RESPONSE,
3953 					       probe_rsp_data,
3954 					       probe_rsp_len, 0,
3955 					       rates);
3956 
3957 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3958 		wl1271_error("probe_rsp template too big");
3959 		return -EINVAL;
3960 	}
3961 
3962 	/* start searching from IE offset */
3963 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3964 
3965 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3966 			       probe_rsp_len - ie_offset);
3967 	if (!ptr) {
3968 		wl1271_error("No SSID in beacon!");
3969 		return -EINVAL;
3970 	}
3971 
3972 	ssid_ie_offset = ptr - probe_rsp_data;
3973 	ptr += (ptr[1] + 2);
3974 
3975 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3976 
3977 	/* insert SSID from bss_conf */
3978 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3979 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3980 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3981 	       bss_conf->ssid, bss_conf->ssid_len);
3982 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3983 
3984 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3985 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
3986 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3987 
3988 	return wl1271_cmd_template_set(wl, wlvif->role_id,
3989 				       CMD_TEMPL_AP_PROBE_RESPONSE,
3990 				       probe_rsp_templ,
3991 				       templ_len, 0,
3992 				       rates);
3993 }
3994 
3995 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3996 				       struct ieee80211_vif *vif,
3997 				       struct ieee80211_bss_conf *bss_conf,
3998 				       u32 changed)
3999 {
4000 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4001 	int ret = 0;
4002 
4003 	if (changed & BSS_CHANGED_ERP_SLOT) {
4004 		if (bss_conf->use_short_slot)
4005 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
4006 		else
4007 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
4008 		if (ret < 0) {
4009 			wl1271_warning("Set slot time failed %d", ret);
4010 			goto out;
4011 		}
4012 	}
4013 
4014 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
4015 		if (bss_conf->use_short_preamble)
4016 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
4017 		else
4018 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
4019 	}
4020 
4021 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
4022 		if (bss_conf->use_cts_prot)
4023 			ret = wl1271_acx_cts_protect(wl, wlvif,
4024 						     CTSPROTECT_ENABLE);
4025 		else
4026 			ret = wl1271_acx_cts_protect(wl, wlvif,
4027 						     CTSPROTECT_DISABLE);
4028 		if (ret < 0) {
4029 			wl1271_warning("Set ctsprotect failed %d", ret);
4030 			goto out;
4031 		}
4032 	}
4033 
4034 out:
4035 	return ret;
4036 }
4037 
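/*
 * Upload the current beacon as the beacon template and, unless userspace
 * has already set a probe response template explicitly, reuse the beacon
 * data as the probe response template after dropping the TIM and P2P IEs
 * and rewriting the frame control field.
 */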
4038 static int wlcore_set_beacon_template(struct wl1271 *wl,
4039 				      struct ieee80211_vif *vif,
4040 				      bool is_ap)
4041 {
4042 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4043 	struct ieee80211_hdr *hdr;
4044 	u32 min_rate;
4045 	int ret;
4046 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4047 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4048 	u16 tmpl_id;
4049 
4050 	if (!beacon) {
4051 		ret = -EINVAL;
4052 		goto out;
4053 	}
4054 
4055 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4056 
4057 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4058 	if (ret < 0) {
4059 		dev_kfree_skb(beacon);
4060 		goto out;
4061 	}
4062 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4063 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4064 		CMD_TEMPL_BEACON;
4065 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4066 				      beacon->data,
4067 				      beacon->len, 0,
4068 				      min_rate);
4069 	if (ret < 0) {
4070 		dev_kfree_skb(beacon);
4071 		goto out;
4072 	}
4073 
4074 	wlvif->wmm_enabled =
4075 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4076 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4077 					beacon->data + ieoffset,
4078 					beacon->len - ieoffset);
4079 
4080 	/*
4081 	 * In case a probe response template was already set explicitly
4082 	 * by userspace, don't derive one from the beacon data.
4083 	 */
4084 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4085 		goto end_bcn;
4086 
4087 	/* remove TIM ie from probe response */
4088 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4089 
4090 	/*
4091 	 * remove p2p ie from probe response.
4092 	 * the fw responds to probe requests that don't include
4093 	 * the p2p ie. probe requests with the p2p ie will be passed up
4094 	 * and answered by the supplicant (the spec
4095 	 * forbids including the p2p ie when responding to probe
4096 	 * requests that didn't include it).
4097 	 */
4098 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4099 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4100 
4101 	hdr = (struct ieee80211_hdr *) beacon->data;
4102 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4103 					 IEEE80211_STYPE_PROBE_RESP);
4104 	if (is_ap)
4105 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4106 							   beacon->data,
4107 							   beacon->len,
4108 							   min_rate);
4109 	else
4110 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4111 					      CMD_TEMPL_PROBE_RESPONSE,
4112 					      beacon->data,
4113 					      beacon->len, 0,
4114 					      min_rate);
4115 end_bcn:
4116 	dev_kfree_skb(beacon);
4117 	if (ret < 0)
4118 		goto out;
4119 
4120 out:
4121 	return ret;
4122 }
4123 
4124 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4125 					  struct ieee80211_vif *vif,
4126 					  struct ieee80211_bss_conf *bss_conf,
4127 					  u32 changed)
4128 {
4129 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4130 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4131 	int ret = 0;
4132 
4133 	if (changed & BSS_CHANGED_BEACON_INT) {
4134 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4135 			bss_conf->beacon_int);
4136 
4137 		wlvif->beacon_int = bss_conf->beacon_int;
4138 	}
4139 
4140 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4141 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4142 
4143 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4144 	}
4145 
4146 	if (changed & BSS_CHANGED_BEACON) {
4147 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4148 		if (ret < 0)
4149 			goto out;
4150 
4151 		if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
4152 				       &wlvif->flags)) {
4153 			ret = wlcore_hw_dfs_master_restart(wl, wlvif);
4154 			if (ret < 0)
4155 				goto out;
4156 		}
4157 	}
4158 out:
4159 	if (ret != 0)
4160 		wl1271_error("beacon info change failed: %d", ret);
4161 	return ret;
4162 }
4163 
4164 /* AP mode changes */
4165 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4166 				       struct ieee80211_vif *vif,
4167 				       struct ieee80211_bss_conf *bss_conf,
4168 				       u32 changed)
4169 {
4170 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4171 	int ret = 0;
4172 
4173 	if (changed & BSS_CHANGED_BASIC_RATES) {
4174 		u32 rates = bss_conf->basic_rates;
4175 
4176 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4177 								 wlvif->band);
4178 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4179 							wlvif->basic_rate_set);
4180 
4181 		ret = wl1271_init_ap_rates(wl, wlvif);
4182 		if (ret < 0) {
4183 			wl1271_error("AP rate policy change failed %d", ret);
4184 			goto out;
4185 		}
4186 
4187 		ret = wl1271_ap_init_templates(wl, vif);
4188 		if (ret < 0)
4189 			goto out;
4190 
4191 		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4192 		if (ret < 0)
4193 			goto out;
4194 
4195 		ret = wlcore_set_beacon_template(wl, vif, true);
4196 		if (ret < 0)
4197 			goto out;
4198 	}
4199 
4200 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4201 	if (ret < 0)
4202 		goto out;
4203 
4204 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4205 		if (bss_conf->enable_beacon) {
4206 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4207 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4208 				if (ret < 0)
4209 					goto out;
4210 
4211 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4212 				if (ret < 0)
4213 					goto out;
4214 
4215 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4216 				wl1271_debug(DEBUG_AP, "started AP");
4217 			}
4218 		} else {
4219 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4220 				/*
4221 				 * AP might be in ROC in case we have just
4222 				 * sent an auth reply; handle it.
4223 				 */
4224 				if (test_bit(wlvif->role_id, wl->roc_map))
4225 					wl12xx_croc(wl, wlvif->role_id);
4226 
4227 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4228 				if (ret < 0)
4229 					goto out;
4230 
4231 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4232 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4233 					  &wlvif->flags);
4234 				wl1271_debug(DEBUG_AP, "stopped AP");
4235 			}
4236 		}
4237 	}
4238 
4239 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4240 	if (ret < 0)
4241 		goto out;
4242 
4243 	/* Handle HT information change */
4244 	if ((changed & BSS_CHANGED_HT) &&
4245 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4246 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4247 					bss_conf->ht_operation_mode);
4248 		if (ret < 0) {
4249 			wl1271_warning("Set ht information failed %d", ret);
4250 			goto out;
4251 		}
4252 	}
4253 
4254 out:
4255 	return;
4256 }
4257 
4258 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4259 			    struct ieee80211_bss_conf *bss_conf,
4260 			    u32 sta_rate_set)
4261 {
4262 	u32 rates;
4263 	int ret;
4264 
4265 	wl1271_debug(DEBUG_MAC80211,
4266 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4267 	     bss_conf->bssid, bss_conf->aid,
4268 	     bss_conf->beacon_int,
4269 	     bss_conf->basic_rates, sta_rate_set);
4270 
4271 	wlvif->beacon_int = bss_conf->beacon_int;
4272 	rates = bss_conf->basic_rates;
4273 	wlvif->basic_rate_set =
4274 		wl1271_tx_enabled_rates_get(wl, rates,
4275 					    wlvif->band);
4276 	wlvif->basic_rate =
4277 		wl1271_tx_min_rate_get(wl,
4278 				       wlvif->basic_rate_set);
4279 
4280 	if (sta_rate_set)
4281 		wlvif->rate_set =
4282 			wl1271_tx_enabled_rates_get(wl,
4283 						sta_rate_set,
4284 						wlvif->band);
4285 
4286 	/* we only support sched_scan while not connected */
4287 	if (wl->sched_vif == wlvif)
4288 		wl->ops->sched_scan_stop(wl, wlvif);
4289 
4290 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4291 	if (ret < 0)
4292 		return ret;
4293 
4294 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4295 	if (ret < 0)
4296 		return ret;
4297 
4298 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4299 	if (ret < 0)
4300 		return ret;
4301 
4302 	wlcore_set_ssid(wl, wlvif);
4303 
4304 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4305 
4306 	return 0;
4307 }
4308 
4309 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4310 {
4311 	int ret;
4312 
4313 	/* revert back to minimum rates for the current band */
4314 	wl1271_set_band_rate(wl, wlvif);
4315 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4316 
4317 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4318 	if (ret < 0)
4319 		return ret;
4320 
4321 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4322 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4323 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4324 		if (ret < 0)
4325 			return ret;
4326 	}
4327 
4328 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4329 	return 0;
4330 }
4331 /* STA/IBSS mode changes */
4332 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4333 					struct ieee80211_vif *vif,
4334 					struct ieee80211_bss_conf *bss_conf,
4335 					u32 changed)
4336 {
4337 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4338 	bool do_join = false;
4339 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4340 	bool ibss_joined = false;
4341 	u32 sta_rate_set = 0;
4342 	int ret;
4343 	struct ieee80211_sta *sta;
4344 	bool sta_exists = false;
4345 	struct ieee80211_sta_ht_cap sta_ht_cap;
4346 
4347 	if (is_ibss) {
4348 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4349 						     changed);
4350 		if (ret < 0)
4351 			goto out;
4352 	}
4353 
4354 	if (changed & BSS_CHANGED_IBSS) {
4355 		if (bss_conf->ibss_joined) {
4356 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4357 			ibss_joined = true;
4358 		} else {
4359 			wlcore_unset_assoc(wl, wlvif);
4360 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4361 		}
4362 	}
4363 
4364 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4365 		do_join = true;
4366 
4367 	/* Need to update the SSID (for filtering etc) */
4368 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4369 		do_join = true;
4370 
4371 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4372 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4373 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4374 
4375 		do_join = true;
4376 	}
4377 
4378 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4379 		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4380 
4381 	if (changed & BSS_CHANGED_CQM) {
4382 		bool enable = false;
4383 		if (bss_conf->cqm_rssi_thold)
4384 			enable = true;
4385 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4386 						  bss_conf->cqm_rssi_thold,
4387 						  bss_conf->cqm_rssi_hyst);
4388 		if (ret < 0)
4389 			goto out;
4390 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4391 	}
4392 
4393 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4394 		       BSS_CHANGED_ASSOC)) {
4395 		rcu_read_lock();
4396 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4397 		if (sta) {
4398 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4399 
4400 			/* save the supp_rates of the ap */
4401 			sta_rate_set = sta->supp_rates[wlvif->band];
4402 			if (sta->ht_cap.ht_supported)
4403 				sta_rate_set |=
4404 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4405 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4406 			sta_ht_cap = sta->ht_cap;
4407 			sta_exists = true;
4408 		}
4409 
4410 		rcu_read_unlock();
4411 	}
4412 
4413 	if (changed & BSS_CHANGED_BSSID) {
4414 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4415 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4416 					       sta_rate_set);
4417 			if (ret < 0)
4418 				goto out;
4419 
4420 			/* Need to update the BSSID (for filtering etc) */
4421 			do_join = true;
4422 		} else {
4423 			ret = wlcore_clear_bssid(wl, wlvif);
4424 			if (ret < 0)
4425 				goto out;
4426 		}
4427 	}
4428 
4429 	if (changed & BSS_CHANGED_IBSS) {
4430 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4431 			     bss_conf->ibss_joined);
4432 
4433 		if (bss_conf->ibss_joined) {
4434 			u32 rates = bss_conf->basic_rates;
4435 			wlvif->basic_rate_set =
4436 				wl1271_tx_enabled_rates_get(wl, rates,
4437 							    wlvif->band);
4438 			wlvif->basic_rate =
4439 				wl1271_tx_min_rate_get(wl,
4440 						       wlvif->basic_rate_set);
4441 
4442 			/* by default, use 11b + OFDM rates */
4443 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4444 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4445 			if (ret < 0)
4446 				goto out;
4447 		}
4448 	}
4449 
4450 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4451 		/* enable beacon filtering */
4452 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4453 		if (ret < 0)
4454 			goto out;
4455 	}
4456 
4457 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4458 	if (ret < 0)
4459 		goto out;
4460 
4461 	if (do_join) {
4462 		ret = wlcore_join(wl, wlvif);
4463 		if (ret < 0) {
4464 			wl1271_warning("cmd join failed %d", ret);
4465 			goto out;
4466 		}
4467 	}
4468 
4469 	if (changed & BSS_CHANGED_ASSOC) {
4470 		if (bss_conf->assoc) {
4471 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4472 					       sta_rate_set);
4473 			if (ret < 0)
4474 				goto out;
4475 
4476 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4477 				wl12xx_set_authorized(wl, wlvif);
4478 		} else {
4479 			wlcore_unset_assoc(wl, wlvif);
4480 		}
4481 	}
4482 
4483 	if (changed & BSS_CHANGED_PS) {
4484 		if ((bss_conf->ps) &&
4485 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4486 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4487 			int ps_mode;
4488 			char *ps_mode_str;
4489 
4490 			if (wl->conf.conn.forced_ps) {
4491 				ps_mode = STATION_POWER_SAVE_MODE;
4492 				ps_mode_str = "forced";
4493 			} else {
4494 				ps_mode = STATION_AUTO_PS_MODE;
4495 				ps_mode_str = "auto";
4496 			}
4497 
4498 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4499 
4500 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4501 			if (ret < 0)
4502 				wl1271_warning("enter %s ps failed %d",
4503 					       ps_mode_str, ret);
4504 		} else if (!bss_conf->ps &&
4505 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4506 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4507 
4508 			ret = wl1271_ps_set_mode(wl, wlvif,
4509 						 STATION_ACTIVE_MODE);
4510 			if (ret < 0)
4511 				wl1271_warning("exit auto ps failed %d", ret);
4512 		}
4513 	}
4514 
4515 	/* Handle new association with HT. Do this after join. */
4516 	if (sta_exists) {
4517 		bool enabled =
4518 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4519 
4520 		ret = wlcore_hw_set_peer_cap(wl,
4521 					     &sta_ht_cap,
4522 					     enabled,
4523 					     wlvif->rate_set,
4524 					     wlvif->sta.hlid);
4525 		if (ret < 0) {
4526 			wl1271_warning("Set ht cap failed %d", ret);
4527 			goto out;
4528 
4529 		}
4530 
4531 		if (enabled) {
4532 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4533 						bss_conf->ht_operation_mode);
4534 			if (ret < 0) {
4535 				wl1271_warning("Set ht information failed %d",
4536 					       ret);
4537 				goto out;
4538 			}
4539 		}
4540 	}
4541 
4542 	/* Handle arp filtering. Done after join. */
4543 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4544 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4545 		__be32 addr = bss_conf->arp_addr_list[0];
4546 		wlvif->sta.qos = bss_conf->qos;
4547 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4548 
4549 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4550 			wlvif->ip_addr = addr;
4551 			/*
4552 			 * The template should have been configured only upon
4553 			 * association. However, the correct ip doesn't seem
4554 			 * to be set when sending, so we have to reconfigure
4555 			 * the template upon every ip change.
4556 			 */
4557 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4558 			if (ret < 0) {
4559 				wl1271_warning("build arp rsp failed: %d", ret);
4560 				goto out;
4561 			}
4562 
4563 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4564 				(ACX_ARP_FILTER_ARP_FILTERING |
4565 				 ACX_ARP_FILTER_AUTO_ARP),
4566 				addr);
4567 		} else {
4568 			wlvif->ip_addr = 0;
4569 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4570 		}
4571 
4572 		if (ret < 0)
4573 			goto out;
4574 	}
4575 
4576 out:
4577 	return;
4578 }
4579 
4580 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4581 				       struct ieee80211_vif *vif,
4582 				       struct ieee80211_bss_conf *bss_conf,
4583 				       u32 changed)
4584 {
4585 	struct wl1271 *wl = hw->priv;
4586 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4587 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4588 	int ret;
4589 
4590 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4591 		     wlvif->role_id, (int)changed);
4592 
4593 	/*
4594 	 * make sure to cancel pending disconnections if our association
4595 	 * state changed
4596 	 */
4597 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4598 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4599 
4600 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4601 	    !bss_conf->enable_beacon)
4602 		wl1271_tx_flush(wl);
4603 
4604 	mutex_lock(&wl->mutex);
4605 
4606 	if (unlikely(wl->state != WLCORE_STATE_ON))
4607 		goto out;
4608 
4609 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4610 		goto out;
4611 
4612 	ret = wl1271_ps_elp_wakeup(wl);
4613 	if (ret < 0)
4614 		goto out;
4615 
4616 	if ((changed & BSS_CHANGED_TXPOWER) &&
4617 	    bss_conf->txpower != wlvif->power_level) {
4618 
4619 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4620 		if (ret < 0)
4621 			goto out;
4622 
4623 		wlvif->power_level = bss_conf->txpower;
4624 	}
4625 
4626 	if (is_ap)
4627 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4628 	else
4629 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4630 
4631 	wl1271_ps_elp_sleep(wl);
4632 
4633 out:
4634 	mutex_unlock(&wl->mutex);
4635 }
4636 
4637 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4638 				 struct ieee80211_chanctx_conf *ctx)
4639 {
4640 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4641 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4642 		     cfg80211_get_chandef_type(&ctx->def));
4643 	return 0;
4644 }
4645 
4646 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4647 				     struct ieee80211_chanctx_conf *ctx)
4648 {
4649 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4650 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4651 		     cfg80211_get_chandef_type(&ctx->def));
4652 }
4653 
4654 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4655 				     struct ieee80211_chanctx_conf *ctx,
4656 				     u32 changed)
4657 {
4658 	struct wl1271 *wl = hw->priv;
4659 	struct wl12xx_vif *wlvif;
4660 	int ret;
4661 	int channel = ieee80211_frequency_to_channel(
4662 		ctx->def.chan->center_freq);
4663 
4664 	wl1271_debug(DEBUG_MAC80211,
4665 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4666 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4667 
4668 	mutex_lock(&wl->mutex);
4669 
4670 	ret = wl1271_ps_elp_wakeup(wl);
4671 	if (ret < 0)
4672 		goto out;
4673 
4674 	wl12xx_for_each_wlvif(wl, wlvif) {
4675 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4676 
4677 		rcu_read_lock();
4678 		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4679 			rcu_read_unlock();
4680 			continue;
4681 		}
4682 		rcu_read_unlock();
4683 
4684 		/* start radar if needed */
4685 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4686 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4687 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4688 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4689 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4690 			wlcore_hw_set_cac(wl, wlvif, true);
4691 			wlvif->radar_enabled = true;
4692 		}
4693 	}
4694 
4695 	wl1271_ps_elp_sleep(wl);
4696 out:
4697 	mutex_unlock(&wl->mutex);
4698 }
4699 
4700 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4701 					struct ieee80211_vif *vif,
4702 					struct ieee80211_chanctx_conf *ctx)
4703 {
4704 	struct wl1271 *wl = hw->priv;
4705 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4706 	int channel = ieee80211_frequency_to_channel(
4707 		ctx->def.chan->center_freq);
4708 	int ret = -EINVAL;
4709 
4710 	wl1271_debug(DEBUG_MAC80211,
4711 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4712 		     wlvif->role_id, channel,
4713 		     cfg80211_get_chandef_type(&ctx->def),
4714 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4715 
4716 	mutex_lock(&wl->mutex);
4717 
4718 	if (unlikely(wl->state != WLCORE_STATE_ON))
4719 		goto out;
4720 
4721 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4722 		goto out;
4723 
4724 	ret = wl1271_ps_elp_wakeup(wl);
4725 	if (ret < 0)
4726 		goto out;
4727 
4728 	wlvif->band = ctx->def.chan->band;
4729 	wlvif->channel = channel;
4730 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4731 
4732 	/* update default rates according to the band */
4733 	wl1271_set_band_rate(wl, wlvif);
4734 
4735 	if (ctx->radar_enabled &&
4736 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4737 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4738 		wlcore_hw_set_cac(wl, wlvif, true);
4739 		wlvif->radar_enabled = true;
4740 	}
4741 
4742 	wl1271_ps_elp_sleep(wl);
4743 out:
4744 	mutex_unlock(&wl->mutex);
4745 
4746 	return 0;
4747 }
4748 
4749 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4750 					   struct ieee80211_vif *vif,
4751 					   struct ieee80211_chanctx_conf *ctx)
4752 {
4753 	struct wl1271 *wl = hw->priv;
4754 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4755 	int ret;
4756 
4757 	wl1271_debug(DEBUG_MAC80211,
4758 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4759 		     wlvif->role_id,
4760 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4761 		     cfg80211_get_chandef_type(&ctx->def));
4762 
4763 	wl1271_tx_flush(wl);
4764 
4765 	mutex_lock(&wl->mutex);
4766 
4767 	if (unlikely(wl->state != WLCORE_STATE_ON))
4768 		goto out;
4769 
4770 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4771 		goto out;
4772 
4773 	ret = wl1271_ps_elp_wakeup(wl);
4774 	if (ret < 0)
4775 		goto out;
4776 
4777 	if (wlvif->radar_enabled) {
4778 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4779 		wlcore_hw_set_cac(wl, wlvif, false);
4780 		wlvif->radar_enabled = false;
4781 	}
4782 
4783 	wl1271_ps_elp_sleep(wl);
4784 out:
4785 	mutex_unlock(&wl->mutex);
4786 }
4787 
4788 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4789 				    struct wl12xx_vif *wlvif,
4790 				    struct ieee80211_chanctx_conf *new_ctx)
4791 {
4792 	int channel = ieee80211_frequency_to_channel(
4793 		new_ctx->def.chan->center_freq);
4794 
4795 	wl1271_debug(DEBUG_MAC80211,
4796 		     "switch vif (role %d) %d -> %d chan_type: %d",
4797 		     wlvif->role_id, wlvif->channel, channel,
4798 		     cfg80211_get_chandef_type(&new_ctx->def));
4799 
4800 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4801 		return 0;
4802 
4803 	WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
4804 
4805 	if (wlvif->radar_enabled) {
4806 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4807 		wlcore_hw_set_cac(wl, wlvif, false);
4808 		wlvif->radar_enabled = false;
4809 	}
4810 
4811 	wlvif->band = new_ctx->def.chan->band;
4812 	wlvif->channel = channel;
4813 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4814 
4815 	/* start radar if needed */
4816 	if (new_ctx->radar_enabled) {
4817 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4818 		wlcore_hw_set_cac(wl, wlvif, true);
4819 		wlvif->radar_enabled = true;
4820 	}
4821 
4822 	return 0;
4823 }
4824 
4825 static int
4826 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4827 			     struct ieee80211_vif_chanctx_switch *vifs,
4828 			     int n_vifs,
4829 			     enum ieee80211_chanctx_switch_mode mode)
4830 {
4831 	struct wl1271 *wl = hw->priv;
4832 	int i, ret;
4833 
4834 	wl1271_debug(DEBUG_MAC80211,
4835 		     "mac80211 switch chanctx n_vifs %d mode %d",
4836 		     n_vifs, mode);
4837 
4838 	mutex_lock(&wl->mutex);
4839 
4840 	ret = wl1271_ps_elp_wakeup(wl);
4841 	if (ret < 0)
4842 		goto out;
4843 
4844 	for (i = 0; i < n_vifs; i++) {
4845 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4846 
4847 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4848 		if (ret)
4849 			goto out_sleep;
4850 	}
4851 out_sleep:
4852 	wl1271_ps_elp_sleep(wl);
4853 out:
4854 	mutex_unlock(&wl->mutex);
4855 
4856 	return 0;
4857 }
4858 
4859 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4860 			     struct ieee80211_vif *vif, u16 queue,
4861 			     const struct ieee80211_tx_queue_params *params)
4862 {
4863 	struct wl1271 *wl = hw->priv;
4864 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4865 	u8 ps_scheme;
4866 	int ret = 0;
4867 
4868 	if (wlcore_is_p2p_mgmt(wlvif))
4869 		return 0;
4870 
4871 	mutex_lock(&wl->mutex);
4872 
4873 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4874 
4875 	if (params->uapsd)
4876 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4877 	else
4878 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4879 
4880 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4881 		goto out;
4882 
4883 	ret = wl1271_ps_elp_wakeup(wl);
4884 	if (ret < 0)
4885 		goto out;
4886 
4887 	/*
4888 	 * mac80211 configures the txop in units of 32us,
4889 	 * but the firmware expects microseconds
4890 	 */
4891 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4892 				params->cw_min, params->cw_max,
4893 				params->aifs, params->txop << 5);
4894 	if (ret < 0)
4895 		goto out_sleep;
4896 
4897 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4898 				 CONF_CHANNEL_TYPE_EDCF,
4899 				 wl1271_tx_get_queue(queue),
4900 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4901 				 0, 0);
4902 
4903 out_sleep:
4904 	wl1271_ps_elp_sleep(wl);
4905 
4906 out:
4907 	mutex_unlock(&wl->mutex);
4908 
4909 	return ret;
4910 }
4911 
4912 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4913 			     struct ieee80211_vif *vif)
4914 {
4915 
4916 	struct wl1271 *wl = hw->priv;
4917 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4918 	u64 mactime = ULLONG_MAX;
4919 	int ret;
4920 
4921 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4922 
4923 	mutex_lock(&wl->mutex);
4924 
4925 	if (unlikely(wl->state != WLCORE_STATE_ON))
4926 		goto out;
4927 
4928 	ret = wl1271_ps_elp_wakeup(wl);
4929 	if (ret < 0)
4930 		goto out;
4931 
4932 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4933 	if (ret < 0)
4934 		goto out_sleep;
4935 
4936 out_sleep:
4937 	wl1271_ps_elp_sleep(wl);
4938 
4939 out:
4940 	mutex_unlock(&wl->mutex);
4941 	return mactime;
4942 }
4943 
4944 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4945 				struct survey_info *survey)
4946 {
4947 	struct ieee80211_conf *conf = &hw->conf;
4948 
4949 	if (idx != 0)
4950 		return -ENOENT;
4951 
4952 	survey->channel = conf->chandef.chan;
4953 	survey->filled = 0;
4954 	return 0;
4955 }
4956 
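/*
 * Allocate an HLID (host link id) for a newly added AP-mode station and
 * restore the link's freed-packet counter saved across recovery/resume.
 */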
4957 static int wl1271_allocate_sta(struct wl1271 *wl,
4958 			     struct wl12xx_vif *wlvif,
4959 			     struct ieee80211_sta *sta)
4960 {
4961 	struct wl1271_station *wl_sta;
4962 	int ret;
4963 
4964 
4965 	if (wl->active_sta_count >= wl->max_ap_stations) {
4966 		wl1271_warning("could not allocate HLID - too many stations");
4967 		return -EBUSY;
4968 	}
4969 
4970 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4971 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4972 	if (ret < 0) {
4973 		wl1271_warning("could not allocate HLID - too many links");
4974 		return -EBUSY;
4975 	}
4976 
4977 	/* use the previous security seq, if this is a recovery/resume */
4978 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4979 
4980 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4981 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4982 	wl->active_sta_count++;
4983 	return 0;
4984 }
4985 
4986 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4987 {
4988 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4989 		return;
4990 
4991 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
4992 	__clear_bit(hlid, &wl->ap_ps_map);
4993 	__clear_bit(hlid, &wl->ap_fw_ps_map);
4994 
4995 	/*
4996 	 * save the last used PN in the private part of ieee80211_sta,
4997 	 * in case of recovery/suspend
4998 	 */
4999 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
5000 
5001 	wl12xx_free_link(wl, wlvif, &hlid);
5002 	wl->active_sta_count--;
5003 
5004 	/*
5005 	 * rearm the tx watchdog when the last STA is freed - give the FW a
5006 	 * chance to return STA-buffered packets before complaining.
5007 	 */
5008 	if (wl->active_sta_count == 0)
5009 		wl12xx_rearm_tx_watchdog_locked(wl);
5010 }
5011 
5012 static int wl12xx_sta_add(struct wl1271 *wl,
5013 			  struct wl12xx_vif *wlvif,
5014 			  struct ieee80211_sta *sta)
5015 {
5016 	struct wl1271_station *wl_sta;
5017 	int ret = 0;
5018 	u8 hlid;
5019 
5020 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
5021 
5022 	ret = wl1271_allocate_sta(wl, wlvif, sta);
5023 	if (ret < 0)
5024 		return ret;
5025 
5026 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5027 	hlid = wl_sta->hlid;
5028 
5029 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
5030 	if (ret < 0)
5031 		wl1271_free_sta(wl, wlvif, hlid);
5032 
5033 	return ret;
5034 }
5035 
5036 static int wl12xx_sta_remove(struct wl1271 *wl,
5037 			     struct wl12xx_vif *wlvif,
5038 			     struct ieee80211_sta *sta)
5039 {
5040 	struct wl1271_station *wl_sta;
5041 	int ret = 0, id;
5042 
5043 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
5044 
5045 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5046 	id = wl_sta->hlid;
5047 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5048 		return -EINVAL;
5049 
5050 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5051 	if (ret < 0)
5052 		return ret;
5053 
5054 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5055 	return ret;
5056 }
5057 
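/*
 * Start a remain-on-channel period on this role, but only if no role is
 * currently in ROC and the role id is valid.
 */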
5058 static void wlcore_roc_if_possible(struct wl1271 *wl,
5059 				   struct wl12xx_vif *wlvif)
5060 {
5061 	if (find_first_bit(wl->roc_map,
5062 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5063 		return;
5064 
5065 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5066 		return;
5067 
5068 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5069 }
5070 
5071 /*
5072  * when wl_sta is NULL, we treat this call as if coming from a
5073  * pending auth reply.
5074  * wl->mutex must be taken and the FW must be awake when the call
5075  * takes place.
5076  */
5077 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5078 			      struct wl1271_station *wl_sta, bool in_conn)
5079 {
5080 	if (in_conn) {
5081 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5082 			return;
5083 
5084 		if (!wlvif->ap_pending_auth_reply &&
5085 		    !wlvif->inconn_count)
5086 			wlcore_roc_if_possible(wl, wlvif);
5087 
5088 		if (wl_sta) {
5089 			wl_sta->in_connection = true;
5090 			wlvif->inconn_count++;
5091 		} else {
5092 			wlvif->ap_pending_auth_reply = true;
5093 		}
5094 	} else {
5095 		if (wl_sta && !wl_sta->in_connection)
5096 			return;
5097 
5098 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5099 			return;
5100 
5101 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5102 			return;
5103 
5104 		if (wl_sta) {
5105 			wl_sta->in_connection = false;
5106 			wlvif->inconn_count--;
5107 		} else {
5108 			wlvif->ap_pending_auth_reply = false;
5109 		}
5110 
5111 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5112 		    test_bit(wlvif->role_id, wl->roc_map))
5113 			wl12xx_croc(wl, wlvif->role_id);
5114 	}
5115 }
5116 
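/*
 * Map mac80211 station state transitions onto FW link management: add or
 * remove AP-mode peers, send peer-state/authorization commands, save and
 * restore security sequence numbers, and start or stop ROC as needed.
 */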
5117 static int wl12xx_update_sta_state(struct wl1271 *wl,
5118 				   struct wl12xx_vif *wlvif,
5119 				   struct ieee80211_sta *sta,
5120 				   enum ieee80211_sta_state old_state,
5121 				   enum ieee80211_sta_state new_state)
5122 {
5123 	struct wl1271_station *wl_sta;
5124 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5125 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5126 	int ret;
5127 
5128 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5129 
5130 	/* Add station (AP mode) */
5131 	if (is_ap &&
5132 	    old_state == IEEE80211_STA_NOTEXIST &&
5133 	    new_state == IEEE80211_STA_NONE) {
5134 		ret = wl12xx_sta_add(wl, wlvif, sta);
5135 		if (ret)
5136 			return ret;
5137 
5138 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5139 	}
5140 
5141 	/* Remove station (AP mode) */
5142 	if (is_ap &&
5143 	    old_state == IEEE80211_STA_NONE &&
5144 	    new_state == IEEE80211_STA_NOTEXIST) {
5145 		/* must not fail */
5146 		wl12xx_sta_remove(wl, wlvif, sta);
5147 
5148 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5149 	}
5150 
5151 	/* Authorize station (AP mode) */
5152 	if (is_ap &&
5153 	    new_state == IEEE80211_STA_AUTHORIZED) {
5154 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5155 		if (ret < 0)
5156 			return ret;
5157 
5158 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5159 						     wl_sta->hlid);
5160 		if (ret)
5161 			return ret;
5162 
5163 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5164 	}
5165 
5166 	/* Authorize station */
5167 	if (is_sta &&
5168 	    new_state == IEEE80211_STA_AUTHORIZED) {
5169 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5170 		ret = wl12xx_set_authorized(wl, wlvif);
5171 		if (ret)
5172 			return ret;
5173 	}
5174 
5175 	if (is_sta &&
5176 	    old_state == IEEE80211_STA_AUTHORIZED &&
5177 	    new_state == IEEE80211_STA_ASSOC) {
5178 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5179 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5180 	}
5181 
5182 	/* save seq number on disassoc (suspend) */
5183 	if (is_sta &&
5184 	    old_state == IEEE80211_STA_ASSOC &&
5185 	    new_state == IEEE80211_STA_AUTH) {
5186 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5187 		wlvif->total_freed_pkts = 0;
5188 	}
5189 
5190 	/* restore seq number on assoc (resume) */
5191 	if (is_sta &&
5192 	    old_state == IEEE80211_STA_AUTH &&
5193 	    new_state == IEEE80211_STA_ASSOC) {
5194 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5195 	}
5196 
5197 	/* clear ROCs on failure or authorization */
5198 	if (is_sta &&
5199 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5200 	     new_state == IEEE80211_STA_NOTEXIST)) {
5201 		if (test_bit(wlvif->role_id, wl->roc_map))
5202 			wl12xx_croc(wl, wlvif->role_id);
5203 	}
5204 
5205 	if (is_sta &&
5206 	    old_state == IEEE80211_STA_NOTEXIST &&
5207 	    new_state == IEEE80211_STA_NONE) {
5208 		if (find_first_bit(wl->roc_map,
5209 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5210 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5211 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5212 				   wlvif->band, wlvif->channel);
5213 		}
5214 	}
5215 	return 0;
5216 }
5217 
5218 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5219 			       struct ieee80211_vif *vif,
5220 			       struct ieee80211_sta *sta,
5221 			       enum ieee80211_sta_state old_state,
5222 			       enum ieee80211_sta_state new_state)
5223 {
5224 	struct wl1271 *wl = hw->priv;
5225 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5226 	int ret;
5227 
5228 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5229 		     sta->aid, old_state, new_state);
5230 
5231 	mutex_lock(&wl->mutex);
5232 
5233 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5234 		ret = -EBUSY;
5235 		goto out;
5236 	}
5237 
5238 	ret = wl1271_ps_elp_wakeup(wl);
5239 	if (ret < 0)
5240 		goto out;
5241 
5242 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5243 
5244 	wl1271_ps_elp_sleep(wl);
5245 out:
5246 	mutex_unlock(&wl->mutex);
5247 	if (new_state < old_state)
5248 		return 0;
5249 	return ret;
5250 }
5251 
5252 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5253 				  struct ieee80211_vif *vif,
5254 				  struct ieee80211_ampdu_params *params)
5255 {
5256 	struct wl1271 *wl = hw->priv;
5257 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5258 	int ret;
5259 	u8 hlid, *ba_bitmap;
5260 	struct ieee80211_sta *sta = params->sta;
5261 	enum ieee80211_ampdu_mlme_action action = params->action;
5262 	u16 tid = params->tid;
5263 	u16 *ssn = &params->ssn;
5264 
5265 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5266 		     tid);
5267 
5268 	/* sanity check - the fields in FW are only 8 bits wide */
5269 	if (WARN_ON(tid > 0xFF))
5270 		return -ENOTSUPP;
5271 
5272 	mutex_lock(&wl->mutex);
5273 
5274 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5275 		ret = -EAGAIN;
5276 		goto out;
5277 	}
5278 
5279 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5280 		hlid = wlvif->sta.hlid;
5281 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5282 		struct wl1271_station *wl_sta;
5283 
5284 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5285 		hlid = wl_sta->hlid;
5286 	} else {
5287 		ret = -EINVAL;
5288 		goto out;
5289 	}
5290 
5291 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5292 
5293 	ret = wl1271_ps_elp_wakeup(wl);
5294 	if (ret < 0)
5295 		goto out;
5296 
5297 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5298 		     tid, action);
5299 
5300 	switch (action) {
5301 	case IEEE80211_AMPDU_RX_START:
5302 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5303 			ret = -ENOTSUPP;
5304 			break;
5305 		}
5306 
5307 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5308 			ret = -EBUSY;
5309 			wl1271_error("exceeded max RX BA sessions");
5310 			break;
5311 		}
5312 
5313 		if (*ba_bitmap & BIT(tid)) {
5314 			ret = -EINVAL;
5315 			wl1271_error("cannot enable RX BA session on active "
5316 				     "tid: %d", tid);
5317 			break;
5318 		}
5319 
5320 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5321 				hlid,
5322 				params->buf_size);
5323 
5324 		if (!ret) {
5325 			*ba_bitmap |= BIT(tid);
5326 			wl->ba_rx_session_count++;
5327 		}
5328 		break;
5329 
5330 	case IEEE80211_AMPDU_RX_STOP:
5331 		if (!(*ba_bitmap & BIT(tid))) {
5332 			/*
5333 			 * this happens on reconfig - so only output a debug
5334 			 * message for now, and don't fail the function.
5335 			 */
5336 			wl1271_debug(DEBUG_MAC80211,
5337 				     "no active RX BA session on tid: %d",
5338 				     tid);
5339 			ret = 0;
5340 			break;
5341 		}
5342 
5343 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5344 							 hlid, 0);
5345 		if (!ret) {
5346 			*ba_bitmap &= ~BIT(tid);
5347 			wl->ba_rx_session_count--;
5348 		}
5349 		break;
5350 
5351 	/*
5352 	 * The BA initiator session is managed by the FW independently,
5353 	 * so all TX AMPDU actions below are rejected here on purpose.
5354 	 */
5355 	case IEEE80211_AMPDU_TX_START:
5356 	case IEEE80211_AMPDU_TX_STOP_CONT:
5357 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5358 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5359 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5360 		ret = -EINVAL;
5361 		break;
5362 
5363 	default:
5364 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5365 		ret = -EINVAL;
5366 	}
5367 
5368 	wl1271_ps_elp_sleep(wl);
5369 
5370 out:
5371 	mutex_unlock(&wl->mutex);
5372 
5373 	return ret;
5374 }
5375 
5376 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5377 				   struct ieee80211_vif *vif,
5378 				   const struct cfg80211_bitrate_mask *mask)
5379 {
5380 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5381 	struct wl1271 *wl = hw->priv;
5382 	int i, ret = 0;
5383 
5384 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5385 		mask->control[NL80211_BAND_2GHZ].legacy,
5386 		mask->control[NL80211_BAND_5GHZ].legacy);
5387 
5388 	mutex_lock(&wl->mutex);
5389 
5390 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5391 		wlvif->bitrate_masks[i] =
5392 			wl1271_tx_enabled_rates_get(wl,
5393 						    mask->control[i].legacy,
5394 						    i);
5395 
5396 	if (unlikely(wl->state != WLCORE_STATE_ON))
5397 		goto out;
5398 
5399 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5400 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5401 
5402 		ret = wl1271_ps_elp_wakeup(wl);
5403 		if (ret < 0)
5404 			goto out;
5405 
5406 		wl1271_set_band_rate(wl, wlvif);
5407 		wlvif->basic_rate =
5408 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5409 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5410 
5411 		wl1271_ps_elp_sleep(wl);
5412 	}
5413 out:
5414 	mutex_unlock(&wl->mutex);
5415 
5416 	return ret;
5417 }
5418 
5419 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5420 				     struct ieee80211_vif *vif,
5421 				     struct ieee80211_channel_switch *ch_switch)
5422 {
5423 	struct wl1271 *wl = hw->priv;
5424 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5425 	int ret;
5426 
5427 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5428 
5429 	wl1271_tx_flush(wl);
5430 
5431 	mutex_lock(&wl->mutex);
5432 
5433 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5434 		if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5435 			ieee80211_chswitch_done(vif, false);
5436 		goto out;
5437 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5438 		goto out;
5439 	}
5440 
5441 	ret = wl1271_ps_elp_wakeup(wl);
5442 	if (ret < 0)
5443 		goto out;
5444 
5445 	/* TODO: change mac80211 to pass vif as param */
5446 
5447 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5448 		unsigned long delay_usec;
5449 
5450 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5451 		if (ret)
5452 			goto out_sleep;
5453 
5454 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5455 
5456 		/* indicate failure 5 seconds after channel switch time */
5457 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5458 			ch_switch->count;
5459 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5460 					     usecs_to_jiffies(delay_usec) +
5461 					     msecs_to_jiffies(5000));
5462 	}
5463 
5464 out_sleep:
5465 	wl1271_ps_elp_sleep(wl);
5466 
5467 out:
5468 	mutex_unlock(&wl->mutex);
5469 }
5470 
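/*
 * Look up an information element in the vif's beacon template.
 * Returns a pointer into the beacon skb data, or NULL if no beacon
 * could be fetched or the IE is not present.
 */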
5471 static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
5472 					struct wl12xx_vif *wlvif,
5473 					u8 eid)
5474 {
5475 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
5476 	struct sk_buff *beacon =
5477 		ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif));
5478 
5479 	if (!beacon)
5480 		return NULL;
5481 
5482 	return cfg80211_find_ie(eid,
5483 				beacon->data + ieoffset,
5484 				beacon->len - ieoffset);
5485 }
5486 
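/*
 * Extract the current CSA count from the Channel Switch Announcement
 * IE in our own beacon template (skipping the 2-byte IE header); used
 * by the channel switch beacon path to fill in ch_switch.count.
 */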
5487 static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5488 				u8 *csa_count)
5489 {
5490 	const u8 *ie;
5491 	const struct ieee80211_channel_sw_ie *ie_csa;
5492 
5493 	ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
5494 	if (!ie)
5495 		return -EINVAL;
5496 
5497 	ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
5498 	*csa_count = ie_csa->count;
5499 
5500 	return 0;
5501 }
5502 
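/*
 * mac80211 channel_switch_beacon callback (AP side): read the CSA
 * count from the beacon template and kick off the HW-specific channel
 * switch with TX blocked.
 */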
5503 static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
5504 					    struct ieee80211_vif *vif,
5505 					    struct cfg80211_chan_def *chandef)
5506 {
5507 	struct wl1271 *wl = hw->priv;
5508 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5509 	struct ieee80211_channel_switch ch_switch = {
5510 		.block_tx = true,
5511 		.chandef = *chandef,
5512 	};
5513 	int ret;
5514 
5515 	wl1271_debug(DEBUG_MAC80211,
5516 		     "mac80211 channel switch beacon (role %d)",
5517 		     wlvif->role_id);
5518 
5519 	ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
5520 	if (ret < 0) {
5521 		wl1271_error("error getting beacon (for CSA counter)");
5522 		return;
5523 	}
5524 
5525 	mutex_lock(&wl->mutex);
5526 
5527 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5528 		ret = -EBUSY;
5529 		goto out;
5530 	}
5531 
5532 	ret = wl1271_ps_elp_wakeup(wl);
5533 	if (ret < 0)
5534 		goto out;
5535 
5536 	ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
5537 	if (ret)
5538 		goto out_sleep;
5539 
5540 	set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5541 
5542 out_sleep:
5543 	wl1271_ps_elp_sleep(wl);
5544 out:
5545 	mutex_unlock(&wl->mutex);
5546 }
5547 
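/*
 * mac80211 flush callback: the queues/drop arguments are ignored and
 * the whole driver/FW TX path is flushed.
 */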
5548 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5549 			    u32 queues, bool drop)
5550 {
5551 	struct wl1271 *wl = hw->priv;
5552 
5553 	wl1271_tx_flush(wl);
5554 }
5555 
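/*
 * mac80211 remain-on-channel: refuse if a ROC is already active on any
 * role, otherwise start the device role on the requested channel and
 * arm roc_complete_work for the requested duration.
 */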
5556 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5557 				       struct ieee80211_vif *vif,
5558 				       struct ieee80211_channel *chan,
5559 				       int duration,
5560 				       enum ieee80211_roc_type type)
5561 {
5562 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5563 	struct wl1271 *wl = hw->priv;
5564 	int channel, ret = 0;
5565 
5566 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5567 
5568 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5569 		     channel, wlvif->role_id);
5570 
5571 	mutex_lock(&wl->mutex);
5572 
5573 	if (unlikely(wl->state != WLCORE_STATE_ON))
5574 		goto out;
5575 
5576 	/* return EBUSY if we can't ROC right now */
5577 	if (WARN_ON(wl->roc_vif ||
5578 		    find_first_bit(wl->roc_map,
5579 				   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5580 		ret = -EBUSY;
5581 		goto out;
5582 	}
5583 
5584 	ret = wl1271_ps_elp_wakeup(wl);
5585 	if (ret < 0)
5586 		goto out;
5587 
5588 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5589 	if (ret < 0)
5590 		goto out_sleep;
5591 
5592 	wl->roc_vif = vif;
5593 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5594 				     msecs_to_jiffies(duration));
5595 out_sleep:
5596 	wl1271_ps_elp_sleep(wl);
5597 out:
5598 	mutex_unlock(&wl->mutex);
5599 	return ret;
5600 }
5601 
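/*
 * Complete an active remain-on-channel: stop the device role and clear
 * wl->roc_vif. Returns 0 if no ROC was pending, or -EBUSY if the vif
 * is not initialized yet.
 */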
5602 static int __wlcore_roc_completed(struct wl1271 *wl)
5603 {
5604 	struct wl12xx_vif *wlvif;
5605 	int ret;
5606 
5607 	/* already completed */
5608 	if (unlikely(!wl->roc_vif))
5609 		return 0;
5610 
5611 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5612 
5613 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5614 		return -EBUSY;
5615 
5616 	ret = wl12xx_stop_dev(wl, wlvif);
5617 	if (ret < 0)
5618 		return ret;
5619 
5620 	wl->roc_vif = NULL;
5621 
5622 	return 0;
5623 }
5624 
5625 static int wlcore_roc_completed(struct wl1271 *wl)
5626 {
5627 	int ret;
5628 
5629 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5630 
5631 	mutex_lock(&wl->mutex);
5632 
5633 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5634 		ret = -EBUSY;
5635 		goto out;
5636 	}
5637 
5638 	ret = wl1271_ps_elp_wakeup(wl);
5639 	if (ret < 0)
5640 		goto out;
5641 
5642 	ret = __wlcore_roc_completed(wl);
5643 
5644 	wl1271_ps_elp_sleep(wl);
5645 out:
5646 	mutex_unlock(&wl->mutex);
5647 
5648 	return ret;
5649 }
5650 
5651 static void wlcore_roc_complete_work(struct work_struct *work)
5652 {
5653 	struct delayed_work *dwork;
5654 	struct wl1271 *wl;
5655 	int ret;
5656 
5657 	dwork = container_of(work, struct delayed_work, work);
5658 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5659 
5660 	ret = wlcore_roc_completed(wl);
5661 	if (!ret)
5662 		ieee80211_remain_on_channel_expired(wl->hw);
5663 }
5664 
5665 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5666 {
5667 	struct wl1271 *wl = hw->priv;
5668 
5669 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5670 
5671 	/* TODO: per-vif */
5672 	wl1271_tx_flush(wl);
5673 
5674 	/*
5675 	 * we can't just flush_work here, because it might deadlock
5676 	 * (as we might get called from the same workqueue)
5677 	 */
5678 	cancel_delayed_work_sync(&wl->roc_complete_work);
5679 	wlcore_roc_completed(wl);
5680 
5681 	return 0;
5682 }
5683 
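/*
 * mac80211 sta_rc_update callback: only bandwidth changes are handled.
 * The callback runs in atomic context, so the new bandwidth is
 * recorded and the actual update is deferred to rc_update_work.
 */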
5684 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5685 				    struct ieee80211_vif *vif,
5686 				    struct ieee80211_sta *sta,
5687 				    u32 changed)
5688 {
5689 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5690 
5691 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
5692 
5693 	if (!(changed & IEEE80211_RC_BW_CHANGED))
5694 		return;
5695 
5696 	/* this callback is atomic, so schedule a new work */
5697 	wlvif->rc_update_bw = sta->bandwidth;
5698 	ieee80211_queue_work(hw, &wlvif->rc_update_work);
5699 }
5700 
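/*
 * mac80211 sta_statistics callback: report the FW-averaged RSSI as the
 * station's signal level (dBm).
 */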
5701 static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
5702 				     struct ieee80211_vif *vif,
5703 				     struct ieee80211_sta *sta,
5704 				     struct station_info *sinfo)
5705 {
5706 	struct wl1271 *wl = hw->priv;
5707 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5708 	s8 rssi_dbm;
5709 	int ret;
5710 
5711 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5712 
5713 	mutex_lock(&wl->mutex);
5714 
5715 	if (unlikely(wl->state != WLCORE_STATE_ON))
5716 		goto out;
5717 
5718 	ret = wl1271_ps_elp_wakeup(wl);
5719 	if (ret < 0)
5720 		goto out_sleep;
5721 
5722 	ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
5723 	if (ret < 0)
5724 		goto out_sleep;
5725 
5726 	sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL);
5727 	sinfo->signal = rssi_dbm;
5728 
5729 out_sleep:
5730 	wl1271_ps_elp_sleep(wl);
5731 
5732 out:
5733 	mutex_unlock(&wl->mutex);
5734 }
5735 
5736 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5737 {
5738 	struct wl1271 *wl = hw->priv;
5739 	bool ret = false;
5740 
5741 	mutex_lock(&wl->mutex);
5742 
5743 	if (unlikely(wl->state != WLCORE_STATE_ON))
5744 		goto out;
5745 
5746 	/* packets are considered pending if in the TX queue or the FW */
5747 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5748 out:
5749 	mutex_unlock(&wl->mutex);
5750 
5751 	return ret;
5752 }
5753 
5754 /* can't be const, mac80211 writes to this */
5755 static struct ieee80211_rate wl1271_rates[] = {
5756 	{ .bitrate = 10,
5757 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5758 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5759 	{ .bitrate = 20,
5760 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5761 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5762 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5763 	{ .bitrate = 55,
5764 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5765 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5766 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5767 	{ .bitrate = 110,
5768 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5769 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5770 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5771 	{ .bitrate = 60,
5772 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5773 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5774 	{ .bitrate = 90,
5775 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5776 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5777 	{ .bitrate = 120,
5778 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5779 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5780 	{ .bitrate = 180,
5781 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5782 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5783 	{ .bitrate = 240,
5784 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5785 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5786 	{ .bitrate = 360,
5787 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5788 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5789 	{ .bitrate = 480,
5790 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5791 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5792 	{ .bitrate = 540,
5793 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5794 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5795 };
5796 
5797 /* can't be const, mac80211 writes to this */
5798 static struct ieee80211_channel wl1271_channels[] = {
5799 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5800 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5801 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5802 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5803 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5804 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5805 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5806 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5807 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5808 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5809 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5810 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5811 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5812 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5813 };
5814 
5815 /* can't be const, mac80211 writes to this */
5816 static struct ieee80211_supported_band wl1271_band_2ghz = {
5817 	.channels = wl1271_channels,
5818 	.n_channels = ARRAY_SIZE(wl1271_channels),
5819 	.bitrates = wl1271_rates,
5820 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5821 };
5822 
5823 /* 5 GHz data rates for WL1273 */
5824 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5825 	{ .bitrate = 60,
5826 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5827 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5828 	{ .bitrate = 90,
5829 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5830 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5831 	{ .bitrate = 120,
5832 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5833 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5834 	{ .bitrate = 180,
5835 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5836 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5837 	{ .bitrate = 240,
5838 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5839 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5840 	{ .bitrate = 360,
5841 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5842 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5843 	{ .bitrate = 480,
5844 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5845 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5846 	{ .bitrate = 540,
5847 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5848 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5849 };
5850 
5851 /* 5 GHz band channels for WL1273 */
5852 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5853 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5854 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5855 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5856 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5857 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5858 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5859 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5860 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5861 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5862 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5863 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5864 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5865 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5866 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5867 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5868 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5869 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5870 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5871 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5872 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5873 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5874 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5875 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5876 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5877 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5878 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5879 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5880 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5881 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5882 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5883 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5884 };
5885 
5886 static struct ieee80211_supported_band wl1271_band_5ghz = {
5887 	.channels = wl1271_channels_5ghz,
5888 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5889 	.bitrates = wl1271_rates_5ghz,
5890 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5891 };
5892 
5893 static const struct ieee80211_ops wl1271_ops = {
5894 	.start = wl1271_op_start,
5895 	.stop = wlcore_op_stop,
5896 	.add_interface = wl1271_op_add_interface,
5897 	.remove_interface = wl1271_op_remove_interface,
5898 	.change_interface = wl12xx_op_change_interface,
5899 #ifdef CONFIG_PM
5900 	.suspend = wl1271_op_suspend,
5901 	.resume = wl1271_op_resume,
5902 #endif
5903 	.config = wl1271_op_config,
5904 	.prepare_multicast = wl1271_op_prepare_multicast,
5905 	.configure_filter = wl1271_op_configure_filter,
5906 	.tx = wl1271_op_tx,
5907 	.set_key = wlcore_op_set_key,
5908 	.hw_scan = wl1271_op_hw_scan,
5909 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
5910 	.sched_scan_start = wl1271_op_sched_scan_start,
5911 	.sched_scan_stop = wl1271_op_sched_scan_stop,
5912 	.bss_info_changed = wl1271_op_bss_info_changed,
5913 	.set_frag_threshold = wl1271_op_set_frag_threshold,
5914 	.set_rts_threshold = wl1271_op_set_rts_threshold,
5915 	.conf_tx = wl1271_op_conf_tx,
5916 	.get_tsf = wl1271_op_get_tsf,
5917 	.get_survey = wl1271_op_get_survey,
5918 	.sta_state = wl12xx_op_sta_state,
5919 	.ampdu_action = wl1271_op_ampdu_action,
5920 	.tx_frames_pending = wl1271_tx_frames_pending,
5921 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
5922 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
5923 	.channel_switch = wl12xx_op_channel_switch,
5924 	.channel_switch_beacon = wlcore_op_channel_switch_beacon,
5925 	.flush = wlcore_op_flush,
5926 	.remain_on_channel = wlcore_op_remain_on_channel,
5927 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5928 	.add_chanctx = wlcore_op_add_chanctx,
5929 	.remove_chanctx = wlcore_op_remove_chanctx,
5930 	.change_chanctx = wlcore_op_change_chanctx,
5931 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5932 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5933 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5934 	.sta_rc_update = wlcore_op_sta_rc_update,
5935 	.sta_statistics = wlcore_op_sta_statistics,
5936 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5937 };
5938 
5939 
5940 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5941 {
5942 	u8 idx;
5943 
5944 	BUG_ON(band >= 2);
5945 
5946 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5947 		wl1271_error("Illegal RX rate from HW: %d", rate);
5948 		return 0;
5949 	}
5950 
5951 	idx = wl->band_rate_to_idx[band][rate];
5952 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5953 		wl1271_error("Unsupported RX rate from HW: %d", rate);
5954 		return 0;
5955 	}
5956 
5957 	return idx;
5958 }
5959 
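/*
 * Derive wl->num_mac_addr consecutive MAC addresses from a 24-bit OUI
 * and a 24-bit NIC base; e.g. oui 0x080028 / nic 0x123456 yields
 * 08:00:28:12:34:56, 08:00:28:12:34:57, and so on. If fewer addresses
 * than WLCORE_NUM_MAC_ADDRESSES are available, the last slot reuses
 * the first address with the locally-administered bit set.
 */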
5960 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5961 {
5962 	int i;
5963 
5964 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5965 		     oui, nic);
5966 
5967 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5968 		wl1271_warning("NIC part of the MAC address wraps around!");
5969 
5970 	for (i = 0; i < wl->num_mac_addr; i++) {
5971 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
5972 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
5973 		wl->addresses[i].addr[2] = (u8) oui;
5974 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
5975 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
5976 		wl->addresses[i].addr[5] = (u8) nic;
5977 		nic++;
5978 	}
5979 
5980 	/* at most, we may be one address short */
5981 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5982 
5983 	/*
5984 	 * turn on the LAA bit in the first address and use it as
5985 	 * the last address.
5986 	 */
5987 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5988 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5989 		memcpy(&wl->addresses[idx], &wl->addresses[0],
5990 		       sizeof(wl->addresses[0]));
5991 		/* LAA bit */
5992 		wl->addresses[idx].addr[0] |= BIT(1);
5993 	}
5994 
5995 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5996 	wl->hw->wiphy->addresses = wl->addresses;
5997 }
5998 
5999 static int wl12xx_get_hw_info(struct wl1271 *wl)
6000 {
6001 	int ret;
6002 
6003 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
6004 	if (ret < 0)
6005 		goto out;
6006 
6007 	wl->fuse_oui_addr = 0;
6008 	wl->fuse_nic_addr = 0;
6009 
6010 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
6011 	if (ret < 0)
6012 		goto out;
6013 
6014 	if (wl->ops->get_mac)
6015 		ret = wl->ops->get_mac(wl);
6016 
6017 out:
6018 	return ret;
6019 }
6020 
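/*
 * Register the hw with mac80211. The WLAN MAC address is taken from
 * the NVS blob (bytes 11/10/6 form the OUI, bytes 5/4/3 the NIC part)
 * and, if that field is zeroed, falls back to the fused BD_ADDR + 1.
 */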
6021 static int wl1271_register_hw(struct wl1271 *wl)
6022 {
6023 	int ret;
6024 	u32 oui_addr = 0, nic_addr = 0;
6025 
6026 	if (wl->mac80211_registered)
6027 		return 0;
6028 
6029 	if (wl->nvs_len >= 12) {
6030 		/* NOTE: The wl->nvs->nvs element must come first; to
6031 		 * simplify the casting we assume it sits at the very
6032 		 * beginning of the wl->nvs structure.
6033 		 */
6034 		u8 *nvs_ptr = (u8 *)wl->nvs;
6035 
6036 		oui_addr =
6037 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
6038 		nic_addr =
6039 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
6040 	}
6041 
6042 	/* if the MAC address is zeroed in the NVS, derive it from the fuse */
6043 	if (oui_addr == 0 && nic_addr == 0) {
6044 		oui_addr = wl->fuse_oui_addr;
6045 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
6046 		nic_addr = wl->fuse_nic_addr + 1;
6047 	}
6048 
6049 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
6050 
6051 	ret = ieee80211_register_hw(wl->hw);
6052 	if (ret < 0) {
6053 		wl1271_error("unable to register mac80211 hw: %d", ret);
6054 		goto out;
6055 	}
6056 
6057 	wl->mac80211_registered = true;
6058 
6059 	wl1271_debugfs_init(wl);
6060 
6061 	wl1271_notice("loaded");
6062 
6063 out:
6064 	return ret;
6065 }
6066 
6067 static void wl1271_unregister_hw(struct wl1271 *wl)
6068 {
6069 	if (wl->plt)
6070 		wl1271_plt_stop(wl);
6071 
6072 	ieee80211_unregister_hw(wl->hw);
6073 	wl->mac80211_registered = false;
6074 
6075 }
6076 
6077 static int wl1271_init_ieee80211(struct wl1271 *wl)
6078 {
6079 	int i;
6080 	static const u32 cipher_suites[] = {
6081 		WLAN_CIPHER_SUITE_WEP40,
6082 		WLAN_CIPHER_SUITE_WEP104,
6083 		WLAN_CIPHER_SUITE_TKIP,
6084 		WLAN_CIPHER_SUITE_CCMP,
6085 		WL1271_CIPHER_SUITE_GEM,
6086 	};
6087 
6088 	/* The tx descriptor buffer */
6089 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
6090 
6091 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
6092 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
6093 
6094 	/* unit us */
6095 	/* FIXME: find a proper value */
6096 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
6097 
6098 	ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
6099 	ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
6100 	ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
6101 	ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
6102 	ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
6103 	ieee80211_hw_set(wl->hw, AP_LINK_PS);
6104 	ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
6105 	ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
6106 	ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
6107 	ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
6108 	ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
6109 	ieee80211_hw_set(wl->hw, SIGNAL_DBM);
6110 	ieee80211_hw_set(wl->hw, SUPPORTS_PS);
6111 
6112 	wl->hw->wiphy->cipher_suites = cipher_suites;
6113 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
6114 
6115 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
6116 					 BIT(NL80211_IFTYPE_AP) |
6117 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
6118 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
6119 					 BIT(NL80211_IFTYPE_P2P_GO);
6120 	wl->hw->wiphy->max_scan_ssids = 1;
6121 	wl->hw->wiphy->max_sched_scan_ssids = 16;
6122 	wl->hw->wiphy->max_match_sets = 16;
6123 	/*
6124 	 * The maximum length of elements in scan probe request templates
6125 	 * is the maximum possible template length, minus the IEEE 802.11
6126 	 * header of the template.
6127 	 */
6128 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6129 			sizeof(struct ieee80211_header);
6130 
6131 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6132 		sizeof(struct ieee80211_header);
6133 
6134 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6135 
6136 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6137 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6138 				WIPHY_FLAG_SUPPORTS_SCHED_SCAN |
6139 				WIPHY_FLAG_HAS_CHANNEL_SWITCH;
6140 
6141 	/* make sure all our channels fit in the scanned_ch bitmask */
6142 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6143 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6144 		     WL1271_MAX_CHANNELS);
6145 	/*
6146 	 * clear channel flags from the previous usage
6147 	 * and restore max_power & max_antenna_gain values.
6148 	 */
6149 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6150 		wl1271_band_2ghz.channels[i].flags = 0;
6151 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6152 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6153 	}
6154 
6155 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6156 		wl1271_band_5ghz.channels[i].flags = 0;
6157 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6158 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6159 	}
6160 
6161 	/*
6162 	 * We keep local copies of the band structs because we need to
6163 	 * modify them on a per-device basis.
6164 	 */
6165 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
6166 	       sizeof(wl1271_band_2ghz));
6167 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
6168 	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
6169 	       sizeof(*wl->ht_cap));
6170 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
6171 	       sizeof(wl1271_band_5ghz));
6172 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
6173 	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
6174 	       sizeof(*wl->ht_cap));
6175 
6176 	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
6177 		&wl->bands[IEEE80211_BAND_2GHZ];
6178 	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
6179 		&wl->bands[IEEE80211_BAND_5GHZ];
6180 
6181 	/*
6182 	 * allow 4 queues per mac address we support +
6183 	 * 1 cab queue per mac + one global offchannel Tx queue
6184 	 */
6185 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
6186 
6187 	/* the last queue is the offchannel queue */
6188 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6189 	wl->hw->max_rates = 1;
6190 
6191 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6192 
6193 	/* the FW answers probe-requests in AP-mode */
6194 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6195 	wl->hw->wiphy->probe_resp_offload =
6196 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6197 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6198 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6199 
6200 	/* allowed interface combinations */
6201 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6202 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6203 
6204 	/* register vendor commands */
6205 	wlcore_set_vendor_commands(wl->hw->wiphy);
6206 
6207 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6208 
6209 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6210 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6211 
6212 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6213 
6214 	return 0;
6215 }
6216 
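/*
 * Allocate the ieee80211_hw, the wl1271 core structure and all the
 * chip-agnostic buffers (aggregation buffer, FW log page, mailbox,
 * dummy packet), and initialize the work items, queues and locks.
 * Chip-specific setup happens later, in wlcore_nvs_cb().
 */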
6217 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6218 				     u32 mbox_size)
6219 {
6220 	struct ieee80211_hw *hw;
6221 	struct wl1271 *wl;
6222 	int i, j, ret;
6223 	unsigned int order;
6224 
6225 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6226 	if (!hw) {
6227 		wl1271_error("could not alloc ieee80211_hw");
6228 		ret = -ENOMEM;
6229 		goto err_hw_alloc;
6230 	}
6231 
6232 	wl = hw->priv;
6233 	memset(wl, 0, sizeof(*wl));
6234 
6235 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6236 	if (!wl->priv) {
6237 		wl1271_error("could not alloc wl priv");
6238 		ret = -ENOMEM;
6239 		goto err_priv_alloc;
6240 	}
6241 
6242 	INIT_LIST_HEAD(&wl->wlvif_list);
6243 
6244 	wl->hw = hw;
6245 
6246 	/*
6247 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6248 	 * We don't allocate any additional resources here, so that's fine.
6249 	 */
6250 	for (i = 0; i < NUM_TX_QUEUES; i++)
6251 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6252 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6253 
6254 	skb_queue_head_init(&wl->deferred_rx_queue);
6255 	skb_queue_head_init(&wl->deferred_tx_queue);
6256 
6257 	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
6258 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6259 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6260 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6261 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6262 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6263 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6264 
6265 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6266 	if (!wl->freezable_wq) {
6267 		ret = -ENOMEM;
6268 		goto err_hw;
6269 	}
6270 
6271 	wl->channel = 0;
6272 	wl->rx_counter = 0;
6273 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6274 	wl->band = IEEE80211_BAND_2GHZ;
6275 	wl->channel_type = NL80211_CHAN_NO_HT;
6276 	wl->flags = 0;
6277 	wl->sg_enabled = true;
6278 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6279 	wl->recovery_count = 0;
6280 	wl->hw_pg_ver = -1;
6281 	wl->ap_ps_map = 0;
6282 	wl->ap_fw_ps_map = 0;
6283 	wl->quirks = 0;
6284 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6285 	wl->active_sta_count = 0;
6286 	wl->active_link_count = 0;
6287 	wl->fwlog_size = 0;
6288 	init_waitqueue_head(&wl->fwlog_waitq);
6289 
6290 	/* The system link is always allocated */
6291 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6292 
6293 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6294 	for (i = 0; i < wl->num_tx_desc; i++)
6295 		wl->tx_frames[i] = NULL;
6296 
6297 	spin_lock_init(&wl->wl_lock);
6298 
6299 	wl->state = WLCORE_STATE_OFF;
6300 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6301 	mutex_init(&wl->mutex);
6302 	mutex_init(&wl->flush_mutex);
6303 	init_completion(&wl->nvs_loading_complete);
6304 
6305 	order = get_order(aggr_buf_size);
6306 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6307 	if (!wl->aggr_buf) {
6308 		ret = -ENOMEM;
6309 		goto err_wq;
6310 	}
6311 	wl->aggr_buf_size = aggr_buf_size;
6312 
6313 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6314 	if (!wl->dummy_packet) {
6315 		ret = -ENOMEM;
6316 		goto err_aggr;
6317 	}
6318 
6319 	/* Allocate one page for the FW log */
6320 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6321 	if (!wl->fwlog) {
6322 		ret = -ENOMEM;
6323 		goto err_dummy_packet;
6324 	}
6325 
6326 	wl->mbox_size = mbox_size;
6327 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6328 	if (!wl->mbox) {
6329 		ret = -ENOMEM;
6330 		goto err_fwlog;
6331 	}
6332 
6333 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6334 	if (!wl->buffer_32) {
6335 		ret = -ENOMEM;
6336 		goto err_mbox;
6337 	}
6338 
6339 	return hw;
6340 
6341 err_mbox:
6342 	kfree(wl->mbox);
6343 
6344 err_fwlog:
6345 	free_page((unsigned long)wl->fwlog);
6346 
6347 err_dummy_packet:
6348 	dev_kfree_skb(wl->dummy_packet);
6349 
6350 err_aggr:
6351 	free_pages((unsigned long)wl->aggr_buf, order);
6352 
6353 err_wq:
6354 	destroy_workqueue(wl->freezable_wq);
6355 
6356 err_hw:
6357 	wl1271_debugfs_exit(wl);
6358 	kfree(wl->priv);
6359 
6360 err_priv_alloc:
6361 	ieee80211_free_hw(hw);
6362 
6363 err_hw_alloc:
6364 
6365 	return ERR_PTR(ret);
6366 }
6367 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
6368 
6369 int wlcore_free_hw(struct wl1271 *wl)
6370 {
6371 	/* Unblock any fwlog readers */
6372 	mutex_lock(&wl->mutex);
6373 	wl->fwlog_size = -1;
6374 	wake_up_interruptible_all(&wl->fwlog_waitq);
6375 	mutex_unlock(&wl->mutex);
6376 
6377 	wlcore_sysfs_free(wl);
6378 
6379 	kfree(wl->buffer_32);
6380 	kfree(wl->mbox);
6381 	free_page((unsigned long)wl->fwlog);
6382 	dev_kfree_skb(wl->dummy_packet);
6383 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6384 
6385 	wl1271_debugfs_exit(wl);
6386 
6387 	vfree(wl->fw);
6388 	wl->fw = NULL;
6389 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6390 	kfree(wl->nvs);
6391 	wl->nvs = NULL;
6392 
6393 	kfree(wl->raw_fw_status);
6394 	kfree(wl->fw_status);
6395 	kfree(wl->tx_res_if);
6396 	destroy_workqueue(wl->freezable_wq);
6397 
6398 	kfree(wl->priv);
6399 	ieee80211_free_hw(wl->hw);
6400 
6401 	return 0;
6402 }
6403 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6404 
6405 #ifdef CONFIG_PM
6406 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6407 	.flags = WIPHY_WOWLAN_ANY,
6408 	.n_patterns = WL1271_MAX_RX_FILTERS,
6409 	.pattern_min_len = 1,
6410 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6411 };
6412 #endif
6413 
6414 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6415 {
6416 	return IRQ_WAKE_THREAD;
6417 }
6418 
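/*
 * Completion callback for the asynchronous NVS request issued from
 * wlcore_probe(): run the chip-specific setup, read the HW info,
 * request the IRQ and finally register with mac80211 and sysfs.
 */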
6419 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6420 {
6421 	struct wl1271 *wl = context;
6422 	struct platform_device *pdev = wl->pdev;
6423 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6424 	struct resource *res;
6425 
6426 	int ret;
6427 	irq_handler_t hardirq_fn = NULL;
6428 
6429 	if (fw) {
6430 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6431 		if (!wl->nvs) {
6432 			wl1271_error("Could not allocate nvs data");
6433 			goto out;
6434 		}
6435 		wl->nvs_len = fw->size;
6436 	} else {
6437 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6438 			     WL12XX_NVS_NAME);
6439 		wl->nvs = NULL;
6440 		wl->nvs_len = 0;
6441 	}
6442 
6443 	ret = wl->ops->setup(wl);
6444 	if (ret < 0)
6445 		goto out_free_nvs;
6446 
6447 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6448 
6449 	/* adjust some runtime configuration parameters */
6450 	wlcore_adjust_conf(wl);
6451 
6452 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6453 	if (!res) {
6454 		wl1271_error("Could not get IRQ resource");
6455 		goto out_free_nvs;
6456 	}
6457 
6458 	wl->irq = res->start;
6459 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6460 	wl->if_ops = pdev_data->if_ops;
6461 
6462 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6463 		hardirq_fn = wlcore_hardirq;
6464 	else
6465 		wl->irq_flags |= IRQF_ONESHOT;
6466 
6467 	ret = wl12xx_set_power_on(wl);
6468 	if (ret < 0)
6469 		goto out_free_nvs;
6470 
6471 	ret = wl12xx_get_hw_info(wl);
6472 	if (ret < 0) {
6473 		wl1271_error("couldn't get hw info");
6474 		wl1271_power_off(wl);
6475 		goto out_free_nvs;
6476 	}
6477 
6478 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6479 				   wl->irq_flags, pdev->name, wl);
6480 	if (ret < 0) {
6481 		wl1271_error("interrupt configuration failed");
6482 		wl1271_power_off(wl);
6483 		goto out_free_nvs;
6484 	}
6485 
6486 #ifdef CONFIG_PM
6487 	ret = enable_irq_wake(wl->irq);
6488 	if (!ret) {
6489 		wl->irq_wake_enabled = true;
6490 		device_init_wakeup(wl->dev, 1);
6491 		if (pdev_data->pwr_in_suspend)
6492 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6493 	}
6494 #endif
6495 	disable_irq(wl->irq);
6496 	wl1271_power_off(wl);
6497 
6498 	ret = wl->ops->identify_chip(wl);
6499 	if (ret < 0)
6500 		goto out_irq;
6501 
6502 	ret = wl1271_init_ieee80211(wl);
6503 	if (ret)
6504 		goto out_irq;
6505 
6506 	ret = wl1271_register_hw(wl);
6507 	if (ret)
6508 		goto out_irq;
6509 
6510 	ret = wlcore_sysfs_init(wl);
6511 	if (ret)
6512 		goto out_unreg;
6513 
6514 	wl->initialized = true;
6515 	goto out;
6516 
6517 out_unreg:
6518 	wl1271_unregister_hw(wl);
6519 
6520 out_irq:
6521 	free_irq(wl->irq, wl);
6522 
6523 out_free_nvs:
6524 	kfree(wl->nvs);
6525 
6526 out:
6527 	release_firmware(fw);
6528 	complete_all(&wl->nvs_loading_complete);
6529 }
6530 
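/*
 * Called by the lower drivers (wl12xx/wl18xx) from their platform
 * probe. Initialization continues asynchronously in wlcore_nvs_cb()
 * once the NVS file request completes.
 */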
6531 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6532 {
6533 	int ret;
6534 
6535 	if (!wl->ops || !wl->ptable)
6536 		return -EINVAL;
6537 
6538 	wl->dev = &pdev->dev;
6539 	wl->pdev = pdev;
6540 	platform_set_drvdata(pdev, wl);
6541 
6542 	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6543 				      WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6544 				      wl, wlcore_nvs_cb);
6545 	if (ret < 0) {
6546 		wl1271_error("request_firmware_nowait failed: %d", ret);
6547 		complete_all(&wl->nvs_loading_complete);
6548 	}
6549 
6550 	return ret;
6551 }
6552 EXPORT_SYMBOL_GPL(wlcore_probe);
6553 
6554 int wlcore_remove(struct platform_device *pdev)
6555 {
6556 	struct wl1271 *wl = platform_get_drvdata(pdev);
6557 
6558 	wait_for_completion(&wl->nvs_loading_complete);
6559 	if (!wl->initialized)
6560 		return 0;
6561 
6562 	if (wl->irq_wake_enabled) {
6563 		device_init_wakeup(wl->dev, 0);
6564 		disable_irq_wake(wl->irq);
6565 	}
6566 	wl1271_unregister_hw(wl);
6567 	free_irq(wl->irq, wl);
6568 	wlcore_free_hw(wl);
6569 
6570 	return 0;
6571 }
6572 EXPORT_SYMBOL_GPL(wlcore_remove);
6573 
6574 u32 wl12xx_debug_level = DEBUG_NONE;
6575 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6576 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6577 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6578 
6579 module_param_named(fwlog, fwlog_param, charp, 0);
6580 MODULE_PARM_DESC(fwlog,
6581 		 "FW logger options: continuous, ondemand, dbgpins or disable");
6582 
6583 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6584 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6585 
6586 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6587 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6588 
6589 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6590 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6591 
6592 MODULE_LICENSE("GPL");
6593 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6594 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6595 MODULE_FIRMWARE(WL12XX_NVS_NAME);
6596