1 
2 /*
3  * This file is part of wlcore
4  *
5  * Copyright (C) 2008-2010 Nokia Corporation
6  * Copyright (C) 2011-2013 Texas Instruments Inc.
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License
10  * version 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20  * 02110-1301 USA
21  *
22  */
23 
24 #include <linux/module.h>
25 #include <linux/firmware.h>
26 #include <linux/etherdevice.h>
27 #include <linux/vmalloc.h>
28 #include <linux/interrupt.h>
29 #include <linux/irq.h>
30 
31 #include "wlcore.h"
32 #include "debug.h"
33 #include "wl12xx_80211.h"
34 #include "io.h"
35 #include "tx.h"
36 #include "ps.h"
37 #include "init.h"
38 #include "debugfs.h"
39 #include "testmode.h"
40 #include "vendor_cmd.h"
41 #include "scan.h"
42 #include "hw_ops.h"
43 #include "sysfs.h"
44 
45 #define WL1271_BOOT_RETRIES 3
46 
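/*
 * Optional module-load overrides; -1 (or NULL for fwlog_param) means "keep
 * the default from wl->conf". They are applied in wlcore_adjust_conf() below.
 */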
47 static char *fwlog_param;
48 static int fwlog_mem_blocks = -1;
49 static int bug_on_recovery = -1;
50 static int no_recovery     = -1;
51 
52 static void __wl1271_op_remove_interface(struct wl1271 *wl,
53 					 struct ieee80211_vif *vif,
54 					 bool reset_tx_queues);
55 static void wlcore_op_stop_locked(struct wl1271 *wl);
56 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
57 
58 static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
59 {
60 	int ret;
61 
62 	if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
63 		return -EINVAL;
64 
65 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
66 		return 0;
67 
68 	if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
69 		return 0;
70 
71 	ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
72 	if (ret < 0)
73 		return ret;
74 
75 	wl1271_info("Association completed.");
76 	return 0;
77 }
78 
79 static void wl1271_reg_notify(struct wiphy *wiphy,
80 			      struct regulatory_request *request)
81 {
82 	struct ieee80211_supported_band *band;
83 	struct ieee80211_channel *ch;
84 	int i;
85 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
86 	struct wl1271 *wl = hw->priv;
87 
88 	band = wiphy->bands[IEEE80211_BAND_5GHZ];
89 	for (i = 0; i < band->n_channels; i++) {
90 		ch = &band->channels[i];
91 		if (ch->flags & IEEE80211_CHAN_DISABLED)
92 			continue;
93 
94 		if (ch->flags & IEEE80211_CHAN_RADAR)
95 			ch->flags |= IEEE80211_CHAN_NO_IR;
96 
97 	}
98 
99 	wlcore_regdomain_config(wl);
100 }
101 
102 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
103 				   bool enable)
104 {
105 	int ret = 0;
106 
107 	/* we should hold wl->mutex */
108 	ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
109 	if (ret < 0)
110 		goto out;
111 
112 	if (enable)
113 		set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
114 	else
115 		clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
116 out:
117 	return ret;
118 }
119 
120 /*
121  * this function is called when the rx_streaming interval
122  * has been changed or rx_streaming should be disabled
123  */
124 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
125 {
126 	int ret = 0;
127 	int period = wl->conf.rx_streaming.interval;
128 
129 	/* don't reconfigure if rx_streaming is disabled */
130 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
131 		goto out;
132 
133 	/* reconfigure/disable according to new streaming_period */
134 	if (period &&
135 	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
136 	    (wl->conf.rx_streaming.always ||
137 	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
138 		ret = wl1271_set_rx_streaming(wl, wlvif, true);
139 	else {
140 		ret = wl1271_set_rx_streaming(wl, wlvif, false);
141 		/* don't cancel_work_sync since we might deadlock */
142 		del_timer_sync(&wlvif->rx_streaming_timer);
143 	}
144 out:
145 	return ret;
146 }
147 
148 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
149 {
150 	int ret;
151 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
152 						rx_streaming_enable_work);
153 	struct wl1271 *wl = wlvif->wl;
154 
155 	mutex_lock(&wl->mutex);
156 
157 	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
158 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
159 	    (!wl->conf.rx_streaming.always &&
160 	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
161 		goto out;
162 
163 	if (!wl->conf.rx_streaming.interval)
164 		goto out;
165 
166 	ret = wl1271_ps_elp_wakeup(wl);
167 	if (ret < 0)
168 		goto out;
169 
170 	ret = wl1271_set_rx_streaming(wl, wlvif, true);
171 	if (ret < 0)
172 		goto out_sleep;
173 
174 	/* stop it after some time of inactivity */
175 	mod_timer(&wlvif->rx_streaming_timer,
176 		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
177 
178 out_sleep:
179 	wl1271_ps_elp_sleep(wl);
180 out:
181 	mutex_unlock(&wl->mutex);
182 }
183 
184 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
185 {
186 	int ret;
187 	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
188 						rx_streaming_disable_work);
189 	struct wl1271 *wl = wlvif->wl;
190 
191 	mutex_lock(&wl->mutex);
192 
193 	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
194 		goto out;
195 
196 	ret = wl1271_ps_elp_wakeup(wl);
197 	if (ret < 0)
198 		goto out;
199 
200 	ret = wl1271_set_rx_streaming(wl, wlvif, false);
201 	if (ret)
202 		goto out_sleep;
203 
204 out_sleep:
205 	wl1271_ps_elp_sleep(wl);
206 out:
207 	mutex_unlock(&wl->mutex);
208 }
209 
210 static void wl1271_rx_streaming_timer(unsigned long data)
211 {
212 	struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
213 	struct wl1271 *wl = wlvif->wl;
214 	ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
215 }
216 
217 /* wl->mutex must be taken */
218 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
219 {
220 	/* if the watchdog is not armed, don't do anything */
221 	if (wl->tx_allocated_blocks == 0)
222 		return;
223 
224 	cancel_delayed_work(&wl->tx_watchdog_work);
225 	ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
226 		msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
227 }
228 
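/*
 * Tx watchdog: if blocks are still allocated in the FW but nothing was freed
 * within tx_watchdog_timeout, either re-arm the watchdog (a ROC, a scan or an
 * AP with sleeping stations can legitimately stall Tx) or start recovery.
 */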
229 static void wl12xx_tx_watchdog_work(struct work_struct *work)
230 {
231 	struct delayed_work *dwork;
232 	struct wl1271 *wl;
233 
234 	dwork = container_of(work, struct delayed_work, work);
235 	wl = container_of(dwork, struct wl1271, tx_watchdog_work);
236 
237 	mutex_lock(&wl->mutex);
238 
239 	if (unlikely(wl->state != WLCORE_STATE_ON))
240 		goto out;
241 
242 	/* Tx went out in the meantime - everything is ok */
243 	if (unlikely(wl->tx_allocated_blocks == 0))
244 		goto out;
245 
246 	/*
247 	 * if a ROC is in progress, we might not have any Tx for a long
248 	 * time (e.g. pending Tx on the non-ROC channels)
249 	 */
250 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
251 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
252 			     wl->conf.tx.tx_watchdog_timeout);
253 		wl12xx_rearm_tx_watchdog_locked(wl);
254 		goto out;
255 	}
256 
257 	/*
258 	 * if a scan is in progress, we might not have any Tx for a long
259 	 * time
260 	 */
261 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
262 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
263 			     wl->conf.tx.tx_watchdog_timeout);
264 		wl12xx_rearm_tx_watchdog_locked(wl);
265 		goto out;
266 	}
267 
268 	/*
269 	* AP might cache a frame for a long time for a sleeping station,
270 	* so rearm the timer if there's an AP interface with stations. If
271 	* Tx is genuinely stuck we will hopefully discover it when all
272 	* stations are removed due to inactivity.
273 	*/
274 	if (wl->active_sta_count) {
275 		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
276 			     "%d stations",
277 			      wl->conf.tx.tx_watchdog_timeout,
278 			      wl->active_sta_count);
279 		wl12xx_rearm_tx_watchdog_locked(wl);
280 		goto out;
281 	}
282 
283 	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
284 		     wl->conf.tx.tx_watchdog_timeout);
285 	wl12xx_queue_recovery_work(wl);
286 
287 out:
288 	mutex_unlock(&wl->mutex);
289 }
290 
291 static void wlcore_adjust_conf(struct wl1271 *wl)
292 {
293 	/* Adjust settings according to optional module parameters */
294 
295 	/* Firmware Logger params */
296 	if (fwlog_mem_blocks != -1) {
297 		if (fwlog_mem_blocks >= CONF_FWLOG_MIN_MEM_BLOCKS &&
298 		    fwlog_mem_blocks <= CONF_FWLOG_MAX_MEM_BLOCKS) {
299 			wl->conf.fwlog.mem_blocks = fwlog_mem_blocks;
300 		} else {
301 			wl1271_error(
302 				"Illegal fwlog_mem_blocks=%d using default %d",
303 				fwlog_mem_blocks, wl->conf.fwlog.mem_blocks);
304 		}
305 	}
306 
307 	if (fwlog_param) {
308 		if (!strcmp(fwlog_param, "continuous")) {
309 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
310 		} else if (!strcmp(fwlog_param, "ondemand")) {
311 			wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
312 		} else if (!strcmp(fwlog_param, "dbgpins")) {
313 			wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
314 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
315 		} else if (!strcmp(fwlog_param, "disable")) {
316 			wl->conf.fwlog.mem_blocks = 0;
317 			wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
318 		} else {
319 			wl1271_error("Unknown fwlog parameter %s", fwlog_param);
320 		}
321 	}
322 
323 	if (bug_on_recovery != -1)
324 		wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
325 
326 	if (no_recovery != -1)
327 		wl->conf.recovery.no_recovery = (u8) no_recovery;
328 }
329 
330 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
331 					struct wl12xx_vif *wlvif,
332 					u8 hlid, u8 tx_pkts)
333 {
334 	bool fw_ps;
335 
336 	fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
337 
338 	/*
339 	 * Wake up from high-level PS if the STA is asleep with too few
340 	 * packets in FW or if the STA is awake.
341 	 */
342 	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
343 		wl12xx_ps_link_end(wl, wlvif, hlid);
344 
345 	/*
346 	 * Start high-level PS if the STA is asleep with enough blocks in FW.
347 	 * Make an exception if this is the only connected link. In this
348 	 * case FW-memory congestion is less of a problem.
349 	 * Note that a single connected STA means 2*ap_count + 1 active links,
350 	 * since we must account for the global and broadcast AP links
351 	 * for each AP. The "fw_ps" check assures us the other link is a STA
352 	 * connected to the AP. Otherwise the FW would not set the PSM bit.
353 	 */
354 	else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
355 		 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
356 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
357 }
358 
359 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
360 					   struct wl12xx_vif *wlvif,
361 					   struct wl_fw_status *status)
362 {
363 	unsigned long cur_fw_ps_map;
364 	u8 hlid;
365 
366 	cur_fw_ps_map = status->link_ps_bitmap;
367 	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
368 		wl1271_debug(DEBUG_PSM,
369 			     "link ps prev 0x%lx cur 0x%lx changed 0x%lx",
370 			     wl->ap_fw_ps_map, cur_fw_ps_map,
371 			     wl->ap_fw_ps_map ^ cur_fw_ps_map);
372 
373 		wl->ap_fw_ps_map = cur_fw_ps_map;
374 	}
375 
376 	for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
377 		wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
378 					    wl->links[hlid].allocated_pkts);
379 }
380 
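/*
 * Read the raw FW status block from the chip and refresh the driver's
 * bookkeeping: freed packets per queue and per link, freed Tx blocks (re-arming
 * or cancelling the Tx watchdog accordingly), per-link PS state for AP roles
 * and the host/firmware time offset.
 */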
381 static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
382 {
383 	struct wl12xx_vif *wlvif;
384 	struct timespec ts;
385 	u32 old_tx_blk_count = wl->tx_blocks_available;
386 	int avail, freed_blocks;
387 	int i;
388 	int ret;
389 	struct wl1271_link *lnk;
390 
391 	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
392 				   wl->raw_fw_status,
393 				   wl->fw_status_len, false);
394 	if (ret < 0)
395 		return ret;
396 
397 	wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
398 
399 	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
400 		     "drv_rx_counter = %d, tx_results_counter = %d)",
401 		     status->intr,
402 		     status->fw_rx_counter,
403 		     status->drv_rx_counter,
404 		     status->tx_results_counter);
405 
406 	for (i = 0; i < NUM_TX_QUEUES; i++) {
407 		/* prevent wrap-around in freed-packets counter */
408 		wl->tx_allocated_pkts[i] -=
409 				(status->counters.tx_released_pkts[i] -
410 				wl->tx_pkts_freed[i]) & 0xff;
411 
412 		wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
413 	}
414 
415 
416 	for_each_set_bit(i, wl->links_map, wl->num_links) {
417 		u8 diff;
418 		lnk = &wl->links[i];
419 
420 		/* prevent wrap-around in freed-packets counter */
421 		diff = (status->counters.tx_lnk_free_pkts[i] -
422 		       lnk->prev_freed_pkts) & 0xff;
423 
424 		if (diff == 0)
425 			continue;
426 
427 		lnk->allocated_pkts -= diff;
428 		lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
429 
430 		/* accumulate the prev_freed_pkts counter */
431 		lnk->total_freed_pkts += diff;
432 	}
433 
434 	/* prevent wrap-around in total blocks counter */
435 	if (likely(wl->tx_blocks_freed <= status->total_released_blks))
436 		freed_blocks = status->total_released_blks -
437 			       wl->tx_blocks_freed;
438 	else
439 		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
440 			       status->total_released_blks;
441 
442 	wl->tx_blocks_freed = status->total_released_blks;
443 
444 	wl->tx_allocated_blocks -= freed_blocks;
445 
446 	/*
447 	 * If the FW freed some blocks:
448 	 * If we still have allocated blocks - re-arm the timer, Tx is
449 	 * not stuck. Otherwise, cancel the timer (no Tx currently).
450 	 */
451 	if (freed_blocks) {
452 		if (wl->tx_allocated_blocks)
453 			wl12xx_rearm_tx_watchdog_locked(wl);
454 		else
455 			cancel_delayed_work(&wl->tx_watchdog_work);
456 	}
457 
458 	avail = status->tx_total - wl->tx_allocated_blocks;
459 
460 	/*
461 	 * The FW might change the total number of TX memblocks before
462 	 * we get a notification about blocks being released. Thus, the
463 	 * available blocks calculation might yield a temporary result
464 	 * which is lower than the actual available blocks. Keeping in
465 	 * mind that only blocks that were allocated can be moved from
466 	 * TX to RX, tx_blocks_available should never decrease here.
467 	 */
468 	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
469 				      avail);
470 
471 	/* if more blocks are available now, tx work can be scheduled */
472 	if (wl->tx_blocks_available > old_tx_blk_count)
473 		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
474 
475 	/* for AP update num of allocated TX blocks per link and ps status */
476 	wl12xx_for_each_wlvif_ap(wl, wlvif) {
477 		wl12xx_irq_update_links_status(wl, wlvif, status);
478 	}
479 
480 	/* update the host-chipset time offset */
481 	getnstimeofday(&ts);
482 	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
483 		(s64)(status->fw_localtime);
484 
485 	wl->fw_fast_lnk_map = status->link_fast_bitmap;
486 
487 	return 0;
488 }
489 
490 static void wl1271_flush_deferred_work(struct wl1271 *wl)
491 {
492 	struct sk_buff *skb;
493 
494 	/* Pass all received frames to the network stack */
495 	while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
496 		ieee80211_rx_ni(wl->hw, skb);
497 
498 	/* Return sent skbs to the network stack */
499 	while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
500 		ieee80211_tx_status_ni(wl->hw, skb);
501 }
502 
503 static void wl1271_netstack_work(struct work_struct *work)
504 {
505 	struct wl1271 *wl =
506 		container_of(work, struct wl1271, netstack_work);
507 
508 	do {
509 		wl1271_flush_deferred_work(wl);
510 	} while (skb_queue_len(&wl->deferred_rx_queue));
511 }
512 
513 #define WL1271_IRQ_MAX_LOOPS 256
514 
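/*
 * Interrupt bottom half, called with wl->mutex held. Loops (up to
 * WL1271_IRQ_MAX_LOOPS, or once for edge-triggered IRQs) over the pending
 * interrupt causes: Rx/Tx data, firmware events A/B and the HW/SW watchdogs.
 */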
515 static int wlcore_irq_locked(struct wl1271 *wl)
516 {
517 	int ret = 0;
518 	u32 intr;
519 	int loopcount = WL1271_IRQ_MAX_LOOPS;
520 	bool done = false;
521 	unsigned int defer_count;
522 	unsigned long flags;
523 
524 	/*
525 	 * If an edge-triggered interrupt must be used, we cannot iterate
526 	 * more than once without introducing race conditions with the hardirq.
527 	 */
528 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
529 		loopcount = 1;
530 
531 	wl1271_debug(DEBUG_IRQ, "IRQ work");
532 
533 	if (unlikely(wl->state != WLCORE_STATE_ON))
534 		goto out;
535 
536 	ret = wl1271_ps_elp_wakeup(wl);
537 	if (ret < 0)
538 		goto out;
539 
540 	while (!done && loopcount--) {
541 		/*
542 		 * In order to avoid a race with the hardirq, clear the flag
543 		 * before acknowledging the chip. Since the mutex is held,
544 		 * wl1271_ps_elp_wakeup cannot be called concurrently.
545 		 */
546 		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
547 		smp_mb__after_atomic();
548 
549 		ret = wlcore_fw_status(wl, wl->fw_status);
550 		if (ret < 0)
551 			goto out;
552 
553 		wlcore_hw_tx_immediate_compl(wl);
554 
555 		intr = wl->fw_status->intr;
556 		intr &= WLCORE_ALL_INTR_MASK;
557 		if (!intr) {
558 			done = true;
559 			continue;
560 		}
561 
562 		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
563 			wl1271_error("HW watchdog interrupt received! starting recovery.");
564 			wl->watchdog_recovery = true;
565 			ret = -EIO;
566 
567 			/* restarting the chip. ignore any other interrupt. */
568 			goto out;
569 		}
570 
571 		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
572 			wl1271_error("SW watchdog interrupt received! "
573 				     "starting recovery.");
574 			wl->watchdog_recovery = true;
575 			ret = -EIO;
576 
577 			/* restarting the chip. ignore any other interrupt. */
578 			goto out;
579 		}
580 
581 		if (likely(intr & WL1271_ACX_INTR_DATA)) {
582 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
583 
584 			ret = wlcore_rx(wl, wl->fw_status);
585 			if (ret < 0)
586 				goto out;
587 
588 			/* Check if any tx blocks were freed */
589 			spin_lock_irqsave(&wl->wl_lock, flags);
590 			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
591 			    wl1271_tx_total_queue_count(wl) > 0) {
592 				spin_unlock_irqrestore(&wl->wl_lock, flags);
593 				/*
594 				 * In order to avoid starvation of the TX path,
595 				 * call the work function directly.
596 				 */
597 				ret = wlcore_tx_work_locked(wl);
598 				if (ret < 0)
599 					goto out;
600 			} else {
601 				spin_unlock_irqrestore(&wl->wl_lock, flags);
602 			}
603 
604 			/* check for tx results */
605 			ret = wlcore_hw_tx_delayed_compl(wl);
606 			if (ret < 0)
607 				goto out;
608 
609 			/* Make sure the deferred queues don't get too long */
610 			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
611 				      skb_queue_len(&wl->deferred_rx_queue);
612 			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
613 				wl1271_flush_deferred_work(wl);
614 		}
615 
616 		if (intr & WL1271_ACX_INTR_EVENT_A) {
617 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
618 			ret = wl1271_event_handle(wl, 0);
619 			if (ret < 0)
620 				goto out;
621 		}
622 
623 		if (intr & WL1271_ACX_INTR_EVENT_B) {
624 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
625 			ret = wl1271_event_handle(wl, 1);
626 			if (ret < 0)
627 				goto out;
628 		}
629 
630 		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
631 			wl1271_debug(DEBUG_IRQ,
632 				     "WL1271_ACX_INTR_INIT_COMPLETE");
633 
634 		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
635 			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
636 	}
637 
638 	wl1271_ps_elp_sleep(wl);
639 
640 out:
641 	return ret;
642 }
643 
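/*
 * Threaded IRQ handler: completes a pending ELP wakeup, defers the work if we
 * are suspended, otherwise runs wlcore_irq_locked() under wl->mutex and queues
 * recovery on failure.
 */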
644 static irqreturn_t wlcore_irq(int irq, void *cookie)
645 {
646 	int ret;
647 	unsigned long flags;
648 	struct wl1271 *wl = cookie;
649 
650 	/* complete the ELP completion */
651 	spin_lock_irqsave(&wl->wl_lock, flags);
652 	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
653 	if (wl->elp_compl) {
654 		complete(wl->elp_compl);
655 		wl->elp_compl = NULL;
656 	}
657 
658 	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
659 		/* don't enqueue a work right now. mark it as pending */
660 		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
661 		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
662 		disable_irq_nosync(wl->irq);
663 		pm_wakeup_event(wl->dev, 0);
664 		spin_unlock_irqrestore(&wl->wl_lock, flags);
665 		return IRQ_HANDLED;
666 	}
667 	spin_unlock_irqrestore(&wl->wl_lock, flags);
668 
669 	/* TX might be handled here, avoid redundant work */
670 	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
671 	cancel_work_sync(&wl->tx_work);
672 
673 	mutex_lock(&wl->mutex);
674 
675 	ret = wlcore_irq_locked(wl);
676 	if (ret)
677 		wl12xx_queue_recovery_work(wl);
678 
679 	spin_lock_irqsave(&wl->wl_lock, flags);
680 	/* In case TX was not handled here, queue TX work */
681 	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
682 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
683 	    wl1271_tx_total_queue_count(wl) > 0)
684 		ieee80211_queue_work(wl->hw, &wl->tx_work);
685 	spin_unlock_irqrestore(&wl->wl_lock, flags);
686 
687 	mutex_unlock(&wl->mutex);
688 
689 	return IRQ_HANDLED;
690 }
691 
692 struct vif_counter_data {
693 	u8 counter;
694 
695 	struct ieee80211_vif *cur_vif;
696 	bool cur_vif_running;
697 };
698 
699 static void wl12xx_vif_count_iter(void *data, u8 *mac,
700 				  struct ieee80211_vif *vif)
701 {
702 	struct vif_counter_data *counter = data;
703 
704 	counter->counter++;
705 	if (counter->cur_vif == vif)
706 		counter->cur_vif_running = true;
707 }
708 
709 /* caller must not hold wl->mutex, as it might deadlock */
710 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
711 			       struct ieee80211_vif *cur_vif,
712 			       struct vif_counter_data *data)
713 {
714 	memset(data, 0, sizeof(*data));
715 	data->cur_vif = cur_vif;
716 
717 	ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
718 					    wl12xx_vif_count_iter, data);
719 }
720 
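/*
 * Pick the firmware image to load (PLT, multi-role or single-role, based on
 * the cached last_vif_count) and copy it into wl->fw, unless an image of the
 * right type is already loaded.
 */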
721 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
722 {
723 	const struct firmware *fw;
724 	const char *fw_name;
725 	enum wl12xx_fw_type fw_type;
726 	int ret;
727 
728 	if (plt) {
729 		fw_type = WL12XX_FW_TYPE_PLT;
730 		fw_name = wl->plt_fw_name;
731 	} else {
732 		/*
733 		 * we can't call wl12xx_get_vif_count() here because
734 		 * wl->mutex is taken, so use the cached last_vif_count value
735 		 */
736 		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
737 			fw_type = WL12XX_FW_TYPE_MULTI;
738 			fw_name = wl->mr_fw_name;
739 		} else {
740 			fw_type = WL12XX_FW_TYPE_NORMAL;
741 			fw_name = wl->sr_fw_name;
742 		}
743 	}
744 
745 	if (wl->fw_type == fw_type)
746 		return 0;
747 
748 	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
749 
750 	ret = request_firmware(&fw, fw_name, wl->dev);
751 
752 	if (ret < 0) {
753 		wl1271_error("could not get firmware %s: %d", fw_name, ret);
754 		return ret;
755 	}
756 
757 	if (fw->size % 4) {
758 		wl1271_error("firmware size is not a multiple of 32 bits: %zu",
759 			     fw->size);
760 		ret = -EILSEQ;
761 		goto out;
762 	}
763 
764 	vfree(wl->fw);
765 	wl->fw_type = WL12XX_FW_TYPE_NONE;
766 	wl->fw_len = fw->size;
767 	wl->fw = vmalloc(wl->fw_len);
768 
769 	if (!wl->fw) {
770 		wl1271_error("could not allocate memory for the firmware");
771 		ret = -ENOMEM;
772 		goto out;
773 	}
774 
775 	memcpy(wl->fw, fw->data, wl->fw_len);
776 	ret = 0;
777 	wl->fw_type = fw_type;
778 out:
779 	release_firmware(fw);
780 
781 	return ret;
782 }
783 
784 void wl12xx_queue_recovery_work(struct wl1271 *wl)
785 {
786 	/* Avoid a recursive recovery */
787 	if (wl->state == WLCORE_STATE_ON) {
788 		WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
789 				  &wl->flags));
790 
791 		wl->state = WLCORE_STATE_RESTARTING;
792 		set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
793 		wl1271_ps_elp_wakeup(wl);
794 		wlcore_disable_interrupts_nosync(wl);
795 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
796 	}
797 }
798 
799 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
800 {
801 	size_t len;
802 
803 	/* Make sure we have enough room */
804 	len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
805 
806 	/* Fill the FW log file, consumed by the sysfs fwlog entry */
807 	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
808 	wl->fwlog_size += len;
809 
810 	return len;
811 }
812 
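/*
 * On a firmware panic, walk the linked list of FW log memory blocks and copy
 * their contents into the host fwlog buffer consumed by the sysfs fwlog entry.
 */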
813 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
814 {
815 	struct wlcore_partition_set part, old_part;
816 	u32 addr;
817 	u32 offset;
818 	u32 end_of_log;
819 	u8 *block;
820 	int ret;
821 
822 	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
823 	    (wl->conf.fwlog.mem_blocks == 0))
824 		return;
825 
826 	wl1271_info("Reading FW panic log");
827 
828 	block = kmalloc(wl->fw_mem_block_size, GFP_KERNEL);
829 	if (!block)
830 		return;
831 
832 	/*
833 	 * Make sure the chip is awake and the logger isn't active.
834 	 * Do not send a stop fwlog command if the fw has hung or if
835 	 * dbgpins are used (due to some fw bug).
836 	 */
837 	if (wl1271_ps_elp_wakeup(wl))
838 		goto out;
839 	if (!wl->watchdog_recovery &&
840 	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
841 		wl12xx_cmd_stop_fwlog(wl);
842 
843 	/* Read the first memory block address */
844 	ret = wlcore_fw_status(wl, wl->fw_status);
845 	if (ret < 0)
846 		goto out;
847 
848 	addr = wl->fw_status->log_start_addr;
849 	if (!addr)
850 		goto out;
851 
852 	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
853 		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
854 		end_of_log = wl->fwlog_end;
855 	} else {
856 		offset = sizeof(addr);
857 		end_of_log = addr;
858 	}
859 
860 	old_part = wl->curr_part;
861 	memset(&part, 0, sizeof(part));
862 
863 	/* Traverse the memory blocks linked list */
864 	do {
865 		part.mem.start = wlcore_hw_convert_hwaddr(wl, addr);
866 		part.mem.size  = PAGE_SIZE;
867 
868 		ret = wlcore_set_partition(wl, &part);
869 		if (ret < 0) {
870 			wl1271_error("%s: set_partition start=0x%X size=%d",
871 				__func__, part.mem.start, part.mem.size);
872 			goto out;
873 		}
874 
875 		memset(block, 0, wl->fw_mem_block_size);
876 		ret = wlcore_read_hwaddr(wl, addr, block,
877 					wl->fw_mem_block_size, false);
878 
879 		if (ret < 0)
880 			goto out;
881 
882 		/*
883 		 * Memory blocks are linked to one another. The first 4 bytes
884 		 * of each memory block hold the hardware address of the next
885 		 * one. The last memory block points to the first one in
886 		 * on-demand mode and is equal to 0x2000000 in continuous mode.
887 		 */
888 		addr = le32_to_cpup((__le32 *)block);
889 
890 		if (!wl12xx_copy_fwlog(wl, block + offset,
891 					wl->fw_mem_block_size - offset))
892 			break;
893 	} while (addr && (addr != end_of_log));
894 
895 	wake_up_interruptible(&wl->fwlog_waitq);
896 
897 out:
898 	kfree(block);
899 	wlcore_set_partition(wl, &old_part);
900 }
901 
902 static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
903 				   u8 hlid, struct ieee80211_sta *sta)
904 {
905 	struct wl1271_station *wl_sta;
906 	u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
907 
908 	wl_sta = (void *)sta->drv_priv;
909 	wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
910 
911 	/*
912 	 * increment the initial seq number on recovery to account for
913 	 * transmitted packets that we haven't yet got in the FW status
914 	 */
915 	if (wlvif->encryption_type == KEY_GEM)
916 		sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
917 
918 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
919 		wl_sta->total_freed_pkts += sqn_recovery_padding;
920 }
921 
922 static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
923 					struct wl12xx_vif *wlvif,
924 					u8 hlid, const u8 *addr)
925 {
926 	struct ieee80211_sta *sta;
927 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
928 
929 	if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
930 		    is_zero_ether_addr(addr)))
931 		return;
932 
933 	rcu_read_lock();
934 	sta = ieee80211_find_sta(vif, addr);
935 	if (sta)
936 		wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
937 	rcu_read_unlock();
938 }
939 
940 static void wlcore_print_recovery(struct wl1271 *wl)
941 {
942 	u32 pc = 0;
943 	u32 hint_sts = 0;
944 	int ret;
945 
946 	wl1271_info("Hardware recovery in progress. FW ver: %s",
947 		    wl->chip.fw_ver_str);
948 
949 	/* change partitions momentarily so we can read the FW pc */
950 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
951 	if (ret < 0)
952 		return;
953 
954 	ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
955 	if (ret < 0)
956 		return;
957 
958 	ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
959 	if (ret < 0)
960 		return;
961 
962 	wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
963 				pc, hint_sts, ++wl->recovery_count);
964 
965 	wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
966 }
967 
968 
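/*
 * Hardware recovery: dump the FW panic log and program counter, save per-STA
 * freed-packet counters so sequence numbers survive the restart, tear down all
 * interfaces, stop the low-level driver and ask mac80211 to restart the
 * hardware (unless no_recovery was requested on module load).
 */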
969 static void wl1271_recovery_work(struct work_struct *work)
970 {
971 	struct wl1271 *wl =
972 		container_of(work, struct wl1271, recovery_work);
973 	struct wl12xx_vif *wlvif;
974 	struct ieee80211_vif *vif;
975 
976 	mutex_lock(&wl->mutex);
977 
978 	if (wl->state == WLCORE_STATE_OFF || wl->plt)
979 		goto out_unlock;
980 
981 	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
982 		if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
983 			wl12xx_read_fwlog_panic(wl);
984 		wlcore_print_recovery(wl);
985 	}
986 
987 	BUG_ON(wl->conf.recovery.bug_on_recovery &&
988 	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
989 
990 	if (wl->conf.recovery.no_recovery) {
991 		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
992 		goto out_unlock;
993 	}
994 
995 	/* Prevent spurious TX during FW restart */
996 	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
997 
998 	/* reboot the chipset */
999 	while (!list_empty(&wl->wlvif_list)) {
1000 		wlvif = list_first_entry(&wl->wlvif_list,
1001 				       struct wl12xx_vif, list);
1002 		vif = wl12xx_wlvif_to_vif(wlvif);
1003 
1004 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
1005 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
1006 			wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
1007 						    vif->bss_conf.bssid);
1008 		}
1009 
1010 		__wl1271_op_remove_interface(wl, vif, false);
1011 	}
1012 
1013 	wlcore_op_stop_locked(wl);
1014 
1015 	ieee80211_restart_hw(wl->hw);
1016 
1017 	/*
1018 	 * It's safe to enable TX now - the queues are stopped after a request
1019 	 * to restart the HW.
1020 	 */
1021 	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
1022 
1023 out_unlock:
1024 	wl->watchdog_recovery = false;
1025 	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
1026 	mutex_unlock(&wl->mutex);
1027 }
1028 
1029 static int wlcore_fw_wakeup(struct wl1271 *wl)
1030 {
1031 	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
1032 }
1033 
1034 static int wl1271_setup(struct wl1271 *wl)
1035 {
1036 	wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
1037 	if (!wl->raw_fw_status)
1038 		goto err;
1039 
1040 	wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
1041 	if (!wl->fw_status)
1042 		goto err;
1043 
1044 	wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
1045 	if (!wl->tx_res_if)
1046 		goto err;
1047 
1048 	return 0;
1049 err:
1050 	kfree(wl->fw_status);
1051 	kfree(wl->raw_fw_status);
1052 	return -ENOMEM;
1053 }
1054 
1055 static int wl12xx_set_power_on(struct wl1271 *wl)
1056 {
1057 	int ret;
1058 
1059 	msleep(WL1271_PRE_POWER_ON_SLEEP);
1060 	ret = wl1271_power_on(wl);
1061 	if (ret < 0)
1062 		goto out;
1063 	msleep(WL1271_POWER_ON_SLEEP);
1064 	wl1271_io_reset(wl);
1065 	wl1271_io_init(wl);
1066 
1067 	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1068 	if (ret < 0)
1069 		goto fail;
1070 
1071 	/* ELP module wake up */
1072 	ret = wlcore_fw_wakeup(wl);
1073 	if (ret < 0)
1074 		goto fail;
1075 
1076 out:
1077 	return ret;
1078 
1079 fail:
1080 	wl1271_power_off(wl);
1081 	return ret;
1082 }
1083 
1084 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1085 {
1086 	int ret = 0;
1087 
1088 	ret = wl12xx_set_power_on(wl);
1089 	if (ret < 0)
1090 		goto out;
1091 
1092 	/*
1093 	 * For wl127x based devices we could use the default block
1094 	 * size (512 bytes), but due to a bug in the sdio driver, we
1095 	 * need to set it explicitly after the chip is powered on.  To
1096 	 * simplify the code and since the performance impact is
1097 	 * negligible, we use the same block size for all different
1098 	 * chip types.
1099 	 *
1100 	 * Check if the bus supports blocksize alignment and, if it
1101 	 * doesn't, make sure we don't have the quirk.
1102 	 */
1103 	if (!wl1271_set_block_size(wl))
1104 		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1105 
1106 	/* TODO: make sure the lower driver has set things up correctly */
1107 
1108 	ret = wl1271_setup(wl);
1109 	if (ret < 0)
1110 		goto out;
1111 
1112 	ret = wl12xx_fetch_firmware(wl, plt);
1113 	if (ret < 0)
1114 		goto out;
1115 
1116 out:
1117 	return ret;
1118 }
1119 
1120 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1121 {
1122 	int retries = WL1271_BOOT_RETRIES;
1123 	struct wiphy *wiphy = wl->hw->wiphy;
1124 
1125 	static const char* const PLT_MODE[] = {
1126 		"PLT_OFF",
1127 		"PLT_ON",
1128 		"PLT_FEM_DETECT",
1129 		"PLT_CHIP_AWAKE"
1130 	};
1131 
1132 	int ret;
1133 
1134 	mutex_lock(&wl->mutex);
1135 
1136 	wl1271_notice("power up");
1137 
1138 	if (wl->state != WLCORE_STATE_OFF) {
1139 		wl1271_error("cannot go into PLT state because not "
1140 			     "in off state: %d", wl->state);
1141 		ret = -EBUSY;
1142 		goto out;
1143 	}
1144 
1145 	/* Indicate to lower levels that we are now in PLT mode */
1146 	wl->plt = true;
1147 	wl->plt_mode = plt_mode;
1148 
1149 	while (retries) {
1150 		retries--;
1151 		ret = wl12xx_chip_wakeup(wl, true);
1152 		if (ret < 0)
1153 			goto power_off;
1154 
1155 		if (plt_mode != PLT_CHIP_AWAKE) {
1156 			ret = wl->ops->plt_init(wl);
1157 			if (ret < 0)
1158 				goto power_off;
1159 		}
1160 
1161 		wl->state = WLCORE_STATE_ON;
1162 		wl1271_notice("firmware booted in PLT mode %s (%s)",
1163 			      PLT_MODE[plt_mode],
1164 			      wl->chip.fw_ver_str);
1165 
1166 		/* update hw/fw version info in wiphy struct */
1167 		wiphy->hw_version = wl->chip.id;
1168 		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1169 			sizeof(wiphy->fw_version));
1170 
1171 		goto out;
1172 
1173 power_off:
1174 		wl1271_power_off(wl);
1175 	}
1176 
1177 	wl->plt = false;
1178 	wl->plt_mode = PLT_OFF;
1179 
1180 	wl1271_error("firmware boot in PLT mode failed despite %d retries",
1181 		     WL1271_BOOT_RETRIES);
1182 out:
1183 	mutex_unlock(&wl->mutex);
1184 
1185 	return ret;
1186 }
1187 
1188 int wl1271_plt_stop(struct wl1271 *wl)
1189 {
1190 	int ret = 0;
1191 
1192 	wl1271_notice("power down");
1193 
1194 	/*
1195 	 * Interrupts must be disabled before setting the state to OFF.
1196 	 * Otherwise, the interrupt handler might be called and exit without
1197 	 * reading the interrupt status.
1198 	 */
1199 	wlcore_disable_interrupts(wl);
1200 	mutex_lock(&wl->mutex);
1201 	if (!wl->plt) {
1202 		mutex_unlock(&wl->mutex);
1203 
1204 		/*
1205 		 * This will not necessarily enable interrupts as interrupts
1206 		 * may have been disabled when op_stop was called. It will,
1207 		 * however, balance the above call to disable_interrupts().
1208 		 */
1209 		wlcore_enable_interrupts(wl);
1210 
1211 		wl1271_error("cannot power down because not in PLT "
1212 			     "state: %d", wl->state);
1213 		ret = -EBUSY;
1214 		goto out;
1215 	}
1216 
1217 	mutex_unlock(&wl->mutex);
1218 
1219 	wl1271_flush_deferred_work(wl);
1220 	cancel_work_sync(&wl->netstack_work);
1221 	cancel_work_sync(&wl->recovery_work);
1222 	cancel_delayed_work_sync(&wl->elp_work);
1223 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1224 
1225 	mutex_lock(&wl->mutex);
1226 	wl1271_power_off(wl);
1227 	wl->flags = 0;
1228 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1229 	wl->state = WLCORE_STATE_OFF;
1230 	wl->plt = false;
1231 	wl->plt_mode = PLT_OFF;
1232 	wl->rx_counter = 0;
1233 	mutex_unlock(&wl->mutex);
1234 
1235 out:
1236 	return ret;
1237 }
1238 
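/*
 * mac80211 Tx entry point: map the frame to a link (hlid) and AC queue, drop
 * it if the link is invalid or the queue is stopped for anything other than
 * the watermark soft-stop, apply high-watermark flow control and kick the Tx
 * work if the FW is not busy.
 */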
1239 static void wl1271_op_tx(struct ieee80211_hw *hw,
1240 			 struct ieee80211_tx_control *control,
1241 			 struct sk_buff *skb)
1242 {
1243 	struct wl1271 *wl = hw->priv;
1244 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1245 	struct ieee80211_vif *vif = info->control.vif;
1246 	struct wl12xx_vif *wlvif = NULL;
1247 	unsigned long flags;
1248 	int q, mapping;
1249 	u8 hlid;
1250 
1251 	if (!vif) {
1252 		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1253 		ieee80211_free_txskb(hw, skb);
1254 		return;
1255 	}
1256 
1257 	wlvif = wl12xx_vif_to_data(vif);
1258 	mapping = skb_get_queue_mapping(skb);
1259 	q = wl1271_tx_get_queue(mapping);
1260 
1261 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1262 
1263 	spin_lock_irqsave(&wl->wl_lock, flags);
1264 
1265 	/*
1266 	 * drop the packet if the link is invalid or the queue is stopped
1267 	 * for any reason but watermark. Watermark is a "soft"-stop so we
1268 	 * allow these packets through.
1269 	 */
1270 	if (hlid == WL12XX_INVALID_LINK_ID ||
1271 	    (!test_bit(hlid, wlvif->links_map)) ||
1272 	     (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1273 	      !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1274 			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1275 		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1276 		ieee80211_free_txskb(hw, skb);
1277 		goto out;
1278 	}
1279 
1280 	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1281 		     hlid, q, skb->len);
1282 	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1283 
1284 	wl->tx_queue_count[q]++;
1285 	wlvif->tx_queue_count[q]++;
1286 
1287 	/*
1288 	 * The workqueue is slow to process the tx_queue and we need to stop
1289 	 * the queue here, otherwise the queue will get too long.
1290 	 */
1291 	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1292 	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1293 					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1294 		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1295 		wlcore_stop_queue_locked(wl, wlvif, q,
1296 					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1297 	}
1298 
1299 	/*
1300 	 * The chip specific setup must run before the first TX packet -
1301 	 * before that, the tx_work will not be initialized!
1302 	 */
1303 
1304 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1305 	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1306 		ieee80211_queue_work(wl->hw, &wl->tx_work);
1307 
1308 out:
1309 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1310 }
1311 
1312 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1313 {
1314 	unsigned long flags;
1315 	int q;
1316 
1317 	/* no need to queue a new dummy packet if one is already pending */
1318 	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1319 		return 0;
1320 
1321 	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1322 
1323 	spin_lock_irqsave(&wl->wl_lock, flags);
1324 	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1325 	wl->tx_queue_count[q]++;
1326 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1327 
1328 	/* The FW is low on RX memory blocks, so send the dummy packet asap */
1329 	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1330 		return wlcore_tx_work_locked(wl);
1331 
1332 	/*
1333 	 * If the FW TX is busy, TX work will be scheduled by the threaded
1334 	 * interrupt handler function
1335 	 */
1336 	return 0;
1337 }
1338 
1339 /*
1340  * The size of the dummy packet should be at least 1400 bytes. However, in
1341  * order to minimize the number of bus transactions, aligning it to 512-byte
1342  * boundaries could be beneficial, performance-wise.
1343  */
1344 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1345 
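/*
 * Allocate a zero-padded NULL-func (To-DS) data frame of
 * TOTAL_TX_DUMMY_PACKET_SIZE, with a management TID and queue mapping 0, for
 * use as the dummy packet described above.
 */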
1346 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1347 {
1348 	struct sk_buff *skb;
1349 	struct ieee80211_hdr_3addr *hdr;
1350 	unsigned int dummy_packet_size;
1351 
1352 	dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1353 			    sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1354 
1355 	skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1356 	if (!skb) {
1357 		wl1271_warning("Failed to allocate a dummy packet skb");
1358 		return NULL;
1359 	}
1360 
1361 	skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1362 
1363 	hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1364 	memset(hdr, 0, sizeof(*hdr));
1365 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1366 					 IEEE80211_STYPE_NULLFUNC |
1367 					 IEEE80211_FCTL_TODS);
1368 
1369 	memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1370 
1371 	/* Dummy packets require the TID to be management */
1372 	skb->priority = WL1271_TID_MGMT;
1373 
1374 	/* Initialize all fields that might be used */
1375 	skb_set_queue_mapping(skb, 0);
1376 	memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1377 
1378 	return skb;
1379 }
1380 
1381 
1382 #ifdef CONFIG_PM
1383 static int
1384 wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
1385 {
1386 	int num_fields = 0, in_field = 0, fields_size = 0;
1387 	int i, pattern_len = 0;
1388 
1389 	if (!p->mask) {
1390 		wl1271_warning("No mask in WoWLAN pattern");
1391 		return -EINVAL;
1392 	}
1393 
1394 	/*
1395 	 * The pattern is broken up into segments of bytes at different offsets
1396 	 * that need to be checked by the FW filter. Each segment is called
1397 	 * a field in the FW API. We verify that the total number of fields
1398 	 * required for this pattern won't exceed FW limits (8)
1399 	 * as well as the total fields buffer won't exceed the FW limit.
1400 	 * Note that if there's a pattern which crosses Ethernet/IP header
1401 	 * boundary a new field is required.
1402 	 */
1403 	for (i = 0; i < p->pattern_len; i++) {
1404 		if (test_bit(i, (unsigned long *)p->mask)) {
1405 			if (!in_field) {
1406 				in_field = 1;
1407 				pattern_len = 1;
1408 			} else {
1409 				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1410 					num_fields++;
1411 					fields_size += pattern_len +
1412 						RX_FILTER_FIELD_OVERHEAD;
1413 					pattern_len = 1;
1414 				} else
1415 					pattern_len++;
1416 			}
1417 		} else {
1418 			if (in_field) {
1419 				in_field = 0;
1420 				fields_size += pattern_len +
1421 					RX_FILTER_FIELD_OVERHEAD;
1422 				num_fields++;
1423 			}
1424 		}
1425 	}
1426 
1427 	if (in_field) {
1428 		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1429 		num_fields++;
1430 	}
1431 
1432 	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1433 		wl1271_warning("RX Filter too complex. Too many segments");
1434 		return -EINVAL;
1435 	}
1436 
1437 	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1438 		wl1271_warning("RX filter pattern is too big");
1439 		return -E2BIG;
1440 	}
1441 
1442 	return 0;
1443 }
1444 
1445 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1446 {
1447 	return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1448 }
1449 
1450 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1451 {
1452 	int i;
1453 
1454 	if (filter == NULL)
1455 		return;
1456 
1457 	for (i = 0; i < filter->num_fields; i++)
1458 		kfree(filter->fields[i].pattern);
1459 
1460 	kfree(filter);
1461 }
1462 
1463 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1464 				 u16 offset, u8 flags,
1465 				 const u8 *pattern, u8 len)
1466 {
1467 	struct wl12xx_rx_filter_field *field;
1468 
1469 	if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1470 		wl1271_warning("Max fields per RX filter. can't alloc another");
1471 		return -EINVAL;
1472 	}
1473 
1474 	field = &filter->fields[filter->num_fields];
1475 
1476 	field->pattern = kzalloc(len, GFP_KERNEL);
1477 	if (!field->pattern) {
1478 		wl1271_warning("Failed to allocate RX filter pattern");
1479 		return -ENOMEM;
1480 	}
1481 
1482 	filter->num_fields++;
1483 
1484 	field->offset = cpu_to_le16(offset);
1485 	field->flags = flags;
1486 	field->len = len;
1487 	memcpy(field->pattern, pattern, len);
1488 
1489 	return 0;
1490 }
1491 
1492 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1493 {
1494 	int i, fields_size = 0;
1495 
1496 	for (i = 0; i < filter->num_fields; i++)
1497 		fields_size += filter->fields[i].len +
1498 			sizeof(struct wl12xx_rx_filter_field) -
1499 			sizeof(u8 *);
1500 
1501 	return fields_size;
1502 }
1503 
1504 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1505 				    u8 *buf)
1506 {
1507 	int i;
1508 	struct wl12xx_rx_filter_field *field;
1509 
1510 	for (i = 0; i < filter->num_fields; i++) {
1511 		field = (struct wl12xx_rx_filter_field *)buf;
1512 
1513 		field->offset = filter->fields[i].offset;
1514 		field->flags = filter->fields[i].flags;
1515 		field->len = filter->fields[i].len;
1516 
1517 		memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1518 		buf += sizeof(struct wl12xx_rx_filter_field) -
1519 			sizeof(u8 *) + field->len;
1520 	}
1521 }
1522 
1523 /*
1524  * Allocates an RX filter returned through f
1525  * which needs to be freed using rx_filter_free()
1526  */
1527 static int
1528 wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
1529 					   struct wl12xx_rx_filter **f)
1530 {
1531 	int i, j, ret = 0;
1532 	struct wl12xx_rx_filter *filter;
1533 	u16 offset;
1534 	u8 flags, len;
1535 
1536 	filter = wl1271_rx_filter_alloc();
1537 	if (!filter) {
1538 		wl1271_warning("Failed to alloc rx filter");
1539 		ret = -ENOMEM;
1540 		goto err;
1541 	}
1542 
1543 	i = 0;
1544 	while (i < p->pattern_len) {
1545 		if (!test_bit(i, (unsigned long *)p->mask)) {
1546 			i++;
1547 			continue;
1548 		}
1549 
1550 		for (j = i; j < p->pattern_len; j++) {
1551 			if (!test_bit(j, (unsigned long *)p->mask))
1552 				break;
1553 
1554 			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1555 			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1556 				break;
1557 		}
1558 
1559 		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1560 			offset = i;
1561 			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1562 		} else {
1563 			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1564 			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1565 		}
1566 
1567 		len = j - i;
1568 
1569 		ret = wl1271_rx_filter_alloc_field(filter,
1570 						   offset,
1571 						   flags,
1572 						   &p->pattern[i], len);
1573 		if (ret)
1574 			goto err;
1575 
1576 		i = j;
1577 	}
1578 
1579 	filter->action = FILTER_SIGNAL;
1580 
1581 	*f = filter;
1582 	return 0;
1583 
1584 err:
1585 	wl1271_rx_filter_free(filter);
1586 	*f = NULL;
1587 
1588 	return ret;
1589 }
1590 
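/*
 * Program the WoWLAN configuration: with no usable patterns, disable the
 * default RX filter and clear all programmed filters; otherwise validate each
 * cfg80211 pattern, program it as an RX filter and set the default action to
 * drop anything that does not match.
 */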
1591 static int wl1271_configure_wowlan(struct wl1271 *wl,
1592 				   struct cfg80211_wowlan *wow)
1593 {
1594 	int i, ret;
1595 
1596 	if (!wow || wow->any || !wow->n_patterns) {
1597 		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1598 							  FILTER_SIGNAL);
1599 		if (ret)
1600 			goto out;
1601 
1602 		ret = wl1271_rx_filter_clear_all(wl);
1603 		if (ret)
1604 			goto out;
1605 
1606 		return 0;
1607 	}
1608 
1609 	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1610 		return -EINVAL;
1611 
1612 	/* Validate all incoming patterns before clearing current FW state */
1613 	for (i = 0; i < wow->n_patterns; i++) {
1614 		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1615 		if (ret) {
1616 			wl1271_warning("Bad wowlan pattern %d", i);
1617 			return ret;
1618 		}
1619 	}
1620 
1621 	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1622 	if (ret)
1623 		goto out;
1624 
1625 	ret = wl1271_rx_filter_clear_all(wl);
1626 	if (ret)
1627 		goto out;
1628 
1629 	/* Translate WoWLAN patterns into filters */
1630 	for (i = 0; i < wow->n_patterns; i++) {
1631 		struct cfg80211_pkt_pattern *p;
1632 		struct wl12xx_rx_filter *filter = NULL;
1633 
1634 		p = &wow->patterns[i];
1635 
1636 		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1637 		if (ret) {
1638 			wl1271_warning("Failed to create an RX filter from "
1639 				       "wowlan pattern %d", i);
1640 			goto out;
1641 		}
1642 
1643 		ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1644 
1645 		wl1271_rx_filter_free(filter);
1646 		if (ret)
1647 			goto out;
1648 	}
1649 
1650 	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1651 
1652 out:
1653 	return ret;
1654 }
1655 
1656 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1657 					struct wl12xx_vif *wlvif,
1658 					struct cfg80211_wowlan *wow)
1659 {
1660 	int ret = 0;
1661 
1662 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1663 		goto out;
1664 
1665 	ret = wl1271_ps_elp_wakeup(wl);
1666 	if (ret < 0)
1667 		goto out;
1668 
1669 	ret = wl1271_configure_wowlan(wl, wow);
1670 	if (ret < 0)
1671 		goto out_sleep;
1672 
1673 	if ((wl->conf.conn.suspend_wake_up_event ==
1674 	     wl->conf.conn.wake_up_event) &&
1675 	    (wl->conf.conn.suspend_listen_interval ==
1676 	     wl->conf.conn.listen_interval))
1677 		goto out_sleep;
1678 
1679 	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1680 				    wl->conf.conn.suspend_wake_up_event,
1681 				    wl->conf.conn.suspend_listen_interval);
1682 
1683 	if (ret < 0)
1684 		wl1271_error("suspend: set wake up conditions failed: %d", ret);
1685 
1686 out_sleep:
1687 	wl1271_ps_elp_sleep(wl);
1688 out:
1689 	return ret;
1690 
1691 }
1692 
1693 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1694 				       struct wl12xx_vif *wlvif)
1695 {
1696 	int ret = 0;
1697 
1698 	if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1699 		goto out;
1700 
1701 	ret = wl1271_ps_elp_wakeup(wl);
1702 	if (ret < 0)
1703 		goto out;
1704 
1705 	ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1706 
1707 	wl1271_ps_elp_sleep(wl);
1708 out:
1709 	return ret;
1710 
1711 }
1712 
1713 static int wl1271_configure_suspend(struct wl1271 *wl,
1714 				    struct wl12xx_vif *wlvif,
1715 				    struct cfg80211_wowlan *wow)
1716 {
1717 	if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1718 		return wl1271_configure_suspend_sta(wl, wlvif, wow);
1719 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1720 		return wl1271_configure_suspend_ap(wl, wlvif);
1721 	return 0;
1722 }
1723 
1724 static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
1725 {
1726 	int ret = 0;
1727 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1728 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1729 
1730 	if ((!is_ap) && (!is_sta))
1731 		return;
1732 
1733 	if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1734 		return;
1735 
1736 	ret = wl1271_ps_elp_wakeup(wl);
1737 	if (ret < 0)
1738 		return;
1739 
1740 	if (is_sta) {
1741 		wl1271_configure_wowlan(wl, NULL);
1742 
1743 		if ((wl->conf.conn.suspend_wake_up_event ==
1744 		     wl->conf.conn.wake_up_event) &&
1745 		    (wl->conf.conn.suspend_listen_interval ==
1746 		     wl->conf.conn.listen_interval))
1747 			goto out_sleep;
1748 
1749 		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1750 				    wl->conf.conn.wake_up_event,
1751 				    wl->conf.conn.listen_interval);
1752 
1753 		if (ret < 0)
1754 			wl1271_error("resume: wake up conditions failed: %d",
1755 				     ret);
1756 
1757 	} else if (is_ap) {
1758 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1759 	}
1760 
1761 out_sleep:
1762 	wl1271_ps_elp_sleep(wl);
1763 }
1764 
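/*
 * mac80211 suspend handler: refuse to suspend while a recovery is pending,
 * flush Tx, configure WoWLAN and wake-up conditions per vif, then quiesce the
 * interrupt path by marking the device suspended and flushing remaining work.
 */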
1765 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1766 			    struct cfg80211_wowlan *wow)
1767 {
1768 	struct wl1271 *wl = hw->priv;
1769 	struct wl12xx_vif *wlvif;
1770 	int ret;
1771 
1772 	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1773 	WARN_ON(!wow);
1774 
1775 	/* we want to perform the recovery before suspending */
1776 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1777 		wl1271_warning("postponing suspend to perform recovery");
1778 		return -EBUSY;
1779 	}
1780 
1781 	wl1271_tx_flush(wl);
1782 
1783 	mutex_lock(&wl->mutex);
1784 	wl->wow_enabled = true;
1785 	wl12xx_for_each_wlvif(wl, wlvif) {
1786 		if (wlcore_is_p2p_mgmt(wlvif))
1787 			continue;
1788 
1789 		ret = wl1271_configure_suspend(wl, wlvif, wow);
1790 		if (ret < 0) {
1791 			mutex_unlock(&wl->mutex);
1792 			wl1271_warning("couldn't prepare device to suspend");
1793 			return ret;
1794 		}
1795 	}
1796 	mutex_unlock(&wl->mutex);
1797 	/* flush any remaining work */
1798 	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1799 
1800 	/*
1801 	 * disable and re-enable interrupts in order to flush
1802 	 * the threaded_irq
1803 	 */
1804 	wlcore_disable_interrupts(wl);
1805 
1806 	/*
1807 	 * set suspended flag to avoid triggering a new threaded_irq
1808 	 * work. no need for spinlock as interrupts are disabled.
1809 	 */
1810 	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1811 
1812 	wlcore_enable_interrupts(wl);
1813 	flush_work(&wl->tx_work);
1814 	flush_delayed_work(&wl->elp_work);
1815 
1816 	/*
1817 	 * Cancel the watchdog even if above tx_flush failed. We will detect
1818 	 * it on resume anyway.
1819 	 */
1820 	cancel_delayed_work(&wl->tx_watchdog_work);
1821 
1822 	return 0;
1823 }
1824 
1825 static int wl1271_op_resume(struct ieee80211_hw *hw)
1826 {
1827 	struct wl1271 *wl = hw->priv;
1828 	struct wl12xx_vif *wlvif;
1829 	unsigned long flags;
1830 	bool run_irq_work = false, pending_recovery;
1831 	int ret;
1832 
1833 	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1834 		     wl->wow_enabled);
1835 	WARN_ON(!wl->wow_enabled);
1836 
1837 	/*
1838 	 * re-enable irq_work enqueuing, and call irq_work directly if
1839 	 * there is a pending work.
1840 	 */
1841 	spin_lock_irqsave(&wl->wl_lock, flags);
1842 	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1843 	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1844 		run_irq_work = true;
1845 	spin_unlock_irqrestore(&wl->wl_lock, flags);
1846 
1847 	mutex_lock(&wl->mutex);
1848 
1849 	/* test the recovery flag before calling any SDIO functions */
1850 	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1851 				    &wl->flags);
1852 
1853 	if (run_irq_work) {
1854 		wl1271_debug(DEBUG_MAC80211,
1855 			     "run postponed irq_work directly");
1856 
1857 		/* don't talk to the HW if recovery is pending */
1858 		if (!pending_recovery) {
1859 			ret = wlcore_irq_locked(wl);
1860 			if (ret)
1861 				wl12xx_queue_recovery_work(wl);
1862 		}
1863 
1864 		wlcore_enable_interrupts(wl);
1865 	}
1866 
1867 	if (pending_recovery) {
1868 		wl1271_warning("queuing forgotten recovery on resume");
1869 		ieee80211_queue_work(wl->hw, &wl->recovery_work);
1870 		goto out;
1871 	}
1872 
1873 	wl12xx_for_each_wlvif(wl, wlvif) {
1874 		if (wlcore_is_p2p_mgmt(wlvif))
1875 			continue;
1876 
1877 		wl1271_configure_resume(wl, wlvif);
1878 	}
1879 
1880 out:
1881 	wl->wow_enabled = false;
1882 
1883 	/*
1884 	 * Set a flag to re-init the watchdog on the first Tx after resume.
1885 	 * That way we avoid possible conditions where Tx-complete interrupts
1886 	 * fail to arrive and we perform a spurious recovery.
1887 	 */
1888 	set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
1889 	mutex_unlock(&wl->mutex);
1890 
1891 	return 0;
1892 }
1893 #endif
1894 
1895 static int wl1271_op_start(struct ieee80211_hw *hw)
1896 {
1897 	wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1898 
1899 	/*
1900 	 * We have to delay the booting of the hardware because
1901 	 * we need to know the local MAC address before downloading and
1902 	 * initializing the firmware. The MAC address cannot be changed
1903 	 * after boot, and without the proper MAC address, the firmware
1904 	 * will not function properly.
1905 	 *
1906 	 * The MAC address is first known when the corresponding interface
1907 	 * is added. That is where we will initialize the hardware.
1908 	 */
1909 
1910 	return 0;
1911 }
1912 
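/*
 * Tear down the device under wl->mutex. The state is moved to
 * WLCORE_STATE_OFF first so the work functions bail out, interrupts are
 * disabled with the nosync variant, and only then is the mutex dropped
 * to synchronize interrupts and cancel the pending works, before being
 * re-taken to reset the per-device bookkeeping.
 */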
1913 static void wlcore_op_stop_locked(struct wl1271 *wl)
1914 {
1915 	int i;
1916 
1917 	if (wl->state == WLCORE_STATE_OFF) {
1918 		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1919 					&wl->flags))
1920 			wlcore_enable_interrupts(wl);
1921 
1922 		return;
1923 	}
1924 
1925 	/*
1926 	 * this must be before the cancel_work calls below, so that the work
1927 	 * functions don't perform further work.
1928 	 */
1929 	wl->state = WLCORE_STATE_OFF;
1930 
1931 	/*
1932 	 * Use the nosync variant to disable interrupts, so the mutex could be
1933 	 * held while doing so without deadlocking.
1934 	 */
1935 	wlcore_disable_interrupts_nosync(wl);
1936 
1937 	mutex_unlock(&wl->mutex);
1938 
1939 	wlcore_synchronize_interrupts(wl);
1940 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1941 		cancel_work_sync(&wl->recovery_work);
1942 	wl1271_flush_deferred_work(wl);
1943 	cancel_delayed_work_sync(&wl->scan_complete_work);
1944 	cancel_work_sync(&wl->netstack_work);
1945 	cancel_work_sync(&wl->tx_work);
1946 	cancel_delayed_work_sync(&wl->elp_work);
1947 	cancel_delayed_work_sync(&wl->tx_watchdog_work);
1948 
1949 	/* let's notify MAC80211 about the remaining pending TX frames */
1950 	mutex_lock(&wl->mutex);
1951 	wl12xx_tx_reset(wl);
1952 
1953 	wl1271_power_off(wl);
1954 	/*
1955 	 * In case a recovery was scheduled, interrupts were disabled to avoid
1956 	 * an interrupt storm. Now that the power is down, it is safe to
1957 	 * re-enable interrupts to balance the disable depth
1958 	 */
1959 	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1960 		wlcore_enable_interrupts(wl);
1961 
1962 	wl->band = IEEE80211_BAND_2GHZ;
1963 
1964 	wl->rx_counter = 0;
1965 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1966 	wl->channel_type = NL80211_CHAN_NO_HT;
1967 	wl->tx_blocks_available = 0;
1968 	wl->tx_allocated_blocks = 0;
1969 	wl->tx_results_count = 0;
1970 	wl->tx_packets_count = 0;
1971 	wl->time_offset = 0;
1972 	wl->ap_fw_ps_map = 0;
1973 	wl->ap_ps_map = 0;
1974 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
1975 	memset(wl->roles_map, 0, sizeof(wl->roles_map));
1976 	memset(wl->links_map, 0, sizeof(wl->links_map));
1977 	memset(wl->roc_map, 0, sizeof(wl->roc_map));
1978 	memset(wl->session_ids, 0, sizeof(wl->session_ids));
1979 	memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
1980 	wl->active_sta_count = 0;
1981 	wl->active_link_count = 0;
1982 
1983 	/* The system link is always allocated */
1984 	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1985 	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1986 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1987 
1988 	/*
1989 	 * this is performed after the cancel_work calls and the associated
1990 	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1991 	 * get executed before all these vars have been reset.
1992 	 */
1993 	wl->flags = 0;
1994 
1995 	wl->tx_blocks_freed = 0;
1996 
1997 	for (i = 0; i < NUM_TX_QUEUES; i++) {
1998 		wl->tx_pkts_freed[i] = 0;
1999 		wl->tx_allocated_pkts[i] = 0;
2000 	}
2001 
2002 	wl1271_debugfs_reset(wl);
2003 
2004 	kfree(wl->raw_fw_status);
2005 	wl->raw_fw_status = NULL;
2006 	kfree(wl->fw_status);
2007 	wl->fw_status = NULL;
2008 	kfree(wl->tx_res_if);
2009 	wl->tx_res_if = NULL;
2010 	kfree(wl->target_mem_map);
2011 	wl->target_mem_map = NULL;
2012 
2013 	/*
2014 	 * FW channels must be re-calibrated after recovery,
2015 	 * save current Reg-Domain channel configuration and clear it.
2016 	 */
2017 	memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
2018 	       sizeof(wl->reg_ch_conf_pending));
2019 	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
2020 }
2021 
2022 static void wlcore_op_stop(struct ieee80211_hw *hw)
2023 {
2024 	struct wl1271 *wl = hw->priv;
2025 
2026 	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
2027 
2028 	mutex_lock(&wl->mutex);
2029 
2030 	wlcore_op_stop_locked(wl);
2031 
2032 	mutex_unlock(&wl->mutex);
2033 }
2034 
2035 static void wlcore_channel_switch_work(struct work_struct *work)
2036 {
2037 	struct delayed_work *dwork;
2038 	struct wl1271 *wl;
2039 	struct ieee80211_vif *vif;
2040 	struct wl12xx_vif *wlvif;
2041 	int ret;
2042 
2043 	dwork = container_of(work, struct delayed_work, work);
2044 	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
2045 	wl = wlvif->wl;
2046 
2047 	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
2048 
2049 	mutex_lock(&wl->mutex);
2050 
2051 	if (unlikely(wl->state != WLCORE_STATE_ON))
2052 		goto out;
2053 
2054 	/* check the channel switch is still ongoing */
2055 	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
2056 		goto out;
2057 
2058 	vif = wl12xx_wlvif_to_vif(wlvif);
2059 	ieee80211_chswitch_done(vif, false);
2060 
2061 	ret = wl1271_ps_elp_wakeup(wl);
2062 	if (ret < 0)
2063 		goto out;
2064 
2065 	wl12xx_cmd_stop_channel_switch(wl, wlvif);
2066 
2067 	wl1271_ps_elp_sleep(wl);
2068 out:
2069 	mutex_unlock(&wl->mutex);
2070 }
2071 
2072 static void wlcore_connection_loss_work(struct work_struct *work)
2073 {
2074 	struct delayed_work *dwork;
2075 	struct wl1271 *wl;
2076 	struct ieee80211_vif *vif;
2077 	struct wl12xx_vif *wlvif;
2078 
2079 	dwork = container_of(work, struct delayed_work, work);
2080 	wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2081 	wl = wlvif->wl;
2082 
2083 	wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2084 
2085 	mutex_lock(&wl->mutex);
2086 
2087 	if (unlikely(wl->state != WLCORE_STATE_ON))
2088 		goto out;
2089 
2090 	/* Call mac80211 connection loss */
2091 	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2092 		goto out;
2093 
2094 	vif = wl12xx_wlvif_to_vif(wlvif);
2095 	ieee80211_connection_loss(vif);
2096 out:
2097 	mutex_unlock(&wl->mutex);
2098 }
2099 
2100 static void wlcore_pending_auth_complete_work(struct work_struct *work)
2101 {
2102 	struct delayed_work *dwork;
2103 	struct wl1271 *wl;
2104 	struct wl12xx_vif *wlvif;
2105 	unsigned long time_spare;
2106 	int ret;
2107 
2108 	dwork = container_of(work, struct delayed_work, work);
2109 	wlvif = container_of(dwork, struct wl12xx_vif,
2110 			     pending_auth_complete_work);
2111 	wl = wlvif->wl;
2112 
2113 	mutex_lock(&wl->mutex);
2114 
2115 	if (unlikely(wl->state != WLCORE_STATE_ON))
2116 		goto out;
2117 
2118 	/*
2119 	 * Make sure a second really passed since the last auth reply. Maybe
2120 	 * a second auth reply arrived while we were stuck on the mutex.
2121 	 * Check for a little less than the timeout to protect from scheduler
2122 	 * irregularities.
2123 	 */
2124 	time_spare = jiffies +
2125 			msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
2126 	if (!time_after(time_spare, wlvif->pending_auth_reply_time))
2127 		goto out;
2128 
2129 	ret = wl1271_ps_elp_wakeup(wl);
2130 	if (ret < 0)
2131 		goto out;
2132 
2133 	/* cancel the ROC if active */
2134 	wlcore_update_inconn_sta(wl, wlvif, NULL, false);
2135 
2136 	wl1271_ps_elp_sleep(wl);
2137 out:
2138 	mutex_unlock(&wl->mutex);
2139 }
2140 
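/*
 * Rate policy indices (and, below, keep-alive template indices) are
 * handed out from small bitmaps: allocation returns -EBUSY once all
 * WL12XX_MAX_RATE_POLICIES (or WLCORE_MAX_KLV_TEMPLATES) entries are in
 * use, and freeing resets the caller's index to the out-of-range
 * "invalid" value.
 */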
2141 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2142 {
2143 	u8 policy = find_first_zero_bit(wl->rate_policies_map,
2144 					WL12XX_MAX_RATE_POLICIES);
2145 	if (policy >= WL12XX_MAX_RATE_POLICIES)
2146 		return -EBUSY;
2147 
2148 	__set_bit(policy, wl->rate_policies_map);
2149 	*idx = policy;
2150 	return 0;
2151 }
2152 
2153 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2154 {
2155 	if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2156 		return;
2157 
2158 	__clear_bit(*idx, wl->rate_policies_map);
2159 	*idx = WL12XX_MAX_RATE_POLICIES;
2160 }
2161 
2162 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2163 {
2164 	u8 policy = find_first_zero_bit(wl->klv_templates_map,
2165 					WLCORE_MAX_KLV_TEMPLATES);
2166 	if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2167 		return -EBUSY;
2168 
2169 	__set_bit(policy, wl->klv_templates_map);
2170 	*idx = policy;
2171 	return 0;
2172 }
2173 
2174 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2175 {
2176 	if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2177 		return;
2178 
2179 	__clear_bit(*idx, wl->klv_templates_map);
2180 	*idx = WLCORE_MAX_KLV_TEMPLATES;
2181 }
2182 
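/*
 * Map the interface/bss type to a firmware role, taking the p2p flag
 * into account (GO/client instead of plain AP/STA). Returns
 * WL12XX_INVALID_ROLE_TYPE for unknown bss types.
 */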
2183 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2184 {
2185 	switch (wlvif->bss_type) {
2186 	case BSS_TYPE_AP_BSS:
2187 		if (wlvif->p2p)
2188 			return WL1271_ROLE_P2P_GO;
2189 		else
2190 			return WL1271_ROLE_AP;
2191 
2192 	case BSS_TYPE_STA_BSS:
2193 		if (wlvif->p2p)
2194 			return WL1271_ROLE_P2P_CL;
2195 		else
2196 			return WL1271_ROLE_STA;
2197 
2198 	case BSS_TYPE_IBSS:
2199 		return WL1271_ROLE_IBSS;
2200 
2201 	default:
2202 		wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2203 	}
2204 	return WL12XX_INVALID_ROLE_TYPE;
2205 }
2206 
2207 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2208 {
2209 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2210 	int i;
2211 
2212 	/* clear everything but the persistent data */
2213 	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2214 
2215 	switch (ieee80211_vif_type_p2p(vif)) {
2216 	case NL80211_IFTYPE_P2P_CLIENT:
2217 		wlvif->p2p = 1;
2218 		/* fall-through */
2219 	case NL80211_IFTYPE_STATION:
2220 	case NL80211_IFTYPE_P2P_DEVICE:
2221 		wlvif->bss_type = BSS_TYPE_STA_BSS;
2222 		break;
2223 	case NL80211_IFTYPE_ADHOC:
2224 		wlvif->bss_type = BSS_TYPE_IBSS;
2225 		break;
2226 	case NL80211_IFTYPE_P2P_GO:
2227 		wlvif->p2p = 1;
2228 		/* fall-through */
2229 	case NL80211_IFTYPE_AP:
2230 		wlvif->bss_type = BSS_TYPE_AP_BSS;
2231 		break;
2232 	default:
2233 		wlvif->bss_type = MAX_BSS_TYPE;
2234 		return -EOPNOTSUPP;
2235 	}
2236 
2237 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2238 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2239 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2240 
2241 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2242 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2243 		/* init sta/ibss data */
2244 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2245 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2246 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2247 		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2248 		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2249 		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2250 		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2251 		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2252 	} else {
2253 		/* init ap data */
2254 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2255 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2256 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2257 		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2258 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2259 			wl12xx_allocate_rate_policy(wl,
2260 						&wlvif->ap.ucast_rate_idx[i]);
2261 		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2262 		/*
2263 		 * TODO: check if basic_rate shouldn't be
2264 		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2265 		 * instead (the same thing for STA above).
2266 		 */
2267 		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2268 		/* TODO: this seems to be used only for STA, check it */
2269 		wlvif->rate_set = CONF_TX_ENABLED_RATES;
2270 	}
2271 
2272 	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2273 	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2274 	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2275 
2276 	/*
2277 	 * mac80211 configures some values globally, while we treat them
2278 	 * per-interface. thus, on init, we have to copy them from wl
2279 	 */
2280 	wlvif->band = wl->band;
2281 	wlvif->channel = wl->channel;
2282 	wlvif->power_level = wl->power_level;
2283 	wlvif->channel_type = wl->channel_type;
2284 
2285 	INIT_WORK(&wlvif->rx_streaming_enable_work,
2286 		  wl1271_rx_streaming_enable_work);
2287 	INIT_WORK(&wlvif->rx_streaming_disable_work,
2288 		  wl1271_rx_streaming_disable_work);
2289 	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2290 			  wlcore_channel_switch_work);
2291 	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2292 			  wlcore_connection_loss_work);
2293 	INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
2294 			  wlcore_pending_auth_complete_work);
2295 	INIT_LIST_HEAD(&wlvif->list);
2296 
2297 	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2298 		    (unsigned long) wlvif);
2299 	return 0;
2300 }
2301 
2302 static int wl12xx_init_fw(struct wl1271 *wl)
2303 {
2304 	int retries = WL1271_BOOT_RETRIES;
2305 	bool booted = false;
2306 	struct wiphy *wiphy = wl->hw->wiphy;
2307 	int ret;
2308 
2309 	while (retries) {
2310 		retries--;
2311 		ret = wl12xx_chip_wakeup(wl, false);
2312 		if (ret < 0)
2313 			goto power_off;
2314 
2315 		ret = wl->ops->boot(wl);
2316 		if (ret < 0)
2317 			goto power_off;
2318 
2319 		ret = wl1271_hw_init(wl);
2320 		if (ret < 0)
2321 			goto irq_disable;
2322 
2323 		booted = true;
2324 		break;
2325 
2326 irq_disable:
2327 		mutex_unlock(&wl->mutex);
2328 		/* Unlocking the mutex in the middle of handling is
2329 		   inherently unsafe. In this case we deem it safe to do,
2330 		   because we need to let any possibly pending IRQ out of
2331 		   the system (and while we are WLCORE_STATE_OFF the IRQ
2332 		   work function will not do anything.) Also, any other
2333 		   possible concurrent operations will fail due to the
2334 		   current state, hence the wl1271 struct should be safe. */
2335 		wlcore_disable_interrupts(wl);
2336 		wl1271_flush_deferred_work(wl);
2337 		cancel_work_sync(&wl->netstack_work);
2338 		mutex_lock(&wl->mutex);
2339 power_off:
2340 		wl1271_power_off(wl);
2341 	}
2342 
2343 	if (!booted) {
2344 		wl1271_error("firmware boot failed despite %d retries",
2345 			     WL1271_BOOT_RETRIES);
2346 		goto out;
2347 	}
2348 
2349 	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2350 
2351 	/* update hw/fw version info in wiphy struct */
2352 	wiphy->hw_version = wl->chip.id;
2353 	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2354 		sizeof(wiphy->fw_version));
2355 
2356 	/*
2357 	 * Now we know if 11a is supported (info from the NVS), so disable
2358 	 * 11a channels if not supported
2359 	 */
2360 	if (!wl->enable_11a)
2361 		wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2362 
2363 	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2364 		     wl->enable_11a ? "" : "not ");
2365 
2366 	wl->state = WLCORE_STATE_ON;
2367 out:
2368 	return ret;
2369 }
2370 
2371 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2372 {
2373 	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2374 }
2375 
2376 /*
2377  * Check whether a fw switch (i.e. moving from one loaded
2378  * fw to another) is needed. This function is also responsible
2379  * for updating wl->last_vif_count, so it must be called before
2380  * loading a non-plt fw (so the correct fw (single-role/multi-role)
2381  * will be used).
2382  */
2383 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2384 				  struct vif_counter_data vif_counter_data,
2385 				  bool add)
2386 {
2387 	enum wl12xx_fw_type current_fw = wl->fw_type;
2388 	u8 vif_count = vif_counter_data.counter;
2389 
2390 	if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2391 		return false;
2392 
2393 	/* increase the vif count if this is a new vif */
2394 	if (add && !vif_counter_data.cur_vif_running)
2395 		vif_count++;
2396 
2397 	wl->last_vif_count = vif_count;
2398 
2399 	/* no need for fw change if the device is OFF */
2400 	if (wl->state == WLCORE_STATE_OFF)
2401 		return false;
2402 
2403 	/* no need for fw change if a single fw is used */
2404 	if (!wl->mr_fw_name)
2405 		return false;
2406 
2407 	if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2408 		return true;
2409 	if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2410 		return true;
2411 
2412 	return false;
2413 }
2414 
2415 /*
2416  * Enter "forced psm". Make sure the sta is in psm against the ap, so
2417  * the fw switch is a bit more resilient to disconnection.
2418  */
2419 static void wl12xx_force_active_psm(struct wl1271 *wl)
2420 {
2421 	struct wl12xx_vif *wlvif;
2422 
2423 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
2424 		wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2425 	}
2426 }
2427 
2428 struct wlcore_hw_queue_iter_data {
2429 	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2430 	/* current vif */
2431 	struct ieee80211_vif *vif;
2432 	/* is the current vif among those iterated */
2433 	bool cur_running;
2434 };
2435 
2436 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2437 				 struct ieee80211_vif *vif)
2438 {
2439 	struct wlcore_hw_queue_iter_data *iter_data = data;
2440 
2441 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
2442 	    WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2443 		return;
2444 
2445 	if (iter_data->cur_running || vif == iter_data->vif) {
2446 		iter_data->cur_running = true;
2447 		return;
2448 	}
2449 
2450 	__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2451 }
2452 
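/*
 * Every interface owns a contiguous block of NUM_TX_QUEUES mac80211 hw
 * queues. The iterator above marks the blocks already claimed by other
 * running interfaces, the first free block is assigned to a new vif
 * (reusing the pre-allocated base on resume/recovery), and the last
 * indices are reserved for the per-interface cab queues of APs.
 */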
2453 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2454 					 struct wl12xx_vif *wlvif)
2455 {
2456 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2457 	struct wlcore_hw_queue_iter_data iter_data = {};
2458 	int i, q_base;
2459 
2460 	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2461 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2462 		return 0;
2463 	}
2464 
2465 	iter_data.vif = vif;
2466 
2467 	/* mark all bits taken by active interfaces */
2468 	ieee80211_iterate_active_interfaces_atomic(wl->hw,
2469 					IEEE80211_IFACE_ITER_RESUME_ALL,
2470 					wlcore_hw_queue_iter, &iter_data);
2471 
2472 	/* the current vif is already running in mac80211 (resume/recovery) */
2473 	if (iter_data.cur_running) {
2474 		wlvif->hw_queue_base = vif->hw_queue[0];
2475 		wl1271_debug(DEBUG_MAC80211,
2476 			     "using pre-allocated hw queue base %d",
2477 			     wlvif->hw_queue_base);
2478 
2479 		/* the interface type might have changed */
2480 		goto adjust_cab_queue;
2481 	}
2482 
2483 	q_base = find_first_zero_bit(iter_data.hw_queue_map,
2484 				     WLCORE_NUM_MAC_ADDRESSES);
2485 	if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2486 		return -EBUSY;
2487 
2488 	wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2489 	wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2490 		     wlvif->hw_queue_base);
2491 
2492 	for (i = 0; i < NUM_TX_QUEUES; i++) {
2493 		wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2494 		/* register hw queues in mac80211 */
2495 		vif->hw_queue[i] = wlvif->hw_queue_base + i;
2496 	}
2497 
2498 adjust_cab_queue:
2499 	/* the last places are reserved for cab queues per interface */
2500 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2501 		vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2502 				 wlvif->hw_queue_base / NUM_TX_QUEUES;
2503 	else
2504 		vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2505 
2506 	return 0;
2507 }
2508 
2509 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2510 				   struct ieee80211_vif *vif)
2511 {
2512 	struct wl1271 *wl = hw->priv;
2513 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2514 	struct vif_counter_data vif_count;
2515 	int ret = 0;
2516 	u8 role_type;
2517 
2518 	if (wl->plt) {
2519 		wl1271_error("Adding Interface not allowed while in PLT mode");
2520 		return -EBUSY;
2521 	}
2522 
2523 	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2524 			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2525 
2526 	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2527 		     ieee80211_vif_type_p2p(vif), vif->addr);
2528 
2529 	wl12xx_get_vif_count(hw, vif, &vif_count);
2530 
2531 	mutex_lock(&wl->mutex);
2532 	ret = wl1271_ps_elp_wakeup(wl);
2533 	if (ret < 0)
2534 		goto out_unlock;
2535 
2536 	/*
2537 	 * in some very rare corner-case HW recovery scenarios it's possible to
2538 	 * get here before __wl1271_op_remove_interface is complete, so
2539 	 * opt out if that is the case.
2540 	 */
2541 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2542 	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2543 		ret = -EBUSY;
2544 		goto out;
2545 	}
2546 
2547 
2548 	ret = wl12xx_init_vif_data(wl, vif);
2549 	if (ret < 0)
2550 		goto out;
2551 
2552 	wlvif->wl = wl;
2553 	role_type = wl12xx_get_role_type(wl, wlvif);
2554 	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2555 		ret = -EINVAL;
2556 		goto out;
2557 	}
2558 
2559 	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2560 	if (ret < 0)
2561 		goto out;
2562 
2563 	if (wl12xx_need_fw_change(wl, vif_count, true)) {
2564 		wl12xx_force_active_psm(wl);
2565 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2566 		mutex_unlock(&wl->mutex);
2567 		wl1271_recovery_work(&wl->recovery_work);
2568 		return 0;
2569 	}
2570 
2571 	/*
2572 	 * TODO: after the nvs issue will be solved, move this block
2573 	 * to start(), and make sure here the driver is ON.
2574 	 */
2575 	if (wl->state == WLCORE_STATE_OFF) {
2576 		/*
2577 		 * we still need this in order to configure the fw
2578 		 * while uploading the nvs
2579 		 */
2580 		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2581 
2582 		ret = wl12xx_init_fw(wl);
2583 		if (ret < 0)
2584 			goto out;
2585 	}
2586 
2587 	if (!wlcore_is_p2p_mgmt(wlvif)) {
2588 		ret = wl12xx_cmd_role_enable(wl, vif->addr,
2589 					     role_type, &wlvif->role_id);
2590 		if (ret < 0)
2591 			goto out;
2592 
2593 		ret = wl1271_init_vif_specific(wl, vif);
2594 		if (ret < 0)
2595 			goto out;
2596 
2597 	} else {
2598 		ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
2599 					     &wlvif->dev_role_id);
2600 		if (ret < 0)
2601 			goto out;
2602 
2603 		/* needed mainly for configuring rate policies */
2604 		ret = wl1271_sta_hw_init(wl, wlvif);
2605 		if (ret < 0)
2606 			goto out;
2607 	}
2608 
2609 	list_add(&wlvif->list, &wl->wlvif_list);
2610 	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2611 
2612 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2613 		wl->ap_count++;
2614 	else
2615 		wl->sta_count++;
2616 out:
2617 	wl1271_ps_elp_sleep(wl);
2618 out_unlock:
2619 	mutex_unlock(&wl->mutex);
2620 
2621 	return ret;
2622 }
2623 
2624 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2625 					 struct ieee80211_vif *vif,
2626 					 bool reset_tx_queues)
2627 {
2628 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2629 	int i, ret;
2630 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2631 
2632 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2633 
2634 	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2635 		return;
2636 
2637 	/* because of hardware recovery, we may get here twice */
2638 	if (wl->state == WLCORE_STATE_OFF)
2639 		return;
2640 
2641 	wl1271_info("down");
2642 
2643 	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2644 	    wl->scan_wlvif == wlvif) {
2645 		/*
2646 		 * Rearm the tx watchdog just before idling the scan. This
2647 		 * prevents just-finished scans from triggering the watchdog.
2648 		 */
2649 		wl12xx_rearm_tx_watchdog_locked(wl);
2650 
2651 		wl->scan.state = WL1271_SCAN_STATE_IDLE;
2652 		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2653 		wl->scan_wlvif = NULL;
2654 		wl->scan.req = NULL;
2655 		ieee80211_scan_completed(wl->hw, true);
2656 	}
2657 
2658 	if (wl->sched_vif == wlvif)
2659 		wl->sched_vif = NULL;
2660 
2661 	if (wl->roc_vif == vif) {
2662 		wl->roc_vif = NULL;
2663 		ieee80211_remain_on_channel_expired(wl->hw);
2664 	}
2665 
2666 	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2667 		/* disable active roles */
2668 		ret = wl1271_ps_elp_wakeup(wl);
2669 		if (ret < 0)
2670 			goto deinit;
2671 
2672 		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2673 		    wlvif->bss_type == BSS_TYPE_IBSS) {
2674 			if (wl12xx_dev_role_started(wlvif))
2675 				wl12xx_stop_dev(wl, wlvif);
2676 		}
2677 
2678 		if (!wlcore_is_p2p_mgmt(wlvif)) {
2679 			ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2680 			if (ret < 0)
2681 				goto deinit;
2682 		} else {
2683 			ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
2684 			if (ret < 0)
2685 				goto deinit;
2686 		}
2687 
2688 		wl1271_ps_elp_sleep(wl);
2689 	}
2690 deinit:
2691 	wl12xx_tx_reset_wlvif(wl, wlvif);
2692 
2693 	/* clear all hlids (except system_hlid) */
2694 	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2695 
2696 	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2697 	    wlvif->bss_type == BSS_TYPE_IBSS) {
2698 		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2699 		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2700 		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2701 		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2702 		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2703 	} else {
2704 		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2705 		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2706 		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2707 		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2708 		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2709 			wl12xx_free_rate_policy(wl,
2710 						&wlvif->ap.ucast_rate_idx[i]);
2711 		wl1271_free_ap_keys(wl, wlvif);
2712 	}
2713 
2714 	dev_kfree_skb(wlvif->probereq);
2715 	wlvif->probereq = NULL;
2716 	if (wl->last_wlvif == wlvif)
2717 		wl->last_wlvif = NULL;
2718 	list_del(&wlvif->list);
2719 	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2720 	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2721 	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2722 
2723 	if (is_ap)
2724 		wl->ap_count--;
2725 	else
2726 		wl->sta_count--;
2727 
2728 	/*
2729 	 * If this was the last AP and stations remain, configure sleep auth
2730 	 * according to the STA. Don't do this on an unintended recovery.
2731 	 */
2732 	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2733 	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2734 		goto unlock;
2735 
2736 	if (wl->ap_count == 0 && is_ap) {
2737 		/* mask ap events */
2738 		wl->event_mask &= ~wl->ap_event_mask;
2739 		wl1271_event_unmask(wl);
2740 	}
2741 
2742 	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2743 		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2744 		/* Configure for power according to debugfs */
2745 		if (sta_auth != WL1271_PSM_ILLEGAL)
2746 			wl1271_acx_sleep_auth(wl, sta_auth);
2747 		/* Configure for ELP power saving */
2748 		else
2749 			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2750 	}
2751 
2752 unlock:
2753 	mutex_unlock(&wl->mutex);
2754 
2755 	del_timer_sync(&wlvif->rx_streaming_timer);
2756 	cancel_work_sync(&wlvif->rx_streaming_enable_work);
2757 	cancel_work_sync(&wlvif->rx_streaming_disable_work);
2758 	cancel_delayed_work_sync(&wlvif->connection_loss_work);
2759 	cancel_delayed_work_sync(&wlvif->channel_switch_work);
2760 	cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
2761 
2762 	mutex_lock(&wl->mutex);
2763 }
2764 
2765 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2766 				       struct ieee80211_vif *vif)
2767 {
2768 	struct wl1271 *wl = hw->priv;
2769 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2770 	struct wl12xx_vif *iter;
2771 	struct vif_counter_data vif_count;
2772 
2773 	wl12xx_get_vif_count(hw, vif, &vif_count);
2774 	mutex_lock(&wl->mutex);
2775 
2776 	if (wl->state == WLCORE_STATE_OFF ||
2777 	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2778 		goto out;
2779 
2780 	/*
2781 	 * wl->vif can be null here if someone shuts down the interface
2782 	 * just when hardware recovery has been started.
2783 	 */
2784 	wl12xx_for_each_wlvif(wl, iter) {
2785 		if (iter != wlvif)
2786 			continue;
2787 
2788 		__wl1271_op_remove_interface(wl, vif, true);
2789 		break;
2790 	}
2791 	WARN_ON(iter != wlvif);
2792 	if (wl12xx_need_fw_change(wl, vif_count, false)) {
2793 		wl12xx_force_active_psm(wl);
2794 		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2795 		wl12xx_queue_recovery_work(wl);
2796 	}
2797 out:
2798 	mutex_unlock(&wl->mutex);
2799 }
2800 
2801 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2802 				      struct ieee80211_vif *vif,
2803 				      enum nl80211_iftype new_type, bool p2p)
2804 {
2805 	struct wl1271 *wl = hw->priv;
2806 	int ret;
2807 
2808 	set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2809 	wl1271_op_remove_interface(hw, vif);
2810 
2811 	vif->type = new_type;
2812 	vif->p2p = p2p;
2813 	ret = wl1271_op_add_interface(hw, vif);
2814 
2815 	clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2816 	return ret;
2817 }
2818 
2819 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2820 {
2821 	int ret;
2822 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2823 
2824 	/*
2825 	 * One of the side effects of the JOIN command is that it clears
2826 	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2827 	 * to a WPA/WPA2 access point will therefore kill the data-path.
2828 	 * Currently the only valid scenario for JOIN during association
2829 	 * is on roaming, in which case we will also be given new keys.
2830 	 * Keep the below message for now, unless it starts bothering
2831 	 * users who really like to roam a lot :)
2832 	 */
2833 	if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2834 		wl1271_info("JOIN while associated.");
2835 
2836 	/* clear encryption type */
2837 	wlvif->encryption_type = KEY_NONE;
2838 
2839 	if (is_ibss)
2840 		ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2841 	else {
2842 		if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2843 			/*
2844 			 * TODO: this is an ugly workaround for wl12xx fw
2845 			 * bug - we are not able to tx/rx after the first
2846 			 * start_sta, so make dummy start+stop calls,
2847 			 * and then call start_sta again.
2848 			 * this should be fixed in the fw.
2849 			 */
2850 			wl12xx_cmd_role_start_sta(wl, wlvif);
2851 			wl12xx_cmd_role_stop_sta(wl, wlvif);
2852 		}
2853 
2854 		ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2855 	}
2856 
2857 	return ret;
2858 }
2859 
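/*
 * Copy the SSID IE of a template frame (IEs starting at the given
 * offset) into wlvif->ssid/ssid_len, rejecting frames without an SSID
 * or with an over-long one.
 */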
2860 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2861 			    int offset)
2862 {
2863 	u8 ssid_len;
2864 	const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2865 					 skb->len - offset);
2866 
2867 	if (!ptr) {
2868 		wl1271_error("No SSID in IEs!");
2869 		return -ENOENT;
2870 	}
2871 
2872 	ssid_len = ptr[1];
2873 	if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2874 		wl1271_error("SSID is too long!");
2875 		return -EINVAL;
2876 	}
2877 
2878 	wlvif->ssid_len = ssid_len;
2879 	memcpy(wlvif->ssid, ptr+2, ssid_len);
2880 	return 0;
2881 }
2882 
2883 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2884 {
2885 	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2886 	struct sk_buff *skb;
2887 	int ieoffset;
2888 
2889 	/* we currently only support setting the ssid from the ap probe req */
2890 	if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2891 		return -EINVAL;
2892 
2893 	skb = ieee80211_ap_probereq_get(wl->hw, vif);
2894 	if (!skb)
2895 		return -EINVAL;
2896 
2897 	ieoffset = offsetof(struct ieee80211_mgmt,
2898 			    u.probe_req.variable);
2899 	wl1271_ssid_set(wlvif, skb, ieoffset);
2900 	dev_kfree_skb(skb);
2901 
2902 	return 0;
2903 }
2904 
2905 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2906 			    struct ieee80211_bss_conf *bss_conf,
2907 			    u32 sta_rate_set)
2908 {
2909 	int ieoffset;
2910 	int ret;
2911 
2912 	wlvif->aid = bss_conf->aid;
2913 	wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2914 	wlvif->beacon_int = bss_conf->beacon_int;
2915 	wlvif->wmm_enabled = bss_conf->qos;
2916 
2917 	set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2918 
2919 	/*
2920 	 * with wl1271, we don't need to update the
2921 	 * beacon_int and dtim_period, because the firmware
2922 	 * updates them by itself when the first beacon is
2923 	 * received after a join.
2924 	 */
2925 	ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2926 	if (ret < 0)
2927 		return ret;
2928 
2929 	/*
2930 	 * Get a template for hardware connection maintenance
2931 	 */
2932 	dev_kfree_skb(wlvif->probereq);
2933 	wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2934 							wlvif,
2935 							NULL);
2936 	ieoffset = offsetof(struct ieee80211_mgmt,
2937 			    u.probe_req.variable);
2938 	wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2939 
2940 	/* enable the connection monitoring feature */
2941 	ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2942 	if (ret < 0)
2943 		return ret;
2944 
2945 	/*
2946 	 * The join command disables the keep-alive mode, shuts down its process,
2947 	 * and also clears the template config, so we need to reset it all after
2948 	 * the join. The acx_aid starts the keep-alive process, and the order
2949 	 * of the commands below is relevant.
2950 	 */
2951 	ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2952 	if (ret < 0)
2953 		return ret;
2954 
2955 	ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2956 	if (ret < 0)
2957 		return ret;
2958 
2959 	ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2960 	if (ret < 0)
2961 		return ret;
2962 
2963 	ret = wl1271_acx_keep_alive_config(wl, wlvif,
2964 					   wlvif->sta.klv_template_id,
2965 					   ACX_KEEP_ALIVE_TPL_VALID);
2966 	if (ret < 0)
2967 		return ret;
2968 
2969 	/*
2970 	 * The default fw psm configuration is AUTO, while mac80211 default
2971 	 * setting is off (ACTIVE), so sync the fw with the correct value.
2972 	 */
2973 	ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2974 	if (ret < 0)
2975 		return ret;
2976 
2977 	if (sta_rate_set) {
2978 		wlvif->rate_set =
2979 			wl1271_tx_enabled_rates_get(wl,
2980 						    sta_rate_set,
2981 						    wlvif->band);
2982 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2983 		if (ret < 0)
2984 			return ret;
2985 	}
2986 
2987 	return ret;
2988 }
2989 
2990 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2991 {
2992 	int ret;
2993 	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2994 
2995 	/* make sure we are associated (sta) */
2996 	if (sta &&
2997 	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2998 		return false;
2999 
3000 	/* make sure we are joined (ibss) */
3001 	if (!sta &&
3002 	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
3003 		return false;
3004 
3005 	if (sta) {
3006 		/* use defaults when not associated */
3007 		wlvif->aid = 0;
3008 
3009 		/* free probe-request template */
3010 		dev_kfree_skb(wlvif->probereq);
3011 		wlvif->probereq = NULL;
3012 
3013 		/* disable connection monitor features */
3014 		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
3015 		if (ret < 0)
3016 			return ret;
3017 
3018 		/* Disable the keep-alive feature */
3019 		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
3020 		if (ret < 0)
3021 			return ret;
3022 
3023 		/* disable beacon filtering */
3024 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
3025 		if (ret < 0)
3026 			return ret;
3027 	}
3028 
3029 	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
3030 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
3031 
3032 		wl12xx_cmd_stop_channel_switch(wl, wlvif);
3033 		ieee80211_chswitch_done(vif, false);
3034 		cancel_delayed_work(&wlvif->channel_switch_work);
3035 	}
3036 
3037 	/* invalidate keep-alive template */
3038 	wl1271_acx_keep_alive_config(wl, wlvif,
3039 				     wlvif->sta.klv_template_id,
3040 				     ACX_KEEP_ALIVE_TPL_INVALID);
3041 
3042 	return 0;
3043 }
3044 
3045 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3046 {
3047 	wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
3048 	wlvif->rate_set = wlvif->basic_rate_set;
3049 }
3050 
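/*
 * Track mac80211's idle state per interface: entering idle only clears
 * WLVIF_FLAG_ACTIVE, while leaving idle also stops a running scheduled
 * scan first, since the current firmware only supports sched_scan while
 * idle.
 */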
3051 static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3052 				   bool idle)
3053 {
3054 	bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3055 
3056 	if (idle == cur_idle)
3057 		return;
3058 
3059 	if (idle) {
3060 		clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3061 	} else {
3062 		/* The current firmware only supports sched_scan in idle */
3063 		if (wl->sched_vif == wlvif)
3064 			wl->ops->sched_scan_stop(wl, wlvif);
3065 
3066 		set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
3067 	}
3068 }
3069 
3070 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3071 			     struct ieee80211_conf *conf, u32 changed)
3072 {
3073 	int ret;
3074 
3075 	if (wlcore_is_p2p_mgmt(wlvif))
3076 		return 0;
3077 
3078 	if (conf->power_level != wlvif->power_level) {
3079 		ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
3080 		if (ret < 0)
3081 			return ret;
3082 
3083 		wlvif->power_level = conf->power_level;
3084 	}
3085 
3086 	return 0;
3087 }
3088 
3089 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
3090 {
3091 	struct wl1271 *wl = hw->priv;
3092 	struct wl12xx_vif *wlvif;
3093 	struct ieee80211_conf *conf = &hw->conf;
3094 	int ret = 0;
3095 
3096 	wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
3097 		     " changed 0x%x",
3098 		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
3099 		     conf->power_level,
3100 		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
3101 			 changed);
3102 
3103 	mutex_lock(&wl->mutex);
3104 
3105 	if (changed & IEEE80211_CONF_CHANGE_POWER)
3106 		wl->power_level = conf->power_level;
3107 
3108 	if (unlikely(wl->state != WLCORE_STATE_ON))
3109 		goto out;
3110 
3111 	ret = wl1271_ps_elp_wakeup(wl);
3112 	if (ret < 0)
3113 		goto out;
3114 
3115 	/* configure each interface */
3116 	wl12xx_for_each_wlvif(wl, wlvif) {
3117 		ret = wl12xx_config_vif(wl, wlvif, conf, changed);
3118 		if (ret < 0)
3119 			goto out_sleep;
3120 	}
3121 
3122 out_sleep:
3123 	wl1271_ps_elp_sleep(wl);
3124 
3125 out:
3126 	mutex_unlock(&wl->mutex);
3127 
3128 	return ret;
3129 }
3130 
3131 struct wl1271_filter_params {
3132 	bool enabled;
3133 	int mc_list_length;
3134 	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
3135 };
3136 
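/*
 * Snapshot the multicast address list into a heap-allocated
 * wl1271_filter_params and hand the pointer back to mac80211 as a u64
 * cookie; wl1271_op_configure_filter() below unpacks and frees it. When
 * the list exceeds ACX_MC_ADDRESS_GROUP_MAX entries, the filter is
 * simply marked disabled.
 */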
3137 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
3138 				       struct netdev_hw_addr_list *mc_list)
3139 {
3140 	struct wl1271_filter_params *fp;
3141 	struct netdev_hw_addr *ha;
3142 
3143 	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
3144 	if (!fp) {
3145 		wl1271_error("Out of memory setting filters.");
3146 		return 0;
3147 	}
3148 
3149 	/* update multicast filtering parameters */
3150 	fp->mc_list_length = 0;
3151 	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
3152 		fp->enabled = false;
3153 	} else {
3154 		fp->enabled = true;
3155 		netdev_hw_addr_list_for_each(ha, mc_list) {
3156 			memcpy(fp->mc_list[fp->mc_list_length],
3157 					ha->addr, ETH_ALEN);
3158 			fp->mc_list_length++;
3159 		}
3160 	}
3161 
3162 	return (u64)(unsigned long)fp;
3163 }
3164 
3165 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
3166 				  FIF_ALLMULTI | \
3167 				  FIF_FCSFAIL | \
3168 				  FIF_BCN_PRBRESP_PROMISC | \
3169 				  FIF_CONTROL | \
3170 				  FIF_OTHER_BSS)
3171 
3172 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
3173 				       unsigned int changed,
3174 				       unsigned int *total, u64 multicast)
3175 {
3176 	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
3177 	struct wl1271 *wl = hw->priv;
3178 	struct wl12xx_vif *wlvif;
3179 
3180 	int ret;
3181 
3182 	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
3183 		     " total %x", changed, *total);
3184 
3185 	mutex_lock(&wl->mutex);
3186 
3187 	*total &= WL1271_SUPPORTED_FILTERS;
3188 	changed &= WL1271_SUPPORTED_FILTERS;
3189 
3190 	if (unlikely(wl->state != WLCORE_STATE_ON))
3191 		goto out;
3192 
3193 	ret = wl1271_ps_elp_wakeup(wl);
3194 	if (ret < 0)
3195 		goto out;
3196 
3197 	wl12xx_for_each_wlvif(wl, wlvif) {
3198 		if (wlcore_is_p2p_mgmt(wlvif))
3199 			continue;
3200 
3201 		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3202 			if (*total & FIF_ALLMULTI)
3203 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3204 								   false,
3205 								   NULL, 0);
3206 			else if (fp)
3207 				ret = wl1271_acx_group_address_tbl(wl, wlvif,
3208 							fp->enabled,
3209 							fp->mc_list,
3210 							fp->mc_list_length);
3211 			if (ret < 0)
3212 				goto out_sleep;
3213 		}
3214 	}
3215 
3216 	/*
3217 	 * the fw doesn't provide an api to configure the filters. instead,
3218 	 * the filter configuration is based on the active roles / ROC
3219 	 * state.
3220 	 */
3221 
3222 out_sleep:
3223 	wl1271_ps_elp_sleep(wl);
3224 
3225 out:
3226 	mutex_unlock(&wl->mutex);
3227 	kfree(fp);
3228 }
3229 
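/*
 * Keys handed to us before the AP role is started cannot be programmed
 * into the firmware yet, so they are recorded here and replayed by
 * wl1271_ap_init_hwenc() once the AP is up. Replacing a recorded key id
 * or exceeding MAX_NUM_KEYS entries is rejected.
 */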
3230 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3231 				u8 id, u8 key_type, u8 key_size,
3232 				const u8 *key, u8 hlid, u32 tx_seq_32,
3233 				u16 tx_seq_16)
3234 {
3235 	struct wl1271_ap_key *ap_key;
3236 	int i;
3237 
3238 	wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3239 
3240 	if (key_size > MAX_KEY_SIZE)
3241 		return -EINVAL;
3242 
3243 	/*
3244 	 * Find next free entry in ap_keys. Also check we are not replacing
3245 	 * an existing key.
3246 	 */
3247 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3248 		if (wlvif->ap.recorded_keys[i] == NULL)
3249 			break;
3250 
3251 		if (wlvif->ap.recorded_keys[i]->id == id) {
3252 			wl1271_warning("trying to record key replacement");
3253 			return -EINVAL;
3254 		}
3255 	}
3256 
3257 	if (i == MAX_NUM_KEYS)
3258 		return -EBUSY;
3259 
3260 	ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3261 	if (!ap_key)
3262 		return -ENOMEM;
3263 
3264 	ap_key->id = id;
3265 	ap_key->key_type = key_type;
3266 	ap_key->key_size = key_size;
3267 	memcpy(ap_key->key, key, key_size);
3268 	ap_key->hlid = hlid;
3269 	ap_key->tx_seq_32 = tx_seq_32;
3270 	ap_key->tx_seq_16 = tx_seq_16;
3271 
3272 	wlvif->ap.recorded_keys[i] = ap_key;
3273 	return 0;
3274 }
3275 
3276 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3277 {
3278 	int i;
3279 
3280 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3281 		kfree(wlvif->ap.recorded_keys[i]);
3282 		wlvif->ap.recorded_keys[i] = NULL;
3283 	}
3284 }
3285 
3286 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3287 {
3288 	int i, ret = 0;
3289 	struct wl1271_ap_key *key;
3290 	bool wep_key_added = false;
3291 
3292 	for (i = 0; i < MAX_NUM_KEYS; i++) {
3293 		u8 hlid;
3294 		if (wlvif->ap.recorded_keys[i] == NULL)
3295 			break;
3296 
3297 		key = wlvif->ap.recorded_keys[i];
3298 		hlid = key->hlid;
3299 		if (hlid == WL12XX_INVALID_LINK_ID)
3300 			hlid = wlvif->ap.bcast_hlid;
3301 
3302 		ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3303 					    key->id, key->key_type,
3304 					    key->key_size, key->key,
3305 					    hlid, key->tx_seq_32,
3306 					    key->tx_seq_16);
3307 		if (ret < 0)
3308 			goto out;
3309 
3310 		if (key->key_type == KEY_WEP)
3311 			wep_key_added = true;
3312 	}
3313 
3314 	if (wep_key_added) {
3315 		ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3316 						     wlvif->ap.bcast_hlid);
3317 		if (ret < 0)
3318 			goto out;
3319 	}
3320 
3321 out:
3322 	wl1271_free_ap_keys(wl, wlvif);
3323 	return ret;
3324 }
3325 
3326 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3327 		       u16 action, u8 id, u8 key_type,
3328 		       u8 key_size, const u8 *key, u32 tx_seq_32,
3329 		       u16 tx_seq_16, struct ieee80211_sta *sta)
3330 {
3331 	int ret;
3332 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3333 
3334 	if (is_ap) {
3335 		struct wl1271_station *wl_sta;
3336 		u8 hlid;
3337 
3338 		if (sta) {
3339 			wl_sta = (struct wl1271_station *)sta->drv_priv;
3340 			hlid = wl_sta->hlid;
3341 		} else {
3342 			hlid = wlvif->ap.bcast_hlid;
3343 		}
3344 
3345 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3346 			/*
3347 			 * We do not support removing keys after AP shutdown.
3348 			 * Pretend we do to make mac80211 happy.
3349 			 */
3350 			if (action != KEY_ADD_OR_REPLACE)
3351 				return 0;
3352 
3353 			ret = wl1271_record_ap_key(wl, wlvif, id,
3354 					     key_type, key_size,
3355 					     key, hlid, tx_seq_32,
3356 					     tx_seq_16);
3357 		} else {
3358 			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3359 					     id, key_type, key_size,
3360 					     key, hlid, tx_seq_32,
3361 					     tx_seq_16);
3362 		}
3363 
3364 		if (ret < 0)
3365 			return ret;
3366 	} else {
3367 		const u8 *addr;
3368 		static const u8 bcast_addr[ETH_ALEN] = {
3369 			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3370 		};
3371 
3372 		addr = sta ? sta->addr : bcast_addr;
3373 
3374 		if (is_zero_ether_addr(addr)) {
3375 			/* We don't support TX-only encryption */
3376 			return -EOPNOTSUPP;
3377 		}
3378 
3379 		/* The wl1271 does not allow removing unicast keys - they
3380 		   will be cleared automatically on the next CMD_JOIN. Ignore
3381 		   the request silently, as we don't want mac80211 to emit
3382 		   an error message. */
3383 		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3384 			return 0;
3385 
3386 		/* don't remove key if hlid was already deleted */
3387 		if (action == KEY_REMOVE &&
3388 		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3389 			return 0;
3390 
3391 		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3392 					     id, key_type, key_size,
3393 					     key, addr, tx_seq_32,
3394 					     tx_seq_16);
3395 		if (ret < 0)
3396 			return ret;
3397 
3398 	}
3399 
3400 	return 0;
3401 }
3402 
3403 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3404 			     struct ieee80211_vif *vif,
3405 			     struct ieee80211_sta *sta,
3406 			     struct ieee80211_key_conf *key_conf)
3407 {
3408 	struct wl1271 *wl = hw->priv;
3409 	int ret;
3410 	bool might_change_spare =
3411 		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3412 		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3413 
3414 	if (might_change_spare) {
3415 		/*
3416 		 * stop the queues and flush to ensure the next packets are
3417 		 * in sync with FW spare block accounting
3418 		 */
3419 		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3420 		wl1271_tx_flush(wl);
3421 	}
3422 
3423 	mutex_lock(&wl->mutex);
3424 
3425 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3426 		ret = -EAGAIN;
3427 		goto out_wake_queues;
3428 	}
3429 
3430 	ret = wl1271_ps_elp_wakeup(wl);
3431 	if (ret < 0)
3432 		goto out_wake_queues;
3433 
3434 	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3435 
3436 	wl1271_ps_elp_sleep(wl);
3437 
3438 out_wake_queues:
3439 	if (might_change_spare)
3440 		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3441 
3442 	mutex_unlock(&wl->mutex);
3443 
3444 	return ret;
3445 }
3446 
3447 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3448 		   struct ieee80211_vif *vif,
3449 		   struct ieee80211_sta *sta,
3450 		   struct ieee80211_key_conf *key_conf)
3451 {
3452 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3453 	int ret;
3454 	u32 tx_seq_32 = 0;
3455 	u16 tx_seq_16 = 0;
3456 	u8 key_type;
3457 	u8 hlid;
3458 
3459 	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3460 
3461 	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3462 	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3463 		     key_conf->cipher, key_conf->keyidx,
3464 		     key_conf->keylen, key_conf->flags);
3465 	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3466 
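	/*
	 * Pick the link (hlid) the key belongs to and seed the TX sequence
	 * counter from that link's total_freed_pkts, presumably so the
	 * PN/TSC does not restart from zero after a re-key or recovery.
	 */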
3467 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3468 		if (sta) {
3469 			struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3470 			hlid = wl_sta->hlid;
3471 		} else {
3472 			hlid = wlvif->ap.bcast_hlid;
3473 		}
3474 	else
3475 		hlid = wlvif->sta.hlid;
3476 
3477 	if (hlid != WL12XX_INVALID_LINK_ID) {
3478 		u64 tx_seq = wl->links[hlid].total_freed_pkts;
3479 		tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3480 		tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3481 	}
3482 
3483 	switch (key_conf->cipher) {
3484 	case WLAN_CIPHER_SUITE_WEP40:
3485 	case WLAN_CIPHER_SUITE_WEP104:
3486 		key_type = KEY_WEP;
3487 
3488 		key_conf->hw_key_idx = key_conf->keyidx;
3489 		break;
3490 	case WLAN_CIPHER_SUITE_TKIP:
3491 		key_type = KEY_TKIP;
3492 		key_conf->hw_key_idx = key_conf->keyidx;
3493 		break;
3494 	case WLAN_CIPHER_SUITE_CCMP:
3495 		key_type = KEY_AES;
3496 		key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3497 		break;
3498 	case WL1271_CIPHER_SUITE_GEM:
3499 		key_type = KEY_GEM;
3500 		break;
3501 	default:
3502 		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3503 
3504 		return -EOPNOTSUPP;
3505 	}
3506 
3507 	switch (cmd) {
3508 	case SET_KEY:
3509 		ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3510 				 key_conf->keyidx, key_type,
3511 				 key_conf->keylen, key_conf->key,
3512 				 tx_seq_32, tx_seq_16, sta);
3513 		if (ret < 0) {
3514 			wl1271_error("Could not add or replace key");
3515 			return ret;
3516 		}
3517 
3518 		/*
3519 		 * reconfiguring arp response if the unicast (or common)
3520 		 * encryption key type was changed
3521 		 */
3522 		if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3523 		    (sta || key_type == KEY_WEP) &&
3524 		    wlvif->encryption_type != key_type) {
3525 			wlvif->encryption_type = key_type;
3526 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3527 			if (ret < 0) {
3528 				wl1271_warning("build arp rsp failed: %d", ret);
3529 				return ret;
3530 			}
3531 		}
3532 		break;
3533 
3534 	case DISABLE_KEY:
3535 		ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3536 				     key_conf->keyidx, key_type,
3537 				     key_conf->keylen, key_conf->key,
3538 				     0, 0, sta);
3539 		if (ret < 0) {
3540 			wl1271_error("Could not remove key");
3541 			return ret;
3542 		}
3543 		break;
3544 
3545 	default:
3546 		wl1271_error("Unsupported key cmd 0x%x", cmd);
3547 		return -EOPNOTSUPP;
3548 	}
3549 
3550 	return ret;
3551 }
3552 EXPORT_SYMBOL_GPL(wlcore_set_key);
3553 
3554 static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
3555 					  struct ieee80211_vif *vif,
3556 					  int key_idx)
3557 {
3558 	struct wl1271 *wl = hw->priv;
3559 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3560 	int ret;
3561 
3562 	wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
3563 		     key_idx);
3564 
3565 	/* we don't handle unsetting of default key */
3566 	if (key_idx == -1)
3567 		return;
3568 
3569 	mutex_lock(&wl->mutex);
3570 
3571 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3572 		ret = -EAGAIN;
3573 		goto out_unlock;
3574 	}
3575 
3576 	ret = wl1271_ps_elp_wakeup(wl);
3577 	if (ret < 0)
3578 		goto out_unlock;
3579 
3580 	wlvif->default_key = key_idx;
3581 
3582 	/* the default WEP key needs to be configured at least once */
3583 	if (wlvif->encryption_type == KEY_WEP) {
3584 		ret = wl12xx_cmd_set_default_wep_key(wl,
3585 				key_idx,
3586 				wlvif->sta.hlid);
3587 		if (ret < 0)
3588 			goto out_sleep;
3589 	}
3590 
3591 out_sleep:
3592 	wl1271_ps_elp_sleep(wl);
3593 
3594 out_unlock:
3595 	mutex_unlock(&wl->mutex);
3596 }
3597 
3598 void wlcore_regdomain_config(struct wl1271 *wl)
3599 {
3600 	int ret;
3601 
3602 	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3603 		return;
3604 
3605 	mutex_lock(&wl->mutex);
3606 
3607 	if (unlikely(wl->state != WLCORE_STATE_ON))
3608 		goto out;
3609 
3610 	ret = wl1271_ps_elp_wakeup(wl);
3611 	if (ret < 0)
3612 		goto out;
3613 
3614 	ret = wlcore_cmd_regdomain_config_locked(wl);
3615 	if (ret < 0) {
3616 		wl12xx_queue_recovery_work(wl);
3617 		goto out;
3618 	}
3619 
3620 	wl1271_ps_elp_sleep(wl);
3621 out:
3622 	mutex_unlock(&wl->mutex);
3623 }
3624 
3625 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3626 			     struct ieee80211_vif *vif,
3627 			     struct ieee80211_scan_request *hw_req)
3628 {
3629 	struct cfg80211_scan_request *req = &hw_req->req;
3630 	struct wl1271 *wl = hw->priv;
3631 	int ret;
3632 	u8 *ssid = NULL;
3633 	size_t len = 0;
3634 
3635 	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3636 
3637 	if (req->n_ssids) {
3638 		ssid = req->ssids[0].ssid;
3639 		len = req->ssids[0].ssid_len;
3640 	}
3641 
3642 	mutex_lock(&wl->mutex);
3643 
3644 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3645 		/*
3646 		 * We cannot return -EBUSY here because cfg80211 will expect
3647 		 * a call to ieee80211_scan_completed if we do - in this case
3648 		 * there won't be any call.
3649 		 */
3650 		ret = -EAGAIN;
3651 		goto out;
3652 	}
3653 
3654 	ret = wl1271_ps_elp_wakeup(wl);
3655 	if (ret < 0)
3656 		goto out;
3657 
3658 	/* fail if there is any role in ROC */
3659 	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3660 		/* don't allow scanning right now */
3661 		ret = -EBUSY;
3662 		goto out_sleep;
3663 	}
3664 
3665 	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3666 out_sleep:
3667 	wl1271_ps_elp_sleep(wl);
3668 out:
3669 	mutex_unlock(&wl->mutex);
3670 
3671 	return ret;
3672 }
3673 
3674 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3675 				     struct ieee80211_vif *vif)
3676 {
3677 	struct wl1271 *wl = hw->priv;
3678 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3679 	int ret;
3680 
3681 	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3682 
3683 	mutex_lock(&wl->mutex);
3684 
3685 	if (unlikely(wl->state != WLCORE_STATE_ON))
3686 		goto out;
3687 
3688 	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3689 		goto out;
3690 
3691 	ret = wl1271_ps_elp_wakeup(wl);
3692 	if (ret < 0)
3693 		goto out;
3694 
3695 	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3696 		ret = wl->ops->scan_stop(wl, wlvif);
3697 		if (ret < 0)
3698 			goto out_sleep;
3699 	}
3700 
3701 	/*
3702 	 * Rearm the tx watchdog just before idling scan. This
3703 	 * prevents just-finished scans from triggering the watchdog
3704 	 */
3705 	wl12xx_rearm_tx_watchdog_locked(wl);
3706 
3707 	wl->scan.state = WL1271_SCAN_STATE_IDLE;
3708 	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3709 	wl->scan_wlvif = NULL;
3710 	wl->scan.req = NULL;
3711 	ieee80211_scan_completed(wl->hw, true);
3712 
3713 out_sleep:
3714 	wl1271_ps_elp_sleep(wl);
3715 out:
3716 	mutex_unlock(&wl->mutex);
3717 
3718 	cancel_delayed_work_sync(&wl->scan_complete_work);
3719 }
3720 
3721 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3722 				      struct ieee80211_vif *vif,
3723 				      struct cfg80211_sched_scan_request *req,
3724 				      struct ieee80211_scan_ies *ies)
3725 {
3726 	struct wl1271 *wl = hw->priv;
3727 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3728 	int ret;
3729 
3730 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3731 
3732 	mutex_lock(&wl->mutex);
3733 
3734 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3735 		ret = -EAGAIN;
3736 		goto out;
3737 	}
3738 
3739 	ret = wl1271_ps_elp_wakeup(wl);
3740 	if (ret < 0)
3741 		goto out;
3742 
3743 	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3744 	if (ret < 0)
3745 		goto out_sleep;
3746 
3747 	wl->sched_vif = wlvif;
3748 
3749 out_sleep:
3750 	wl1271_ps_elp_sleep(wl);
3751 out:
3752 	mutex_unlock(&wl->mutex);
3753 	return ret;
3754 }
3755 
3756 static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3757 				     struct ieee80211_vif *vif)
3758 {
3759 	struct wl1271 *wl = hw->priv;
3760 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3761 	int ret;
3762 
3763 	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3764 
3765 	mutex_lock(&wl->mutex);
3766 
3767 	if (unlikely(wl->state != WLCORE_STATE_ON))
3768 		goto out;
3769 
3770 	ret = wl1271_ps_elp_wakeup(wl);
3771 	if (ret < 0)
3772 		goto out;
3773 
3774 	wl->ops->sched_scan_stop(wl, wlvif);
3775 
3776 	wl1271_ps_elp_sleep(wl);
3777 out:
3778 	mutex_unlock(&wl->mutex);
3779 
3780 	return 0;
3781 }
3782 
3783 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3784 {
3785 	struct wl1271 *wl = hw->priv;
3786 	int ret = 0;
3787 
3788 	mutex_lock(&wl->mutex);
3789 
3790 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3791 		ret = -EAGAIN;
3792 		goto out;
3793 	}
3794 
3795 	ret = wl1271_ps_elp_wakeup(wl);
3796 	if (ret < 0)
3797 		goto out;
3798 
3799 	ret = wl1271_acx_frag_threshold(wl, value);
3800 	if (ret < 0)
3801 		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3802 
3803 	wl1271_ps_elp_sleep(wl);
3804 
3805 out:
3806 	mutex_unlock(&wl->mutex);
3807 
3808 	return ret;
3809 }
3810 
3811 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3812 {
3813 	struct wl1271 *wl = hw->priv;
3814 	struct wl12xx_vif *wlvif;
3815 	int ret = 0;
3816 
3817 	mutex_lock(&wl->mutex);
3818 
3819 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
3820 		ret = -EAGAIN;
3821 		goto out;
3822 	}
3823 
3824 	ret = wl1271_ps_elp_wakeup(wl);
3825 	if (ret < 0)
3826 		goto out;
3827 
3828 	wl12xx_for_each_wlvif(wl, wlvif) {
3829 		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3830 		if (ret < 0)
3831 			wl1271_warning("set rts threshold failed: %d", ret);
3832 	}
3833 	wl1271_ps_elp_sleep(wl);
3834 
3835 out:
3836 	mutex_unlock(&wl->mutex);
3837 
3838 	return ret;
3839 }
3840 
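/*
 * Remove the first occurrence of the given IE from the frame payload,
 * shifting the remaining data down and trimming the skb accordingly.
 */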
3841 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3842 {
3843 	int len;
3844 	const u8 *next, *end = skb->data + skb->len;
3845 	u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3846 					skb->len - ieoffset);
3847 	if (!ie)
3848 		return;
3849 	len = ie[1] + 2;
3850 	next = ie + len;
3851 	memmove(ie, next, end - next);
3852 	skb_trim(skb, skb->len - len);
3853 }
3854 
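/*
 * Same as wl12xx_remove_ie(), but matches a vendor-specific IE by
 * OUI and OUI type.
 */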
3855 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3856 					    unsigned int oui, u8 oui_type,
3857 					    int ieoffset)
3858 {
3859 	int len;
3860 	const u8 *next, *end = skb->data + skb->len;
3861 	u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3862 					       skb->data + ieoffset,
3863 					       skb->len - ieoffset);
3864 	if (!ie)
3865 		return;
3866 	len = ie[1] + 2;
3867 	next = ie + len;
3868 	memmove(ie, next, end - next);
3869 	skb_trim(skb, skb->len - len);
3870 }
3871 
3872 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3873 					 struct ieee80211_vif *vif)
3874 {
3875 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3876 	struct sk_buff *skb;
3877 	int ret;
3878 
3879 	skb = ieee80211_proberesp_get(wl->hw, vif);
3880 	if (!skb)
3881 		return -EOPNOTSUPP;
3882 
3883 	ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3884 				      CMD_TEMPL_AP_PROBE_RESPONSE,
3885 				      skb->data,
3886 				      skb->len, 0,
3887 				      rates);
3888 	dev_kfree_skb(skb);
3889 
3890 	if (ret < 0)
3891 		goto out;
3892 
3893 	wl1271_debug(DEBUG_AP, "probe response updated");
3894 	set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3895 
3896 out:
3897 	return ret;
3898 }
3899 
3900 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3901 					     struct ieee80211_vif *vif,
3902 					     u8 *probe_rsp_data,
3903 					     size_t probe_rsp_len,
3904 					     u32 rates)
3905 {
3906 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3907 	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3908 	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3909 	int ssid_ie_offset, ie_offset, templ_len;
3910 	const u8 *ptr;
3911 
3912 	/* no need to change probe response if the SSID is set correctly */
3913 	if (wlvif->ssid_len > 0)
3914 		return wl1271_cmd_template_set(wl, wlvif->role_id,
3915 					       CMD_TEMPL_AP_PROBE_RESPONSE,
3916 					       probe_rsp_data,
3917 					       probe_rsp_len, 0,
3918 					       rates);
3919 
3920 	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3921 		wl1271_error("probe_rsp template too big");
3922 		return -EINVAL;
3923 	}
3924 
3925 	/* start searching from IE offset */
3926 	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3927 
3928 	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3929 			       probe_rsp_len - ie_offset);
3930 	if (!ptr) {
3931 		wl1271_error("No SSID in beacon!");
3932 		return -EINVAL;
3933 	}
3934 
3935 	ssid_ie_offset = ptr - probe_rsp_data;
3936 	ptr += (ptr[1] + 2);
3937 
3938 	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3939 
3940 	/* insert SSID from bss_conf */
3941 	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3942 	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3943 	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3944 	       bss_conf->ssid, bss_conf->ssid_len);
3945 	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3946 
3947 	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3948 	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
3949 	templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3950 
3951 	return wl1271_cmd_template_set(wl, wlvif->role_id,
3952 				       CMD_TEMPL_AP_PROBE_RESPONSE,
3953 				       probe_rsp_templ,
3954 				       templ_len, 0,
3955 				       rates);
3956 }
3957 
3958 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3959 				       struct ieee80211_vif *vif,
3960 				       struct ieee80211_bss_conf *bss_conf,
3961 				       u32 changed)
3962 {
3963 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3964 	int ret = 0;
3965 
3966 	if (changed & BSS_CHANGED_ERP_SLOT) {
3967 		if (bss_conf->use_short_slot)
3968 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3969 		else
3970 			ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3971 		if (ret < 0) {
3972 			wl1271_warning("Set slot time failed %d", ret);
3973 			goto out;
3974 		}
3975 	}
3976 
3977 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3978 		if (bss_conf->use_short_preamble)
3979 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3980 		else
3981 			wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3982 	}
3983 
3984 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3985 		if (bss_conf->use_cts_prot)
3986 			ret = wl1271_acx_cts_protect(wl, wlvif,
3987 						     CTSPROTECT_ENABLE);
3988 		else
3989 			ret = wl1271_acx_cts_protect(wl, wlvif,
3990 						     CTSPROTECT_DISABLE);
3991 		if (ret < 0) {
3992 			wl1271_warning("Set ctsprotect failed %d", ret);
3993 			goto out;
3994 		}
3995 	}
3996 
3997 out:
3998 	return ret;
3999 }
4000 
4001 static int wlcore_set_beacon_template(struct wl1271 *wl,
4002 				      struct ieee80211_vif *vif,
4003 				      bool is_ap)
4004 {
4005 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4006 	struct ieee80211_hdr *hdr;
4007 	u32 min_rate;
4008 	int ret;
4009 	int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
4010 	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
4011 	u16 tmpl_id;
4012 
4013 	if (!beacon) {
4014 		ret = -EINVAL;
4015 		goto out;
4016 	}
4017 
4018 	wl1271_debug(DEBUG_MASTER, "beacon updated");
4019 
4020 	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
4021 	if (ret < 0) {
4022 		dev_kfree_skb(beacon);
4023 		goto out;
4024 	}
4025 	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4026 	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
4027 		CMD_TEMPL_BEACON;
4028 	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
4029 				      beacon->data,
4030 				      beacon->len, 0,
4031 				      min_rate);
4032 	if (ret < 0) {
4033 		dev_kfree_skb(beacon);
4034 		goto out;
4035 	}
4036 
4037 	wlvif->wmm_enabled =
4038 		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
4039 					WLAN_OUI_TYPE_MICROSOFT_WMM,
4040 					beacon->data + ieoffset,
4041 					beacon->len - ieoffset);
4042 
4043 	/*
4044 	 * If a probe-resp template was already set explicitly by
4045 	 * userspace, don't derive one from the beacon data.
4046 	 */
4047 	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
4048 		goto end_bcn;
4049 
4050 	/* remove TIM ie from probe response */
4051 	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
4052 
4053 	/*
4054 	 * remove p2p ie from probe response.
4055 	 * the FW responds to probe requests that don't include
4056 	 * the p2p IE. Probe requests with the p2p IE are passed
4057 	 * up and answered by the supplicant (the spec forbids
4058 	 * including the p2p IE when responding to probe requests
4059 	 * that didn't include it).
4060 	 */
4061 	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
4062 				WLAN_OUI_TYPE_WFA_P2P, ieoffset);
4063 
4064 	hdr = (struct ieee80211_hdr *) beacon->data;
4065 	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
4066 					 IEEE80211_STYPE_PROBE_RESP);
4067 	if (is_ap)
4068 		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
4069 							   beacon->data,
4070 							   beacon->len,
4071 							   min_rate);
4072 	else
4073 		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
4074 					      CMD_TEMPL_PROBE_RESPONSE,
4075 					      beacon->data,
4076 					      beacon->len, 0,
4077 					      min_rate);
4078 end_bcn:
4079 	dev_kfree_skb(beacon);
4080 	if (ret < 0)
4081 		goto out;
4082 
4083 out:
4084 	return ret;
4085 }
4086 
4087 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
4088 					  struct ieee80211_vif *vif,
4089 					  struct ieee80211_bss_conf *bss_conf,
4090 					  u32 changed)
4091 {
4092 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4093 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4094 	int ret = 0;
4095 
4096 	if (changed & BSS_CHANGED_BEACON_INT) {
4097 		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
4098 			bss_conf->beacon_int);
4099 
4100 		wlvif->beacon_int = bss_conf->beacon_int;
4101 	}
4102 
4103 	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
4104 		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4105 
4106 		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
4107 	}
4108 
4109 	if (changed & BSS_CHANGED_BEACON) {
4110 		ret = wlcore_set_beacon_template(wl, vif, is_ap);
4111 		if (ret < 0)
4112 			goto out;
4113 	}
4114 
4115 out:
4116 	if (ret != 0)
4117 		wl1271_error("beacon info change failed: %d", ret);
4118 	return ret;
4119 }
4120 
4121 /* AP mode changes */
4122 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
4123 				       struct ieee80211_vif *vif,
4124 				       struct ieee80211_bss_conf *bss_conf,
4125 				       u32 changed)
4126 {
4127 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4128 	int ret = 0;
4129 
4130 	if (changed & BSS_CHANGED_BASIC_RATES) {
4131 		u32 rates = bss_conf->basic_rates;
4132 
4133 		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
4134 								 wlvif->band);
4135 		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
4136 							wlvif->basic_rate_set);
4137 
4138 		ret = wl1271_init_ap_rates(wl, wlvif);
4139 		if (ret < 0) {
4140 			wl1271_error("AP rate policy change failed %d", ret);
4141 			goto out;
4142 		}
4143 
4144 		ret = wl1271_ap_init_templates(wl, vif);
4145 		if (ret < 0)
4146 			goto out;
4147 
4148 		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
4149 		if (ret < 0)
4150 			goto out;
4151 
4152 		ret = wlcore_set_beacon_template(wl, vif, true);
4153 		if (ret < 0)
4154 			goto out;
4155 	}
4156 
4157 	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
4158 	if (ret < 0)
4159 		goto out;
4160 
4161 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
4162 		if (bss_conf->enable_beacon) {
4163 			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4164 				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
4165 				if (ret < 0)
4166 					goto out;
4167 
4168 				ret = wl1271_ap_init_hwenc(wl, wlvif);
4169 				if (ret < 0)
4170 					goto out;
4171 
4172 				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4173 				wl1271_debug(DEBUG_AP, "started AP");
4174 			}
4175 		} else {
4176 			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
4177 				/*
4178 				 * The AP might still be in ROC if we have
4179 				 * just sent an auth reply; handle it.
4180 				 */
4181 				if (test_bit(wlvif->role_id, wl->roc_map))
4182 					wl12xx_croc(wl, wlvif->role_id);
4183 
4184 				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
4185 				if (ret < 0)
4186 					goto out;
4187 
4188 				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
4189 				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
4190 					  &wlvif->flags);
4191 				wl1271_debug(DEBUG_AP, "stopped AP");
4192 			}
4193 		}
4194 	}
4195 
4196 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4197 	if (ret < 0)
4198 		goto out;
4199 
4200 	/* Handle HT information change */
4201 	if ((changed & BSS_CHANGED_HT) &&
4202 	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
4203 		ret = wl1271_acx_set_ht_information(wl, wlvif,
4204 					bss_conf->ht_operation_mode);
4205 		if (ret < 0) {
4206 			wl1271_warning("Set ht information failed %d", ret);
4207 			goto out;
4208 		}
4209 	}
4210 
4211 out:
4212 	return;
4213 }
4214 
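/*
 * Apply the parameters of a newly set BSSID: beacon interval, basic and
 * STA rate sets, rate policies and the null-data templates.
 */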
4215 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
4216 			    struct ieee80211_bss_conf *bss_conf,
4217 			    u32 sta_rate_set)
4218 {
4219 	u32 rates;
4220 	int ret;
4221 
4222 	wl1271_debug(DEBUG_MAC80211,
4223 	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
4224 	     bss_conf->bssid, bss_conf->aid,
4225 	     bss_conf->beacon_int,
4226 	     bss_conf->basic_rates, sta_rate_set);
4227 
4228 	wlvif->beacon_int = bss_conf->beacon_int;
4229 	rates = bss_conf->basic_rates;
4230 	wlvif->basic_rate_set =
4231 		wl1271_tx_enabled_rates_get(wl, rates,
4232 					    wlvif->band);
4233 	wlvif->basic_rate =
4234 		wl1271_tx_min_rate_get(wl,
4235 				       wlvif->basic_rate_set);
4236 
4237 	if (sta_rate_set)
4238 		wlvif->rate_set =
4239 			wl1271_tx_enabled_rates_get(wl,
4240 						sta_rate_set,
4241 						wlvif->band);
4242 
4243 	/* we only support sched_scan while not connected */
4244 	if (wl->sched_vif == wlvif)
4245 		wl->ops->sched_scan_stop(wl, wlvif);
4246 
4247 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4248 	if (ret < 0)
4249 		return ret;
4250 
4251 	ret = wl12xx_cmd_build_null_data(wl, wlvif);
4252 	if (ret < 0)
4253 		return ret;
4254 
4255 	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4256 	if (ret < 0)
4257 		return ret;
4258 
4259 	wlcore_set_ssid(wl, wlvif);
4260 
4261 	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4262 
4263 	return 0;
4264 }
4265 
4266 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4267 {
4268 	int ret;
4269 
4270 	/* revert back to minimum rates for the current band */
4271 	wl1271_set_band_rate(wl, wlvif);
4272 	wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4273 
4274 	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4275 	if (ret < 0)
4276 		return ret;
4277 
4278 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4279 	    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4280 		ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4281 		if (ret < 0)
4282 			return ret;
4283 	}
4284 
4285 	clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4286 	return 0;
4287 }
4288 /* STA/IBSS mode changes */
4289 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4290 					struct ieee80211_vif *vif,
4291 					struct ieee80211_bss_conf *bss_conf,
4292 					u32 changed)
4293 {
4294 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4295 	bool do_join = false;
4296 	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4297 	bool ibss_joined = false;
4298 	u32 sta_rate_set = 0;
4299 	int ret;
4300 	struct ieee80211_sta *sta;
4301 	bool sta_exists = false;
4302 	struct ieee80211_sta_ht_cap sta_ht_cap;
4303 
4304 	if (is_ibss) {
4305 		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4306 						     changed);
4307 		if (ret < 0)
4308 			goto out;
4309 	}
4310 
4311 	if (changed & BSS_CHANGED_IBSS) {
4312 		if (bss_conf->ibss_joined) {
4313 			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4314 			ibss_joined = true;
4315 		} else {
4316 			wlcore_unset_assoc(wl, wlvif);
4317 			wl12xx_cmd_role_stop_sta(wl, wlvif);
4318 		}
4319 	}
4320 
4321 	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4322 		do_join = true;
4323 
4324 	/* Need to update the SSID (for filtering etc) */
4325 	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4326 		do_join = true;
4327 
4328 	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4329 		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4330 			     bss_conf->enable_beacon ? "enabled" : "disabled");
4331 
4332 		do_join = true;
4333 	}
4334 
4335 	if (changed & BSS_CHANGED_IDLE && !is_ibss)
4336 		wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
4337 
4338 	if (changed & BSS_CHANGED_CQM) {
4339 		bool enable = false;
4340 		if (bss_conf->cqm_rssi_thold)
4341 			enable = true;
4342 		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4343 						  bss_conf->cqm_rssi_thold,
4344 						  bss_conf->cqm_rssi_hyst);
4345 		if (ret < 0)
4346 			goto out;
4347 		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4348 	}
4349 
4350 	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4351 		       BSS_CHANGED_ASSOC)) {
4352 		rcu_read_lock();
4353 		sta = ieee80211_find_sta(vif, bss_conf->bssid);
4354 		if (sta) {
4355 			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4356 
4357 			/* save the supp_rates of the ap */
4358 			sta_rate_set = sta->supp_rates[wlvif->band];
4359 			if (sta->ht_cap.ht_supported)
4360 				sta_rate_set |=
4361 					(rx_mask[0] << HW_HT_RATES_OFFSET) |
4362 					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
4363 			sta_ht_cap = sta->ht_cap;
4364 			sta_exists = true;
4365 		}
4366 
4367 		rcu_read_unlock();
4368 	}
4369 
4370 	if (changed & BSS_CHANGED_BSSID) {
4371 		if (!is_zero_ether_addr(bss_conf->bssid)) {
4372 			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4373 					       sta_rate_set);
4374 			if (ret < 0)
4375 				goto out;
4376 
4377 			/* Need to update the BSSID (for filtering etc) */
4378 			do_join = true;
4379 		} else {
4380 			ret = wlcore_clear_bssid(wl, wlvif);
4381 			if (ret < 0)
4382 				goto out;
4383 		}
4384 	}
4385 
4386 	if (changed & BSS_CHANGED_IBSS) {
4387 		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4388 			     bss_conf->ibss_joined);
4389 
4390 		if (bss_conf->ibss_joined) {
4391 			u32 rates = bss_conf->basic_rates;
4392 			wlvif->basic_rate_set =
4393 				wl1271_tx_enabled_rates_get(wl, rates,
4394 							    wlvif->band);
4395 			wlvif->basic_rate =
4396 				wl1271_tx_min_rate_get(wl,
4397 						       wlvif->basic_rate_set);
4398 
4399 			/* by default, use 11b + OFDM rates */
4400 			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4401 			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4402 			if (ret < 0)
4403 				goto out;
4404 		}
4405 	}
4406 
4407 	if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
4408 		/* enable beacon filtering */
4409 		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
4410 		if (ret < 0)
4411 			goto out;
4412 	}
4413 
4414 	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4415 	if (ret < 0)
4416 		goto out;
4417 
4418 	if (do_join) {
4419 		ret = wlcore_join(wl, wlvif);
4420 		if (ret < 0) {
4421 			wl1271_warning("cmd join failed %d", ret);
4422 			goto out;
4423 		}
4424 	}
4425 
4426 	if (changed & BSS_CHANGED_ASSOC) {
4427 		if (bss_conf->assoc) {
4428 			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4429 					       sta_rate_set);
4430 			if (ret < 0)
4431 				goto out;
4432 
4433 			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4434 				wl12xx_set_authorized(wl, wlvif);
4435 		} else {
4436 			wlcore_unset_assoc(wl, wlvif);
4437 		}
4438 	}
4439 
4440 	if (changed & BSS_CHANGED_PS) {
4441 		if ((bss_conf->ps) &&
4442 		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4443 		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4444 			int ps_mode;
4445 			char *ps_mode_str;
4446 
4447 			if (wl->conf.conn.forced_ps) {
4448 				ps_mode = STATION_POWER_SAVE_MODE;
4449 				ps_mode_str = "forced";
4450 			} else {
4451 				ps_mode = STATION_AUTO_PS_MODE;
4452 				ps_mode_str = "auto";
4453 			}
4454 
4455 			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4456 
4457 			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4458 			if (ret < 0)
4459 				wl1271_warning("enter %s ps failed %d",
4460 					       ps_mode_str, ret);
4461 		} else if (!bss_conf->ps &&
4462 			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4463 			wl1271_debug(DEBUG_PSM, "auto ps disabled");
4464 
4465 			ret = wl1271_ps_set_mode(wl, wlvif,
4466 						 STATION_ACTIVE_MODE);
4467 			if (ret < 0)
4468 				wl1271_warning("exit auto ps failed %d", ret);
4469 		}
4470 	}
4471 
4472 	/* Handle new association with HT. Do this after join. */
4473 	if (sta_exists) {
4474 		bool enabled =
4475 			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4476 
4477 		ret = wlcore_hw_set_peer_cap(wl,
4478 					     &sta_ht_cap,
4479 					     enabled,
4480 					     wlvif->rate_set,
4481 					     wlvif->sta.hlid);
4482 		if (ret < 0) {
4483 			wl1271_warning("Set ht cap failed %d", ret);
4484 			goto out;
4485 
4486 		}
4487 
4488 		if (enabled) {
4489 			ret = wl1271_acx_set_ht_information(wl, wlvif,
4490 						bss_conf->ht_operation_mode);
4491 			if (ret < 0) {
4492 				wl1271_warning("Set ht information failed %d",
4493 					       ret);
4494 				goto out;
4495 			}
4496 		}
4497 	}
4498 
4499 	/* Handle arp filtering. Done after join. */
4500 	if ((changed & BSS_CHANGED_ARP_FILTER) ||
4501 	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4502 		__be32 addr = bss_conf->arp_addr_list[0];
4503 		wlvif->sta.qos = bss_conf->qos;
4504 		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4505 
4506 		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
4507 			wlvif->ip_addr = addr;
4508 			/*
4509 			 * The template should have been configured only upon
4510 			 * association. However, it seems the correct IP
4511 			 * isn't always set when sending, so we have to
4512 			 * reconfigure the template on every IP change.
4513 			 */
4514 			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4515 			if (ret < 0) {
4516 				wl1271_warning("build arp rsp failed: %d", ret);
4517 				goto out;
4518 			}
4519 
4520 			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4521 				(ACX_ARP_FILTER_ARP_FILTERING |
4522 				 ACX_ARP_FILTER_AUTO_ARP),
4523 				addr);
4524 		} else {
4525 			wlvif->ip_addr = 0;
4526 			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4527 		}
4528 
4529 		if (ret < 0)
4530 			goto out;
4531 	}
4532 
4533 out:
4534 	return;
4535 }
4536 
4537 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4538 				       struct ieee80211_vif *vif,
4539 				       struct ieee80211_bss_conf *bss_conf,
4540 				       u32 changed)
4541 {
4542 	struct wl1271 *wl = hw->priv;
4543 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4544 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4545 	int ret;
4546 
4547 	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4548 		     wlvif->role_id, (int)changed);
4549 
4550 	/*
4551 	 * make sure to cancel pending disconnections if our association
4552 	 * state changed
4553 	 */
4554 	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4555 		cancel_delayed_work_sync(&wlvif->connection_loss_work);
4556 
4557 	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4558 	    !bss_conf->enable_beacon)
4559 		wl1271_tx_flush(wl);
4560 
4561 	mutex_lock(&wl->mutex);
4562 
4563 	if (unlikely(wl->state != WLCORE_STATE_ON))
4564 		goto out;
4565 
4566 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4567 		goto out;
4568 
4569 	ret = wl1271_ps_elp_wakeup(wl);
4570 	if (ret < 0)
4571 		goto out;
4572 
4573 	if ((changed & BSS_CHANGED_TXPOWER) &&
4574 	    bss_conf->txpower != wlvif->power_level) {
4575 
4576 		ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
4577 		if (ret < 0)
4578 			goto out;
4579 
4580 		wlvif->power_level = bss_conf->txpower;
4581 	}
4582 
4583 	if (is_ap)
4584 		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4585 	else
4586 		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4587 
4588 	wl1271_ps_elp_sleep(wl);
4589 
4590 out:
4591 	mutex_unlock(&wl->mutex);
4592 }
4593 
4594 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4595 				 struct ieee80211_chanctx_conf *ctx)
4596 {
4597 	wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4598 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4599 		     cfg80211_get_chandef_type(&ctx->def));
4600 	return 0;
4601 }
4602 
4603 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4604 				     struct ieee80211_chanctx_conf *ctx)
4605 {
4606 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4607 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4608 		     cfg80211_get_chandef_type(&ctx->def));
4609 }
4610 
4611 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4612 				     struct ieee80211_chanctx_conf *ctx,
4613 				     u32 changed)
4614 {
4615 	struct wl1271 *wl = hw->priv;
4616 	struct wl12xx_vif *wlvif;
4617 	int ret;
4618 	int channel = ieee80211_frequency_to_channel(
4619 		ctx->def.chan->center_freq);
4620 
4621 	wl1271_debug(DEBUG_MAC80211,
4622 		     "mac80211 change chanctx %d (type %d) changed 0x%x",
4623 		     channel, cfg80211_get_chandef_type(&ctx->def), changed);
4624 
4625 	mutex_lock(&wl->mutex);
4626 
4627 	ret = wl1271_ps_elp_wakeup(wl);
4628 	if (ret < 0)
4629 		goto out;
4630 
4631 	wl12xx_for_each_wlvif(wl, wlvif) {
4632 		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4633 
4634 		rcu_read_lock();
4635 		if (rcu_access_pointer(vif->chanctx_conf) != ctx) {
4636 			rcu_read_unlock();
4637 			continue;
4638 		}
4639 		rcu_read_unlock();
4640 
4641 		/* start radar if needed */
4642 		if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
4643 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
4644 		    ctx->radar_enabled && !wlvif->radar_enabled &&
4645 		    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4646 			wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4647 			wlcore_hw_set_cac(wl, wlvif, true);
4648 			wlvif->radar_enabled = true;
4649 		}
4650 	}
4651 
4652 	wl1271_ps_elp_sleep(wl);
4653 out:
4654 	mutex_unlock(&wl->mutex);
4655 }
4656 
4657 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4658 					struct ieee80211_vif *vif,
4659 					struct ieee80211_chanctx_conf *ctx)
4660 {
4661 	struct wl1271 *wl = hw->priv;
4662 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4663 	int channel = ieee80211_frequency_to_channel(
4664 		ctx->def.chan->center_freq);
4665 	int ret = -EINVAL;
4666 
4667 	wl1271_debug(DEBUG_MAC80211,
4668 		     "mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
4669 		     wlvif->role_id, channel,
4670 		     cfg80211_get_chandef_type(&ctx->def),
4671 		     ctx->radar_enabled, ctx->def.chan->dfs_state);
4672 
4673 	mutex_lock(&wl->mutex);
4674 
4675 	if (unlikely(wl->state != WLCORE_STATE_ON))
4676 		goto out;
4677 
4678 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4679 		goto out;
4680 
4681 	ret = wl1271_ps_elp_wakeup(wl);
4682 	if (ret < 0)
4683 		goto out;
4684 
4685 	wlvif->band = ctx->def.chan->band;
4686 	wlvif->channel = channel;
4687 	wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4688 
4689 	/* update default rates according to the band */
4690 	wl1271_set_band_rate(wl, wlvif);
4691 
4692 	if (ctx->radar_enabled &&
4693 	    ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
4694 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4695 		wlcore_hw_set_cac(wl, wlvif, true);
4696 		wlvif->radar_enabled = true;
4697 	}
4698 
4699 	wl1271_ps_elp_sleep(wl);
4700 out:
4701 	mutex_unlock(&wl->mutex);
4702 
4703 	return 0;
4704 }
4705 
4706 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4707 					   struct ieee80211_vif *vif,
4708 					   struct ieee80211_chanctx_conf *ctx)
4709 {
4710 	struct wl1271 *wl = hw->priv;
4711 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4712 	int ret;
4713 
4714 	wl1271_debug(DEBUG_MAC80211,
4715 		     "mac80211 unassign chanctx (role %d) %d (type %d)",
4716 		     wlvif->role_id,
4717 		     ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4718 		     cfg80211_get_chandef_type(&ctx->def));
4719 
4720 	wl1271_tx_flush(wl);
4721 
4722 	mutex_lock(&wl->mutex);
4723 
4724 	if (unlikely(wl->state != WLCORE_STATE_ON))
4725 		goto out;
4726 
4727 	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4728 		goto out;
4729 
4730 	ret = wl1271_ps_elp_wakeup(wl);
4731 	if (ret < 0)
4732 		goto out;
4733 
4734 	if (wlvif->radar_enabled) {
4735 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4736 		wlcore_hw_set_cac(wl, wlvif, false);
4737 		wlvif->radar_enabled = false;
4738 	}
4739 
4740 	wl1271_ps_elp_sleep(wl);
4741 out:
4742 	mutex_unlock(&wl->mutex);
4743 }
4744 
4745 static int __wlcore_switch_vif_chan(struct wl1271 *wl,
4746 				    struct wl12xx_vif *wlvif,
4747 				    struct ieee80211_chanctx_conf *new_ctx)
4748 {
4749 	int channel = ieee80211_frequency_to_channel(
4750 		new_ctx->def.chan->center_freq);
4751 
4752 	wl1271_debug(DEBUG_MAC80211,
4753 		     "switch vif (role %d) %d -> %d chan_type: %d",
4754 		     wlvif->role_id, wlvif->channel, channel,
4755 		     cfg80211_get_chandef_type(&new_ctx->def));
4756 
4757 	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
4758 		return 0;
4759 
4760 	if (wlvif->radar_enabled) {
4761 		wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
4762 		wlcore_hw_set_cac(wl, wlvif, false);
4763 		wlvif->radar_enabled = false;
4764 	}
4765 
4766 	wlvif->band = new_ctx->def.chan->band;
4767 	wlvif->channel = channel;
4768 	wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
4769 
4770 	/* start radar if needed */
4771 	if (new_ctx->radar_enabled) {
4772 		wl1271_debug(DEBUG_MAC80211, "Start radar detection");
4773 		wlcore_hw_set_cac(wl, wlvif, true);
4774 		wlvif->radar_enabled = true;
4775 	}
4776 
4777 	return 0;
4778 }
4779 
4780 static int
4781 wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
4782 			     struct ieee80211_vif_chanctx_switch *vifs,
4783 			     int n_vifs,
4784 			     enum ieee80211_chanctx_switch_mode mode)
4785 {
4786 	struct wl1271 *wl = hw->priv;
4787 	int i, ret;
4788 
4789 	wl1271_debug(DEBUG_MAC80211,
4790 		     "mac80211 switch chanctx n_vifs %d mode %d",
4791 		     n_vifs, mode);
4792 
4793 	mutex_lock(&wl->mutex);
4794 
4795 	ret = wl1271_ps_elp_wakeup(wl);
4796 	if (ret < 0)
4797 		goto out;
4798 
4799 	for (i = 0; i < n_vifs; i++) {
4800 		struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
4801 
4802 		ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
4803 		if (ret)
4804 			goto out_sleep;
4805 	}
4806 out_sleep:
4807 	wl1271_ps_elp_sleep(wl);
4808 out:
4809 	mutex_unlock(&wl->mutex);
4810 
4811 	return 0;
4812 }
4813 
4814 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4815 			     struct ieee80211_vif *vif, u16 queue,
4816 			     const struct ieee80211_tx_queue_params *params)
4817 {
4818 	struct wl1271 *wl = hw->priv;
4819 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4820 	u8 ps_scheme;
4821 	int ret = 0;
4822 
4823 	if (wlcore_is_p2p_mgmt(wlvif))
4824 		return 0;
4825 
4826 	mutex_lock(&wl->mutex);
4827 
4828 	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4829 
4830 	if (params->uapsd)
4831 		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4832 	else
4833 		ps_scheme = CONF_PS_SCHEME_LEGACY;
4834 
4835 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4836 		goto out;
4837 
4838 	ret = wl1271_ps_elp_wakeup(wl);
4839 	if (ret < 0)
4840 		goto out;
4841 
4842 	/*
4843 	 * mac80211 passes the txop in units of 32us,
4844 	 * but the firmware expects microseconds
4845 	 */
4846 	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4847 				params->cw_min, params->cw_max,
4848 				params->aifs, params->txop << 5);
4849 	if (ret < 0)
4850 		goto out_sleep;
4851 
4852 	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4853 				 CONF_CHANNEL_TYPE_EDCF,
4854 				 wl1271_tx_get_queue(queue),
4855 				 ps_scheme, CONF_ACK_POLICY_LEGACY,
4856 				 0, 0);
4857 
4858 out_sleep:
4859 	wl1271_ps_elp_sleep(wl);
4860 
4861 out:
4862 	mutex_unlock(&wl->mutex);
4863 
4864 	return ret;
4865 }
4866 
4867 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4868 			     struct ieee80211_vif *vif)
4869 {
4870 
4871 	struct wl1271 *wl = hw->priv;
4872 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4873 	u64 mactime = ULLONG_MAX;
4874 	int ret;
4875 
4876 	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4877 
4878 	mutex_lock(&wl->mutex);
4879 
4880 	if (unlikely(wl->state != WLCORE_STATE_ON))
4881 		goto out;
4882 
4883 	ret = wl1271_ps_elp_wakeup(wl);
4884 	if (ret < 0)
4885 		goto out;
4886 
4887 	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4888 	if (ret < 0)
4889 		goto out_sleep;
4890 
4891 out_sleep:
4892 	wl1271_ps_elp_sleep(wl);
4893 
4894 out:
4895 	mutex_unlock(&wl->mutex);
4896 	return mactime;
4897 }
4898 
4899 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4900 				struct survey_info *survey)
4901 {
4902 	struct ieee80211_conf *conf = &hw->conf;
4903 
4904 	if (idx != 0)
4905 		return -ENOENT;
4906 
4907 	survey->channel = conf->chandef.chan;
4908 	survey->filled = 0;
4909 	return 0;
4910 }
4911 
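/*
 * Reserve an HLID for a new AP-mode station and record its MAC address.
 * Fails with -EBUSY when max_ap_stations or the link limit is reached.
 */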
4912 static int wl1271_allocate_sta(struct wl1271 *wl,
4913 			     struct wl12xx_vif *wlvif,
4914 			     struct ieee80211_sta *sta)
4915 {
4916 	struct wl1271_station *wl_sta;
4917 	int ret;
4918 
4919 
4920 	if (wl->active_sta_count >= wl->max_ap_stations) {
4921 		wl1271_warning("could not allocate HLID - too many stations");
4922 		return -EBUSY;
4923 	}
4924 
4925 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4926 	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4927 	if (ret < 0) {
4928 		wl1271_warning("could not allocate HLID - too many links");
4929 		return -EBUSY;
4930 	}
4931 
4932 	/* use the previous security seq, if this is a recovery/resume */
4933 	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
4934 
4935 	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4936 	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4937 	wl->active_sta_count++;
4938 	return 0;
4939 }
4940 
4941 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4942 {
4943 	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4944 		return;
4945 
4946 	clear_bit(hlid, wlvif->ap.sta_hlid_map);
4947 	__clear_bit(hlid, &wl->ap_ps_map);
4948 	__clear_bit(hlid, &wl->ap_fw_ps_map);
4949 
4950 	/*
4951 	 * save the last used PN in the private part of ieee80211_sta,
4952 	 * in case of recovery/suspend
4953 	 */
4954 	wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
4955 
4956 	wl12xx_free_link(wl, wlvif, &hlid);
4957 	wl->active_sta_count--;
4958 
4959 	/*
4960 	 * rearm the tx watchdog when the last STA is freed - give the FW a
4961 	 * chance to return STA-buffered packets before complaining.
4962 	 */
4963 	if (wl->active_sta_count == 0)
4964 		wl12xx_rearm_tx_watchdog_locked(wl);
4965 }
4966 
4967 static int wl12xx_sta_add(struct wl1271 *wl,
4968 			  struct wl12xx_vif *wlvif,
4969 			  struct ieee80211_sta *sta)
4970 {
4971 	struct wl1271_station *wl_sta;
4972 	int ret = 0;
4973 	u8 hlid;
4974 
4975 	wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4976 
4977 	ret = wl1271_allocate_sta(wl, wlvif, sta);
4978 	if (ret < 0)
4979 		return ret;
4980 
4981 	wl_sta = (struct wl1271_station *)sta->drv_priv;
4982 	hlid = wl_sta->hlid;
4983 
4984 	ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4985 	if (ret < 0)
4986 		wl1271_free_sta(wl, wlvif, hlid);
4987 
4988 	return ret;
4989 }
4990 
4991 static int wl12xx_sta_remove(struct wl1271 *wl,
4992 			     struct wl12xx_vif *wlvif,
4993 			     struct ieee80211_sta *sta)
4994 {
4995 	struct wl1271_station *wl_sta;
4996 	int ret = 0, id;
4997 
4998 	wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4999 
5000 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5001 	id = wl_sta->hlid;
5002 	if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
5003 		return -EINVAL;
5004 
5005 	ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
5006 	if (ret < 0)
5007 		return ret;
5008 
5009 	wl1271_free_sta(wl, wlvif, wl_sta->hlid);
5010 	return ret;
5011 }
5012 
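/*
 * Start a ROC on this vif's own role, but only if no other role is
 * currently in ROC.
 */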
5013 static void wlcore_roc_if_possible(struct wl1271 *wl,
5014 				   struct wl12xx_vif *wlvif)
5015 {
5016 	if (find_first_bit(wl->roc_map,
5017 			   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
5018 		return;
5019 
5020 	if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
5021 		return;
5022 
5023 	wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
5024 }
5025 
5026 /*
5027  * when wl_sta is NULL, we treat this call as if coming from a
5028  * pending auth reply.
5029  * wl->mutex must be taken and the FW must be awake when the call
5030  * takes place.
5031  */
5032 void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
5033 			      struct wl1271_station *wl_sta, bool in_conn)
5034 {
5035 	if (in_conn) {
5036 		if (WARN_ON(wl_sta && wl_sta->in_connection))
5037 			return;
5038 
5039 		if (!wlvif->ap_pending_auth_reply &&
5040 		    !wlvif->inconn_count)
5041 			wlcore_roc_if_possible(wl, wlvif);
5042 
5043 		if (wl_sta) {
5044 			wl_sta->in_connection = true;
5045 			wlvif->inconn_count++;
5046 		} else {
5047 			wlvif->ap_pending_auth_reply = true;
5048 		}
5049 	} else {
5050 		if (wl_sta && !wl_sta->in_connection)
5051 			return;
5052 
5053 		if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
5054 			return;
5055 
5056 		if (WARN_ON(wl_sta && !wlvif->inconn_count))
5057 			return;
5058 
5059 		if (wl_sta) {
5060 			wl_sta->in_connection = false;
5061 			wlvif->inconn_count--;
5062 		} else {
5063 			wlvif->ap_pending_auth_reply = false;
5064 		}
5065 
5066 		if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
5067 		    test_bit(wlvif->role_id, wl->roc_map))
5068 			wl12xx_croc(wl, wlvif->role_id);
5069 	}
5070 }
5071 
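/*
 * Handle mac80211 station state transitions: add/remove/authorize peers
 * in AP mode, track authorization and security sequence numbers in STA
 * mode, and manage ROC around connection setup.
 */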
5072 static int wl12xx_update_sta_state(struct wl1271 *wl,
5073 				   struct wl12xx_vif *wlvif,
5074 				   struct ieee80211_sta *sta,
5075 				   enum ieee80211_sta_state old_state,
5076 				   enum ieee80211_sta_state new_state)
5077 {
5078 	struct wl1271_station *wl_sta;
5079 	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
5080 	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
5081 	int ret;
5082 
5083 	wl_sta = (struct wl1271_station *)sta->drv_priv;
5084 
5085 	/* Add station (AP mode) */
5086 	if (is_ap &&
5087 	    old_state == IEEE80211_STA_NOTEXIST &&
5088 	    new_state == IEEE80211_STA_NONE) {
5089 		ret = wl12xx_sta_add(wl, wlvif, sta);
5090 		if (ret)
5091 			return ret;
5092 
5093 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
5094 	}
5095 
5096 	/* Remove station (AP mode) */
5097 	if (is_ap &&
5098 	    old_state == IEEE80211_STA_NONE &&
5099 	    new_state == IEEE80211_STA_NOTEXIST) {
5100 		/* must not fail */
5101 		wl12xx_sta_remove(wl, wlvif, sta);
5102 
5103 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5104 	}
5105 
5106 	/* Authorize station (AP mode) */
5107 	if (is_ap &&
5108 	    new_state == IEEE80211_STA_AUTHORIZED) {
5109 		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
5110 		if (ret < 0)
5111 			return ret;
5112 
5113 		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
5114 						     wl_sta->hlid);
5115 		if (ret)
5116 			return ret;
5117 
5118 		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
5119 	}
5120 
5121 	/* Authorize station */
5122 	if (is_sta &&
5123 	    new_state == IEEE80211_STA_AUTHORIZED) {
5124 		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5125 		ret = wl12xx_set_authorized(wl, wlvif);
5126 		if (ret)
5127 			return ret;
5128 	}
5129 
5130 	if (is_sta &&
5131 	    old_state == IEEE80211_STA_AUTHORIZED &&
5132 	    new_state == IEEE80211_STA_ASSOC) {
5133 		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
5134 		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
5135 	}
5136 
5137 	/* save seq number on disassoc (suspend) */
5138 	if (is_sta &&
5139 	    old_state == IEEE80211_STA_ASSOC &&
5140 	    new_state == IEEE80211_STA_AUTH) {
5141 		wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
5142 		wlvif->total_freed_pkts = 0;
5143 	}
5144 
5145 	/* restore seq number on assoc (resume) */
5146 	if (is_sta &&
5147 	    old_state == IEEE80211_STA_AUTH &&
5148 	    new_state == IEEE80211_STA_ASSOC) {
5149 		wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
5150 	}
5151 
5152 	/* clear ROCs on failure or authorization */
5153 	if (is_sta &&
5154 	    (new_state == IEEE80211_STA_AUTHORIZED ||
5155 	     new_state == IEEE80211_STA_NOTEXIST)) {
5156 		if (test_bit(wlvif->role_id, wl->roc_map))
5157 			wl12xx_croc(wl, wlvif->role_id);
5158 	}
5159 
5160 	if (is_sta &&
5161 	    old_state == IEEE80211_STA_NOTEXIST &&
5162 	    new_state == IEEE80211_STA_NONE) {
5163 		if (find_first_bit(wl->roc_map,
5164 				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
5165 			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
5166 			wl12xx_roc(wl, wlvif, wlvif->role_id,
5167 				   wlvif->band, wlvif->channel);
5168 		}
5169 	}
5170 	return 0;
5171 }
5172 
5173 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
5174 			       struct ieee80211_vif *vif,
5175 			       struct ieee80211_sta *sta,
5176 			       enum ieee80211_sta_state old_state,
5177 			       enum ieee80211_sta_state new_state)
5178 {
5179 	struct wl1271 *wl = hw->priv;
5180 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5181 	int ret;
5182 
5183 	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
5184 		     sta->aid, old_state, new_state);
5185 
5186 	mutex_lock(&wl->mutex);
5187 
5188 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5189 		ret = -EBUSY;
5190 		goto out;
5191 	}
5192 
5193 	ret = wl1271_ps_elp_wakeup(wl);
5194 	if (ret < 0)
5195 		goto out;
5196 
5197 	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
5198 
5199 	wl1271_ps_elp_sleep(wl);
5200 out:
5201 	mutex_unlock(&wl->mutex);
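	/* don't report failures for state downgrades (teardown paths) */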
5202 	if (new_state < old_state)
5203 		return 0;
5204 	return ret;
5205 }
5206 
5207 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
5208 				  struct ieee80211_vif *vif,
5209 				  enum ieee80211_ampdu_mlme_action action,
5210 				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
5211 				  u8 buf_size)
5212 {
5213 	struct wl1271 *wl = hw->priv;
5214 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5215 	int ret;
5216 	u8 hlid, *ba_bitmap;
5217 
5218 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
5219 		     tid);
5220 
5221 	/* sanity check - the fields in FW are only 8bits wide */
5222 	if (WARN_ON(tid > 0xFF))
5223 		return -ENOTSUPP;
5224 
5225 	mutex_lock(&wl->mutex);
5226 
5227 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5228 		ret = -EAGAIN;
5229 		goto out;
5230 	}
5231 
5232 	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
5233 		hlid = wlvif->sta.hlid;
5234 	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
5235 		struct wl1271_station *wl_sta;
5236 
5237 		wl_sta = (struct wl1271_station *)sta->drv_priv;
5238 		hlid = wl_sta->hlid;
5239 	} else {
5240 		ret = -EINVAL;
5241 		goto out;
5242 	}
5243 
5244 	ba_bitmap = &wl->links[hlid].ba_bitmap;
5245 
5246 	ret = wl1271_ps_elp_wakeup(wl);
5247 	if (ret < 0)
5248 		goto out;
5249 
5250 	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
5251 		     tid, action);
5252 
5253 	switch (action) {
5254 	case IEEE80211_AMPDU_RX_START:
5255 		if (!wlvif->ba_support || !wlvif->ba_allowed) {
5256 			ret = -ENOTSUPP;
5257 			break;
5258 		}
5259 
5260 		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
5261 			ret = -EBUSY;
5262 			wl1271_error("exceeded max RX BA sessions");
5263 			break;
5264 		}
5265 
5266 		if (*ba_bitmap & BIT(tid)) {
5267 			ret = -EINVAL;
5268 			wl1271_error("cannot enable RX BA session on active "
5269 				     "tid: %d", tid);
5270 			break;
5271 		}
5272 
5273 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
5274 							 hlid);
5275 		if (!ret) {
5276 			*ba_bitmap |= BIT(tid);
5277 			wl->ba_rx_session_count++;
5278 		}
5279 		break;
5280 
5281 	case IEEE80211_AMPDU_RX_STOP:
5282 		if (!(*ba_bitmap & BIT(tid))) {
5283 			/*
5284 			 * this happens on reconfig - so only output a debug
5285 			 * message for now, and don't fail the function.
5286 			 */
5287 			wl1271_debug(DEBUG_MAC80211,
5288 				     "no active RX BA session on tid: %d",
5289 				     tid);
5290 			ret = 0;
5291 			break;
5292 		}
5293 
5294 		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
5295 							 hlid);
5296 		if (!ret) {
5297 			*ba_bitmap &= ~BIT(tid);
5298 			wl->ba_rx_session_count--;
5299 		}
5300 		break;
5301 
5302 	/*
5303 	 * BA initiator (TX) sessions are managed by the FW independently,
5304 	 * so all TX AMPDU actions intentionally fall through to the same break.
5305 	 */
5306 	case IEEE80211_AMPDU_TX_START:
5307 	case IEEE80211_AMPDU_TX_STOP_CONT:
5308 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
5309 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
5310 	case IEEE80211_AMPDU_TX_OPERATIONAL:
5311 		ret = -EINVAL;
5312 		break;
5313 
5314 	default:
5315 		wl1271_error("Incorrect ampdu action id=%x\n", action);
5316 		ret = -EINVAL;
5317 	}
5318 
5319 	wl1271_ps_elp_sleep(wl);
5320 
5321 out:
5322 	mutex_unlock(&wl->mutex);
5323 
5324 	return ret;
5325 }
5326 
5327 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
5328 				   struct ieee80211_vif *vif,
5329 				   const struct cfg80211_bitrate_mask *mask)
5330 {
5331 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5332 	struct wl1271 *wl = hw->priv;
5333 	int i, ret = 0;
5334 
5335 	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
5336 		mask->control[NL80211_BAND_2GHZ].legacy,
5337 		mask->control[NL80211_BAND_5GHZ].legacy);
5338 
5339 	mutex_lock(&wl->mutex);
5340 
5341 	for (i = 0; i < WLCORE_NUM_BANDS; i++)
5342 		wlvif->bitrate_masks[i] =
5343 			wl1271_tx_enabled_rates_get(wl,
5344 						    mask->control[i].legacy,
5345 						    i);
5346 
5347 	if (unlikely(wl->state != WLCORE_STATE_ON))
5348 		goto out;
5349 
5350 	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
5351 	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
5352 
5353 		ret = wl1271_ps_elp_wakeup(wl);
5354 		if (ret < 0)
5355 			goto out;
5356 
5357 		wl1271_set_band_rate(wl, wlvif);
5358 		wlvif->basic_rate =
5359 			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
5360 		ret = wl1271_acx_sta_rate_policies(wl, wlvif);
5361 
5362 		wl1271_ps_elp_sleep(wl);
5363 	}
5364 out:
5365 	mutex_unlock(&wl->mutex);
5366 
5367 	return ret;
5368 }
5369 
5370 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
5371 				     struct ieee80211_channel_switch *ch_switch)
5372 {
5373 	struct wl1271 *wl = hw->priv;
5374 	struct wl12xx_vif *wlvif;
5375 	int ret;
5376 
5377 	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
5378 
5379 	wl1271_tx_flush(wl);
5380 
5381 	mutex_lock(&wl->mutex);
5382 
5383 	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
5384 		wl12xx_for_each_wlvif_sta(wl, wlvif) {
5385 			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
5386 
5387 			if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5388 				continue;
5389 
5390 			ieee80211_chswitch_done(vif, false);
5391 		}
5392 		goto out;
5393 	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
5394 		goto out;
5395 	}
5396 
5397 	ret = wl1271_ps_elp_wakeup(wl);
5398 	if (ret < 0)
5399 		goto out;
5400 
5401 	/* TODO: change mac80211 to pass vif as param */
5402 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
5403 		unsigned long delay_usec;
5404 
5405 		if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
5406 			continue;
5407 
5408 		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
5409 		if (ret)
5410 			goto out_sleep;
5411 
5412 		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
5413 
5414 		/* indicate failure 5 seconds after channel switch time */
5415 		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
5416 			     ch_switch->count;
5417 		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
5418 				usecs_to_jiffies(delay_usec) +
5419 				msecs_to_jiffies(5000));
5420 	}
5421 
5422 out_sleep:
5423 	wl1271_ps_elp_sleep(wl);
5424 
5425 out:
5426 	mutex_unlock(&wl->mutex);
5427 }
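
/*
 * Worked example for the timeout above (numbers are illustrative): with a
 * beacon interval of 100 TU (100 * 1024 us = 102400 us) and
 * ch_switch->count == 10, delay_usec comes to roughly 1.02 s, so
 * channel_switch_work is scheduled about 6 s from now -- the expected
 * switch time plus the fixed 5 s grace period.
 */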
5428 
5429 static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
5430 			    u32 queues, bool drop)
5431 {
5432 	struct wl1271 *wl = hw->priv;
5433 
5434 	wl1271_tx_flush(wl);
5435 }
5436 
5437 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
5438 				       struct ieee80211_vif *vif,
5439 				       struct ieee80211_channel *chan,
5440 				       int duration,
5441 				       enum ieee80211_roc_type type)
5442 {
5443 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5444 	struct wl1271 *wl = hw->priv;
5445 	int channel, ret = 0;
5446 
5447 	channel = ieee80211_frequency_to_channel(chan->center_freq);
5448 
5449 	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
5450 		     channel, wlvif->role_id);
5451 
5452 	mutex_lock(&wl->mutex);
5453 
5454 	if (unlikely(wl->state != WLCORE_STATE_ON))
5455 		goto out;
5456 
5457 	/* return EBUSY if we can't ROC right now */
5458 	if (WARN_ON(wl->roc_vif ||
5459 		    find_first_bit(wl->roc_map,
5460 				   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
5461 		ret = -EBUSY;
5462 		goto out;
5463 	}
5464 
5465 	ret = wl1271_ps_elp_wakeup(wl);
5466 	if (ret < 0)
5467 		goto out;
5468 
5469 	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
5470 	if (ret < 0)
5471 		goto out_sleep;
5472 
5473 	wl->roc_vif = vif;
5474 	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
5475 				     msecs_to_jiffies(duration));
5476 out_sleep:
5477 	wl1271_ps_elp_sleep(wl);
5478 out:
5479 	mutex_unlock(&wl->mutex);
5480 	return ret;
5481 }
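
/*
 * Only a single remain-on-channel request can be active at a time: the op
 * above returns -EBUSY if roc_vif is already set or any role in roc_map is
 * in use.  On success the device role is started on the requested channel
 * and roc_complete_work is scheduled to expire the ROC after 'duration' ms.
 */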
5482 
5483 static int __wlcore_roc_completed(struct wl1271 *wl)
5484 {
5485 	struct wl12xx_vif *wlvif;
5486 	int ret;
5487 
5488 	/* already completed */
5489 	if (unlikely(!wl->roc_vif))
5490 		return 0;
5491 
5492 	wlvif = wl12xx_vif_to_data(wl->roc_vif);
5493 
5494 	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5495 		return -EBUSY;
5496 
5497 	ret = wl12xx_stop_dev(wl, wlvif);
5498 	if (ret < 0)
5499 		return ret;
5500 
5501 	wl->roc_vif = NULL;
5502 
5503 	return 0;
5504 }
5505 
5506 static int wlcore_roc_completed(struct wl1271 *wl)
5507 {
5508 	int ret;
5509 
5510 	wl1271_debug(DEBUG_MAC80211, "roc complete");
5511 
5512 	mutex_lock(&wl->mutex);
5513 
5514 	if (unlikely(wl->state != WLCORE_STATE_ON)) {
5515 		ret = -EBUSY;
5516 		goto out;
5517 	}
5518 
5519 	ret = wl1271_ps_elp_wakeup(wl);
5520 	if (ret < 0)
5521 		goto out;
5522 
5523 	ret = __wlcore_roc_completed(wl);
5524 
5525 	wl1271_ps_elp_sleep(wl);
5526 out:
5527 	mutex_unlock(&wl->mutex);
5528 
5529 	return ret;
5530 }
5531 
5532 static void wlcore_roc_complete_work(struct work_struct *work)
5533 {
5534 	struct delayed_work *dwork;
5535 	struct wl1271 *wl;
5536 	int ret;
5537 
5538 	dwork = container_of(work, struct delayed_work, work);
5539 	wl = container_of(dwork, struct wl1271, roc_complete_work);
5540 
5541 	ret = wlcore_roc_completed(wl);
5542 	if (!ret)
5543 		ieee80211_remain_on_channel_expired(wl->hw);
5544 }
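
/*
 * The delayed work above is the normal ROC expiry path: only when
 * __wlcore_roc_completed() actually tore the role down is
 * ieee80211_remain_on_channel_expired() reported to mac80211.  The cancel
 * op below instead completes the ROC synchronously and skips that
 * notification, since mac80211 requested the cancellation itself.
 */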
5545 
5546 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5547 {
5548 	struct wl1271 *wl = hw->priv;
5549 
5550 	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5551 
5552 	/* TODO: per-vif */
5553 	wl1271_tx_flush(wl);
5554 
5555 	/*
5556 	 * we can't just flush_work here, because it might deadlock
5557 	 * (as we might get called from the same workqueue)
5558 	 */
5559 	cancel_delayed_work_sync(&wl->roc_complete_work);
5560 	wlcore_roc_completed(wl);
5561 
5562 	return 0;
5563 }
5564 
5565 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5566 				    struct ieee80211_vif *vif,
5567 				    struct ieee80211_sta *sta,
5568 				    u32 changed)
5569 {
5570 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5571 	struct wl1271 *wl = hw->priv;
5572 
5573 	wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
5574 }
5575 
5576 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5577 			       struct ieee80211_vif *vif,
5578 			       struct ieee80211_sta *sta,
5579 			       s8 *rssi_dbm)
5580 {
5581 	struct wl1271 *wl = hw->priv;
5582 	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5583 	int ret = 0;
5584 
5585 	wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5586 
5587 	mutex_lock(&wl->mutex);
5588 
5589 	if (unlikely(wl->state != WLCORE_STATE_ON))
5590 		goto out;
5591 
5592 	ret = wl1271_ps_elp_wakeup(wl);
5593 	if (ret < 0)
5594 		goto out_sleep;
5595 
5596 	ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5597 	if (ret < 0)
5598 		goto out_sleep;
5599 
5600 out_sleep:
5601 	wl1271_ps_elp_sleep(wl);
5602 
5603 out:
5604 	mutex_unlock(&wl->mutex);
5605 
5606 	return ret;
5607 }
5608 
5609 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5610 {
5611 	struct wl1271 *wl = hw->priv;
5612 	bool ret = false;
5613 
5614 	mutex_lock(&wl->mutex);
5615 
5616 	if (unlikely(wl->state != WLCORE_STATE_ON))
5617 		goto out;
5618 
5619 	/* packets are considered pending if in the TX queue or the FW */
5620 	ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5621 out:
5622 	mutex_unlock(&wl->mutex);
5623 
5624 	return ret;
5625 }
5626 
5627 /* can't be const, mac80211 writes to this */
5628 static struct ieee80211_rate wl1271_rates[] = {
5629 	{ .bitrate = 10,
5630 	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
5631 	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5632 	{ .bitrate = 20,
5633 	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
5634 	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5635 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5636 	{ .bitrate = 55,
5637 	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5638 	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5639 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5640 	{ .bitrate = 110,
5641 	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
5642 	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5643 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5644 	{ .bitrate = 60,
5645 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5646 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5647 	{ .bitrate = 90,
5648 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5649 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5650 	{ .bitrate = 120,
5651 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5652 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5653 	{ .bitrate = 180,
5654 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5655 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5656 	{ .bitrate = 240,
5657 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5658 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5659 	{ .bitrate = 360,
5660 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5661 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5662 	{ .bitrate = 480,
5663 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5664 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5665 	{ .bitrate = 540,
5666 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5667 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5668 };
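
/*
 * Per mac80211 convention, ieee80211_rate.bitrate is given in units of
 * 100 kbps, so e.g. { .bitrate = 55 } above is the 5.5 Mbps CCK rate and
 * { .bitrate = 540 } is 54 Mbps OFDM.  The hw_value fields map each entry
 * to the matching CONF_HW_BIT_RATE_* firmware rate bit.
 */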
5669 
5670 /* can't be const, mac80211 writes to this */
5671 static struct ieee80211_channel wl1271_channels[] = {
5672 	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5673 	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5674 	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5675 	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5676 	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5677 	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5678 	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5679 	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5680 	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5681 	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5682 	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5683 	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5684 	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5685 	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5686 };
5687 
5688 /* can't be const, mac80211 writes to this */
5689 static struct ieee80211_supported_band wl1271_band_2ghz = {
5690 	.channels = wl1271_channels,
5691 	.n_channels = ARRAY_SIZE(wl1271_channels),
5692 	.bitrates = wl1271_rates,
5693 	.n_bitrates = ARRAY_SIZE(wl1271_rates),
5694 };
5695 
5696 /* 5 GHz data rates for WL1273 */
5697 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5698 	{ .bitrate = 60,
5699 	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
5700 	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5701 	{ .bitrate = 90,
5702 	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
5703 	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5704 	{ .bitrate = 120,
5705 	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
5706 	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5707 	{ .bitrate = 180,
5708 	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
5709 	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5710 	{ .bitrate = 240,
5711 	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
5712 	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5713 	{ .bitrate = 360,
5714 	  .hw_value = CONF_HW_BIT_RATE_36MBPS,
5715 	  .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5716 	{ .bitrate = 480,
5717 	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
5718 	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5719 	{ .bitrate = 540,
5720 	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
5721 	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5722 };
5723 
5724 /* 5 GHz band channels for WL1273 */
5725 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5726 	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5727 	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5728 	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5729 	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5730 	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5731 	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5732 	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5733 	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5734 	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5735 	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5736 	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5737 	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5738 	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5739 	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5740 	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5741 	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5742 	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5743 	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5744 	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5745 	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5746 	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5747 	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5748 	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5749 	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5750 	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5751 	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5752 	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5753 	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5754 	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5755 	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5756 	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5757 };
5758 
5759 static struct ieee80211_supported_band wl1271_band_5ghz = {
5760 	.channels = wl1271_channels_5ghz,
5761 	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5762 	.bitrates = wl1271_rates_5ghz,
5763 	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5764 };
5765 
5766 static const struct ieee80211_ops wl1271_ops = {
5767 	.start = wl1271_op_start,
5768 	.stop = wlcore_op_stop,
5769 	.add_interface = wl1271_op_add_interface,
5770 	.remove_interface = wl1271_op_remove_interface,
5771 	.change_interface = wl12xx_op_change_interface,
5772 #ifdef CONFIG_PM
5773 	.suspend = wl1271_op_suspend,
5774 	.resume = wl1271_op_resume,
5775 #endif
5776 	.config = wl1271_op_config,
5777 	.prepare_multicast = wl1271_op_prepare_multicast,
5778 	.configure_filter = wl1271_op_configure_filter,
5779 	.tx = wl1271_op_tx,
5780 	.set_key = wlcore_op_set_key,
5781 	.hw_scan = wl1271_op_hw_scan,
5782 	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
5783 	.sched_scan_start = wl1271_op_sched_scan_start,
5784 	.sched_scan_stop = wl1271_op_sched_scan_stop,
5785 	.bss_info_changed = wl1271_op_bss_info_changed,
5786 	.set_frag_threshold = wl1271_op_set_frag_threshold,
5787 	.set_rts_threshold = wl1271_op_set_rts_threshold,
5788 	.conf_tx = wl1271_op_conf_tx,
5789 	.get_tsf = wl1271_op_get_tsf,
5790 	.get_survey = wl1271_op_get_survey,
5791 	.sta_state = wl12xx_op_sta_state,
5792 	.ampdu_action = wl1271_op_ampdu_action,
5793 	.tx_frames_pending = wl1271_tx_frames_pending,
5794 	.set_bitrate_mask = wl12xx_set_bitrate_mask,
5795 	.set_default_unicast_key = wl1271_op_set_default_key_idx,
5796 	.channel_switch = wl12xx_op_channel_switch,
5797 	.flush = wlcore_op_flush,
5798 	.remain_on_channel = wlcore_op_remain_on_channel,
5799 	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5800 	.add_chanctx = wlcore_op_add_chanctx,
5801 	.remove_chanctx = wlcore_op_remove_chanctx,
5802 	.change_chanctx = wlcore_op_change_chanctx,
5803 	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5804 	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5805 	.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
5806 	.sta_rc_update = wlcore_op_sta_rc_update,
5807 	.get_rssi = wlcore_op_get_rssi,
5808 	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5809 };
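
/*
 * This single ops table serves the whole wlcore family: chip-specific
 * (wl12xx/wl18xx) behaviour is reached from within these callbacks through
 * the wl->ops and wlcore_hw_*() indirections rather than through separate
 * mac80211 ops tables.  The table is passed to ieee80211_alloc_hw() in
 * wlcore_alloc_hw() below.
 */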
5810 
5811 
5812 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5813 {
5814 	u8 idx;
5815 
5816 	BUG_ON(band >= 2);
5817 
5818 	if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5819 		wl1271_error("Illegal RX rate from HW: %d", rate);
5820 		return 0;
5821 	}
5822 
5823 	idx = wl->band_rate_to_idx[band][rate];
5824 	if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5825 		wl1271_error("Unsupported RX rate from HW: %d", rate);
5826 		return 0;
5827 	}
5828 
5829 	return idx;
5830 }
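
/*
 * band_rate_to_idx[] is a lookup table filled in by the chip driver that
 * translates firmware RX rate codes into indices into the band's bitrate
 * array above; out-of-range or unsupported codes are logged and fall back
 * to index 0, i.e. the lowest rate of the band.
 */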
5831 
5832 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5833 {
5834 	int i;
5835 
5836 	wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5837 		     oui, nic);
5838 
5839 	if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5840 		wl1271_warning("NIC part of the MAC address wraps around!");
5841 
5842 	for (i = 0; i < wl->num_mac_addr; i++) {
5843 		wl->addresses[i].addr[0] = (u8)(oui >> 16);
5844 		wl->addresses[i].addr[1] = (u8)(oui >> 8);
5845 		wl->addresses[i].addr[2] = (u8) oui;
5846 		wl->addresses[i].addr[3] = (u8)(nic >> 16);
5847 		wl->addresses[i].addr[4] = (u8)(nic >> 8);
5848 		wl->addresses[i].addr[5] = (u8) nic;
5849 		nic++;
5850 	}
5851 
5852 	/* we may be short by at most one address */
5853 	WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5854 
5855 	/*
5856 	 * turn on the LAA bit in the first address and use it as
5857 	 * the last address.
5858 	 */
5859 	if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5860 		int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5861 		memcpy(&wl->addresses[idx], &wl->addresses[0],
5862 		       sizeof(wl->addresses[0]));
5863 		/* LAA bit */
5864 		wl->addresses[idx].addr[0] |= BIT(1);
5865 	}
5866 
5867 	wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5868 	wl->hw->wiphy->addresses = wl->addresses;
5869 }
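
/*
 * Worked example (values are illustrative, assuming WLCORE_NUM_MAC_ADDRESSES
 * is 3): with oui == 0x080028, nic == 0x000001 and num_mac_addr == 2, the
 * derived addresses are 08:00:28:00:00:01 and 08:00:28:00:00:02, and the
 * third wiphy address becomes a copy of the first with the locally
 * administered bit (bit 1 of the first octet) set, i.e. 0a:00:28:00:00:01.
 */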
5870 
5871 static int wl12xx_get_hw_info(struct wl1271 *wl)
5872 {
5873 	int ret;
5874 
5875 	ret = wl12xx_set_power_on(wl);
5876 	if (ret < 0)
5877 		return ret;
5878 
5879 	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5880 	if (ret < 0)
5881 		goto out;
5882 
5883 	wl->fuse_oui_addr = 0;
5884 	wl->fuse_nic_addr = 0;
5885 
5886 	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5887 	if (ret < 0)
5888 		goto out;
5889 
5890 	if (wl->ops->get_mac)
5891 		ret = wl->ops->get_mac(wl);
5892 
5893 out:
5894 	wl1271_power_off(wl);
5895 	return ret;
5896 }
5897 
5898 static int wl1271_register_hw(struct wl1271 *wl)
5899 {
5900 	int ret;
5901 	u32 oui_addr = 0, nic_addr = 0;
5902 
5903 	if (wl->mac80211_registered)
5904 		return 0;
5905 
5906 	if (wl->nvs_len >= 12) {
5907 		/* NOTE: to simplify the casting we assume that the
5908 		 * wl->nvs->nvs element comes first, i.e. lies at the
5909 		 * very beginning of the wl->nvs structure.
5910 		 */
5911 		u8 *nvs_ptr = (u8 *)wl->nvs;
5912 
5913 		oui_addr =
5914 			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5915 		nic_addr =
5916 			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5917 	}
5918 
5919 	/* if the MAC address is zeroed in the NVS, derive it from the fuse */
5920 	if (oui_addr == 0 && nic_addr == 0) {
5921 		oui_addr = wl->fuse_oui_addr;
5922 		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
5923 		nic_addr = wl->fuse_nic_addr + 1;
5924 	}
5925 
5926 	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5927 
5928 	ret = ieee80211_register_hw(wl->hw);
5929 	if (ret < 0) {
5930 		wl1271_error("unable to register mac80211 hw: %d", ret);
5931 		goto out;
5932 	}
5933 
5934 	wl->mac80211_registered = true;
5935 
5936 	wl1271_debugfs_init(wl);
5937 
5938 	wl1271_notice("loaded");
5939 
5940 out:
5941 	return ret;
5942 }
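
/*
 * The MAC address source is decided above: the bytes are picked out of the
 * raw NVS image when it carries a non-zero address, otherwise the address
 * is derived from the values read from the fuse registers in
 * wl12xx_get_hw_info(), with the NIC part incremented by one because the
 * fuse holds the Bluetooth BD_ADDR.
 */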
5943 
5944 static void wl1271_unregister_hw(struct wl1271 *wl)
5945 {
5946 	if (wl->plt)
5947 		wl1271_plt_stop(wl);
5948 
5949 	ieee80211_unregister_hw(wl->hw);
5950 	wl->mac80211_registered = false;
5951 
5952 }
5953 
5954 static int wl1271_init_ieee80211(struct wl1271 *wl)
5955 {
5956 	int i;
5957 	static const u32 cipher_suites[] = {
5958 		WLAN_CIPHER_SUITE_WEP40,
5959 		WLAN_CIPHER_SUITE_WEP104,
5960 		WLAN_CIPHER_SUITE_TKIP,
5961 		WLAN_CIPHER_SUITE_CCMP,
5962 		WL1271_CIPHER_SUITE_GEM,
5963 	};
5964 
5965 	/* The tx descriptor buffer */
5966 	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
5967 
5968 	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5969 		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5970 
5971 	/* unit us */
5972 	/* FIXME: find a proper value */
5973 	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5974 
5975 	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5976 		IEEE80211_HW_SUPPORTS_PS |
5977 		IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5978 		IEEE80211_HW_SUPPORTS_UAPSD |
5979 		IEEE80211_HW_HAS_RATE_CONTROL |
5980 		IEEE80211_HW_CONNECTION_MONITOR |
5981 		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5982 		IEEE80211_HW_SPECTRUM_MGMT |
5983 		IEEE80211_HW_AP_LINK_PS |
5984 		IEEE80211_HW_AMPDU_AGGREGATION |
5985 		IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5986 		IEEE80211_HW_QUEUE_CONTROL |
5987 		IEEE80211_HW_CHANCTX_STA_CSA;
5988 
5989 	wl->hw->wiphy->cipher_suites = cipher_suites;
5990 	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5991 
5992 	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5993 					 BIT(NL80211_IFTYPE_AP) |
5994 					 BIT(NL80211_IFTYPE_P2P_DEVICE) |
5995 					 BIT(NL80211_IFTYPE_P2P_CLIENT) |
5996 					 BIT(NL80211_IFTYPE_P2P_GO);
5997 	wl->hw->wiphy->max_scan_ssids = 1;
5998 	wl->hw->wiphy->max_sched_scan_ssids = 16;
5999 	wl->hw->wiphy->max_match_sets = 16;
6000 	/*
6001 	 * The maximum length of the elements in a scanning probe request
6002 	 * template is the maximum possible template length, minus the
6003 	 * IEEE 802.11 header of the template.
6004 	 */
6005 	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6006 			sizeof(struct ieee80211_header);
6007 
6008 	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
6009 		sizeof(struct ieee80211_header);
6010 
6011 	wl->hw->wiphy->max_remain_on_channel_duration = 30000;
6012 
6013 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
6014 				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
6015 				WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
6016 
6017 	/* make sure all our channels fit in the scanned_ch bitmask */
6018 	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
6019 		     ARRAY_SIZE(wl1271_channels_5ghz) >
6020 		     WL1271_MAX_CHANNELS);
6021 	/*
6022 	 * clear channel flags from the previous usage
6023 	 * and restore max_power & max_antenna_gain values.
6024 	 */
6025 	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
6026 		wl1271_band_2ghz.channels[i].flags = 0;
6027 		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6028 		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
6029 	}
6030 
6031 	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
6032 		wl1271_band_5ghz.channels[i].flags = 0;
6033 		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
6034 		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
6035 	}
6036 
6037 	/*
6038 	 * We keep local copies of the band structs because we need to
6039 	 * modify them on a per-device basis.
6040 	 */
6041 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
6042 	       sizeof(wl1271_band_2ghz));
6043 	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
6044 	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
6045 	       sizeof(*wl->ht_cap));
6046 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
6047 	       sizeof(wl1271_band_5ghz));
6048 	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
6049 	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
6050 	       sizeof(*wl->ht_cap));
6051 
6052 	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
6053 		&wl->bands[IEEE80211_BAND_2GHZ];
6054 	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
6055 		&wl->bands[IEEE80211_BAND_5GHZ];
6056 
6057 	/*
6058 	 * allow 4 queues per mac address we support +
6059 	 * 1 cab queue per mac + one global offchannel Tx queue
6060 	 */
6061 	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
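	/*
	 * Worked example (illustrative): with NUM_TX_QUEUES == 4 and, say,
	 * WLCORE_NUM_MAC_ADDRESSES == 3, this yields (4 + 1) * 3 + 1 == 16
	 * mac80211 hw queues.
	 */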
6062 
6063 	/* the last queue is the offchannel queue */
6064 	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
6065 	wl->hw->max_rates = 1;
6066 
6067 	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
6068 
6069 	/* the FW answers probe-requests in AP-mode */
6070 	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
6071 	wl->hw->wiphy->probe_resp_offload =
6072 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
6073 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
6074 		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
6075 
6076 	/* allowed interface combinations */
6077 	wl->hw->wiphy->iface_combinations = wl->iface_combinations;
6078 	wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
6079 
6080 	/* register vendor commands */
6081 	wlcore_set_vendor_commands(wl->hw->wiphy);
6082 
6083 	SET_IEEE80211_DEV(wl->hw, wl->dev);
6084 
6085 	wl->hw->sta_data_size = sizeof(struct wl1271_station);
6086 	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
6087 
6088 	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
6089 
6090 	return 0;
6091 }
6092 
6093 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
6094 				     u32 mbox_size)
6095 {
6096 	struct ieee80211_hw *hw;
6097 	struct wl1271 *wl;
6098 	int i, j, ret;
6099 	unsigned int order;
6100 
6101 	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
6102 	if (!hw) {
6103 		wl1271_error("could not alloc ieee80211_hw");
6104 		ret = -ENOMEM;
6105 		goto err_hw_alloc;
6106 	}
6107 
6108 	wl = hw->priv;
6109 	memset(wl, 0, sizeof(*wl));
6110 
6111 	wl->priv = kzalloc(priv_size, GFP_KERNEL);
6112 	if (!wl->priv) {
6113 		wl1271_error("could not alloc wl priv");
6114 		ret = -ENOMEM;
6115 		goto err_priv_alloc;
6116 	}
6117 
6118 	INIT_LIST_HEAD(&wl->wlvif_list);
6119 
6120 	wl->hw = hw;
6121 
6122 	/*
6123 	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
6124 	 * we don't allocate any additional resource here, so that's fine.
6125 	 */
6126 	for (i = 0; i < NUM_TX_QUEUES; i++)
6127 		for (j = 0; j < WLCORE_MAX_LINKS; j++)
6128 			skb_queue_head_init(&wl->links[j].tx_queue[i]);
6129 
6130 	skb_queue_head_init(&wl->deferred_rx_queue);
6131 	skb_queue_head_init(&wl->deferred_tx_queue);
6132 
6133 	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
6134 	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
6135 	INIT_WORK(&wl->tx_work, wl1271_tx_work);
6136 	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
6137 	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
6138 	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
6139 	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
6140 
6141 	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
6142 	if (!wl->freezable_wq) {
6143 		ret = -ENOMEM;
6144 		goto err_hw;
6145 	}
6146 
6147 	wl->channel = 0;
6148 	wl->rx_counter = 0;
6149 	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
6150 	wl->band = IEEE80211_BAND_2GHZ;
6151 	wl->channel_type = NL80211_CHAN_NO_HT;
6152 	wl->flags = 0;
6153 	wl->sg_enabled = true;
6154 	wl->sleep_auth = WL1271_PSM_ILLEGAL;
6155 	wl->recovery_count = 0;
6156 	wl->hw_pg_ver = -1;
6157 	wl->ap_ps_map = 0;
6158 	wl->ap_fw_ps_map = 0;
6159 	wl->quirks = 0;
6160 	wl->system_hlid = WL12XX_SYSTEM_HLID;
6161 	wl->active_sta_count = 0;
6162 	wl->active_link_count = 0;
6163 	wl->fwlog_size = 0;
6164 	init_waitqueue_head(&wl->fwlog_waitq);
6165 
6166 	/* The system link is always allocated */
6167 	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
6168 
6169 	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
6170 	for (i = 0; i < wl->num_tx_desc; i++)
6171 		wl->tx_frames[i] = NULL;
6172 
6173 	spin_lock_init(&wl->wl_lock);
6174 
6175 	wl->state = WLCORE_STATE_OFF;
6176 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6177 	mutex_init(&wl->mutex);
6178 	mutex_init(&wl->flush_mutex);
6179 	init_completion(&wl->nvs_loading_complete);
6180 
6181 	order = get_order(aggr_buf_size);
6182 	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
6183 	if (!wl->aggr_buf) {
6184 		ret = -ENOMEM;
6185 		goto err_wq;
6186 	}
6187 	wl->aggr_buf_size = aggr_buf_size;
6188 
6189 	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
6190 	if (!wl->dummy_packet) {
6191 		ret = -ENOMEM;
6192 		goto err_aggr;
6193 	}
6194 
6195 	/* Allocate one page for the FW log */
6196 	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
6197 	if (!wl->fwlog) {
6198 		ret = -ENOMEM;
6199 		goto err_dummy_packet;
6200 	}
6201 
6202 	wl->mbox_size = mbox_size;
6203 	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
6204 	if (!wl->mbox) {
6205 		ret = -ENOMEM;
6206 		goto err_fwlog;
6207 	}
6208 
6209 	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
6210 	if (!wl->buffer_32) {
6211 		ret = -ENOMEM;
6212 		goto err_mbox;
6213 	}
6214 
6215 	return hw;
6216 
6217 err_mbox:
6218 	kfree(wl->mbox);
6219 
6220 err_fwlog:
6221 	free_page((unsigned long)wl->fwlog);
6222 
6223 err_dummy_packet:
6224 	dev_kfree_skb(wl->dummy_packet);
6225 
6226 err_aggr:
6227 	free_pages((unsigned long)wl->aggr_buf, order);
6228 
6229 err_wq:
6230 	destroy_workqueue(wl->freezable_wq);
6231 
6232 err_hw:
6233 	wl1271_debugfs_exit(wl);
6234 	kfree(wl->priv);
6235 
6236 err_priv_alloc:
6237 	ieee80211_free_hw(hw);
6238 
6239 err_hw_alloc:
6240 
6241 	return ERR_PTR(ret);
6242 }
6243 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
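
/*
 * wlcore_alloc_hw() unwinds on failure through the err_* labels above,
 * releasing in reverse order what was set up before the failing step, so an
 * error at any point leaves nothing behind.  The chip drivers allocate the
 * hw with this helper and later hand the wl1271 to wlcore_probe() below.
 */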
6244 
6245 int wlcore_free_hw(struct wl1271 *wl)
6246 {
6247 	/* Unblock any fwlog readers */
6248 	mutex_lock(&wl->mutex);
6249 	wl->fwlog_size = -1;
6250 	wake_up_interruptible_all(&wl->fwlog_waitq);
6251 	mutex_unlock(&wl->mutex);
6252 
6253 	wlcore_sysfs_free(wl);
6254 
6255 	kfree(wl->buffer_32);
6256 	kfree(wl->mbox);
6257 	free_page((unsigned long)wl->fwlog);
6258 	dev_kfree_skb(wl->dummy_packet);
6259 	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
6260 
6261 	wl1271_debugfs_exit(wl);
6262 
6263 	vfree(wl->fw);
6264 	wl->fw = NULL;
6265 	wl->fw_type = WL12XX_FW_TYPE_NONE;
6266 	kfree(wl->nvs);
6267 	wl->nvs = NULL;
6268 
6269 	kfree(wl->raw_fw_status);
6270 	kfree(wl->fw_status);
6271 	kfree(wl->tx_res_if);
6272 	destroy_workqueue(wl->freezable_wq);
6273 
6274 	kfree(wl->priv);
6275 	ieee80211_free_hw(wl->hw);
6276 
6277 	return 0;
6278 }
6279 EXPORT_SYMBOL_GPL(wlcore_free_hw);
6280 
6281 #ifdef CONFIG_PM
6282 static const struct wiphy_wowlan_support wlcore_wowlan_support = {
6283 	.flags = WIPHY_WOWLAN_ANY,
6284 	.n_patterns = WL1271_MAX_RX_FILTERS,
6285 	.pattern_min_len = 1,
6286 	.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
6287 };
6288 #endif
6289 
6290 static irqreturn_t wlcore_hardirq(int irq, void *cookie)
6291 {
6292 	return IRQ_WAKE_THREAD;
6293 }
6294 
6295 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6296 {
6297 	struct wl1271 *wl = context;
6298 	struct platform_device *pdev = wl->pdev;
6299 	struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
6300 	struct resource *res;
6301 
6302 	int ret;
6303 	irq_handler_t hardirq_fn = NULL;
6304 
6305 	if (fw) {
6306 		wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6307 		if (!wl->nvs) {
6308 			wl1271_error("Could not allocate nvs data");
6309 			goto out;
6310 		}
6311 		wl->nvs_len = fw->size;
6312 	} else {
6313 		wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6314 			     WL12XX_NVS_NAME);
6315 		wl->nvs = NULL;
6316 		wl->nvs_len = 0;
6317 	}
6318 
6319 	ret = wl->ops->setup(wl);
6320 	if (ret < 0)
6321 		goto out_free_nvs;
6322 
6323 	BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6324 
6325 	/* adjust some runtime configuration parameters */
6326 	wlcore_adjust_conf(wl);
6327 
6328 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
6329 	if (!res) {
6330 		wl1271_error("Could not get IRQ resource");
6331 		goto out_free_nvs;
6332 	}
6333 
6334 	wl->irq = res->start;
6335 	wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
6336 	wl->if_ops = pdev_data->if_ops;
6337 
6338 	if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
6339 		hardirq_fn = wlcore_hardirq;
6340 	else
6341 		wl->irq_flags |= IRQF_ONESHOT;
6342 
6343 	ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
6344 				   wl->irq_flags, pdev->name, wl);
6345 	if (ret < 0) {
6346 		wl1271_error("request_irq() failed: %d", ret);
6347 		goto out_free_nvs;
6348 	}
6349 
6350 #ifdef CONFIG_PM
6351 	ret = enable_irq_wake(wl->irq);
6352 	if (!ret) {
6353 		wl->irq_wake_enabled = true;
6354 		device_init_wakeup(wl->dev, 1);
6355 		if (pdev_data->pwr_in_suspend)
6356 			wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
6357 	}
6358 #endif
6359 	disable_irq(wl->irq);
6360 
6361 	ret = wl12xx_get_hw_info(wl);
6362 	if (ret < 0) {
6363 		wl1271_error("couldn't get hw info");
6364 		goto out_irq;
6365 	}
6366 
6367 	ret = wl->ops->identify_chip(wl);
6368 	if (ret < 0)
6369 		goto out_irq;
6370 
6371 	ret = wl1271_init_ieee80211(wl);
6372 	if (ret)
6373 		goto out_irq;
6374 
6375 	ret = wl1271_register_hw(wl);
6376 	if (ret)
6377 		goto out_irq;
6378 
6379 	ret = wlcore_sysfs_init(wl);
6380 	if (ret)
6381 		goto out_unreg;
6382 
6383 	wl->initialized = true;
6384 	goto out;
6385 
6386 out_unreg:
6387 	wl1271_unregister_hw(wl);
6388 
6389 out_irq:
6390 	free_irq(wl->irq, wl);
6391 
6392 out_free_nvs:
6393 	kfree(wl->nvs);
6394 
6395 out:
6396 	release_firmware(fw);
6397 	complete_all(&wl->nvs_loading_complete);
6398 }
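
/*
 * wlcore_nvs_cb() is the asynchronous half of the probe: once the
 * (optional) NVS file has been fetched it runs the chip setup, claims the
 * platform IRQ, briefly powers the chip to read the chip id, PG version and
 * fused MAC address, registers with mac80211 and sysfs, and finally signals
 * nvs_loading_complete so that wlcore_remove() can synchronize against it.
 */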
6399 
6400 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6401 {
6402 	int ret;
6403 
6404 	if (!wl->ops || !wl->ptable)
6405 		return -EINVAL;
6406 
6407 	wl->dev = &pdev->dev;
6408 	wl->pdev = pdev;
6409 	platform_set_drvdata(pdev, wl);
6410 
6411 	ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6412 				      WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6413 				      wl, wlcore_nvs_cb);
6414 	if (ret < 0) {
6415 		wl1271_error("request_firmware_nowait failed: %d", ret);
6416 		complete_all(&wl->nvs_loading_complete);
6417 	}
6418 
6419 	return ret;
6420 }
6421 EXPORT_SYMBOL_GPL(wlcore_probe);
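
/*
 * wlcore_probe() itself only kicks off the NVS request and returns; if
 * request_firmware_nowait() fails it still completes nvs_loading_complete
 * so that a later wlcore_remove() does not block forever waiting for a
 * callback that will never run.
 */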
6422 
6423 int wlcore_remove(struct platform_device *pdev)
6424 {
6425 	struct wl1271 *wl = platform_get_drvdata(pdev);
6426 
6427 	wait_for_completion(&wl->nvs_loading_complete);
6428 	if (!wl->initialized)
6429 		return 0;
6430 
6431 	if (wl->irq_wake_enabled) {
6432 		device_init_wakeup(wl->dev, 0);
6433 		disable_irq_wake(wl->irq);
6434 	}
6435 	wl1271_unregister_hw(wl);
6436 	free_irq(wl->irq, wl);
6437 	wlcore_free_hw(wl);
6438 
6439 	return 0;
6440 }
6441 EXPORT_SYMBOL_GPL(wlcore_remove);
6442 
6443 u32 wl12xx_debug_level = DEBUG_NONE;
6444 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6445 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6446 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6447 
6448 module_param_named(fwlog, fwlog_param, charp, 0);
6449 MODULE_PARM_DESC(fwlog,
6450 		 "FW logger options: continuous, ondemand, dbgpins or disable");
6451 
6452 module_param(fwlog_mem_blocks, int, S_IRUSR | S_IWUSR);
6453 MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
6454 
6455 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6456 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6457 
6458 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6459 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
6460 
6461 MODULE_LICENSE("GPL");
6462 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6463 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6464 MODULE_FIRMWARE(WL12XX_NVS_NAME);
6465