/*
 * This file is part of wl1271
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include "ps.h"
#include "io.h"
#include "tx.h"
#include "debug.h"

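/*
 * The timeout and delay values below are in milliseconds; they are
 * converted with msecs_to_jiffies() at the point of use.
 */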
#define WL1271_WAKEUP_TIMEOUT 500

#define ELP_ENTRY_DELAY  30
#define ELP_ENTRY_DELAY_FORCE_PS  5

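/*
 * Delayed work that performs the actual ELP (low-power sleep) entry.
 * The transition is skipped if the driver is no longer operational,
 * if the sleep request was cancelled in the meantime, if the chip is
 * already in ELP, or if any interface in use has not yet entered
 * power-save mode.
 */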
void wl1271_elp_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct wl12xx_vif *wlvif;
	int ret;

	dwork = to_delayed_work(work);
	wl = container_of(dwork, struct wl1271, elp_work);

	wl1271_debug(DEBUG_PSM, "elp work");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* our work might have been already cancelled */
	if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
		goto out;

	if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
			goto out;
	}

	wl1271_debug(DEBUG_PSM, "chip to elp");
	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
	if (ret < 0) {
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	set_bit(WL1271_FLAG_IN_ELP, &wl->flags);

out:
	mutex_unlock(&wl->mutex);
}

/* Routines to toggle sleep mode while in ELP */
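/*
 * wl1271_ps_elp_sleep - request ELP entry once all interfaces are in PS.
 *
 * Schedules the delayed elp_work rather than sleeping immediately; the
 * delay is shorter when forced power-save is configured.  Sleep is not
 * requested in PLT mode, when ELP is not the configured sleep_auth
 * level, or while an interface in use is still out of power-save.
 */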
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	u32 timeout;

	/* We do not enter elp sleep in PLT mode */
	if (wl->plt)
		return;

	if (wl->sleep_auth != WL1271_PSM_ELP)
		return;

	/* we shouldn't get consecutive sleep requests */
	if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
		return;

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
		    test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
			return;
	}

	timeout = wl->conf.conn.forced_ps ?
			ELP_ENTRY_DELAY_FORCE_PS : ELP_ENTRY_DELAY;
	ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
				     msecs_to_jiffies(timeout));
}
EXPORT_SYMBOL_GPL(wl1271_ps_elp_sleep);

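/*
 * wl1271_ps_elp_wakeup - wake the chip from ELP before register access.
 *
 * Clears any pending sleep request and, if the chip is actually in ELP,
 * writes ELPCTRL_WAKE_UP and waits up to WL1271_WAKEUP_TIMEOUT ms for
 * the wakeup completion (the wait is skipped when the IRQ handler is
 * already running).  Returns 0 on success or a negative error, queueing
 * recovery work on failure.
 *
 * Illustrative caller pattern (callers live elsewhere in wlcore; this
 * is only a sketch of the expected bracketing around chip access):
 *
 *	ret = wl1271_ps_elp_wakeup(wl);
 *	if (ret < 0)
 *		goto out;
 *	...access chip registers / send commands...
 *	wl1271_ps_elp_sleep(wl);
 */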
int wl1271_ps_elp_wakeup(struct wl1271 *wl)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	unsigned long flags;
	int ret;
	unsigned long start_time = jiffies;
	bool pending = false;

	/*
	 * we might try to wake up even if we didn't go to sleep
	 * before (e.g. on boot)
	 */
	if (!test_and_clear_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags))
		return 0;

	/* don't cancel_sync as it might contend for a mutex and deadlock */
	cancel_delayed_work(&wl->elp_work);

	if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
		return 0;

	wl1271_debug(DEBUG_PSM, "waking up chip from elp");

	/*
	 * The spinlock is required here to synchronize both the work and
	 * the completion variable in one entity.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	if (test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
		pending = true;
	else
		wl->elp_compl = &compl;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
	if (ret < 0) {
		wl12xx_queue_recovery_work(wl);
		goto err;
	}

	if (!pending) {
		ret = wait_for_completion_timeout(
			&compl, msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
		if (ret == 0) {
			wl1271_error("ELP wakeup timeout!");
			wl12xx_queue_recovery_work(wl);
			ret = -ETIMEDOUT;
			goto err;
		}
	}

	clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);

	wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
		     jiffies_to_msecs(jiffies - start_time));
	goto out;

err:
	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->elp_compl = NULL;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
	return ret;

out:
	return 0;
}
EXPORT_SYMBOL_GPL(wl1271_ps_elp_wakeup);

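/*
 * wl1271_ps_set_mode - switch a vif between station power-save and
 * active mode.  Entering power-save programs the wake-up conditions
 * first and enables beacon early termination afterwards (2.4 GHz and
 * basic rate below 9 Mbps only); leaving power-save reverses the order
 * and disables beacon early termination before the mode change.
 */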
int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		       enum wl1271_cmd_ps_mode mode)
{
	int ret;
	u16 timeout = wl->conf.conn.dynamic_ps_timeout;

	switch (mode) {
	case STATION_AUTO_PS_MODE:
	case STATION_POWER_SAVE_MODE:
		wl1271_debug(DEBUG_PSM, "entering psm (mode=%d,timeout=%u)",
			     mode, timeout);

		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
					    wl->conf.conn.wake_up_event,
					    wl->conf.conn.listen_interval);
		if (ret < 0) {
			wl1271_error("couldn't set wake up conditions");
			return ret;
		}

		ret = wl1271_cmd_ps_mode(wl, wlvif, mode, timeout);
		if (ret < 0)
			return ret;

		set_bit(WLVIF_FLAG_IN_PS, &wlvif->flags);

		/*
		 * enable beacon early termination.
		 * Not relevant for 5GHz and for high rates.
		 */
		if ((wlvif->band == NL80211_BAND_2GHZ) &&
		    (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
			ret = wl1271_acx_bet_enable(wl, wlvif, true);
			if (ret < 0)
				return ret;
		}
		break;
	case STATION_ACTIVE_MODE:
		wl1271_debug(DEBUG_PSM, "leaving psm");

		/* disable beacon early termination */
		if ((wlvif->band == NL80211_BAND_2GHZ) &&
		    (wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
			ret = wl1271_acx_bet_enable(wl, wlvif, false);
			if (ret < 0)
				return ret;
		}

		ret = wl1271_cmd_ps_mode(wl, wlvif, mode, 0);
		if (ret < 0)
			return ret;

		clear_bit(WLVIF_FLAG_IN_PS, &wlvif->flags);
		break;
	default:
		wl1271_warning("trying to set ps to unsupported mode %d", mode);
		ret = -EINVAL;
	}

	return ret;
}

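/*
 * Drop all frames queued in the low-level per-link TX queues for @hlid
 * and report them to mac80211 as filtered, then fix up the global and
 * per-vif queue counters under wl_lock.
 */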
static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;
	unsigned long flags;
	int filtered[NUM_TX_QUEUES];
	struct wl1271_link *lnk = &wl->links[hlid];

	/* filter all frames currently in the low level queues for this hlid */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		filtered[i] = 0;
		while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
			filtered[i]++;

			if (WARN_ON(wl12xx_is_dummy_packet(wl, skb)))
				continue;

			info = IEEE80211_SKB_CB(skb);
			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
			info->status.rates[0].idx = -1;
			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_queue_count[i] -= filtered[i];
		if (lnk->wlvif)
			lnk->wlvif->tx_queue_count[i] -= filtered[i];
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}

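/*
 * Tell mac80211 that the station behind @hlid (AP role only) entered
 * power-save, and optionally flush its queued frames as filtered.  The
 * link is then marked in ap_ps_map so the transition is not repeated.
 */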
void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			  u8 hlid, bool clean_queues)
{
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
		return;

	if (!test_bit(hlid, wlvif->ap.sta_hlid_map) ||
	    test_bit(hlid, &wl->ap_ps_map))
		return;

	wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d pkts %d "
		     "clean_queues %d", hlid, wl->links[hlid].allocated_pkts,
		     clean_queues);

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
	if (!sta) {
		wl1271_error("could not find sta %pM for starting ps",
			     wl->links[hlid].addr);
		rcu_read_unlock();
		return;
	}

	ieee80211_sta_ps_transition_ni(sta, true);
	rcu_read_unlock();

	/* do we want to filter all frames from this link's queues? */
	if (clean_queues)
		wl1271_ps_filter_frames(wl, hlid);

	__set_bit(hlid, &wl->ap_ps_map);
}

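/*
 * Counterpart of wl12xx_ps_link_start: clear @hlid from ap_ps_map and
 * tell mac80211 that the station left power-save.
 */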
void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	if (!test_bit(hlid, &wl->ap_ps_map))
		return;

	wl1271_debug(DEBUG_PSM, "end mac80211 PSM on hlid %d", hlid);

	__clear_bit(hlid, &wl->ap_ps_map);

	rcu_read_lock();
	sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
	if (!sta) {
		wl1271_error("could not find sta %pM for ending ps",
			     wl->links[hlid].addr);
		goto end;
	}

	ieee80211_sta_ps_transition_ni(sta, false);
end:
	rcu_read_unlock();
}