// SPDX-License-Identifier: GPL-2.0-only
/*
 * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 *
 * Based on:
 * ST-Ericsson UMAC CW1200 driver, which is
 * Copyright (c) 2010, ST-Ericsson
 * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
 */

#include <linux/module.h>
#include <net/mac80211.h>
#include <linux/kthread.h>
#include <linux/timer.h>

#include "cw1200.h"
#include "bh.h"
#include "hwio.h"
#include "wsm.h"
#include "hwbus.h"
#include "debug.h"
#include "fwio.h"

static int cw1200_bh(void *arg);

#define DOWNLOAD_BLOCK_SIZE_WR	(0x1000 - 4)
/* An SPI message cannot be bigger than (2^12 - 1) * 2 bytes;
 * the "* 2" converts 16-bit words to bytes.
 */
#define MAX_SZ_RD_WR_BUFFERS	(DOWNLOAD_BLOCK_SIZE_WR*2)
#define PIGGYBACK_CTRL_REG	(2)
#define EFFECTIVE_BUF_SIZE	(MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG)
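
/* For reference, the resulting sizes (all values follow from the macros
 * above): MAX_SZ_RD_WR_BUFFERS = (0x1000 - 4) * 2 = 8184 bytes, which stays
 * under the (2^12 - 1) * 2 = 8190 byte SPI limit, and EFFECTIVE_BUF_SIZE =
 * 8184 - 2 = 8182 bytes once the piggybacked control register is reserved.
 * The WARN_ON_ONCE() checks in the RX/TX helpers compare transfer sizes
 * against EFFECTIVE_BUF_SIZE.
 */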

/* Suspend state privates */
enum cw1200_bh_pm_state {
        CW1200_BH_RESUMED = 0,
        CW1200_BH_SUSPEND,
        CW1200_BH_SUSPENDED,
        CW1200_BH_RESUME,
};
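
/* Sketch of the suspend/resume handshake implemented by cw1200_bh_suspend(),
 * cw1200_bh_resume() and the suspend branch of cw1200_bh(): the caller writes
 * the request state and wakes the BH, which acknowledges by writing the
 * settled state:
 *
 *   suspend: caller sets CW1200_BH_SUSPEND -> BH answers CW1200_BH_SUSPENDED
 *   resume:  caller sets CW1200_BH_RESUME  -> BH answers CW1200_BH_RESUMED
 *
 * The caller then waits on bh_evt_wq with a one second timeout.
 */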

static void cw1200_bh_work(struct work_struct *work)
{
        struct cw1200_common *priv =
                container_of(work, struct cw1200_common, bh_work);
        cw1200_bh(priv);
}

int cw1200_register_bh(struct cw1200_common *priv)
{
        int err = 0;
        /* Realtime workqueue */
        priv->bh_workqueue = alloc_workqueue("cw1200_bh",
                                WQ_MEM_RECLAIM | WQ_HIGHPRI
                                | WQ_CPU_INTENSIVE, 1);

        if (!priv->bh_workqueue)
                return -ENOMEM;

        INIT_WORK(&priv->bh_work, cw1200_bh_work);

        pr_debug("[BH] register.\n");

        atomic_set(&priv->bh_rx, 0);
        atomic_set(&priv->bh_tx, 0);
        atomic_set(&priv->bh_term, 0);
        atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
        priv->bh_error = 0;
        priv->hw_bufs_used = 0;
        priv->buf_id_tx = 0;
        priv->buf_id_rx = 0;
        init_waitqueue_head(&priv->bh_wq);
        init_waitqueue_head(&priv->bh_evt_wq);

        err = !queue_work(priv->bh_workqueue, &priv->bh_work);
        WARN_ON(err);
        return err;
}

void cw1200_unregister_bh(struct cw1200_common *priv)
{
        atomic_inc(&priv->bh_term);
        wake_up(&priv->bh_wq);

        flush_workqueue(priv->bh_workqueue);

        destroy_workqueue(priv->bh_workqueue);
        priv->bh_workqueue = NULL;

        pr_debug("[BH] unregistered.\n");
}

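/* The interrupt handler below only masks further device interrupts and flags
 * pending RX work; the interrupt is re-enabled by the BH itself at the end of
 * each loop iteration in cw1200_bh() (the "done:" label). The
 * atomic_inc_return() == 1 check avoids redundant wake-ups when an interrupt
 * is already pending.
 */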
void cw1200_irq_handler(struct cw1200_common *priv)
{
        pr_debug("[BH] irq.\n");

        /* Disable Interrupts! */
        /* NOTE: hwbus_ops->lock already held */
        __cw1200_irq_enable(priv, 0);

        if (/* WARN_ON */(priv->bh_error))
                return;

        if (atomic_inc_return(&priv->bh_rx) == 1)
                wake_up(&priv->bh_wq);
}
EXPORT_SYMBOL_GPL(cw1200_irq_handler);

void cw1200_bh_wakeup(struct cw1200_common *priv)
{
        pr_debug("[BH] wakeup.\n");
        if (priv->bh_error) {
                pr_err("[BH] wakeup failed (BH error)\n");
                return;
        }

        if (atomic_inc_return(&priv->bh_tx) == 1)
                wake_up(&priv->bh_wq);
}

int cw1200_bh_suspend(struct cw1200_common *priv)
{
        pr_debug("[BH] suspend.\n");
        if (priv->bh_error) {
                wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n");
                return -EINVAL;
        }

        atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND);
        wake_up(&priv->bh_wq);
        return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
                (CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)),
                1 * HZ) ? 0 : -ETIMEDOUT;
}

int cw1200_bh_resume(struct cw1200_common *priv)
{
        pr_debug("[BH] resume.\n");
        if (priv->bh_error) {
                wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n");
                return -EINVAL;
        }

        atomic_set(&priv->bh_suspend, CW1200_BH_RESUME);
        wake_up(&priv->bh_wq);
        return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
                (CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)),
                1 * HZ) ? 0 : -ETIMEDOUT;
}

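/* hw_bufs_used tracks how many of the device's WSM input buffers are in
 * flight: it is bumped by wsm_alloc_tx_buffer() before each write and dropped
 * by wsm_release_tx_buffer() when a TX confirmation arrives. The release
 * helper returns 1 when the device had been fully booked (the pre-decrement
 * count reached wsm_caps.input_buffers), so the BH knows TX may proceed
 * again, and it wakes bh_evt_wq once the count drops to zero so code waiting
 * for all buffers to drain can make progress.
 */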
static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv)
{
        ++priv->hw_bufs_used;
}

int wsm_release_tx_buffer(struct cw1200_common *priv, int count)
{
        int ret = 0;
        int hw_bufs_used = priv->hw_bufs_used;

        priv->hw_bufs_used -= count;
        if (WARN_ON(priv->hw_bufs_used < 0))
                ret = -1;
        else if (hw_bufs_used >= priv->wsm_caps.input_buffers)
                ret = 1;
        if (!priv->hw_bufs_used)
                wake_up(&priv->bh_evt_wq);
        return ret;
}

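/* The control register read below is attempted twice: on failure a single
 * immediate retry is made before the error is reported. That retry-on-first-
 * failure behaviour is what the code does; the usual reason (the first access
 * after the device has been sleeping may fail) is an assumption based on how
 * this helper is used by the wake-up path.
 */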
static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv,
                                   u16 *ctrl_reg)
{
        int ret;

        ret = cw1200_reg_read_16(priv,
                        ST90TDS_CONTROL_REG_ID, ctrl_reg);
        if (ret) {
                ret = cw1200_reg_read_16(priv,
                                ST90TDS_CONTROL_REG_ID, ctrl_reg);
                if (ret)
                        pr_err("[BH] Failed to read control register.\n");
        }

        return ret;
}

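/* Wake-up sequence, as implemented below: program the DPLL for the board's
 * reference clock, set ST90TDS_CONT_WUP_BIT in the control register to
 * request always-on operation, then read the control register back once.
 * Return convention used by the caller (cw1200_bh_tx_helper()):
 *   1  - ST90TDS_CONT_RDY_BIT already set, the device is awake;
 *   0  - wake-up requested but the device is not ready yet;
 *  <0  - register access failed.
 */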
static int cw1200_device_wakeup(struct cw1200_common *priv)
{
        u16 ctrl_reg;
        int ret;

        pr_debug("[BH] Device wakeup.\n");

        /* First, set the dpll register */
        ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
                                  cw1200_dpll_from_clk(priv->hw_refclk));
        if (WARN_ON(ret))
                return ret;

        /* To force the device to be always-on, the host sets WLAN_UP to 1 */
        ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
                                  ST90TDS_CONT_WUP_BIT);
        if (WARN_ON(ret))
                return ret;

        ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg);
        if (WARN_ON(ret))
                return ret;

        /* If the device returns WLAN_RDY as 1, the device is active and will
         * remain active.
         */
        if (ctrl_reg & ST90TDS_CONT_RDY_BIT) {
                pr_debug("[BH] Device awake.\n");
                return 1;
        }

        return 0;
}

/* Must be called from BH thread. */
void cw1200_enable_powersave(struct cw1200_common *priv,
                             bool enable)
{
        pr_debug("[BH] Powersave is %s.\n",
                 enable ? "enabled" : "disabled");
        priv->powersave_enabled = enable;
}

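/* RX path overview (a summary of the helper below, not new behaviour): the
 * pending message length comes from the control register (a 16-bit word
 * count, hence the "* 2"); two extra bytes are read so the device can
 * piggyback a fresh copy of the control register at the end of the transfer;
 * WSM id 0x0800 is a firmware exception and is routed to
 * wsm_handle_exception(); the 0x0400 bit marks a TX confirmation and releases
 * one device buffer; everything else is handed to wsm_handle_rx(), which may
 * consume the skb.
 */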
static int cw1200_bh_rx_helper(struct cw1200_common *priv,
                               uint16_t *ctrl_reg,
                               int *tx)
{
        size_t read_len = 0;
        struct sk_buff *skb_rx = NULL;
        struct wsm_hdr *wsm;
        size_t wsm_len;
        u16 wsm_id;
        u8 wsm_seq;
        int rx_resync = 1;

        size_t alloc_len;
        u8 *data;

        read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2;
        if (!read_len)
                return 0; /* No more work */

        if (WARN_ON((read_len < sizeof(struct wsm_hdr)) ||
                    (read_len > EFFECTIVE_BUF_SIZE))) {
                pr_debug("Invalid read len: %zu (%04x)",
                         read_len, *ctrl_reg);
                goto err;
        }

        /* Add SIZE of PIGGYBACK reg (CONTROL Reg)
         * to the NEXT Message length + 2 Bytes for SKB
         */
        read_len = read_len + 2;

        alloc_len = priv->hwbus_ops->align_size(
                priv->hwbus_priv, read_len);

        /* Check if not exceeding CW1200 capabilities */
        if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) {
                pr_debug("Read aligned len: %zu\n",
                         alloc_len);
        }

        skb_rx = dev_alloc_skb(alloc_len);
        if (WARN_ON(!skb_rx))
                goto err;

        skb_trim(skb_rx, 0);
        skb_put(skb_rx, read_len);
        data = skb_rx->data;
        if (WARN_ON(!data))
                goto err;

        if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) {
                pr_err("rx blew up, len %zu\n", alloc_len);
                goto err;
        }

        /* Piggyback */
        *ctrl_reg = __le16_to_cpu(
                ((__le16 *)data)[alloc_len / 2 - 1]);

        wsm = (struct wsm_hdr *)data;
        wsm_len = __le16_to_cpu(wsm->len);
        if (WARN_ON(wsm_len > read_len))
                goto err;

        if (priv->wsm_enable_wsm_dumps)
                print_hex_dump_bytes("<-- ",
                                     DUMP_PREFIX_NONE,
                                     data, wsm_len);

        wsm_id  = __le16_to_cpu(wsm->id) & 0xFFF;
        wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7;

        skb_trim(skb_rx, wsm_len);

        if (wsm_id == 0x0800) {
                wsm_handle_exception(priv,
                                     &data[sizeof(*wsm)],
                                     wsm_len - sizeof(*wsm));
                goto err;
        } else if (!rx_resync) {
                if (WARN_ON(wsm_seq != priv->wsm_rx_seq))
                        goto err;
        }
        priv->wsm_rx_seq = (wsm_seq + 1) & 7;
        rx_resync = 0;

        if (wsm_id & 0x0400) {
                int rc = wsm_release_tx_buffer(priv, 1);
                if (WARN_ON(rc < 0))
                        return rc;
                else if (rc > 0)
                        *tx = 1;
        }

        /* cw1200_wsm_rx takes care of the SKB lifetime */
        if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx)))
                goto err;

        if (skb_rx) {
                dev_kfree_skb(skb_rx);
                skb_rx = NULL;
        }

        return 0;

err:
        if (skb_rx) {
                dev_kfree_skb(skb_rx);
                skb_rx = NULL;
        }
        return -1;
}

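/* TX path overview (a summary of the helper below, not new behaviour): make
 * sure the device is awake, reserve one device buffer, fetch the next queued
 * frame via wsm_get_tx(), stamp it with the current wsm_tx_seq, write it out
 * padded to the bus alignment, and return 1 when the caller should keep
 * transmitting because a multi-frame burst is in progress. A wake-up that is
 * still pending is recorded in *pending_tx so the frame is retried on the
 * next loop iteration.
 */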
static int cw1200_bh_tx_helper(struct cw1200_common *priv,
                               int *pending_tx,
                               int *tx_burst)
{
        size_t tx_len;
        u8 *data;
        int ret;
        struct wsm_hdr *wsm;

        if (priv->device_can_sleep) {
                ret = cw1200_device_wakeup(priv);
                if (WARN_ON(ret < 0)) { /* Error in wakeup */
                        *pending_tx = 1;
                        return 0;
                } else if (ret) { /* Woke up */
                        priv->device_can_sleep = false;
                } else { /* Did not wake up */
                        *pending_tx = 1;
                        return 0;
                }
        }

        wsm_alloc_tx_buffer(priv);
        ret = wsm_get_tx(priv, &data, &tx_len, tx_burst);
        if (ret <= 0) {
                wsm_release_tx_buffer(priv, 1);
                if (WARN_ON(ret < 0))
                        return ret; /* Error */
                return 0; /* No work */
        }

        wsm = (struct wsm_hdr *)data;
        BUG_ON(tx_len < sizeof(*wsm));
        BUG_ON(__le16_to_cpu(wsm->len) != tx_len);

        atomic_inc(&priv->bh_tx);

        tx_len = priv->hwbus_ops->align_size(
                priv->hwbus_priv, tx_len);

        /* Check if not exceeding CW1200 capabilities */
        if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE))
                pr_debug("Write aligned len: %zu\n", tx_len);

        wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX));
        wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq));

        if (WARN_ON(cw1200_data_write(priv, data, tx_len))) {
                pr_err("tx blew up, len %zu\n", tx_len);
                wsm_release_tx_buffer(priv, 1);
                return -1; /* Error */
        }

        if (priv->wsm_enable_wsm_dumps)
                print_hex_dump_bytes("--> ",
                                     DUMP_PREFIX_NONE,
                                     data,
                                     __le16_to_cpu(wsm->len));

        wsm_txed(priv, data);
        priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX;

        if (*tx_burst > 1) {
                cw1200_debug_tx_burst(priv);
                return 1; /* Work remains */
        }

        return 0;
}

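/* Main BH loop. Each iteration first picks a wait timeout: one second while
 * frames are outstanding (so a lost interrupt is noticed) or when the device
 * is being put to sleep, otherwise MAX_SCHEDULE_TIMEOUT. It then sleeps on
 * bh_wq until RX, TX, termination, suspend or an error is flagged, services
 * RX and TX (re-reading the control register between passes), and finally
 * re-enables the device interrupt that cw1200_irq_handler() masked. A zero
 * wait result means a timeout, which either triggers the missed-interrupt /
 * stuck-frame checks or powers the device down.
 */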
static int cw1200_bh(void *arg)
{
        struct cw1200_common *priv = arg;
        int rx, tx, term, suspend;
        u16 ctrl_reg = 0;
        int tx_allowed;
        int pending_tx = 0;
        int tx_burst;
        long status;
        u32 dummy;
        int ret;

        for (;;) {
                if (!priv->hw_bufs_used &&
                    priv->powersave_enabled &&
                    !priv->device_can_sleep &&
                    !atomic_read(&priv->recent_scan)) {
                        status = 1 * HZ;
                        pr_debug("[BH] Device wakedown. No data.\n");
                        cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0);
                        priv->device_can_sleep = true;
                } else if (priv->hw_bufs_used) {
                        /* Interrupt loss detection */
                        status = 1 * HZ;
                } else {
                        status = MAX_SCHEDULE_TIMEOUT;
                }

                /* Dummy Read for SDIO retry mechanism */
                if ((priv->hw_type != -1) &&
                    (atomic_read(&priv->bh_rx) == 0) &&
                    (atomic_read(&priv->bh_tx) == 0))
                        cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID,
                                        &dummy, sizeof(dummy));

                pr_debug("[BH] waiting ...\n");
                status = wait_event_interruptible_timeout(priv->bh_wq, ({
                                rx = atomic_xchg(&priv->bh_rx, 0);
                                tx = atomic_xchg(&priv->bh_tx, 0);
                                term = atomic_xchg(&priv->bh_term, 0);
                                suspend = pending_tx ?
                                        0 : atomic_read(&priv->bh_suspend);
                                (rx || tx || term || suspend || priv->bh_error);
                        }), status);

                pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n",
                         rx, tx, term, suspend, priv->bh_error, status);

                /* Did an error occur? */
                if ((status < 0 && status != -ERESTARTSYS) ||
                    term || priv->bh_error) {
                        break;
                }
                if (!status) {  /* wait_event timed out */
                        unsigned long timestamp = jiffies;
                        long timeout;
                        int pending = 0;
                        int i;

                        /* Check to see if we have any outstanding frames */
                        if (priv->hw_bufs_used && (!rx || !tx)) {
                                wiphy_warn(priv->hw->wiphy,
                                           "Missed interrupt? (%d frames outstanding)\n",
                                           priv->hw_bufs_used);
                                rx = 1;

                                /* Get a timestamp of "oldest" frame */
                                for (i = 0; i < 4; ++i)
                                        pending += cw1200_queue_get_xmit_timestamp(
                                                &priv->tx_queue[i],
                                                &timestamp,
                                                priv->pending_frame_id);

                                /* Check if frame transmission is timed out.
                                 * Add an extra second with respect to possible
                                 * interrupt loss.
                                 */
                                timeout = timestamp +
                                        WSM_CMD_LAST_CHANCE_TIMEOUT +
                                        1 * HZ  -
                                        jiffies;

                                /* And terminate BH thread if the frame is "stuck" */
                                if (pending && timeout < 0) {
                                        wiphy_warn(priv->hw->wiphy,
                                                   "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n",
                                                   priv->hw_bufs_used, pending,
                                                   timestamp, jiffies);
                                        break;
                                }
                        } else if (!priv->device_can_sleep &&
                                   !atomic_read(&priv->recent_scan)) {
                                pr_debug("[BH] Device wakedown. Timeout.\n");
                                cw1200_reg_write_16(priv,
                                                    ST90TDS_CONTROL_REG_ID, 0);
                                priv->device_can_sleep = true;
                        }
                        goto done;
                } else if (suspend) {
                        pr_debug("[BH] Device suspend.\n");
                        if (priv->powersave_enabled) {
                                pr_debug("[BH] Device wakedown. Suspend.\n");
                                cw1200_reg_write_16(priv,
                                                    ST90TDS_CONTROL_REG_ID, 0);
                                priv->device_can_sleep = true;
                        }

                        atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED);
                        wake_up(&priv->bh_evt_wq);
                        status = wait_event_interruptible(priv->bh_wq,
                                                          CW1200_BH_RESUME == atomic_read(&priv->bh_suspend));
                        if (status < 0) {
                                wiphy_err(priv->hw->wiphy,
                                          "Failed to wait for resume: %ld.\n",
                                          status);
                                break;
                        }
                        pr_debug("[BH] Device resume.\n");
                        atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
                        wake_up(&priv->bh_evt_wq);
                        atomic_inc(&priv->bh_rx);
                        goto done;
                }

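                /* Label-based flow from here on (a description of the
                 * existing control flow, not new logic): "rx:" drains
                 * received frames, up to two back-to-back reads per pass;
                 * "tx:" sends pending frames while device buffers are
                 * available, deferring to pending_tx when they are not;
                 * "done_rx:" loops back to "rx:" or "tx:" while work remains;
                 * "done:" re-enables the device interrupt and starts the next
                 * wait.
                 */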
        rx:
                tx += pending_tx;
                pending_tx = 0;

                if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
                        break;

                /* Don't bother trying to rx unless we have data to read */
                if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
                        ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
                        if (ret < 0)
                                break;
                        /* Double up here if there's more data.. */
                        if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
                                ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
                                if (ret < 0)
                                        break;
                        }
                }

        tx:
                if (tx) {
                        tx = 0;

                        BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers);
                        tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used;
                        tx_allowed = tx_burst > 0;

                        if (!tx_allowed) {
                                /* Buffers full.  Ensure we process tx
                                 * after we handle rx..
                                 */
                                pending_tx = tx;
                                goto done_rx;
                        }
                        ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst);
                        if (ret < 0)
                                break;
                        if (ret > 0) /* More to transmit */
                                tx = ret;

                        /* Re-read ctrl reg */
                        if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
                                break;
                }

        done_rx:
                if (priv->bh_error)
                        break;
                if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK)
                        goto rx;
                if (tx)
                        goto tx;

        done:
                /* Re-enable device interrupts */
                priv->hwbus_ops->lock(priv->hwbus_priv);
                __cw1200_irq_enable(priv, 1);
                priv->hwbus_ops->unlock(priv->hwbus_priv);
        }

        /* Explicitly disable device interrupts */
        priv->hwbus_ops->lock(priv->hwbus_priv);
        __cw1200_irq_enable(priv, 0);
        priv->hwbus_ops->unlock(priv->hwbus_priv);

        if (!term) {
                pr_err("[BH] Fatal error, exiting.\n");
                priv->bh_error = 1;
                /* TODO: schedule_work(recovery) */
        }
        return 0;
}