/*
 * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers
 *
 * Copyright (c) 2010, ST-Ericsson
 * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
 *
 * Based on:
 * ST-Ericsson UMAC CW1200 driver, which is
 * Copyright (c) 2010, ST-Ericsson
 * Author: Ajitpal Singh <ajitpal.singh@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <net/mac80211.h>
#include <linux/kthread.h>
#include <linux/timer.h>

#include "cw1200.h"
#include "bh.h"
#include "hwio.h"
#include "wsm.h"
#include "hwbus.h"
#include "debug.h"
#include "fwio.h"

static int cw1200_bh(void *arg);

#define DOWNLOAD_BLOCK_SIZE_WR	(0x1000 - 4)
/* An SPI message cannot be bigger than (2^12 - 1) * 2 bytes
 * ("*2" converts 16-bit words to bytes).
 */
#define MAX_SZ_RD_WR_BUFFERS	(DOWNLOAD_BLOCK_SIZE_WR*2)
#define PIGGYBACK_CTRL_REG	(2)
#define EFFECTIVE_BUF_SIZE	(MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG)

/* Suspend state privates */
enum cw1200_bh_pm_state {
	CW1200_BH_RESUMED = 0,
	CW1200_BH_SUSPEND,
	CW1200_BH_SUSPENDED,
	CW1200_BH_RESUME,
};

typedef int (*cw1200_wsm_handler)(struct cw1200_common *priv,
	u8 *data, size_t size);

static void cw1200_bh_work(struct work_struct *work)
{
	struct cw1200_common *priv =
	container_of(work, struct cw1200_common, bh_work);
	cw1200_bh(priv);
}

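/* Set up the BH machinery: allocate a dedicated high-priority workqueue,
 * reset the RX/TX/term/suspend state and buffer bookkeeping, and queue the
 * BH work.
 */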
int cw1200_register_bh(struct cw1200_common *priv)
{
	int err = 0;
	/* Realtime workqueue */
	priv->bh_workqueue = alloc_workqueue("cw1200_bh",
				WQ_MEM_RECLAIM | WQ_HIGHPRI
				| WQ_CPU_INTENSIVE, 1);

	if (!priv->bh_workqueue)
		return -ENOMEM;

	INIT_WORK(&priv->bh_work, cw1200_bh_work);

	pr_debug("[BH] register.\n");

	atomic_set(&priv->bh_rx, 0);
	atomic_set(&priv->bh_tx, 0);
	atomic_set(&priv->bh_term, 0);
	atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
	priv->bh_error = 0;
	priv->hw_bufs_used = 0;
	priv->buf_id_tx = 0;
	priv->buf_id_rx = 0;
	init_waitqueue_head(&priv->bh_wq);
	init_waitqueue_head(&priv->bh_evt_wq);

	err = !queue_work(priv->bh_workqueue, &priv->bh_work);
	WARN_ON(err);
	return err;
}

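/* Tear down the BH: signal termination, wake the BH so it can exit, then
 * flush and destroy its workqueue.
 */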
void cw1200_unregister_bh(struct cw1200_common *priv)
{
	atomic_add(1, &priv->bh_term);
	wake_up(&priv->bh_wq);

	flush_workqueue(priv->bh_workqueue);

	destroy_workqueue(priv->bh_workqueue);
	priv->bh_workqueue = NULL;

	pr_debug("[BH] unregistered.\n");
}

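/* IRQ entry point (hwbus lock held): device interrupts are masked here and
 * re-enabled by the BH once it has drained the device.  The wake_up() is
 * only issued on the 0 -> 1 transition of bh_rx so that interrupts arriving
 * before the BH runs coalesce into a single wakeup.
 */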
void cw1200_irq_handler(struct cw1200_common *priv)
{
	pr_debug("[BH] irq.\n");

	/* Disable Interrupts! */
	/* NOTE:  hwbus_ops->lock already held */
	__cw1200_irq_enable(priv, 0);

	if (/* WARN_ON */(priv->bh_error))
		return;

	if (atomic_add_return(1, &priv->bh_rx) == 1)
		wake_up(&priv->bh_wq);
}
EXPORT_SYMBOL_GPL(cw1200_irq_handler);

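/* Request a TX pass from the BH; as in the IRQ path, only the 0 -> 1
 * transition of bh_tx actually wakes the thread.
 */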
void cw1200_bh_wakeup(struct cw1200_common *priv)
{
	pr_debug("[BH] wakeup.\n");
	if (priv->bh_error) {
		pr_err("[BH] wakeup failed (BH error)\n");
		return;
	}

	if (atomic_add_return(1, &priv->bh_tx) == 1)
		wake_up(&priv->bh_wq);
}

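/* Ask the BH to suspend and wait (up to one second) for it to acknowledge
 * by switching bh_suspend to CW1200_BH_SUSPENDED.
 */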
int cw1200_bh_suspend(struct cw1200_common *priv)
{
	pr_debug("[BH] suspend.\n");
	if (priv->bh_error) {
		wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n");
		return -EINVAL;
	}

	atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND);
	wake_up(&priv->bh_wq);
	return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
		(CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)),
		 1 * HZ) ? 0 : -ETIMEDOUT;
}

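/* Counterpart of cw1200_bh_suspend(): request a resume and wait for the BH
 * to report CW1200_BH_RESUMED.
 */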
int cw1200_bh_resume(struct cw1200_common *priv)
{
	pr_debug("[BH] resume.\n");
	if (priv->bh_error) {
		wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n");
		return -EINVAL;
	}

	atomic_set(&priv->bh_suspend, CW1200_BH_RESUME);
	wake_up(&priv->bh_wq);
	return wait_event_timeout(priv->bh_evt_wq, priv->bh_error ||
		(CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)),
		1 * HZ) ? 0 : -ETIMEDOUT;
}

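/* hw_bufs_used tracks how many of the device's input buffers the host
 * currently owns.  wsm_release_tx_buffer() returns 1 when the device had
 * been saturated (so a TX pass is worth retrying), 0 otherwise, and -1 if
 * the count underflows.
 */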
static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv)
{
	++priv->hw_bufs_used;
}

int wsm_release_tx_buffer(struct cw1200_common *priv, int count)
{
	int ret = 0;
	int hw_bufs_used = priv->hw_bufs_used;

	priv->hw_bufs_used -= count;
	if (WARN_ON(priv->hw_bufs_used < 0))
		ret = -1;
	else if (hw_bufs_used >= priv->wsm_caps.input_buffers)
		ret = 1;
	if (!priv->hw_bufs_used)
		wake_up(&priv->bh_evt_wq);
	return ret;
}

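/* Read the control register; a failed read is retried once before an error
 * is reported.
 */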
static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv,
					  u16 *ctrl_reg)
{
	int ret;

	ret = cw1200_reg_read_16(priv,
			ST90TDS_CONTROL_REG_ID, ctrl_reg);
	if (ret) {
		ret = cw1200_reg_read_16(priv,
				ST90TDS_CONTROL_REG_ID, ctrl_reg);
		if (ret)
			pr_err("[BH] Failed to read control register.\n");
	}

	return ret;
}

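/* Wake the device: program the DPLL for the reference clock, set WLAN_UP
 * and check WLAN_RDY.  Returns 1 if the device is awake, 0 if the wakeup
 * is still in progress, negative on error.
 */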
static int cw1200_device_wakeup(struct cw1200_common *priv)
{
	u16 ctrl_reg;
	int ret;

	pr_debug("[BH] Device wakeup.\n");

	/* First, set the dpll register */
	ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID,
				  cw1200_dpll_from_clk(priv->hw_refclk));
	if (WARN_ON(ret))
		return ret;

	/* To force the device to be always-on, the host sets WLAN_UP to 1 */
	ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID,
			ST90TDS_CONT_WUP_BIT);
	if (WARN_ON(ret))
		return ret;

	ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg);
	if (WARN_ON(ret))
		return ret;

	/* If the device returns WLAN_RDY as 1, the device is active and will
	 * remain active.
	 */
	if (ctrl_reg & ST90TDS_CONT_RDY_BIT) {
		pr_debug("[BH] Device awake.\n");
		return 1;
	}

	return 0;
}

/* Must be called from BH thread. */
void cw1200_enable_powersave(struct cw1200_common *priv,
			     bool enable)
{
	pr_debug("[BH] Powersave is %s.\n",
		 enable ? "enabled" : "disabled");
	priv->powersave_enabled = enable;
}

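/* Receive and dispatch one WSM message: read it from the bus, pick up the
 * piggybacked control register value, check the RX sequence number and
 * hand the frame to the WSM layer.  Returns 0 on success or when there is
 * nothing to read, negative on error.
 */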
static int cw1200_bh_rx_helper(struct cw1200_common *priv,
			       uint16_t *ctrl_reg,
			       int *tx)
{
	size_t read_len = 0;
	struct sk_buff *skb_rx = NULL;
	struct wsm_hdr *wsm;
	size_t wsm_len;
	u16 wsm_id;
	u8 wsm_seq;
	int rx_resync = 1;

	size_t alloc_len;
	u8 *data;

	read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2;
	if (!read_len)
		return 0; /* No more work */

	if (WARN_ON((read_len < sizeof(struct wsm_hdr)) ||
		    (read_len > EFFECTIVE_BUF_SIZE))) {
		pr_debug("Invalid read len: %zu (%04x)",
			 read_len, *ctrl_reg);
		goto err;
	}
	/* Add the size of the piggybacked control register (2 bytes)
	 * to the next message length for the SKB.
	 */
	read_len = read_len + 2;

	alloc_len = priv->hwbus_ops->align_size(
		priv->hwbus_priv, read_len);

	/* Check if not exceeding CW1200 capabilities */
	if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) {
		pr_debug("Read aligned len: %zu\n",
			 alloc_len);
	}

	skb_rx = dev_alloc_skb(alloc_len);
	if (WARN_ON(!skb_rx))
		goto err;

	skb_trim(skb_rx, 0);
	skb_put(skb_rx, read_len);
	data = skb_rx->data;
	if (WARN_ON(!data))
		goto err;

	if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) {
		pr_err("rx blew up, len %zu\n", alloc_len);
		goto err;
	}

	/* Piggyback */
	*ctrl_reg = __le16_to_cpu(
		((__le16 *)data)[alloc_len / 2 - 1]);

	wsm = (struct wsm_hdr *)data;
	wsm_len = __le16_to_cpu(wsm->len);
	if (WARN_ON(wsm_len > read_len))
		goto err;

	if (priv->wsm_enable_wsm_dumps)
		print_hex_dump_bytes("<-- ",
				     DUMP_PREFIX_NONE,
				     data, wsm_len);

	wsm_id  = __le16_to_cpu(wsm->id) & 0xFFF;
	wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7;

	skb_trim(skb_rx, wsm_len);

	if (wsm_id == 0x0800) {
		wsm_handle_exception(priv,
				     &data[sizeof(*wsm)],
				     wsm_len - sizeof(*wsm));
		goto err;
	} else if (!rx_resync) {
		if (WARN_ON(wsm_seq != priv->wsm_rx_seq))
			goto err;
	}
	priv->wsm_rx_seq = (wsm_seq + 1) & 7;
	rx_resync = 0;

	if (wsm_id & 0x0400) {
		int rc = wsm_release_tx_buffer(priv, 1);
		if (WARN_ON(rc < 0))
			return rc;
		else if (rc > 0)
			*tx = 1;
	}
	/* wsm_handle_rx takes care of the SKB lifetime */
	if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx)))
		goto err;

	if (skb_rx) {
		dev_kfree_skb(skb_rx);
		skb_rx = NULL;
	}

	return 0;

err:
	if (skb_rx) {
		dev_kfree_skb(skb_rx);
		skb_rx = NULL;
	}
	return -1;
}

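/* Transmit one pending frame: wake the device if it is asleep, fetch the
 * next frame from the WSM layer, stamp the TX sequence number and write it
 * to the bus.  Returns 1 if more frames may follow in a burst, 0 when there
 * is nothing (more) to send, negative on error.
 */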
static int cw1200_bh_tx_helper(struct cw1200_common *priv,
			       int *pending_tx,
			       int *tx_burst)
{
	size_t tx_len;
	u8 *data;
	int ret;
	struct wsm_hdr *wsm;

	if (priv->device_can_sleep) {
		ret = cw1200_device_wakeup(priv);
		if (WARN_ON(ret < 0)) { /* Error in wakeup */
			*pending_tx = 1;
			return 0;
		} else if (ret) { /* Woke up */
			priv->device_can_sleep = false;
		} else { /* Did not awake */
			*pending_tx = 1;
			return 0;
		}
	}

	wsm_alloc_tx_buffer(priv);
	ret = wsm_get_tx(priv, &data, &tx_len, tx_burst);
	if (ret <= 0) {
		wsm_release_tx_buffer(priv, 1);
		if (WARN_ON(ret < 0))
			return ret; /* Error */
		return 0; /* No work */
	}

	wsm = (struct wsm_hdr *)data;
	BUG_ON(tx_len < sizeof(*wsm));
	BUG_ON(__le16_to_cpu(wsm->len) != tx_len);

	atomic_add(1, &priv->bh_tx);

	tx_len = priv->hwbus_ops->align_size(
		priv->hwbus_priv, tx_len);

	/* Check if not exceeding CW1200 capabilities */
	if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE))
		pr_debug("Write aligned len: %zu\n", tx_len);

	wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX));
	wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq));

	if (WARN_ON(cw1200_data_write(priv, data, tx_len))) {
		pr_err("tx blew up, len %zu\n", tx_len);
		wsm_release_tx_buffer(priv, 1);
		return -1; /* Error */
	}

	if (priv->wsm_enable_wsm_dumps)
		print_hex_dump_bytes("--> ",
				     DUMP_PREFIX_NONE,
				     data,
				     __le16_to_cpu(wsm->len));

	wsm_txed(priv, data);
	priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX;

	if (*tx_burst > 1) {
		cw1200_debug_tx_burst(priv);
		return 1; /* Work remains */
	}

	return 0;
}

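/* Main BH loop.  Sleeps until an IRQ, a TX request, a suspend/resume
 * request or termination wakes it, then alternates RX and TX passes until
 * both are drained.  A wait timeout with frames still outstanding is
 * treated as a possibly missed interrupt; a TX confirmation that looks
 * stuck terminates the loop, which flags bh_error on the way out.
 */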
static int cw1200_bh(void *arg)
{
	struct cw1200_common *priv = arg;
	int rx, tx, term, suspend;
	u16 ctrl_reg = 0;
	int tx_allowed;
	int pending_tx = 0;
	int tx_burst;
	long status;
	u32 dummy;
	int ret;

	for (;;) {
		if (!priv->hw_bufs_used &&
		    priv->powersave_enabled &&
		    !priv->device_can_sleep &&
		    !atomic_read(&priv->recent_scan)) {
			status = 1 * HZ;
			pr_debug("[BH] Device wakedown. No data.\n");
			cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0);
			priv->device_can_sleep = true;
		} else if (priv->hw_bufs_used) {
			/* Interrupt loss detection */
			status = 1 * HZ;
		} else {
			status = MAX_SCHEDULE_TIMEOUT;
		}

		/* Dummy read for SDIO retry mechanism */
		if ((priv->hw_type != -1) &&
		    (atomic_read(&priv->bh_rx) == 0) &&
		    (atomic_read(&priv->bh_tx) == 0))
			cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID,
					&dummy, sizeof(dummy));

		pr_debug("[BH] waiting ...\n");
		status = wait_event_interruptible_timeout(priv->bh_wq, ({
				rx = atomic_xchg(&priv->bh_rx, 0);
				tx = atomic_xchg(&priv->bh_tx, 0);
				term = atomic_xchg(&priv->bh_term, 0);
				suspend = pending_tx ?
					0 : atomic_read(&priv->bh_suspend);
				(rx || tx || term || suspend || priv->bh_error);
			}), status);

		pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n",
			 rx, tx, term, suspend, priv->bh_error, status);

		/* Did an error occur? */
		if ((status < 0 && status != -ERESTARTSYS) ||
		    term || priv->bh_error) {
			break;
		}
		if (!status) {  /* wait_event timed out */
			unsigned long timestamp = jiffies;
			long timeout;
			int pending = 0;
			int i;

			/* Check to see if we have any outstanding frames */
			if (priv->hw_bufs_used && (!rx || !tx)) {
				wiphy_warn(priv->hw->wiphy,
					   "Missed interrupt? (%d frames outstanding)\n",
					   priv->hw_bufs_used);
				rx = 1;

				/* Get a timestamp of "oldest" frame */
				for (i = 0; i < 4; ++i)
					pending += cw1200_queue_get_xmit_timestamp(
						&priv->tx_queue[i],
						&timestamp,
						priv->pending_frame_id);

				/* Check if frame transmission is timed out.
				 * Add an extra second with respect to possible
				 * interrupt loss.
				 */
				timeout = timestamp +
					WSM_CMD_LAST_CHANCE_TIMEOUT +
					1 * HZ -
					jiffies;

				/* And terminate BH thread if the frame is "stuck" */
				if (pending && timeout < 0) {
					wiphy_warn(priv->hw->wiphy,
						   "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n",
						   priv->hw_bufs_used, pending,
						   timestamp, jiffies);
					break;
				}
			} else if (!priv->device_can_sleep &&
				   !atomic_read(&priv->recent_scan)) {
				pr_debug("[BH] Device wakedown. Timeout.\n");
				cw1200_reg_write_16(priv,
						    ST90TDS_CONTROL_REG_ID, 0);
				priv->device_can_sleep = true;
			}
			goto done;
		} else if (suspend) {
			pr_debug("[BH] Device suspend.\n");
			if (priv->powersave_enabled) {
				pr_debug("[BH] Device wakedown. Suspend.\n");
				cw1200_reg_write_16(priv,
						    ST90TDS_CONTROL_REG_ID, 0);
				priv->device_can_sleep = true;
			}

			atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED);
			wake_up(&priv->bh_evt_wq);
			status = wait_event_interruptible(priv->bh_wq,
							  CW1200_BH_RESUME == atomic_read(&priv->bh_suspend));
			if (status < 0) {
				wiphy_err(priv->hw->wiphy,
					  "Failed to wait for resume: %ld.\n",
					  status);
				break;
			}
			pr_debug("[BH] Device resume.\n");
			atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED);
			wake_up(&priv->bh_evt_wq);
			atomic_add(1, &priv->bh_rx);
			goto done;
		}

	rx:
		tx += pending_tx;
		pending_tx = 0;

		if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
			break;

		/* Don't bother trying to rx unless we have data to read */
		if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
			ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
			if (ret < 0)
				break;
			/* Double up here if there's more data.. */
			if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) {
				ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx);
				if (ret < 0)
					break;
			}
		}

	tx:
		if (tx) {
			tx = 0;

			BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers);
			tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used;
			tx_allowed = tx_burst > 0;

			if (!tx_allowed) {
				/* Buffers full.  Ensure we process tx
				 * after we handle rx..
				 */
				pending_tx = tx;
				goto done_rx;
			}
			ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst);
			if (ret < 0)
				break;
			if (ret > 0) /* More to transmit */
				tx = ret;

			/* Re-read ctrl reg */
			if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg))
				break;
		}

	done_rx:
		if (priv->bh_error)
			break;
		if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK)
			goto rx;
		if (tx)
			goto tx;

	done:
		/* Re-enable device interrupts */
		priv->hwbus_ops->lock(priv->hwbus_priv);
		__cw1200_irq_enable(priv, 1);
		priv->hwbus_ops->unlock(priv->hwbus_priv);
	}

	/* Explicitly disable device interrupts */
	priv->hwbus_ops->lock(priv->hwbus_priv);
	__cw1200_irq_enable(priv, 0);
	priv->hwbus_ops->unlock(priv->hwbus_priv);

	if (!term) {
		pr_err("[BH] Fatal error, exiting.\n");
		priv->bh_error = 1;
		/* TODO: schedule_work(recovery) */
	}
	return 0;
}
620