/*
 *
 *  Bluetooth HCI Three-wire UART driver
 *
 *  Copyright (C) 2012  Intel Corporation
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "hci_uart.h"

#define HCI_3WIRE_ACK_PKT	0
#define HCI_3WIRE_LINK_PKT	15

/* Sliding window size */
#define H5_TX_WIN_MAX		4

#define H5_ACK_TIMEOUT	msecs_to_jiffies(250)
#define H5_SYNC_TIMEOUT	msecs_to_jiffies(100)

/*
 * Maximum Three-wire packet:
 *     4 byte header + max value for 12-bit length + 2 bytes for CRC
 */
#define H5_MAX_LEN (4 + 0xfff + 2)

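/*
 * Packed Three-wire header layout (4 bytes):
 *   byte 0: seq (bits 0-2), ack (bits 3-5), data CRC present (bit 6),
 *           reliable packet (bit 7)
 *   byte 1: packet type (bits 0-3), low nibble of payload length (bits 4-7)
 *   byte 2: high byte of the 12-bit payload length
 *   byte 3: header checksum (one's complement of the sum of bytes 0-2)
 */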
/* Convenience macros for reading Three-wire header values */
#define H5_HDR_SEQ(hdr)		((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr)		(((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr)		(((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr)	(((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr)	((hdr)[1] & 0x0f)
#define H5_HDR_LEN(hdr)		((((hdr)[1] >> 4) & 0xff) + ((hdr)[2] << 4))

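/*
 * SLIP framing: each packet is sent between 0xc0 delimiters. A 0xc0 byte
 * inside the packet is escaped as 0xdb 0xdc and a 0xdb byte as 0xdb 0xdd
 * (see h5_slip_one_byte() and h5_unslip_one_byte()).
 */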
#define SLIP_DELIMITER	0xc0
#define SLIP_ESC	0xdb
#define SLIP_ESC_DELIM	0xdc
#define SLIP_ESC_ESC	0xdd

/* H5 state flags */
enum {
	H5_RX_ESC,	/* SLIP escape mode */
	H5_TX_ACK_REQ,	/* Pending ack to send */
};

struct h5 {
	struct sk_buff_head	unack;		/* Unack'ed packets queue */
	struct sk_buff_head	rel;		/* Reliable packets queue */
	struct sk_buff_head	unrel;		/* Unreliable packets queue */

	unsigned long		flags;

	struct sk_buff		*rx_skb;	/* Receive buffer */
	size_t			rx_pending;	/* Expecting more bytes */
	u8			rx_ack;		/* Last ack number received */

	int			(*rx_func)(struct hci_uart *hu, u8 c);

	struct timer_list	timer;		/* Retransmission timer */

	u8			tx_seq;		/* Next seq number to send */
	u8			tx_ack;		/* Next ack number to send */
	u8			tx_win;		/* Sliding window size */

	enum {
		H5_UNINITIALIZED,
		H5_INITIALIZED,
		H5_ACTIVE,
	} state;

	enum {
		H5_AWAKE,
		H5_SLEEPING,
		H5_WAKING_UP,
	} sleep;
};

static void h5_reset_rx(struct h5 *h5);

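/* Queue a link control message on the unreliable queue for transmission */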
static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;

	nskb = alloc_skb(3, GFP_ATOMIC);
	if (!nskb)
		return;

	bt_cb(nskb)->pkt_type = HCI_3WIRE_LINK_PKT;

	memcpy(skb_put(nskb, len), data, len);

	skb_queue_tail(&h5->unrel, nskb);
}

static u8 h5_cfg_field(struct h5 *h5)
{
	u8 field = 0;

	/* Sliding window size (first 3 bits) */
	field |= (h5->tx_win & 7);

	return field;
}

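/*
 * Timer callback: retransmits sync/config requests while the link is being
 * established and, once the link is active, moves timed-out packets from
 * the unack queue back onto the reliable queue for retransmission.
 */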
static void h5_timed_event(unsigned long arg)
{
	const unsigned char sync_req[] = { 0x01, 0x7e };
	unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
	struct hci_uart *hu = (struct hci_uart *)arg;
	struct h5 *h5 = hu->priv;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("%s", hu->hdev->name);

	if (h5->state == H5_UNINITIALIZED)
		h5_link_control(hu, sync_req, sizeof(sync_req));

	if (h5->state == H5_INITIALIZED) {
		conf_req[2] = h5_cfg_field(h5);
		h5_link_control(hu, conf_req, sizeof(conf_req));
	}

	if (h5->state != H5_ACTIVE) {
		mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
		goto wakeup;
	}

	if (h5->sleep != H5_AWAKE) {
		h5->sleep = H5_SLEEPING;
		goto wakeup;
	}

	BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
		h5->tx_seq = (h5->tx_seq - 1) & 0x07;
		skb_queue_head(&h5->rel, skb);
	}

	spin_unlock_irqrestore(&h5->unack.lock, flags);

wakeup:
	hci_uart_tx_wakeup(hu);
}

static void h5_peer_reset(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	BT_ERR("Peer device has reset");

	h5->state = H5_UNINITIALIZED;

	del_timer(&h5->timer);

	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);
	skb_queue_purge(&h5->unack);

	h5->tx_seq = 0;
	h5->tx_ack = 0;

	/* Send reset request to upper stack */
	hci_reset_dev(hu->hdev);
}

static int h5_open(struct hci_uart *hu)
{
	struct h5 *h5;
	const unsigned char sync[] = { 0x01, 0x7e };

	BT_DBG("hu %p", hu);

	h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
	if (!h5)
		return -ENOMEM;

	hu->priv = h5;

	skb_queue_head_init(&h5->unack);
	skb_queue_head_init(&h5->rel);
	skb_queue_head_init(&h5->unrel);

	h5_reset_rx(h5);

	init_timer(&h5->timer);
	h5->timer.function = h5_timed_event;
	h5->timer.data = (unsigned long)hu;

	h5->tx_win = H5_TX_WIN_MAX;

	set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);

	/* Send initial sync request */
	h5_link_control(hu, sync, sizeof(sync));
	mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);

	return 0;
}

static int h5_close(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;

	del_timer_sync(&h5->timer);

	skb_queue_purge(&h5->unack);
	skb_queue_purge(&h5->rel);
	skb_queue_purge(&h5->unrel);

	kfree(h5);

	return 0;
}

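/*
 * Drop packets from the unack queue that the most recently received ack
 * number (rx_ack) confirms the peer has seen.
 */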
static void h5_pkt_cull(struct h5 *h5)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i, to_remove;
	u8 seq;

	spin_lock_irqsave(&h5->unack.lock, flags);

	to_remove = skb_queue_len(&h5->unack);
	if (to_remove == 0)
		goto unlock;

	seq = h5->tx_seq;

	while (to_remove > 0) {
		if (h5->rx_ack == seq)
			break;

		to_remove--;
		seq = (seq - 1) & 0x07;
	}

	if (seq != h5->rx_ack)
		BT_ERR("Controller acked invalid packet");

	i = 0;
	skb_queue_walk_safe(&h5->unack, skb, tmp) {
		if (i++ >= to_remove)
			break;

		__skb_unlink(skb, &h5->unack);
		kfree_skb(skb);
	}

	if (skb_queue_empty(&h5->unack))
		del_timer(&h5->timer);

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);
}

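/*
 * Handle link establishment (sync/config) and sleep/wakeup link control
 * messages. The sync and config exchanges drive the state machine from
 * H5_UNINITIALIZED through H5_INITIALIZED to H5_ACTIVE.
 */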
static void h5_handle_internal_rx(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char sync_req[] = { 0x01, 0x7e };
	const unsigned char sync_rsp[] = { 0x02, 0x7d };
	unsigned char conf_req[] = { 0x03, 0xfc, 0x01 };
	const unsigned char conf_rsp[] = { 0x04, 0x7b };
	const unsigned char wakeup_req[] = { 0x05, 0xfa };
	const unsigned char woken_req[] = { 0x06, 0xf9 };
	const unsigned char sleep_req[] = { 0x07, 0x78 };
	const unsigned char *hdr = h5->rx_skb->data;
	const unsigned char *data = &h5->rx_skb->data[4];

	BT_DBG("%s", hu->hdev->name);

	if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
		return;

	if (H5_HDR_LEN(hdr) < 2)
		return;

	conf_req[2] = h5_cfg_field(h5);

	if (memcmp(data, sync_req, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5_link_control(hu, sync_rsp, 2);
	} else if (memcmp(data, sync_rsp, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5->state = H5_INITIALIZED;
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_req, 2) == 0) {
		h5_link_control(hu, conf_rsp, 2);
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_rsp, 2) == 0) {
		if (H5_HDR_LEN(hdr) > 2)
			h5->tx_win = (data[2] & 7);
		BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
		h5->state = H5_ACTIVE;
		hci_uart_init_ready(hu);
		return;
	} else if (memcmp(data, sleep_req, 2) == 0) {
		BT_DBG("Peer went to sleep");
		h5->sleep = H5_SLEEPING;
		return;
	} else if (memcmp(data, woken_req, 2) == 0) {
		BT_DBG("Peer woke up");
		h5->sleep = H5_AWAKE;
	} else if (memcmp(data, wakeup_req, 2) == 0) {
		BT_DBG("Peer requested wakeup");
		h5_link_control(hu, woken_req, 2);
		h5->sleep = H5_AWAKE;
	} else {
		BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
		return;
	}

	hci_uart_tx_wakeup(hu);
}

static void h5_complete_rx_pkt(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_RELIABLE(hdr)) {
		h5->tx_ack = (h5->tx_ack + 1) % 8;
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
	}

	h5->rx_ack = H5_HDR_ACK(hdr);

	h5_pkt_cull(h5);

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		bt_cb(h5->rx_skb)->pkt_type = H5_HDR_PKT_TYPE(hdr);

		/* Remove Three-wire header */
		skb_pull(h5->rx_skb, 4);

		hci_recv_frame(hu->hdev, h5->rx_skb);
		h5->rx_skb = NULL;

		break;

	default:
		h5_handle_internal_rx(hu);
		break;
	}

	h5_reset_rx(h5);
}

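/*
 * Receive path: rx_func is advanced byte by byte through the states
 * h5_rx_delimiter -> h5_rx_pkt_start -> h5_rx_3wire_hdr -> h5_rx_payload
 * (-> h5_rx_crc when a data CRC is present), with rx_pending counting the
 * bytes still expected for the current state.
 */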
static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
	h5_complete_rx_pkt(hu);

	return 0;
}

static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_CRC(hdr)) {
		h5->rx_func = h5_rx_crc;
		h5->rx_pending = 2;
	} else {
		h5_complete_rx_pkt(hu);
	}

	return 0;
}

static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
		BT_ERR("Invalid header checksum");
		h5_reset_rx(h5);
		return 0;
	}

	if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
		BT_ERR("Out-of-order packet arrived (%u != %u)",
		       H5_HDR_SEQ(hdr), h5->tx_ack);
		h5_reset_rx(h5);
		return 0;
	}

	if (h5->state != H5_ACTIVE &&
	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
		BT_ERR("Non-link packet received in non-active state");
		h5_reset_rx(h5);
		return 0;
	}

	h5->rx_func = h5_rx_payload;
	h5->rx_pending = H5_HDR_LEN(hdr);

	return 0;
}

static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		return 1;

	h5->rx_func = h5_rx_3wire_hdr;
	h5->rx_pending = 4;

	h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
	if (!h5->rx_skb) {
		BT_ERR("Can't allocate mem for new packet");
		h5_reset_rx(h5);
		return -ENOMEM;
	}

	h5->rx_skb->dev = (void *)hu->hdev;

	return 0;
}

static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		h5->rx_func = h5_rx_pkt_start;

	return 1;
}

static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
{
	const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
	const u8 *byte = &c;

	if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
		set_bit(H5_RX_ESC, &h5->flags);
		return;
	}

	if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
		switch (c) {
		case SLIP_ESC_DELIM:
			byte = &delim;
			break;
		case SLIP_ESC_ESC:
			byte = &esc;
			break;
		default:
			BT_ERR("Invalid esc byte 0x%02hhx", c);
			h5_reset_rx(h5);
			return;
		}
	}

	memcpy(skb_put(h5->rx_skb, 1), byte, 1);
	h5->rx_pending--;
	BT_DBG("unslipped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
}

static void h5_reset_rx(struct h5 *h5)
{
	if (h5->rx_skb) {
		kfree_skb(h5->rx_skb);
		h5->rx_skb = NULL;
	}

	h5->rx_func = h5_rx_delimiter;
	h5->rx_pending = 0;
	clear_bit(H5_RX_ESC, &h5->flags);
}

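/* Feed bytes received from the UART into the SLIP/H5 receive state machine */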
static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *ptr = data;

	BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
	       count);

	while (count > 0) {
		int processed;

		if (h5->rx_pending > 0) {
			if (*ptr == SLIP_DELIMITER) {
				BT_ERR("Too short H5 packet");
				h5_reset_rx(h5);
				continue;
			}

			h5_unslip_one_byte(h5, *ptr);

			ptr++; count--;
			continue;
		}

		processed = h5->rx_func(hu, *ptr);
		if (processed < 0)
			return processed;

		ptr += processed;
		count -= processed;
	}

	return 0;
}

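/*
 * Commands and ACL data go on the reliable (acknowledged) queue; SCO data
 * is sent unreliably.
 */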
static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
	struct h5 *h5 = hu->priv;

	if (skb->len > 0xfff) {
		BT_ERR("Packet too long (%u bytes)", skb->len);
		kfree_skb(skb);
		return 0;
	}

	if (h5->state != H5_ACTIVE) {
		BT_ERR("Ignoring HCI data in non-active state");
		kfree_skb(skb);
		return 0;
	}

	switch (bt_cb(skb)->pkt_type) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
		skb_queue_tail(&h5->rel, skb);
		break;

	case HCI_SCODATA_PKT:
		skb_queue_tail(&h5->unrel, skb);
		break;

	default:
		BT_ERR("Unknown packet type %u", bt_cb(skb)->pkt_type);
		kfree_skb(skb);
		break;
	}

	return 0;
}

static void h5_slip_delim(struct sk_buff *skb)
{
	const char delim = SLIP_DELIMITER;

	memcpy(skb_put(skb, 1), &delim, 1);
}

static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
{
	const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
	const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };

	switch (c) {
	case SLIP_DELIMITER:
		memcpy(skb_put(skb, 2), &esc_delim, 2);
		break;
	case SLIP_ESC:
		memcpy(skb_put(skb, 2), &esc_esc, 2);
		break;
	default:
		memcpy(skb_put(skb, 1), &c, 1);
	}
}

static bool valid_packet_type(u8 type)
{
	switch (type) {
	case HCI_ACLDATA_PKT:
	case HCI_COMMAND_PKT:
	case HCI_SCODATA_PKT:
	case HCI_3WIRE_LINK_PKT:
	case HCI_3WIRE_ACK_PKT:
		return true;
	default:
		return false;
	}
}

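/*
 * Build a complete SLIP-framed packet: leading delimiter, 4-byte header,
 * escaped payload and trailing delimiter. Reliable packets consume the
 * next tx_seq number; every packet carries the current tx_ack.
 */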
static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
				      const u8 *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;
	u8 hdr[4];
	int i;

	if (!valid_packet_type(pkt_type)) {
		BT_ERR("Unknown packet type %u", pkt_type);
		return NULL;
	}

	/*
	 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
	 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
	 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
	 * delimiters at start and end).
	 */
	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	bt_cb(nskb)->pkt_type = pkt_type;

	h5_slip_delim(nskb);

	hdr[0] = h5->tx_ack << 3;
	clear_bit(H5_TX_ACK_REQ, &h5->flags);

	/* Reliable packet? */
	if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
		hdr[0] |= 1 << 7;
		hdr[0] |= h5->tx_seq;
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

	hdr[1] = pkt_type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	for (i = 0; i < 4; i++)
		h5_slip_one_byte(nskb, hdr[i]);

	for (i = 0; i < len; i++)
		h5_slip_one_byte(nskb, data[i]);

	h5_slip_delim(nskb);

	return nskb;
}

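/*
 * Pick the next packet to transmit: wake the peer first if needed, then
 * unreliable packets, then reliable packets (limited by the sliding window
 * of unacknowledged packets), and finally a pure ack if one is pending.
 */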
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if (h5->sleep != H5_AWAKE) {
		const unsigned char wakeup_req[] = { 0x05, 0xfa };

		if (h5->sleep == H5_WAKING_UP)
			return NULL;

		h5->sleep = H5_WAKING_UP;
		BT_DBG("Sending wakeup request");

		mod_timer(&h5->timer, jiffies + HZ / 100);
		return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
	}

	skb = skb_dequeue(&h5->unrel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
				      skb->data, skb->len);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		}

		skb_queue_head(&h5->unrel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	if (h5->unack.qlen >= h5->tx_win)
		goto unlock;

	skb = skb_dequeue(&h5->rel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type,
				      skb->data, skb->len);
		if (nskb) {
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	if (test_bit(H5_TX_ACK_REQ, &h5->flags))
		return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

	return NULL;
}

static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);
	return 0;
}

static const struct hci_uart_proto h5p = {
	.id		= HCI_UART_3WIRE,
	.name		= "Three-wire (H5)",
	.open		= h5_open,
	.close		= h5_close,
	.recv		= h5_recv,
	.enqueue	= h5_enqueue,
	.dequeue	= h5_dequeue,
	.flush		= h5_flush,
};

int __init h5_init(void)
{
	return hci_uart_register_proto(&h5p);
}

int __exit h5_deinit(void)
{
	return hci_uart_unregister_proto(&h5p);
}