1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
53 
54 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
55 				       u8 code, u8 ident, u16 dlen, void *data);
56 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
57 			   void *data);
58 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
59 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
60 
61 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
62 		     struct sk_buff_head *skbs, u8 event);
63 
64 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
65 {
66 	if (link_type == LE_LINK) {
67 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
68 			return BDADDR_LE_PUBLIC;
69 		else
70 			return BDADDR_LE_RANDOM;
71 	}
72 
73 	return BDADDR_BREDR;
74 }
75 
76 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
77 {
78 	return bdaddr_type(hcon->type, hcon->src_type);
79 }
80 
81 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
82 {
83 	return bdaddr_type(hcon->type, hcon->dst_type);
84 }
85 
86 /* ---- L2CAP channels ---- */
87 
88 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
89 						   u16 cid)
90 {
91 	struct l2cap_chan *c;
92 
93 	list_for_each_entry(c, &conn->chan_l, list) {
94 		if (c->dcid == cid)
95 			return c;
96 	}
97 	return NULL;
98 }
99 
100 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
101 						   u16 cid)
102 {
103 	struct l2cap_chan *c;
104 
105 	list_for_each_entry(c, &conn->chan_l, list) {
106 		if (c->scid == cid)
107 			return c;
108 	}
109 	return NULL;
110 }
111 
112 /* Find channel with given SCID.
113  * Returns locked channel. */
114 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
115 						 u16 cid)
116 {
117 	struct l2cap_chan *c;
118 
119 	mutex_lock(&conn->chan_lock);
120 	c = __l2cap_get_chan_by_scid(conn, cid);
121 	if (c)
122 		l2cap_chan_lock(c);
123 	mutex_unlock(&conn->chan_lock);
124 
125 	return c;
126 }
127 
128 /* Find channel with given DCID.
129  * Returns locked channel.
130  */
131 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
132 						 u16 cid)
133 {
134 	struct l2cap_chan *c;
135 
136 	mutex_lock(&conn->chan_lock);
137 	c = __l2cap_get_chan_by_dcid(conn, cid);
138 	if (c)
139 		l2cap_chan_lock(c);
140 	mutex_unlock(&conn->chan_lock);
141 
142 	return c;
143 }
144 
145 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
146 						    u8 ident)
147 {
148 	struct l2cap_chan *c;
149 
150 	list_for_each_entry(c, &conn->chan_l, list) {
151 		if (c->ident == ident)
152 			return c;
153 	}
154 	return NULL;
155 }
156 
157 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
158 						  u8 ident)
159 {
160 	struct l2cap_chan *c;
161 
162 	mutex_lock(&conn->chan_lock);
163 	c = __l2cap_get_chan_by_ident(conn, ident);
164 	if (c)
165 		l2cap_chan_lock(c);
166 	mutex_unlock(&conn->chan_lock);
167 
168 	return c;
169 }
170 
171 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
172 						      u8 src_type)
173 {
174 	struct l2cap_chan *c;
175 
176 	list_for_each_entry(c, &chan_list, global_l) {
177 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
178 			continue;
179 
180 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
181 			continue;
182 
183 		if (c->sport == psm && !bacmp(&c->src, src))
184 			return c;
185 	}
186 	return NULL;
187 }
188 
189 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
190 {
191 	int err;
192 
193 	write_lock(&chan_list_lock);
194 
195 	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
196 		err = -EADDRINUSE;
197 		goto done;
198 	}
199 
200 	if (psm) {
201 		chan->psm = psm;
202 		chan->sport = psm;
203 		err = 0;
204 	} else {
205 		u16 p, start, end, incr;
206 
207 		if (chan->src_type == BDADDR_BREDR) {
208 			start = L2CAP_PSM_DYN_START;
209 			end = L2CAP_PSM_AUTO_END;
210 			incr = 2;
211 		} else {
212 			start = L2CAP_PSM_LE_DYN_START;
213 			end = L2CAP_PSM_LE_DYN_END;
214 			incr = 1;
215 		}
216 
217 		err = -EINVAL;
218 		for (p = start; p <= end; p += incr)
219 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
220 							 chan->src_type)) {
221 				chan->psm   = cpu_to_le16(p);
222 				chan->sport = cpu_to_le16(p);
223 				err = 0;
224 				break;
225 			}
226 	}
227 
228 done:
229 	write_unlock(&chan_list_lock);
230 	return err;
231 }
232 EXPORT_SYMBOL_GPL(l2cap_add_psm);
233 
234 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
235 {
236 	write_lock(&chan_list_lock);
237 
238 	/* Override the defaults (which are for conn-oriented) */
239 	chan->omtu = L2CAP_DEFAULT_MTU;
240 	chan->chan_type = L2CAP_CHAN_FIXED;
241 
242 	chan->scid = scid;
243 
244 	write_unlock(&chan_list_lock);
245 
246 	return 0;
247 }
248 
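/* Allocate the first unused dynamic source CID on this connection, scanning
 * from L2CAP_CID_DYN_START up to the dynamic limit for the link type.
 * Returns 0 if the whole dynamic range is already in use.
 */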
249 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
250 {
251 	u16 cid, dyn_end;
252 
253 	if (conn->hcon->type == LE_LINK)
254 		dyn_end = L2CAP_CID_LE_DYN_END;
255 	else
256 		dyn_end = L2CAP_CID_DYN_END;
257 
258 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
259 		if (!__l2cap_get_chan_by_scid(conn, cid))
260 			return cid;
261 	}
262 
263 	return 0;
264 }
265 
266 static void l2cap_state_change(struct l2cap_chan *chan, int state)
267 {
268 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
269 	       state_to_string(state));
270 
271 	chan->state = state;
272 	chan->ops->state_change(chan, state, 0);
273 }
274 
275 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
276 						int state, int err)
277 {
278 	chan->state = state;
279 	chan->ops->state_change(chan, chan->state, err);
280 }
281 
282 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
283 {
284 	chan->ops->state_change(chan, chan->state, err);
285 }
286 
287 static void __set_retrans_timer(struct l2cap_chan *chan)
288 {
289 	if (!delayed_work_pending(&chan->monitor_timer) &&
290 	    chan->retrans_timeout) {
291 		l2cap_set_timer(chan, &chan->retrans_timer,
292 				msecs_to_jiffies(chan->retrans_timeout));
293 	}
294 }
295 
296 static void __set_monitor_timer(struct l2cap_chan *chan)
297 {
298 	__clear_retrans_timer(chan);
299 	if (chan->monitor_timeout) {
300 		l2cap_set_timer(chan, &chan->monitor_timer,
301 				msecs_to_jiffies(chan->monitor_timeout));
302 	}
303 }
304 
305 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
306 					       u16 seq)
307 {
308 	struct sk_buff *skb;
309 
310 	skb_queue_walk(head, skb) {
311 		if (bt_cb(skb)->l2cap.txseq == seq)
312 			return skb;
313 	}
314 
315 	return NULL;
316 }
317 
318 /* ---- L2CAP sequence number lists ---- */
319 
320 /* For ERTM, ordered lists of sequence numbers must be tracked for
321  * SREJ requests that are received and for frames that are to be
322  * retransmitted. These seq_list functions implement a singly-linked
323  * list in an array, where membership in the list can also be checked
324  * in constant time. Items can also be added to the tail of the list
325  * and removed from the head in constant time, without further memory
326  * allocs or frees.
327  */
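/* Worked example (illustrative only): with a negotiated tx window of 6,
 * alloc_size is roundup_pow_of_two(6) = 8 and mask is 7.  Appending the
 * sequence numbers 3, 4 and 6 leaves
 *
 *	head = 3, tail = 6,
 *	list[3] = 4, list[4] = 6, list[6] = L2CAP_SEQ_LIST_TAIL,
 *
 * so l2cap_seq_list_contains() for 4 is a single array lookup, and three
 * pops return 3, 4 and 6 in order, after which head and tail are back to
 * L2CAP_SEQ_LIST_CLEAR.
 */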
328 
329 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
330 {
331 	size_t alloc_size, i;
332 
333 	/* Allocated size is a power of 2 to map sequence numbers
334 	 * (which may be up to 14 bits) in to a smaller array that is
335 	 * sized for the negotiated ERTM transmit windows.
336 	 */
337 	alloc_size = roundup_pow_of_two(size);
338 
339 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
340 	if (!seq_list->list)
341 		return -ENOMEM;
342 
343 	seq_list->mask = alloc_size - 1;
344 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
345 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
346 	for (i = 0; i < alloc_size; i++)
347 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
348 
349 	return 0;
350 }
351 
352 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
353 {
354 	kfree(seq_list->list);
355 }
356 
357 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
358 					   u16 seq)
359 {
360 	/* Constant-time check for list membership */
361 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
362 }
363 
364 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
365 {
366 	u16 seq = seq_list->head;
367 	u16 mask = seq_list->mask;
368 
369 	seq_list->head = seq_list->list[seq & mask];
370 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
371 
372 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
373 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
374 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
375 	}
376 
377 	return seq;
378 }
379 
380 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
381 {
382 	u16 i;
383 
384 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
385 		return;
386 
387 	for (i = 0; i <= seq_list->mask; i++)
388 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
389 
390 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
391 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
392 }
393 
394 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
395 {
396 	u16 mask = seq_list->mask;
397 
398 	/* All appends happen in constant time */
399 
400 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
401 		return;
402 
403 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
404 		seq_list->head = seq;
405 	else
406 		seq_list->list[seq_list->tail & mask] = seq;
407 
408 	seq_list->tail = seq;
409 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
410 }
411 
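/* Channel timer expiry: close the channel with ECONNREFUSED or ETIMEDOUT
 * depending on its state, notify the owner through ops->close() and drop
 * the channel reference.
 */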
412 static void l2cap_chan_timeout(struct work_struct *work)
413 {
414 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
415 					       chan_timer.work);
416 	struct l2cap_conn *conn = chan->conn;
417 	int reason;
418 
419 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
420 
421 	mutex_lock(&conn->chan_lock);
422 	l2cap_chan_lock(chan);
423 
424 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
425 		reason = ECONNREFUSED;
426 	else if (chan->state == BT_CONNECT &&
427 		 chan->sec_level != BT_SECURITY_SDP)
428 		reason = ECONNREFUSED;
429 	else
430 		reason = ETIMEDOUT;
431 
432 	l2cap_chan_close(chan, reason);
433 
434 	l2cap_chan_unlock(chan);
435 
436 	chan->ops->close(chan);
437 	mutex_unlock(&conn->chan_lock);
438 
439 	l2cap_chan_put(chan);
440 }
441 
442 struct l2cap_chan *l2cap_chan_create(void)
443 {
444 	struct l2cap_chan *chan;
445 
446 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
447 	if (!chan)
448 		return NULL;
449 
450 	mutex_init(&chan->lock);
451 
452 	/* Set default lock nesting level */
453 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
454 
455 	write_lock(&chan_list_lock);
456 	list_add(&chan->global_l, &chan_list);
457 	write_unlock(&chan_list_lock);
458 
459 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
460 
461 	chan->state = BT_OPEN;
462 
463 	kref_init(&chan->kref);
464 
465 	/* This flag is cleared in l2cap_chan_ready() */
466 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
467 
468 	BT_DBG("chan %p", chan);
469 
470 	return chan;
471 }
472 EXPORT_SYMBOL_GPL(l2cap_chan_create);
473 
474 static void l2cap_chan_destroy(struct kref *kref)
475 {
476 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
477 
478 	BT_DBG("chan %p", chan);
479 
480 	write_lock(&chan_list_lock);
481 	list_del(&chan->global_l);
482 	write_unlock(&chan_list_lock);
483 
484 	kfree(chan);
485 }
486 
487 void l2cap_chan_hold(struct l2cap_chan *c)
488 {
489 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
490 
491 	kref_get(&c->kref);
492 }
493 
494 void l2cap_chan_put(struct l2cap_chan *c)
495 {
496 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
497 
498 	kref_put(&c->kref, l2cap_chan_destroy);
499 }
500 EXPORT_SYMBOL_GPL(l2cap_chan_put);
501 
502 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
503 {
504 	chan->fcs  = L2CAP_FCS_CRC16;
505 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
506 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
507 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
508 	chan->remote_max_tx = chan->max_tx;
509 	chan->remote_tx_win = chan->tx_win;
510 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
511 	chan->sec_level = BT_SECURITY_LOW;
512 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
513 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
514 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
515 	chan->conf_state = 0;
516 
517 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
518 }
519 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
520 
521 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
522 {
523 	chan->sdu = NULL;
524 	chan->sdu_last_frag = NULL;
525 	chan->sdu_len = 0;
526 	chan->tx_credits = tx_credits;
527 	/* Derive MPS from connection MTU to stop HCI fragmentation */
528 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
529 	/* Give enough credits for a full packet */
530 	chan->rx_credits = (chan->imtu / chan->mps) + 1;
531 
532 	skb_queue_head_init(&chan->tx_q);
533 }
534 
535 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
536 {
537 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
538 	       __le16_to_cpu(chan->psm), chan->dcid);
539 
540 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
541 
542 	chan->conn = conn;
543 
544 	switch (chan->chan_type) {
545 	case L2CAP_CHAN_CONN_ORIENTED:
546 		/* Alloc CID for connection-oriented socket */
547 		chan->scid = l2cap_alloc_cid(conn);
548 		if (conn->hcon->type == ACL_LINK)
549 			chan->omtu = L2CAP_DEFAULT_MTU;
550 		break;
551 
552 	case L2CAP_CHAN_CONN_LESS:
553 		/* Connectionless socket */
554 		chan->scid = L2CAP_CID_CONN_LESS;
555 		chan->dcid = L2CAP_CID_CONN_LESS;
556 		chan->omtu = L2CAP_DEFAULT_MTU;
557 		break;
558 
559 	case L2CAP_CHAN_FIXED:
560 		/* Caller will set CID and CID specific MTU values */
561 		break;
562 
563 	default:
564 		/* Raw socket can send/recv signalling messages only */
565 		chan->scid = L2CAP_CID_SIGNALING;
566 		chan->dcid = L2CAP_CID_SIGNALING;
567 		chan->omtu = L2CAP_DEFAULT_MTU;
568 	}
569 
570 	chan->local_id		= L2CAP_BESTEFFORT_ID;
571 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
572 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
573 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
574 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
575 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
576 
577 	l2cap_chan_hold(chan);
578 
579 	/* Only keep a reference for fixed channels if they requested it */
580 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
581 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
582 		hci_conn_hold(conn->hcon);
583 
584 	list_add(&chan->list, &conn->chan_l);
585 }
586 
587 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
588 {
589 	mutex_lock(&conn->chan_lock);
590 	__l2cap_chan_add(conn, chan);
591 	mutex_unlock(&conn->chan_lock);
592 }
593 
594 void l2cap_chan_del(struct l2cap_chan *chan, int err)
595 {
596 	struct l2cap_conn *conn = chan->conn;
597 
598 	__clear_chan_timer(chan);
599 
600 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
601 	       state_to_string(chan->state));
602 
603 	chan->ops->teardown(chan, err);
604 
605 	if (conn) {
606 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
607 		/* Delete from channel list */
608 		list_del(&chan->list);
609 
610 		l2cap_chan_put(chan);
611 
612 		chan->conn = NULL;
613 
614 		/* Reference was only held for non-fixed channels or
615 		 * fixed channels that explicitly requested it using the
616 		 * FLAG_HOLD_HCI_CONN flag.
617 		 */
618 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
619 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
620 			hci_conn_drop(conn->hcon);
621 
622 		if (mgr && mgr->bredr_chan == chan)
623 			mgr->bredr_chan = NULL;
624 	}
625 
626 	if (chan->hs_hchan) {
627 		struct hci_chan *hs_hchan = chan->hs_hchan;
628 
629 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
630 		amp_disconnect_logical_link(hs_hchan);
631 	}
632 
633 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
634 		return;
635 
636 	switch (chan->mode) {
637 	case L2CAP_MODE_BASIC:
638 		break;
639 
640 	case L2CAP_MODE_LE_FLOWCTL:
641 		skb_queue_purge(&chan->tx_q);
642 		break;
643 
644 	case L2CAP_MODE_ERTM:
645 		__clear_retrans_timer(chan);
646 		__clear_monitor_timer(chan);
647 		__clear_ack_timer(chan);
648 
649 		skb_queue_purge(&chan->srej_q);
650 
651 		l2cap_seq_list_free(&chan->srej_list);
652 		l2cap_seq_list_free(&chan->retrans_list);
653 
654 		/* fall through */
655 
656 	case L2CAP_MODE_STREAMING:
657 		skb_queue_purge(&chan->tx_q);
658 		break;
659 	}
660 
661 	return;
662 }
663 EXPORT_SYMBOL_GPL(l2cap_chan_del);
664 
665 static void l2cap_conn_update_id_addr(struct work_struct *work)
666 {
667 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
668 					       id_addr_update_work);
669 	struct hci_conn *hcon = conn->hcon;
670 	struct l2cap_chan *chan;
671 
672 	mutex_lock(&conn->chan_lock);
673 
674 	list_for_each_entry(chan, &conn->chan_l, list) {
675 		l2cap_chan_lock(chan);
676 		bacpy(&chan->dst, &hcon->dst);
677 		chan->dst_type = bdaddr_dst_type(hcon);
678 		l2cap_chan_unlock(chan);
679 	}
680 
681 	mutex_unlock(&conn->chan_lock);
682 }
683 
684 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
685 {
686 	struct l2cap_conn *conn = chan->conn;
687 	struct l2cap_le_conn_rsp rsp;
688 	u16 result;
689 
690 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
691 		result = L2CAP_CR_LE_AUTHORIZATION;
692 	else
693 		result = L2CAP_CR_LE_BAD_PSM;
694 
695 	l2cap_state_change(chan, BT_DISCONN);
696 
697 	rsp.dcid    = cpu_to_le16(chan->scid);
698 	rsp.mtu     = cpu_to_le16(chan->imtu);
699 	rsp.mps     = cpu_to_le16(chan->mps);
700 	rsp.credits = cpu_to_le16(chan->rx_credits);
701 	rsp.result  = cpu_to_le16(result);
702 
703 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
704 		       &rsp);
705 }
706 
707 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
708 {
709 	struct l2cap_conn *conn = chan->conn;
710 	struct l2cap_conn_rsp rsp;
711 	u16 result;
712 
713 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
714 		result = L2CAP_CR_SEC_BLOCK;
715 	else
716 		result = L2CAP_CR_BAD_PSM;
717 
718 	l2cap_state_change(chan, BT_DISCONN);
719 
720 	rsp.scid   = cpu_to_le16(chan->dcid);
721 	rsp.dcid   = cpu_to_le16(chan->scid);
722 	rsp.result = cpu_to_le16(result);
723 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
724 
725 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
726 }
727 
728 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
729 {
730 	struct l2cap_conn *conn = chan->conn;
731 
732 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
733 
734 	switch (chan->state) {
735 	case BT_LISTEN:
736 		chan->ops->teardown(chan, 0);
737 		break;
738 
739 	case BT_CONNECTED:
740 	case BT_CONFIG:
741 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
742 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
743 			l2cap_send_disconn_req(chan, reason);
744 		} else
745 			l2cap_chan_del(chan, reason);
746 		break;
747 
748 	case BT_CONNECT2:
749 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
750 			if (conn->hcon->type == ACL_LINK)
751 				l2cap_chan_connect_reject(chan);
752 			else if (conn->hcon->type == LE_LINK)
753 				l2cap_chan_le_connect_reject(chan);
754 		}
755 
756 		l2cap_chan_del(chan, reason);
757 		break;
758 
759 	case BT_CONNECT:
760 	case BT_DISCONN:
761 		l2cap_chan_del(chan, reason);
762 		break;
763 
764 	default:
765 		chan->ops->teardown(chan, 0);
766 		break;
767 	}
768 }
769 EXPORT_SYMBOL(l2cap_chan_close);
770 
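/* Map channel type, PSM and requested security level to the HCI
 * authentication requirement for the link: raw channels ask for dedicated
 * bonding, SDP and 3DSP traffic never requests bonding, and everything else
 * requests general bonding, with MITM protection at high/FIPS security.
 */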
771 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
772 {
773 	switch (chan->chan_type) {
774 	case L2CAP_CHAN_RAW:
775 		switch (chan->sec_level) {
776 		case BT_SECURITY_HIGH:
777 		case BT_SECURITY_FIPS:
778 			return HCI_AT_DEDICATED_BONDING_MITM;
779 		case BT_SECURITY_MEDIUM:
780 			return HCI_AT_DEDICATED_BONDING;
781 		default:
782 			return HCI_AT_NO_BONDING;
783 		}
784 		break;
785 	case L2CAP_CHAN_CONN_LESS:
786 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
787 			if (chan->sec_level == BT_SECURITY_LOW)
788 				chan->sec_level = BT_SECURITY_SDP;
789 		}
790 		if (chan->sec_level == BT_SECURITY_HIGH ||
791 		    chan->sec_level == BT_SECURITY_FIPS)
792 			return HCI_AT_NO_BONDING_MITM;
793 		else
794 			return HCI_AT_NO_BONDING;
795 		break;
796 	case L2CAP_CHAN_CONN_ORIENTED:
797 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
798 			if (chan->sec_level == BT_SECURITY_LOW)
799 				chan->sec_level = BT_SECURITY_SDP;
800 
801 			if (chan->sec_level == BT_SECURITY_HIGH ||
802 			    chan->sec_level == BT_SECURITY_FIPS)
803 				return HCI_AT_NO_BONDING_MITM;
804 			else
805 				return HCI_AT_NO_BONDING;
806 		}
807 		/* fall through */
808 	default:
809 		switch (chan->sec_level) {
810 		case BT_SECURITY_HIGH:
811 		case BT_SECURITY_FIPS:
812 			return HCI_AT_GENERAL_BONDING_MITM;
813 		case BT_SECURITY_MEDIUM:
814 			return HCI_AT_GENERAL_BONDING;
815 		default:
816 			return HCI_AT_NO_BONDING;
817 		}
818 		break;
819 	}
820 }
821 
822 /* Service level security */
823 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
824 {
825 	struct l2cap_conn *conn = chan->conn;
826 	__u8 auth_type;
827 
828 	if (conn->hcon->type == LE_LINK)
829 		return smp_conn_security(conn->hcon, chan->sec_level);
830 
831 	auth_type = l2cap_get_auth_type(chan);
832 
833 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
834 				 initiator);
835 }
836 
837 static u8 l2cap_get_ident(struct l2cap_conn *conn)
838 {
839 	u8 id;
840 
841 	/* Get next available identifier.
842 	 *    1 - 128 are used by kernel.
843 	 *  129 - 199 are reserved.
844 	 *  200 - 254 are used by utilities like l2ping, etc.
845 	 */
846 
847 	mutex_lock(&conn->ident_lock);
848 
849 	if (++conn->tx_ident > 128)
850 		conn->tx_ident = 1;
851 
852 	id = conn->tx_ident;
853 
854 	mutex_unlock(&conn->ident_lock);
855 
856 	return id;
857 }
858 
859 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
860 			   void *data)
861 {
862 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
863 	u8 flags;
864 
865 	BT_DBG("code 0x%2.2x", code);
866 
867 	if (!skb)
868 		return;
869 
870 	/* Use NO_FLUSH if supported or we have an LE link (which does
871 	 * not support auto-flushing packets) */
872 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
873 	    conn->hcon->type == LE_LINK)
874 		flags = ACL_START_NO_FLUSH;
875 	else
876 		flags = ACL_START;
877 
878 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
879 	skb->priority = HCI_PRIO_MAX;
880 
881 	hci_send_acl(conn->hchan, skb, flags);
882 }
883 
884 static bool __chan_is_moving(struct l2cap_chan *chan)
885 {
886 	return chan->move_state != L2CAP_MOVE_STABLE &&
887 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
888 }
889 
890 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
891 {
892 	struct hci_conn *hcon = chan->conn->hcon;
893 	u16 flags;
894 
895 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
896 	       skb->priority);
897 
898 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
899 		if (chan->hs_hchan)
900 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
901 		else
902 			kfree_skb(skb);
903 
904 		return;
905 	}
906 
907 	/* Use NO_FLUSH for LE links (where this is the only option) or
908 	 * if the BR/EDR link supports it and flushing has not been
909 	 * explicitly requested (through FLAG_FLUSHABLE).
910 	 */
911 	if (hcon->type == LE_LINK ||
912 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
913 	     lmp_no_flush_capable(hcon->hdev)))
914 		flags = ACL_START_NO_FLUSH;
915 	else
916 		flags = ACL_START;
917 
918 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
919 	hci_send_acl(chan->conn->hchan, skb, flags);
920 }
921 
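/* The helpers below translate between struct l2cap_ctrl and the on-air ERTM
 * control field: the 16-bit enhanced format by default, or the 32-bit
 * extended format when FLAG_EXT_CTRL is set on the channel.
 */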
922 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
923 {
924 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
925 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
926 
927 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
928 		/* S-Frame */
929 		control->sframe = 1;
930 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
931 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
932 
933 		control->sar = 0;
934 		control->txseq = 0;
935 	} else {
936 		/* I-Frame */
937 		control->sframe = 0;
938 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
939 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
940 
941 		control->poll = 0;
942 		control->super = 0;
943 	}
944 }
945 
946 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
947 {
948 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
949 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
950 
951 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
952 		/* S-Frame */
953 		control->sframe = 1;
954 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
955 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
956 
957 		control->sar = 0;
958 		control->txseq = 0;
959 	} else {
960 		/* I-Frame */
961 		control->sframe = 0;
962 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
963 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
964 
965 		control->poll = 0;
966 		control->super = 0;
967 	}
968 }
969 
970 static inline void __unpack_control(struct l2cap_chan *chan,
971 				    struct sk_buff *skb)
972 {
973 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
974 		__unpack_extended_control(get_unaligned_le32(skb->data),
975 					  &bt_cb(skb)->l2cap);
976 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
977 	} else {
978 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
979 					  &bt_cb(skb)->l2cap);
980 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
981 	}
982 }
983 
984 static u32 __pack_extended_control(struct l2cap_ctrl *control)
985 {
986 	u32 packed;
987 
988 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
989 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
990 
991 	if (control->sframe) {
992 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
993 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
994 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
995 	} else {
996 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
997 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
998 	}
999 
1000 	return packed;
1001 }
1002 
1003 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1004 {
1005 	u16 packed;
1006 
1007 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1008 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1009 
1010 	if (control->sframe) {
1011 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1012 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1013 		packed |= L2CAP_CTRL_FRAME_TYPE;
1014 	} else {
1015 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1016 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1017 	}
1018 
1019 	return packed;
1020 }
1021 
1022 static inline void __pack_control(struct l2cap_chan *chan,
1023 				  struct l2cap_ctrl *control,
1024 				  struct sk_buff *skb)
1025 {
1026 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1027 		put_unaligned_le32(__pack_extended_control(control),
1028 				   skb->data + L2CAP_HDR_SIZE);
1029 	} else {
1030 		put_unaligned_le16(__pack_enhanced_control(control),
1031 				   skb->data + L2CAP_HDR_SIZE);
1032 	}
1033 }
1034 
1035 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1036 {
1037 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1038 		return L2CAP_EXT_HDR_SIZE;
1039 	else
1040 		return L2CAP_ENH_HDR_SIZE;
1041 }
1042 
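/* Build a stand-alone S-frame PDU: basic L2CAP header, the packed (enhanced
 * or extended) control field and, when CRC16 FCS is in use on the channel,
 * a trailing FCS computed over the header and control field.
 */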
1043 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1044 					       u32 control)
1045 {
1046 	struct sk_buff *skb;
1047 	struct l2cap_hdr *lh;
1048 	int hlen = __ertm_hdr_size(chan);
1049 
1050 	if (chan->fcs == L2CAP_FCS_CRC16)
1051 		hlen += L2CAP_FCS_SIZE;
1052 
1053 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1054 
1055 	if (!skb)
1056 		return ERR_PTR(-ENOMEM);
1057 
1058 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1059 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1060 	lh->cid = cpu_to_le16(chan->dcid);
1061 
1062 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1063 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1064 	else
1065 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1066 
1067 	if (chan->fcs == L2CAP_FCS_CRC16) {
1068 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1069 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1070 	}
1071 
1072 	skb->priority = HCI_PRIO_MAX;
1073 	return skb;
1074 }
1075 
1076 static void l2cap_send_sframe(struct l2cap_chan *chan,
1077 			      struct l2cap_ctrl *control)
1078 {
1079 	struct sk_buff *skb;
1080 	u32 control_field;
1081 
1082 	BT_DBG("chan %p, control %p", chan, control);
1083 
1084 	if (!control->sframe)
1085 		return;
1086 
1087 	if (__chan_is_moving(chan))
1088 		return;
1089 
1090 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1091 	    !control->poll)
1092 		control->final = 1;
1093 
1094 	if (control->super == L2CAP_SUPER_RR)
1095 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1096 	else if (control->super == L2CAP_SUPER_RNR)
1097 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1098 
1099 	if (control->super != L2CAP_SUPER_SREJ) {
1100 		chan->last_acked_seq = control->reqseq;
1101 		__clear_ack_timer(chan);
1102 	}
1103 
1104 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1105 	       control->final, control->poll, control->super);
1106 
1107 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1108 		control_field = __pack_extended_control(control);
1109 	else
1110 		control_field = __pack_enhanced_control(control);
1111 
1112 	skb = l2cap_create_sframe_pdu(chan, control_field);
1113 	if (!IS_ERR(skb))
1114 		l2cap_do_send(chan, skb);
1115 }
1116 
1117 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1118 {
1119 	struct l2cap_ctrl control;
1120 
1121 	BT_DBG("chan %p, poll %d", chan, poll);
1122 
1123 	memset(&control, 0, sizeof(control));
1124 	control.sframe = 1;
1125 	control.poll = poll;
1126 
1127 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1128 		control.super = L2CAP_SUPER_RNR;
1129 	else
1130 		control.super = L2CAP_SUPER_RR;
1131 
1132 	control.reqseq = chan->buffer_seq;
1133 	l2cap_send_sframe(chan, &control);
1134 }
1135 
1136 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1137 {
1138 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1139 		return true;
1140 
1141 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1142 }
1143 
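/* A channel may be moved to an AMP controller only if both sides advertise
 * the A2MP fixed channel, at least one non-BR/EDR AMP controller is up and
 * the channel policy prefers AMP.
 */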
1144 static bool __amp_capable(struct l2cap_chan *chan)
1145 {
1146 	struct l2cap_conn *conn = chan->conn;
1147 	struct hci_dev *hdev;
1148 	bool amp_available = false;
1149 
1150 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1151 		return false;
1152 
1153 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1154 		return false;
1155 
1156 	read_lock(&hci_dev_list_lock);
1157 	list_for_each_entry(hdev, &hci_dev_list, list) {
1158 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1159 		    test_bit(HCI_UP, &hdev->flags)) {
1160 			amp_available = true;
1161 			break;
1162 		}
1163 	}
1164 	read_unlock(&hci_dev_list_lock);
1165 
1166 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1167 		return amp_available;
1168 
1169 	return false;
1170 }
1171 
1172 static bool l2cap_check_efs(struct l2cap_chan *chan)
1173 {
1174 	/* Check EFS parameters */
1175 	return true;
1176 }
1177 
1178 void l2cap_send_conn_req(struct l2cap_chan *chan)
1179 {
1180 	struct l2cap_conn *conn = chan->conn;
1181 	struct l2cap_conn_req req;
1182 
1183 	req.scid = cpu_to_le16(chan->scid);
1184 	req.psm  = chan->psm;
1185 
1186 	chan->ident = l2cap_get_ident(conn);
1187 
1188 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1189 
1190 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1191 }
1192 
1193 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1194 {
1195 	struct l2cap_create_chan_req req;
1196 	req.scid = cpu_to_le16(chan->scid);
1197 	req.psm  = chan->psm;
1198 	req.amp_id = amp_id;
1199 
1200 	chan->ident = l2cap_get_ident(chan->conn);
1201 
1202 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1203 		       sizeof(req), &req);
1204 }
1205 
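/* Prepare an ERTM channel for a move between controllers: stop all timers,
 * reset retry and SREJ state, and park the tx/rx state machines in the
 * XMIT/MOVE states until the move completes.
 */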
1206 static void l2cap_move_setup(struct l2cap_chan *chan)
1207 {
1208 	struct sk_buff *skb;
1209 
1210 	BT_DBG("chan %p", chan);
1211 
1212 	if (chan->mode != L2CAP_MODE_ERTM)
1213 		return;
1214 
1215 	__clear_retrans_timer(chan);
1216 	__clear_monitor_timer(chan);
1217 	__clear_ack_timer(chan);
1218 
1219 	chan->retry_count = 0;
1220 	skb_queue_walk(&chan->tx_q, skb) {
1221 		if (bt_cb(skb)->l2cap.retries)
1222 			bt_cb(skb)->l2cap.retries = 1;
1223 		else
1224 			break;
1225 	}
1226 
1227 	chan->expected_tx_seq = chan->buffer_seq;
1228 
1229 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1230 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1231 	l2cap_seq_list_clear(&chan->retrans_list);
1232 	l2cap_seq_list_clear(&chan->srej_list);
1233 	skb_queue_purge(&chan->srej_q);
1234 
1235 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1236 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1237 
1238 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1239 }
1240 
1241 static void l2cap_move_done(struct l2cap_chan *chan)
1242 {
1243 	u8 move_role = chan->move_role;
1244 	BT_DBG("chan %p", chan);
1245 
1246 	chan->move_state = L2CAP_MOVE_STABLE;
1247 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1248 
1249 	if (chan->mode != L2CAP_MODE_ERTM)
1250 		return;
1251 
1252 	switch (move_role) {
1253 	case L2CAP_MOVE_ROLE_INITIATOR:
1254 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1255 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1256 		break;
1257 	case L2CAP_MOVE_ROLE_RESPONDER:
1258 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1259 		break;
1260 	}
1261 }
1262 
1263 static void l2cap_chan_ready(struct l2cap_chan *chan)
1264 {
1265 	/* The channel may have already been flagged as connected in
1266 	 * case of receiving data before the L2CAP info req/rsp
1267 	 * procedure is complete.
1268 	 */
1269 	if (chan->state == BT_CONNECTED)
1270 		return;
1271 
1272 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1273 	chan->conf_state = 0;
1274 	__clear_chan_timer(chan);
1275 
1276 	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1277 		chan->ops->suspend(chan);
1278 
1279 	chan->state = BT_CONNECTED;
1280 
1281 	chan->ops->ready(chan);
1282 }
1283 
1284 static void l2cap_le_connect(struct l2cap_chan *chan)
1285 {
1286 	struct l2cap_conn *conn = chan->conn;
1287 	struct l2cap_le_conn_req req;
1288 
1289 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1290 		return;
1291 
1292 	l2cap_le_flowctl_init(chan, 0);
1293 
1294 	req.psm     = chan->psm;
1295 	req.scid    = cpu_to_le16(chan->scid);
1296 	req.mtu     = cpu_to_le16(chan->imtu);
1297 	req.mps     = cpu_to_le16(chan->mps);
1298 	req.credits = cpu_to_le16(chan->rx_credits);
1299 
1300 	chan->ident = l2cap_get_ident(conn);
1301 
1302 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1303 		       sizeof(req), &req);
1304 }
1305 
1306 static void l2cap_le_start(struct l2cap_chan *chan)
1307 {
1308 	struct l2cap_conn *conn = chan->conn;
1309 
1310 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1311 		return;
1312 
1313 	if (!chan->psm) {
1314 		l2cap_chan_ready(chan);
1315 		return;
1316 	}
1317 
1318 	if (chan->state == BT_CONNECT)
1319 		l2cap_le_connect(chan);
1320 }
1321 
1322 static void l2cap_start_connection(struct l2cap_chan *chan)
1323 {
1324 	if (__amp_capable(chan)) {
1325 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1326 		a2mp_discover_amp(chan);
1327 	} else if (chan->conn->hcon->type == LE_LINK) {
1328 		l2cap_le_start(chan);
1329 	} else {
1330 		l2cap_send_conn_req(chan);
1331 	}
1332 }
1333 
1334 static void l2cap_request_info(struct l2cap_conn *conn)
1335 {
1336 	struct l2cap_info_req req;
1337 
1338 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1339 		return;
1340 
1341 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1342 
1343 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1344 	conn->info_ident = l2cap_get_ident(conn);
1345 
1346 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1347 
1348 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1349 		       sizeof(req), &req);
1350 }
1351 
1352 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1353 {
1354 	/* The minimum encryption key size needs to be enforced by the
1355 	 * host stack before establishing any L2CAP connections. The
1356 	 * specification in theory allows a minimum of 1, but to align
1357 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1358 	 *
1359 	 * This check might also be called for unencrypted connections
1360 	 * that have no key size requirements. Ensure that the link is
1361 	 * actually encrypted before enforcing a key size.
1362 	 */
1363 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1364 		hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
1365 }
1366 
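/* Kick off channel establishment: LE channels go straight to the LE path;
 * BR/EDR channels wait for the information request/response exchange, a
 * passing security check and a sufficiently large encryption key before the
 * connection request is sent.
 */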
1367 static void l2cap_do_start(struct l2cap_chan *chan)
1368 {
1369 	struct l2cap_conn *conn = chan->conn;
1370 
1371 	if (conn->hcon->type == LE_LINK) {
1372 		l2cap_le_start(chan);
1373 		return;
1374 	}
1375 
1376 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1377 		l2cap_request_info(conn);
1378 		return;
1379 	}
1380 
1381 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1382 		return;
1383 
1384 	if (!l2cap_chan_check_security(chan, true) ||
1385 	    !__l2cap_no_conn_pending(chan))
1386 		return;
1387 
1388 	if (l2cap_check_enc_key_size(conn->hcon))
1389 		l2cap_start_connection(chan);
1390 	else
1391 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1392 }
1393 
1394 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1395 {
1396 	u32 local_feat_mask = l2cap_feat_mask;
1397 	if (!disable_ertm)
1398 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1399 
1400 	switch (mode) {
1401 	case L2CAP_MODE_ERTM:
1402 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1403 	case L2CAP_MODE_STREAMING:
1404 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1405 	default:
1406 		return 0x00;
1407 	}
1408 }
1409 
1410 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1411 {
1412 	struct l2cap_conn *conn = chan->conn;
1413 	struct l2cap_disconn_req req;
1414 
1415 	if (!conn)
1416 		return;
1417 
1418 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1419 		__clear_retrans_timer(chan);
1420 		__clear_monitor_timer(chan);
1421 		__clear_ack_timer(chan);
1422 	}
1423 
1424 	if (chan->scid == L2CAP_CID_A2MP) {
1425 		l2cap_state_change(chan, BT_DISCONN);
1426 		return;
1427 	}
1428 
1429 	req.dcid = cpu_to_le16(chan->dcid);
1430 	req.scid = cpu_to_le16(chan->scid);
1431 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1432 		       sizeof(req), &req);
1433 
1434 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1435 }
1436 
1437 /* ---- L2CAP connections ---- */
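/* Walk every channel on the connection once the feature exchange is done:
 * fixed and connectionless channels are marked ready, outgoing channels in
 * BT_CONNECT are started if security and key size now allow it, and deferred
 * incoming channels in BT_CONNECT2 get their pending connect response and,
 * where appropriate, the first configuration request.
 */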
1438 static void l2cap_conn_start(struct l2cap_conn *conn)
1439 {
1440 	struct l2cap_chan *chan, *tmp;
1441 
1442 	BT_DBG("conn %p", conn);
1443 
1444 	mutex_lock(&conn->chan_lock);
1445 
1446 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1447 		l2cap_chan_lock(chan);
1448 
1449 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1450 			l2cap_chan_ready(chan);
1451 			l2cap_chan_unlock(chan);
1452 			continue;
1453 		}
1454 
1455 		if (chan->state == BT_CONNECT) {
1456 			if (!l2cap_chan_check_security(chan, true) ||
1457 			    !__l2cap_no_conn_pending(chan)) {
1458 				l2cap_chan_unlock(chan);
1459 				continue;
1460 			}
1461 
1462 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1463 			    && test_bit(CONF_STATE2_DEVICE,
1464 					&chan->conf_state)) {
1465 				l2cap_chan_close(chan, ECONNRESET);
1466 				l2cap_chan_unlock(chan);
1467 				continue;
1468 			}
1469 
1470 			if (l2cap_check_enc_key_size(conn->hcon))
1471 				l2cap_start_connection(chan);
1472 			else
1473 				l2cap_chan_close(chan, ECONNREFUSED);
1474 
1475 		} else if (chan->state == BT_CONNECT2) {
1476 			struct l2cap_conn_rsp rsp;
1477 			char buf[128];
1478 			rsp.scid = cpu_to_le16(chan->dcid);
1479 			rsp.dcid = cpu_to_le16(chan->scid);
1480 
1481 			if (l2cap_chan_check_security(chan, false)) {
1482 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1483 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1484 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1485 					chan->ops->defer(chan);
1486 
1487 				} else {
1488 					l2cap_state_change(chan, BT_CONFIG);
1489 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1490 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1491 				}
1492 			} else {
1493 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1494 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1495 			}
1496 
1497 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1498 				       sizeof(rsp), &rsp);
1499 
1500 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1501 			    rsp.result != L2CAP_CR_SUCCESS) {
1502 				l2cap_chan_unlock(chan);
1503 				continue;
1504 			}
1505 
1506 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1507 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1508 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1509 			chan->num_conf_req++;
1510 		}
1511 
1512 		l2cap_chan_unlock(chan);
1513 	}
1514 
1515 	mutex_unlock(&conn->chan_lock);
1516 }
1517 
1518 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1519 {
1520 	struct hci_conn *hcon = conn->hcon;
1521 	struct hci_dev *hdev = hcon->hdev;
1522 
1523 	BT_DBG("%s conn %p", hdev->name, conn);
1524 
1525 	/* For outgoing pairing which doesn't necessarily have an
1526 	 * associated socket (e.g. mgmt_pair_device).
1527 	 */
1528 	if (hcon->out)
1529 		smp_conn_security(hcon, hcon->pending_sec_level);
1530 
1531 	/* For LE slave connections, make sure the connection interval
1532 	 * is in the range of the minimum and maximum interval that has
1533 	 * been configured for this connection. If not, then trigger
1534 	 * the connection update procedure.
1535 	 */
1536 	if (hcon->role == HCI_ROLE_SLAVE &&
1537 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1538 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1539 		struct l2cap_conn_param_update_req req;
1540 
1541 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1542 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1543 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1544 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1545 
1546 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1547 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1548 	}
1549 }
1550 
1551 static void l2cap_conn_ready(struct l2cap_conn *conn)
1552 {
1553 	struct l2cap_chan *chan;
1554 	struct hci_conn *hcon = conn->hcon;
1555 
1556 	BT_DBG("conn %p", conn);
1557 
1558 	if (hcon->type == ACL_LINK)
1559 		l2cap_request_info(conn);
1560 
1561 	mutex_lock(&conn->chan_lock);
1562 
1563 	list_for_each_entry(chan, &conn->chan_l, list) {
1564 
1565 		l2cap_chan_lock(chan);
1566 
1567 		if (chan->scid == L2CAP_CID_A2MP) {
1568 			l2cap_chan_unlock(chan);
1569 			continue;
1570 		}
1571 
1572 		if (hcon->type == LE_LINK) {
1573 			l2cap_le_start(chan);
1574 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1575 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1576 				l2cap_chan_ready(chan);
1577 		} else if (chan->state == BT_CONNECT) {
1578 			l2cap_do_start(chan);
1579 		}
1580 
1581 		l2cap_chan_unlock(chan);
1582 	}
1583 
1584 	mutex_unlock(&conn->chan_lock);
1585 
1586 	if (hcon->type == LE_LINK)
1587 		l2cap_le_conn_ready(conn);
1588 
1589 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1590 }
1591 
1592 /* Notify sockets that we cannot guarantee reliability anymore */
1593 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1594 {
1595 	struct l2cap_chan *chan;
1596 
1597 	BT_DBG("conn %p", conn);
1598 
1599 	mutex_lock(&conn->chan_lock);
1600 
1601 	list_for_each_entry(chan, &conn->chan_l, list) {
1602 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1603 			l2cap_chan_set_err(chan, err);
1604 	}
1605 
1606 	mutex_unlock(&conn->chan_lock);
1607 }
1608 
1609 static void l2cap_info_timeout(struct work_struct *work)
1610 {
1611 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1612 					       info_timer.work);
1613 
1614 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1615 	conn->info_ident = 0;
1616 
1617 	l2cap_conn_start(conn);
1618 }
1619 
1620 /*
1621  * l2cap_user
1622  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1623  * callback is called during registration. The ->remove callback is called
1624  * during unregistration.
1625  * An l2cap_user object can either be explicitly unregistered or when the
1626  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1627  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1628  * External modules must own a reference to the l2cap_conn object if they intend
1629  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1630  * any time if they don't.
1631  */
1632 
1633 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1634 {
1635 	struct hci_dev *hdev = conn->hcon->hdev;
1636 	int ret;
1637 
1638 	/* We need to check whether l2cap_conn is registered. If it is not, we
1639 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1640 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1641 	 * relies on the parent hci_conn object to be locked. This itself relies
1642 	 * on the hci_dev object to be locked. So we must lock the hci device
1643 	 * here, too. */
1644 
1645 	hci_dev_lock(hdev);
1646 
1647 	if (!list_empty(&user->list)) {
1648 		ret = -EINVAL;
1649 		goto out_unlock;
1650 	}
1651 
1652 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1653 	if (!conn->hchan) {
1654 		ret = -ENODEV;
1655 		goto out_unlock;
1656 	}
1657 
1658 	ret = user->probe(conn, user);
1659 	if (ret)
1660 		goto out_unlock;
1661 
1662 	list_add(&user->list, &conn->users);
1663 	ret = 0;
1664 
1665 out_unlock:
1666 	hci_dev_unlock(hdev);
1667 	return ret;
1668 }
1669 EXPORT_SYMBOL(l2cap_register_user);
1670 
1671 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1672 {
1673 	struct hci_dev *hdev = conn->hcon->hdev;
1674 
1675 	hci_dev_lock(hdev);
1676 
1677 	if (list_empty(&user->list))
1678 		goto out_unlock;
1679 
1680 	list_del_init(&user->list);
1681 	user->remove(conn, user);
1682 
1683 out_unlock:
1684 	hci_dev_unlock(hdev);
1685 }
1686 EXPORT_SYMBOL(l2cap_unregister_user);
1687 
1688 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1689 {
1690 	struct l2cap_user *user;
1691 
1692 	while (!list_empty(&conn->users)) {
1693 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1694 		list_del_init(&user->list);
1695 		user->remove(conn, user);
1696 	}
1697 }
1698 
1699 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1700 {
1701 	struct l2cap_conn *conn = hcon->l2cap_data;
1702 	struct l2cap_chan *chan, *l;
1703 
1704 	if (!conn)
1705 		return;
1706 
1707 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1708 
1709 	kfree_skb(conn->rx_skb);
1710 
1711 	skb_queue_purge(&conn->pending_rx);
1712 
1713 	/* We can not call flush_work(&conn->pending_rx_work) here since we
1714 	 * might block if we are running on a worker from the same workqueue
1715 	 * pending_rx_work is waiting on.
1716 	 */
1717 	if (work_pending(&conn->pending_rx_work))
1718 		cancel_work_sync(&conn->pending_rx_work);
1719 
1720 	if (work_pending(&conn->id_addr_update_work))
1721 		cancel_work_sync(&conn->id_addr_update_work);
1722 
1723 	l2cap_unregister_all_users(conn);
1724 
1725 	/* Force the connection to be immediately dropped */
1726 	hcon->disc_timeout = 0;
1727 
1728 	mutex_lock(&conn->chan_lock);
1729 
1730 	/* Kill channels */
1731 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1732 		l2cap_chan_hold(chan);
1733 		l2cap_chan_lock(chan);
1734 
1735 		l2cap_chan_del(chan, err);
1736 
1737 		l2cap_chan_unlock(chan);
1738 
1739 		chan->ops->close(chan);
1740 		l2cap_chan_put(chan);
1741 	}
1742 
1743 	mutex_unlock(&conn->chan_lock);
1744 
1745 	hci_chan_del(conn->hchan);
1746 
1747 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1748 		cancel_delayed_work_sync(&conn->info_timer);
1749 
1750 	hcon->l2cap_data = NULL;
1751 	conn->hchan = NULL;
1752 	l2cap_conn_put(conn);
1753 }
1754 
1755 static void l2cap_conn_free(struct kref *ref)
1756 {
1757 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1758 
1759 	hci_conn_put(conn->hcon);
1760 	kfree(conn);
1761 }
1762 
1763 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1764 {
1765 	kref_get(&conn->ref);
1766 	return conn;
1767 }
1768 EXPORT_SYMBOL(l2cap_conn_get);
1769 
1770 void l2cap_conn_put(struct l2cap_conn *conn)
1771 {
1772 	kref_put(&conn->ref, l2cap_conn_free);
1773 }
1774 EXPORT_SYMBOL(l2cap_conn_put);
1775 
1776 /* ---- Socket interface ---- */
1777 
1778 /* Find socket with psm and source / destination bdaddr.
1779  * Returns the closest match: an exact src/dst match wins over a
1780  * channel bound to BDADDR_ANY on either side. */
1781 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1782 						   bdaddr_t *src,
1783 						   bdaddr_t *dst,
1784 						   u8 link_type)
1785 {
1786 	struct l2cap_chan *c, *c1 = NULL;
1787 
1788 	read_lock(&chan_list_lock);
1789 
1790 	list_for_each_entry(c, &chan_list, global_l) {
1791 		if (state && c->state != state)
1792 			continue;
1793 
1794 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1795 			continue;
1796 
1797 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1798 			continue;
1799 
1800 		if (c->psm == psm) {
1801 			int src_match, dst_match;
1802 			int src_any, dst_any;
1803 
1804 			/* Exact match. */
1805 			src_match = !bacmp(&c->src, src);
1806 			dst_match = !bacmp(&c->dst, dst);
1807 			if (src_match && dst_match) {
1808 				l2cap_chan_hold(c);
1809 				read_unlock(&chan_list_lock);
1810 				return c;
1811 			}
1812 
1813 			/* Closest match */
1814 			src_any = !bacmp(&c->src, BDADDR_ANY);
1815 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1816 			if ((src_match && dst_any) || (src_any && dst_match) ||
1817 			    (src_any && dst_any))
1818 				c1 = c;
1819 		}
1820 	}
1821 
1822 	if (c1)
1823 		l2cap_chan_hold(c1);
1824 
1825 	read_unlock(&chan_list_lock);
1826 
1827 	return c1;
1828 }
1829 
1830 static void l2cap_monitor_timeout(struct work_struct *work)
1831 {
1832 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1833 					       monitor_timer.work);
1834 
1835 	BT_DBG("chan %p", chan);
1836 
1837 	l2cap_chan_lock(chan);
1838 
1839 	if (!chan->conn) {
1840 		l2cap_chan_unlock(chan);
1841 		l2cap_chan_put(chan);
1842 		return;
1843 	}
1844 
1845 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1846 
1847 	l2cap_chan_unlock(chan);
1848 	l2cap_chan_put(chan);
1849 }
1850 
1851 static void l2cap_retrans_timeout(struct work_struct *work)
1852 {
1853 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1854 					       retrans_timer.work);
1855 
1856 	BT_DBG("chan %p", chan);
1857 
1858 	l2cap_chan_lock(chan);
1859 
1860 	if (!chan->conn) {
1861 		l2cap_chan_unlock(chan);
1862 		l2cap_chan_put(chan);
1863 		return;
1864 	}
1865 
1866 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1867 	l2cap_chan_unlock(chan);
1868 	l2cap_chan_put(chan);
1869 }
1870 
1871 static void l2cap_streaming_send(struct l2cap_chan *chan,
1872 				 struct sk_buff_head *skbs)
1873 {
1874 	struct sk_buff *skb;
1875 	struct l2cap_ctrl *control;
1876 
1877 	BT_DBG("chan %p, skbs %p", chan, skbs);
1878 
1879 	if (__chan_is_moving(chan))
1880 		return;
1881 
1882 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1883 
1884 	while (!skb_queue_empty(&chan->tx_q)) {
1885 
1886 		skb = skb_dequeue(&chan->tx_q);
1887 
1888 		bt_cb(skb)->l2cap.retries = 1;
1889 		control = &bt_cb(skb)->l2cap;
1890 
1891 		control->reqseq = 0;
1892 		control->txseq = chan->next_tx_seq;
1893 
1894 		__pack_control(chan, control, skb);
1895 
1896 		if (chan->fcs == L2CAP_FCS_CRC16) {
1897 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1898 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1899 		}
1900 
1901 		l2cap_do_send(chan, skb);
1902 
1903 		BT_DBG("Sent txseq %u", control->txseq);
1904 
1905 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1906 		chan->frames_sent++;
1907 	}
1908 }
1909 
1910 static int l2cap_ertm_send(struct l2cap_chan *chan)
1911 {
1912 	struct sk_buff *skb, *tx_skb;
1913 	struct l2cap_ctrl *control;
1914 	int sent = 0;
1915 
1916 	BT_DBG("chan %p", chan);
1917 
1918 	if (chan->state != BT_CONNECTED)
1919 		return -ENOTCONN;
1920 
1921 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1922 		return 0;
1923 
1924 	if (__chan_is_moving(chan))
1925 		return 0;
1926 
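	/* Transmit queued I-frames for as long as there is data waiting,
	 * the remote TX window is not full and the channel is still in
	 * the XMIT state.
	 */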
1927 	while (chan->tx_send_head &&
1928 	       chan->unacked_frames < chan->remote_tx_win &&
1929 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1930 
1931 		skb = chan->tx_send_head;
1932 
1933 		bt_cb(skb)->l2cap.retries = 1;
1934 		control = &bt_cb(skb)->l2cap;
1935 
1936 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1937 			control->final = 1;
1938 
1939 		control->reqseq = chan->buffer_seq;
1940 		chan->last_acked_seq = chan->buffer_seq;
1941 		control->txseq = chan->next_tx_seq;
1942 
1943 		__pack_control(chan, control, skb);
1944 
1945 		if (chan->fcs == L2CAP_FCS_CRC16) {
1946 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1947 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1948 		}
1949 
1950 		/* Clone after data has been modified. Data is assumed to be
1951 		 * read-only (for locking purposes) on cloned sk_buffs.
1952 		 */
1953 		tx_skb = skb_clone(skb, GFP_KERNEL);
1954 
1955 		if (!tx_skb)
1956 			break;
1957 
1958 		__set_retrans_timer(chan);
1959 
1960 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1961 		chan->unacked_frames++;
1962 		chan->frames_sent++;
1963 		sent++;
1964 
1965 		if (skb_queue_is_last(&chan->tx_q, skb))
1966 			chan->tx_send_head = NULL;
1967 		else
1968 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1969 
1970 		l2cap_do_send(chan, tx_skb);
1971 		BT_DBG("Sent txseq %u", control->txseq);
1972 	}
1973 
1974 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1975 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
1976 
1977 	return sent;
1978 }
1979 
1980 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1981 {
1982 	struct l2cap_ctrl control;
1983 	struct sk_buff *skb;
1984 	struct sk_buff *tx_skb;
1985 	u16 seq;
1986 
1987 	BT_DBG("chan %p", chan);
1988 
1989 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1990 		return;
1991 
1992 	if (__chan_is_moving(chan))
1993 		return;
1994 
1995 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1996 		seq = l2cap_seq_list_pop(&chan->retrans_list);
1997 
1998 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1999 		if (!skb) {
2000 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2001 			       seq);
2002 			continue;
2003 		}
2004 
2005 		bt_cb(skb)->l2cap.retries++;
2006 		control = bt_cb(skb)->l2cap;
2007 
2008 		if (chan->max_tx != 0 &&
2009 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
2010 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2011 			l2cap_send_disconn_req(chan, ECONNRESET);
2012 			l2cap_seq_list_clear(&chan->retrans_list);
2013 			break;
2014 		}
2015 
2016 		control.reqseq = chan->buffer_seq;
2017 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2018 			control.final = 1;
2019 		else
2020 			control.final = 0;
2021 
2022 		if (skb_cloned(skb)) {
2023 			/* Cloned sk_buffs are read-only, so we need a
2024 			 * writeable copy
2025 			 */
2026 			tx_skb = skb_copy(skb, GFP_KERNEL);
2027 		} else {
2028 			tx_skb = skb_clone(skb, GFP_KERNEL);
2029 		}
2030 
2031 		if (!tx_skb) {
2032 			l2cap_seq_list_clear(&chan->retrans_list);
2033 			break;
2034 		}
2035 
2036 		/* Update skb contents */
2037 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2038 			put_unaligned_le32(__pack_extended_control(&control),
2039 					   tx_skb->data + L2CAP_HDR_SIZE);
2040 		} else {
2041 			put_unaligned_le16(__pack_enhanced_control(&control),
2042 					   tx_skb->data + L2CAP_HDR_SIZE);
2043 		}
2044 
2045 		/* Update FCS */
2046 		if (chan->fcs == L2CAP_FCS_CRC16) {
2047 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2048 					tx_skb->len - L2CAP_FCS_SIZE);
2049 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2050 						L2CAP_FCS_SIZE);
2051 		}
2052 
2053 		l2cap_do_send(chan, tx_skb);
2054 
2055 		BT_DBG("Resent txseq %d", control.txseq);
2056 
2057 		chan->last_acked_seq = chan->buffer_seq;
2058 	}
2059 }
2060 
2061 static void l2cap_retransmit(struct l2cap_chan *chan,
2062 			     struct l2cap_ctrl *control)
2063 {
2064 	BT_DBG("chan %p, control %p", chan, control);
2065 
2066 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2067 	l2cap_ertm_resend(chan);
2068 }
2069 
2070 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2071 				 struct l2cap_ctrl *control)
2072 {
2073 	struct sk_buff *skb;
2074 
2075 	BT_DBG("chan %p, control %p", chan, control);
2076 
2077 	if (control->poll)
2078 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2079 
2080 	l2cap_seq_list_clear(&chan->retrans_list);
2081 
2082 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2083 		return;
2084 
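	/* Rebuild the retransmit list: walk the TX queue starting at the
	 * frame the peer expects next (reqseq) and queue every
	 * unacknowledged frame up to (but not including) tx_send_head
	 * for resending.
	 */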
2085 	if (chan->unacked_frames) {
2086 		skb_queue_walk(&chan->tx_q, skb) {
2087 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2088 			    skb == chan->tx_send_head)
2089 				break;
2090 		}
2091 
2092 		skb_queue_walk_from(&chan->tx_q, skb) {
2093 			if (skb == chan->tx_send_head)
2094 				break;
2095 
2096 			l2cap_seq_list_append(&chan->retrans_list,
2097 					      bt_cb(skb)->l2cap.txseq);
2098 		}
2099 
2100 		l2cap_ertm_resend(chan);
2101 	}
2102 }
2103 
2104 static void l2cap_send_ack(struct l2cap_chan *chan)
2105 {
2106 	struct l2cap_ctrl control;
2107 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2108 					 chan->last_acked_seq);
2109 	int threshold;
2110 
2111 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2112 	       chan, chan->last_acked_seq, chan->buffer_seq);
2113 
2114 	memset(&control, 0, sizeof(control));
2115 	control.sframe = 1;
2116 
2117 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2118 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2119 		__clear_ack_timer(chan);
2120 		control.super = L2CAP_SUPER_RNR;
2121 		control.reqseq = chan->buffer_seq;
2122 		l2cap_send_sframe(chan, &control);
2123 	} else {
2124 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2125 			l2cap_ertm_send(chan);
2126 			/* If any i-frames were sent, they included an ack */
2127 			if (chan->buffer_seq == chan->last_acked_seq)
2128 				frames_to_ack = 0;
2129 		}
2130 
2131 		/* Ack now if the window is 3/4ths full.
2132 		 * Calculate without mul or div
2133 		 */
2134 		threshold = chan->ack_win;
2135 		threshold += threshold << 1;
2136 		threshold >>= 2;
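		/* threshold = (ack_win + 2 * ack_win) >> 2 = 3/4 * ack_win,
		 * e.g. an ack_win of 8 yields a threshold of 6 frames.
		 */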
2137 
2138 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2139 		       threshold);
2140 
2141 		if (frames_to_ack >= threshold) {
2142 			__clear_ack_timer(chan);
2143 			control.super = L2CAP_SUPER_RR;
2144 			control.reqseq = chan->buffer_seq;
2145 			l2cap_send_sframe(chan, &control);
2146 			frames_to_ack = 0;
2147 		}
2148 
2149 		if (frames_to_ack)
2150 			__set_ack_timer(chan);
2151 	}
2152 }
2153 
2154 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2155 					 struct msghdr *msg, int len,
2156 					 int count, struct sk_buff *skb)
2157 {
2158 	struct l2cap_conn *conn = chan->conn;
2159 	struct sk_buff **frag;
2160 	int sent = 0;
2161 
2162 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2163 		return -EFAULT;
2164 
2165 	sent += count;
2166 	len  -= count;
2167 
2168 	/* Continuation fragments (no L2CAP header) */
2169 	frag = &skb_shinfo(skb)->frag_list;
2170 	while (len) {
2171 		struct sk_buff *tmp;
2172 
2173 		count = min_t(unsigned int, conn->mtu, len);
2174 
2175 		tmp = chan->ops->alloc_skb(chan, 0, count,
2176 					   msg->msg_flags & MSG_DONTWAIT);
2177 		if (IS_ERR(tmp))
2178 			return PTR_ERR(tmp);
2179 
2180 		*frag = tmp;
2181 
2182 		if (!copy_from_iter_full(skb_put(*frag, count), count,
2183 				   &msg->msg_iter))
2184 			return -EFAULT;
2185 
2186 		sent += count;
2187 		len  -= count;
2188 
2189 		skb->len += (*frag)->len;
2190 		skb->data_len += (*frag)->len;
2191 
2192 		frag = &(*frag)->next;
2193 	}
2194 
2195 	return sent;
2196 }
2197 
2198 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2199 						 struct msghdr *msg, size_t len)
2200 {
2201 	struct l2cap_conn *conn = chan->conn;
2202 	struct sk_buff *skb;
2203 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2204 	struct l2cap_hdr *lh;
2205 
2206 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2207 	       __le16_to_cpu(chan->psm), len);
2208 
2209 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2210 
2211 	skb = chan->ops->alloc_skb(chan, hlen, count,
2212 				   msg->msg_flags & MSG_DONTWAIT);
2213 	if (IS_ERR(skb))
2214 		return skb;
2215 
2216 	/* Create L2CAP header */
2217 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2218 	lh->cid = cpu_to_le16(chan->dcid);
2219 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2220 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2221 
2222 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2223 	if (unlikely(err < 0)) {
2224 		kfree_skb(skb);
2225 		return ERR_PTR(err);
2226 	}
2227 	return skb;
2228 }
2229 
2230 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2231 					      struct msghdr *msg, size_t len)
2232 {
2233 	struct l2cap_conn *conn = chan->conn;
2234 	struct sk_buff *skb;
2235 	int err, count;
2236 	struct l2cap_hdr *lh;
2237 
2238 	BT_DBG("chan %p len %zu", chan, len);
2239 
2240 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2241 
2242 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2243 				   msg->msg_flags & MSG_DONTWAIT);
2244 	if (IS_ERR(skb))
2245 		return skb;
2246 
2247 	/* Create L2CAP header */
2248 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2249 	lh->cid = cpu_to_le16(chan->dcid);
2250 	lh->len = cpu_to_le16(len);
2251 
2252 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2253 	if (unlikely(err < 0)) {
2254 		kfree_skb(skb);
2255 		return ERR_PTR(err);
2256 	}
2257 	return skb;
2258 }
2259 
2260 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2261 					       struct msghdr *msg, size_t len,
2262 					       u16 sdulen)
2263 {
2264 	struct l2cap_conn *conn = chan->conn;
2265 	struct sk_buff *skb;
2266 	int err, count, hlen;
2267 	struct l2cap_hdr *lh;
2268 
2269 	BT_DBG("chan %p len %zu", chan, len);
2270 
2271 	if (!conn)
2272 		return ERR_PTR(-ENOTCONN);
2273 
2274 	hlen = __ertm_hdr_size(chan);
2275 
2276 	if (sdulen)
2277 		hlen += L2CAP_SDULEN_SIZE;
2278 
2279 	if (chan->fcs == L2CAP_FCS_CRC16)
2280 		hlen += L2CAP_FCS_SIZE;
2281 
2282 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2283 
2284 	skb = chan->ops->alloc_skb(chan, hlen, count,
2285 				   msg->msg_flags & MSG_DONTWAIT);
2286 	if (IS_ERR(skb))
2287 		return skb;
2288 
2289 	/* Create L2CAP header */
2290 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2291 	lh->cid = cpu_to_le16(chan->dcid);
2292 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2293 
2294 	/* Control header is populated later */
2295 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2296 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2297 	else
2298 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2299 
2300 	if (sdulen)
2301 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2302 
2303 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2304 	if (unlikely(err < 0)) {
2305 		kfree_skb(skb);
2306 		return ERR_PTR(err);
2307 	}
2308 
2309 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2310 	bt_cb(skb)->l2cap.retries = 0;
2311 	return skb;
2312 }
2313 
2314 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2315 			     struct sk_buff_head *seg_queue,
2316 			     struct msghdr *msg, size_t len)
2317 {
2318 	struct sk_buff *skb;
2319 	u16 sdu_len;
2320 	size_t pdu_len;
2321 	u8 sar;
2322 
2323 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2324 
2325 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2326 	 * so fragmented skbs are not used.  The HCI layer's handling
2327 	 * of fragmented skbs is not compatible with ERTM's queueing.
2328 	 */
2329 
2330 	/* PDU size is derived from the HCI MTU */
2331 	pdu_len = chan->conn->mtu;
2332 
2333 	/* Constrain PDU size for BR/EDR connections */
2334 	if (!chan->hs_hcon)
2335 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2336 
2337 	/* Adjust for largest possible L2CAP overhead. */
2338 	if (chan->fcs)
2339 		pdu_len -= L2CAP_FCS_SIZE;
2340 
2341 	pdu_len -= __ertm_hdr_size(chan);
2342 
2343 	/* Remote device may have requested smaller PDUs */
2344 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2345 
2346 	if (len <= pdu_len) {
2347 		sar = L2CAP_SAR_UNSEGMENTED;
2348 		sdu_len = 0;
2349 		pdu_len = len;
2350 	} else {
2351 		sar = L2CAP_SAR_START;
2352 		sdu_len = len;
2353 	}
2354 
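	/* Emit one PDU per iteration: the first PDU of a segmented SDU
	 * carries the SAR "start" marking and the total SDU length,
	 * middle PDUs are marked "continue" and the final (possibly
	 * short) PDU is marked "end".
	 */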
2355 	while (len > 0) {
2356 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2357 
2358 		if (IS_ERR(skb)) {
2359 			__skb_queue_purge(seg_queue);
2360 			return PTR_ERR(skb);
2361 		}
2362 
2363 		bt_cb(skb)->l2cap.sar = sar;
2364 		__skb_queue_tail(seg_queue, skb);
2365 
2366 		len -= pdu_len;
2367 		if (sdu_len)
2368 			sdu_len = 0;
2369 
2370 		if (len <= pdu_len) {
2371 			sar = L2CAP_SAR_END;
2372 			pdu_len = len;
2373 		} else {
2374 			sar = L2CAP_SAR_CONTINUE;
2375 		}
2376 	}
2377 
2378 	return 0;
2379 }
2380 
2381 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2382 						   struct msghdr *msg,
2383 						   size_t len, u16 sdulen)
2384 {
2385 	struct l2cap_conn *conn = chan->conn;
2386 	struct sk_buff *skb;
2387 	int err, count, hlen;
2388 	struct l2cap_hdr *lh;
2389 
2390 	BT_DBG("chan %p len %zu", chan, len);
2391 
2392 	if (!conn)
2393 		return ERR_PTR(-ENOTCONN);
2394 
2395 	hlen = L2CAP_HDR_SIZE;
2396 
2397 	if (sdulen)
2398 		hlen += L2CAP_SDULEN_SIZE;
2399 
2400 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2401 
2402 	skb = chan->ops->alloc_skb(chan, hlen, count,
2403 				   msg->msg_flags & MSG_DONTWAIT);
2404 	if (IS_ERR(skb))
2405 		return skb;
2406 
2407 	/* Create L2CAP header */
2408 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2409 	lh->cid = cpu_to_le16(chan->dcid);
2410 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2411 
2412 	if (sdulen)
2413 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2414 
2415 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2416 	if (unlikely(err < 0)) {
2417 		kfree_skb(skb);
2418 		return ERR_PTR(err);
2419 	}
2420 
2421 	return skb;
2422 }
2423 
2424 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2425 				struct sk_buff_head *seg_queue,
2426 				struct msghdr *msg, size_t len)
2427 {
2428 	struct sk_buff *skb;
2429 	size_t pdu_len;
2430 	u16 sdu_len;
2431 
2432 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2433 
2434 	sdu_len = len;
2435 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2436 
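	/* The first PDU carries a 2-byte SDU length field, so its payload
	 * is the remote MPS minus L2CAP_SDULEN_SIZE; once that header has
	 * been sent, the remaining PDUs can use the full MPS.
	 */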
2437 	while (len > 0) {
2438 		if (len <= pdu_len)
2439 			pdu_len = len;
2440 
2441 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2442 		if (IS_ERR(skb)) {
2443 			__skb_queue_purge(seg_queue);
2444 			return PTR_ERR(skb);
2445 		}
2446 
2447 		__skb_queue_tail(seg_queue, skb);
2448 
2449 		len -= pdu_len;
2450 
2451 		if (sdu_len) {
2452 			sdu_len = 0;
2453 			pdu_len += L2CAP_SDULEN_SIZE;
2454 		}
2455 	}
2456 
2457 	return 0;
2458 }
2459 
2460 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2461 {
2462 	int sent = 0;
2463 
2464 	BT_DBG("chan %p", chan);
2465 
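	/* LE credit based flow control: each PDU sent consumes one credit,
	 * so transmission stops when either the TX queue is drained or the
	 * peer's credits are exhausted.
	 */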
2466 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2467 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2468 		chan->tx_credits--;
2469 		sent++;
2470 	}
2471 
2472 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2473 	       skb_queue_len(&chan->tx_q));
2474 }
2475 
2476 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2477 {
2478 	struct sk_buff *skb;
2479 	int err;
2480 	struct sk_buff_head seg_queue;
2481 
2482 	if (!chan->conn)
2483 		return -ENOTCONN;
2484 
2485 	/* Connectionless channel */
2486 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2487 		skb = l2cap_create_connless_pdu(chan, msg, len);
2488 		if (IS_ERR(skb))
2489 			return PTR_ERR(skb);
2490 
2491 		/* Channel lock is released before requesting new skb and then
2492 		 * reacquired, so we need to recheck the channel state.
2493 		 */
2494 		if (chan->state != BT_CONNECTED) {
2495 			kfree_skb(skb);
2496 			return -ENOTCONN;
2497 		}
2498 
2499 		l2cap_do_send(chan, skb);
2500 		return len;
2501 	}
2502 
2503 	switch (chan->mode) {
2504 	case L2CAP_MODE_LE_FLOWCTL:
2505 		/* Check outgoing MTU */
2506 		if (len > chan->omtu)
2507 			return -EMSGSIZE;
2508 
2509 		__skb_queue_head_init(&seg_queue);
2510 
2511 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2512 
2513 		if (chan->state != BT_CONNECTED) {
2514 			__skb_queue_purge(&seg_queue);
2515 			err = -ENOTCONN;
2516 		}
2517 
2518 		if (err)
2519 			return err;
2520 
2521 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2522 
2523 		l2cap_le_flowctl_send(chan);
2524 
2525 		if (!chan->tx_credits)
2526 			chan->ops->suspend(chan);
2527 
2528 		err = len;
2529 
2530 		break;
2531 
2532 	case L2CAP_MODE_BASIC:
2533 		/* Check outgoing MTU */
2534 		if (len > chan->omtu)
2535 			return -EMSGSIZE;
2536 
2537 		/* Create a basic PDU */
2538 		skb = l2cap_create_basic_pdu(chan, msg, len);
2539 		if (IS_ERR(skb))
2540 			return PTR_ERR(skb);
2541 
2542 		/* Channel lock is released before requesting new skb and then
2543 		 * reacquired thus we need to recheck channel state.
2544 		 * reacquired, so we need to recheck the channel state.
2545 		if (chan->state != BT_CONNECTED) {
2546 			kfree_skb(skb);
2547 			return -ENOTCONN;
2548 		}
2549 
2550 		l2cap_do_send(chan, skb);
2551 		err = len;
2552 		break;
2553 
2554 	case L2CAP_MODE_ERTM:
2555 	case L2CAP_MODE_STREAMING:
2556 		/* Check outgoing MTU */
2557 		if (len > chan->omtu) {
2558 			err = -EMSGSIZE;
2559 			break;
2560 		}
2561 
2562 		__skb_queue_head_init(&seg_queue);
2563 
2564 		/* Do segmentation before calling in to the state machine,
2565 		 * since it's possible to block while waiting for memory
2566 		 * allocation.
2567 		 */
2568 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2569 
2570 		/* The channel could have been closed while segmenting,
2571 		 * check that it is still connected.
2572 		 */
2573 		if (chan->state != BT_CONNECTED) {
2574 			__skb_queue_purge(&seg_queue);
2575 			err = -ENOTCONN;
2576 		}
2577 
2578 		if (err)
2579 			break;
2580 
2581 		if (chan->mode == L2CAP_MODE_ERTM)
2582 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2583 		else
2584 			l2cap_streaming_send(chan, &seg_queue);
2585 
2586 		err = len;
2587 
2588 		/* If the skbs were not queued for sending, they'll still be in
2589 		 * seg_queue and need to be purged.
2590 		 */
2591 		__skb_queue_purge(&seg_queue);
2592 		break;
2593 
2594 	default:
2595 		BT_DBG("bad mode %1.1x", chan->mode);
2596 		err = -EBADFD;
2597 	}
2598 
2599 	return err;
2600 }
2601 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2602 
2603 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2604 {
2605 	struct l2cap_ctrl control;
2606 	u16 seq;
2607 
2608 	BT_DBG("chan %p, txseq %u", chan, txseq);
2609 
2610 	memset(&control, 0, sizeof(control));
2611 	control.sframe = 1;
2612 	control.super = L2CAP_SUPER_SREJ;
2613 
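	/* Request retransmission of every missing frame between the
	 * expected sequence number and the one just received, skipping
	 * frames that have already arrived out of order (present in the
	 * SREJ queue).
	 */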
2614 	for (seq = chan->expected_tx_seq; seq != txseq;
2615 	     seq = __next_seq(chan, seq)) {
2616 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2617 			control.reqseq = seq;
2618 			l2cap_send_sframe(chan, &control);
2619 			l2cap_seq_list_append(&chan->srej_list, seq);
2620 		}
2621 	}
2622 
2623 	chan->expected_tx_seq = __next_seq(chan, txseq);
2624 }
2625 
2626 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2627 {
2628 	struct l2cap_ctrl control;
2629 
2630 	BT_DBG("chan %p", chan);
2631 
2632 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2633 		return;
2634 
2635 	memset(&control, 0, sizeof(control));
2636 	control.sframe = 1;
2637 	control.super = L2CAP_SUPER_SREJ;
2638 	control.reqseq = chan->srej_list.tail;
2639 	l2cap_send_sframe(chan, &control);
2640 }
2641 
2642 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2643 {
2644 	struct l2cap_ctrl control;
2645 	u16 initial_head;
2646 	u16 seq;
2647 
2648 	BT_DBG("chan %p, txseq %u", chan, txseq);
2649 
2650 	memset(&control, 0, sizeof(control));
2651 	control.sframe = 1;
2652 	control.super = L2CAP_SUPER_SREJ;
2653 
2654 	/* Capture initial list head to allow only one pass through the list. */
2655 	initial_head = chan->srej_list.head;
2656 
2657 	do {
2658 		seq = l2cap_seq_list_pop(&chan->srej_list);
2659 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2660 			break;
2661 
2662 		control.reqseq = seq;
2663 		l2cap_send_sframe(chan, &control);
2664 		l2cap_seq_list_append(&chan->srej_list, seq);
2665 	} while (chan->srej_list.head != initial_head);
2666 }
2667 
2668 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2669 {
2670 	struct sk_buff *acked_skb;
2671 	u16 ackseq;
2672 
2673 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2674 
2675 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2676 		return;
2677 
2678 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2679 	       chan->expected_ack_seq, chan->unacked_frames);
2680 
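	/* Every frame from expected_ack_seq up to (but not including)
	 * reqseq has been acknowledged by the peer, so drop those frames
	 * from the TX queue.
	 */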
2681 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2682 	     ackseq = __next_seq(chan, ackseq)) {
2683 
2684 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2685 		if (acked_skb) {
2686 			skb_unlink(acked_skb, &chan->tx_q);
2687 			kfree_skb(acked_skb);
2688 			chan->unacked_frames--;
2689 		}
2690 	}
2691 
2692 	chan->expected_ack_seq = reqseq;
2693 
2694 	if (chan->unacked_frames == 0)
2695 		__clear_retrans_timer(chan);
2696 
2697 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2698 }
2699 
2700 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2701 {
2702 	BT_DBG("chan %p", chan);
2703 
2704 	chan->expected_tx_seq = chan->buffer_seq;
2705 	l2cap_seq_list_clear(&chan->srej_list);
2706 	skb_queue_purge(&chan->srej_q);
2707 	chan->rx_state = L2CAP_RX_STATE_RECV;
2708 }
2709 
2710 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2711 				struct l2cap_ctrl *control,
2712 				struct sk_buff_head *skbs, u8 event)
2713 {
2714 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2715 	       event);
2716 
2717 	switch (event) {
2718 	case L2CAP_EV_DATA_REQUEST:
2719 		if (chan->tx_send_head == NULL)
2720 			chan->tx_send_head = skb_peek(skbs);
2721 
2722 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2723 		l2cap_ertm_send(chan);
2724 		break;
2725 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2726 		BT_DBG("Enter LOCAL_BUSY");
2727 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2728 
2729 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2730 			/* The SREJ_SENT state must be aborted if we are to
2731 			 * enter the LOCAL_BUSY state.
2732 			 */
2733 			l2cap_abort_rx_srej_sent(chan);
2734 		}
2735 
2736 		l2cap_send_ack(chan);
2737 
2738 		break;
2739 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2740 		BT_DBG("Exit LOCAL_BUSY");
2741 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2742 
2743 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2744 			struct l2cap_ctrl local_control;
2745 
2746 			memset(&local_control, 0, sizeof(local_control));
2747 			local_control.sframe = 1;
2748 			local_control.super = L2CAP_SUPER_RR;
2749 			local_control.poll = 1;
2750 			local_control.reqseq = chan->buffer_seq;
2751 			l2cap_send_sframe(chan, &local_control);
2752 
2753 			chan->retry_count = 1;
2754 			__set_monitor_timer(chan);
2755 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2756 		}
2757 		break;
2758 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2759 		l2cap_process_reqseq(chan, control->reqseq);
2760 		break;
2761 	case L2CAP_EV_EXPLICIT_POLL:
2762 		l2cap_send_rr_or_rnr(chan, 1);
2763 		chan->retry_count = 1;
2764 		__set_monitor_timer(chan);
2765 		__clear_ack_timer(chan);
2766 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2767 		break;
2768 	case L2CAP_EV_RETRANS_TO:
2769 		l2cap_send_rr_or_rnr(chan, 1);
2770 		chan->retry_count = 1;
2771 		__set_monitor_timer(chan);
2772 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2773 		break;
2774 	case L2CAP_EV_RECV_FBIT:
2775 		/* Nothing to process */
2776 		break;
2777 	default:
2778 		break;
2779 	}
2780 }
2781 
2782 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2783 				  struct l2cap_ctrl *control,
2784 				  struct sk_buff_head *skbs, u8 event)
2785 {
2786 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2787 	       event);
2788 
2789 	switch (event) {
2790 	case L2CAP_EV_DATA_REQUEST:
2791 		if (chan->tx_send_head == NULL)
2792 			chan->tx_send_head = skb_peek(skbs);
2793 		/* Queue data, but don't send. */
2794 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2795 		break;
2796 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2797 		BT_DBG("Enter LOCAL_BUSY");
2798 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2799 
2800 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2801 			/* The SREJ_SENT state must be aborted if we are to
2802 			 * enter the LOCAL_BUSY state.
2803 			 */
2804 			l2cap_abort_rx_srej_sent(chan);
2805 		}
2806 
2807 		l2cap_send_ack(chan);
2808 
2809 		break;
2810 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2811 		BT_DBG("Exit LOCAL_BUSY");
2812 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2813 
2814 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2815 			struct l2cap_ctrl local_control;
2816 			memset(&local_control, 0, sizeof(local_control));
2817 			local_control.sframe = 1;
2818 			local_control.super = L2CAP_SUPER_RR;
2819 			local_control.poll = 1;
2820 			local_control.reqseq = chan->buffer_seq;
2821 			l2cap_send_sframe(chan, &local_control);
2822 
2823 			chan->retry_count = 1;
2824 			__set_monitor_timer(chan);
2825 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2826 		}
2827 		break;
2828 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2829 		l2cap_process_reqseq(chan, control->reqseq);
2830 
2831 		/* Fall through */
2832 
2833 	case L2CAP_EV_RECV_FBIT:
2834 		if (control && control->final) {
2835 			__clear_monitor_timer(chan);
2836 			if (chan->unacked_frames > 0)
2837 				__set_retrans_timer(chan);
2838 			chan->retry_count = 0;
2839 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2840 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2841 		}
2842 		break;
2843 	case L2CAP_EV_EXPLICIT_POLL:
2844 		/* Ignore */
2845 		break;
2846 	case L2CAP_EV_MONITOR_TO:
2847 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2848 			l2cap_send_rr_or_rnr(chan, 1);
2849 			__set_monitor_timer(chan);
2850 			chan->retry_count++;
2851 		} else {
2852 			l2cap_send_disconn_req(chan, ECONNABORTED);
2853 		}
2854 		break;
2855 	default:
2856 		break;
2857 	}
2858 }
2859 
2860 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2861 		     struct sk_buff_head *skbs, u8 event)
2862 {
2863 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2864 	       chan, control, skbs, event, chan->tx_state);
2865 
2866 	switch (chan->tx_state) {
2867 	case L2CAP_TX_STATE_XMIT:
2868 		l2cap_tx_state_xmit(chan, control, skbs, event);
2869 		break;
2870 	case L2CAP_TX_STATE_WAIT_F:
2871 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2872 		break;
2873 	default:
2874 		/* Ignore event */
2875 		break;
2876 	}
2877 }
2878 
2879 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2880 			     struct l2cap_ctrl *control)
2881 {
2882 	BT_DBG("chan %p, control %p", chan, control);
2883 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2884 }
2885 
2886 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2887 				  struct l2cap_ctrl *control)
2888 {
2889 	BT_DBG("chan %p, control %p", chan, control);
2890 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2891 }
2892 
2893 /* Copy frame to all raw sockets on that connection */
2894 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2895 {
2896 	struct sk_buff *nskb;
2897 	struct l2cap_chan *chan;
2898 
2899 	BT_DBG("conn %p", conn);
2900 
2901 	mutex_lock(&conn->chan_lock);
2902 
2903 	list_for_each_entry(chan, &conn->chan_l, list) {
2904 		if (chan->chan_type != L2CAP_CHAN_RAW)
2905 			continue;
2906 
2907 		/* Don't send frame to the channel it came from */
2908 		if (bt_cb(skb)->l2cap.chan == chan)
2909 			continue;
2910 
2911 		nskb = skb_clone(skb, GFP_KERNEL);
2912 		if (!nskb)
2913 			continue;
2914 		if (chan->ops->recv(chan, nskb))
2915 			kfree_skb(nskb);
2916 	}
2917 
2918 	mutex_unlock(&conn->chan_lock);
2919 }
2920 
2921 /* ---- L2CAP signalling commands ---- */
2922 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2923 				       u8 ident, u16 dlen, void *data)
2924 {
2925 	struct sk_buff *skb, **frag;
2926 	struct l2cap_cmd_hdr *cmd;
2927 	struct l2cap_hdr *lh;
2928 	int len, count;
2929 
2930 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2931 	       conn, code, ident, dlen);
2932 
2933 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2934 		return NULL;
2935 
2936 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2937 	count = min_t(unsigned int, conn->mtu, len);
2938 
2939 	skb = bt_skb_alloc(count, GFP_KERNEL);
2940 	if (!skb)
2941 		return NULL;
2942 
2943 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2944 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2945 
2946 	if (conn->hcon->type == LE_LINK)
2947 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2948 	else
2949 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2950 
2951 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
2952 	cmd->code  = code;
2953 	cmd->ident = ident;
2954 	cmd->len   = cpu_to_le16(dlen);
2955 
2956 	if (dlen) {
2957 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2958 		skb_put_data(skb, data, count);
2959 		data += count;
2960 	}
2961 
2962 	len -= skb->len;
2963 
2964 	/* Continuation fragments (no L2CAP header) */
2965 	frag = &skb_shinfo(skb)->frag_list;
2966 	while (len) {
2967 		count = min_t(unsigned int, conn->mtu, len);
2968 
2969 		*frag = bt_skb_alloc(count, GFP_KERNEL);
2970 		if (!*frag)
2971 			goto fail;
2972 
2973 		skb_put_data(*frag, data, count);
2974 
2975 		len  -= count;
2976 		data += count;
2977 
2978 		frag = &(*frag)->next;
2979 	}
2980 
2981 	return skb;
2982 
2983 fail:
2984 	kfree_skb(skb);
2985 	return NULL;
2986 }
2987 
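/* Configuration options are encoded as type/length/value triplets:
 * a one byte type (the high bit marks the option as a hint), a one
 * byte length, and a value of 1, 2, 4 or "length" bytes.
 */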
2988 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2989 				     unsigned long *val)
2990 {
2991 	struct l2cap_conf_opt *opt = *ptr;
2992 	int len;
2993 
2994 	len = L2CAP_CONF_OPT_SIZE + opt->len;
2995 	*ptr += len;
2996 
2997 	*type = opt->type;
2998 	*olen = opt->len;
2999 
3000 	switch (opt->len) {
3001 	case 1:
3002 		*val = *((u8 *) opt->val);
3003 		break;
3004 
3005 	case 2:
3006 		*val = get_unaligned_le16(opt->val);
3007 		break;
3008 
3009 	case 4:
3010 		*val = get_unaligned_le32(opt->val);
3011 		break;
3012 
3013 	default:
3014 		*val = (unsigned long) opt->val;
3015 		break;
3016 	}
3017 
3018 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3019 	return len;
3020 }
3021 
3022 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3023 {
3024 	struct l2cap_conf_opt *opt = *ptr;
3025 
3026 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3027 
3028 	if (size < L2CAP_CONF_OPT_SIZE + len)
3029 		return;
3030 
3031 	opt->type = type;
3032 	opt->len  = len;
3033 
3034 	switch (len) {
3035 	case 1:
3036 		*((u8 *) opt->val)  = val;
3037 		break;
3038 
3039 	case 2:
3040 		put_unaligned_le16(val, opt->val);
3041 		break;
3042 
3043 	case 4:
3044 		put_unaligned_le32(val, opt->val);
3045 		break;
3046 
3047 	default:
3048 		memcpy(opt->val, (void *) val, len);
3049 		break;
3050 	}
3051 
3052 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3053 }
3054 
3055 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3056 {
3057 	struct l2cap_conf_efs efs;
3058 
3059 	switch (chan->mode) {
3060 	case L2CAP_MODE_ERTM:
3061 		efs.id		= chan->local_id;
3062 		efs.stype	= chan->local_stype;
3063 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3064 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3065 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3066 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3067 		break;
3068 
3069 	case L2CAP_MODE_STREAMING:
3070 		efs.id		= 1;
3071 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3072 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3073 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3074 		efs.acc_lat	= 0;
3075 		efs.flush_to	= 0;
3076 		break;
3077 
3078 	default:
3079 		return;
3080 	}
3081 
3082 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3083 			   (unsigned long) &efs, size);
3084 }
3085 
3086 static void l2cap_ack_timeout(struct work_struct *work)
3087 {
3088 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3089 					       ack_timer.work);
3090 	u16 frames_to_ack;
3091 
3092 	BT_DBG("chan %p", chan);
3093 
3094 	l2cap_chan_lock(chan);
3095 
3096 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3097 				     chan->last_acked_seq);
3098 
3099 	if (frames_to_ack)
3100 		l2cap_send_rr_or_rnr(chan, 0);
3101 
3102 	l2cap_chan_unlock(chan);
3103 	l2cap_chan_put(chan);
3104 }
3105 
3106 int l2cap_ertm_init(struct l2cap_chan *chan)
3107 {
3108 	int err;
3109 
3110 	chan->next_tx_seq = 0;
3111 	chan->expected_tx_seq = 0;
3112 	chan->expected_ack_seq = 0;
3113 	chan->unacked_frames = 0;
3114 	chan->buffer_seq = 0;
3115 	chan->frames_sent = 0;
3116 	chan->last_acked_seq = 0;
3117 	chan->sdu = NULL;
3118 	chan->sdu_last_frag = NULL;
3119 	chan->sdu_len = 0;
3120 
3121 	skb_queue_head_init(&chan->tx_q);
3122 
3123 	chan->local_amp_id = AMP_ID_BREDR;
3124 	chan->move_id = AMP_ID_BREDR;
3125 	chan->move_state = L2CAP_MOVE_STABLE;
3126 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3127 
3128 	if (chan->mode != L2CAP_MODE_ERTM)
3129 		return 0;
3130 
3131 	chan->rx_state = L2CAP_RX_STATE_RECV;
3132 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3133 
3134 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3135 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3136 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3137 
3138 	skb_queue_head_init(&chan->srej_q);
3139 
3140 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3141 	if (err < 0)
3142 		return err;
3143 
3144 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3145 	if (err < 0)
3146 		l2cap_seq_list_free(&chan->srej_list);
3147 
3148 	return err;
3149 }
3150 
3151 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3152 {
3153 	switch (mode) {
3154 	case L2CAP_MODE_STREAMING:
3155 	case L2CAP_MODE_ERTM:
3156 		if (l2cap_mode_supported(mode, remote_feat_mask))
3157 			return mode;
3158 		/* fall through */
3159 	default:
3160 		return L2CAP_MODE_BASIC;
3161 	}
3162 }
3163 
3164 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3165 {
3166 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3167 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3168 }
3169 
3170 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3171 {
3172 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3173 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3174 }
3175 
3176 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3177 				      struct l2cap_conf_rfc *rfc)
3178 {
3179 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3180 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3181 
3182 		/* Class 1 devices must have ERTM timeouts
3183 		 * exceeding the Link Supervision Timeout.  The
3184 		 * default Link Supervision Timeout for AMP
3185 		 * controllers is 10 seconds.
3186 		 *
3187 		 * Class 1 devices use 0xffffffff for their
3188 		 * best-effort flush timeout, so the clamping logic
3189 		 * will result in a timeout that meets the above
3190 		 * requirement.  ERTM timeouts are 16-bit values, so
3191 		 * the maximum timeout is 65.535 seconds.
3192 		 */
3193 
3194 		/* Convert timeout to milliseconds and round */
3195 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3196 
3197 		/* This is the recommended formula for class 2 devices
3198 		 * that start ERTM timers when packets are sent to the
3199 		 * controller.
3200 		 */
3201 		ertm_to = 3 * ertm_to + 500;
3202 
3203 		if (ertm_to > 0xffff)
3204 			ertm_to = 0xffff;
3205 
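		/* Example: the class 1 value of 0xffffffff rounds up to
		 * 4294968 ms after the conversion above; the formula and
		 * clamp then reduce it to the 0xffff ms (65.535 s) ERTM
		 * maximum.
		 */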
3206 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3207 		rfc->monitor_timeout = rfc->retrans_timeout;
3208 	} else {
3209 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3210 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3211 	}
3212 }
3213 
3214 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3215 {
3216 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3217 	    __l2cap_ews_supported(chan->conn)) {
3218 		/* use extended control field */
3219 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3220 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3221 	} else {
3222 		chan->tx_win = min_t(u16, chan->tx_win,
3223 				     L2CAP_DEFAULT_TX_WINDOW);
3224 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3225 	}
3226 	chan->ack_win = chan->tx_win;
3227 }
3228 
3229 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3230 {
3231 	struct l2cap_conf_req *req = data;
3232 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3233 	void *ptr = req->data;
3234 	void *endptr = data + data_size;
3235 	u16 size;
3236 
3237 	BT_DBG("chan %p", chan);
3238 
3239 	if (chan->num_conf_req || chan->num_conf_rsp)
3240 		goto done;
3241 
3242 	switch (chan->mode) {
3243 	case L2CAP_MODE_STREAMING:
3244 	case L2CAP_MODE_ERTM:
3245 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3246 			break;
3247 
3248 		if (__l2cap_efs_supported(chan->conn))
3249 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3250 
3251 		/* fall through */
3252 	default:
3253 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3254 		break;
3255 	}
3256 
3257 done:
3258 	if (chan->imtu != L2CAP_DEFAULT_MTU)
3259 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3260 
3261 	switch (chan->mode) {
3262 	case L2CAP_MODE_BASIC:
3263 		if (disable_ertm)
3264 			break;
3265 
3266 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3267 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3268 			break;
3269 
3270 		rfc.mode            = L2CAP_MODE_BASIC;
3271 		rfc.txwin_size      = 0;
3272 		rfc.max_transmit    = 0;
3273 		rfc.retrans_timeout = 0;
3274 		rfc.monitor_timeout = 0;
3275 		rfc.max_pdu_size    = 0;
3276 
3277 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3278 				   (unsigned long) &rfc, endptr - ptr);
3279 		break;
3280 
3281 	case L2CAP_MODE_ERTM:
3282 		rfc.mode            = L2CAP_MODE_ERTM;
3283 		rfc.max_transmit    = chan->max_tx;
3284 
3285 		__l2cap_set_ertm_timeouts(chan, &rfc);
3286 
3287 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3288 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3289 			     L2CAP_FCS_SIZE);
3290 		rfc.max_pdu_size = cpu_to_le16(size);
3291 
3292 		l2cap_txwin_setup(chan);
3293 
3294 		rfc.txwin_size = min_t(u16, chan->tx_win,
3295 				       L2CAP_DEFAULT_TX_WINDOW);
3296 
3297 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3298 				   (unsigned long) &rfc, endptr - ptr);
3299 
3300 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3301 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3302 
3303 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3304 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3305 					   chan->tx_win, endptr - ptr);
3306 
3307 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3308 			if (chan->fcs == L2CAP_FCS_NONE ||
3309 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3310 				chan->fcs = L2CAP_FCS_NONE;
3311 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3312 						   chan->fcs, endptr - ptr);
3313 			}
3314 		break;
3315 
3316 	case L2CAP_MODE_STREAMING:
3317 		l2cap_txwin_setup(chan);
3318 		rfc.mode            = L2CAP_MODE_STREAMING;
3319 		rfc.txwin_size      = 0;
3320 		rfc.max_transmit    = 0;
3321 		rfc.retrans_timeout = 0;
3322 		rfc.monitor_timeout = 0;
3323 
3324 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3325 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3326 			     L2CAP_FCS_SIZE);
3327 		rfc.max_pdu_size = cpu_to_le16(size);
3328 
3329 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3330 				   (unsigned long) &rfc, endptr - ptr);
3331 
3332 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3333 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3334 
3335 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3336 			if (chan->fcs == L2CAP_FCS_NONE ||
3337 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3338 				chan->fcs = L2CAP_FCS_NONE;
3339 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3340 						   chan->fcs, endptr - ptr);
3341 			}
3342 		break;
3343 	}
3344 
3345 	req->dcid  = cpu_to_le16(chan->dcid);
3346 	req->flags = cpu_to_le16(0);
3347 
3348 	return ptr - data;
3349 }
3350 
3351 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3352 {
3353 	struct l2cap_conf_rsp *rsp = data;
3354 	void *ptr = rsp->data;
3355 	void *endptr = data + data_size;
3356 	void *req = chan->conf_req;
3357 	int len = chan->conf_len;
3358 	int type, hint, olen;
3359 	unsigned long val;
3360 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3361 	struct l2cap_conf_efs efs;
3362 	u8 remote_efs = 0;
3363 	u16 mtu = L2CAP_DEFAULT_MTU;
3364 	u16 result = L2CAP_CONF_SUCCESS;
3365 	u16 size;
3366 
3367 	BT_DBG("chan %p", chan);
3368 
3369 	while (len >= L2CAP_CONF_OPT_SIZE) {
3370 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3371 		if (len < 0)
3372 			break;
3373 
3374 		hint  = type & L2CAP_CONF_HINT;
3375 		type &= L2CAP_CONF_MASK;
3376 
3377 		switch (type) {
3378 		case L2CAP_CONF_MTU:
3379 			if (olen != 2)
3380 				break;
3381 			mtu = val;
3382 			break;
3383 
3384 		case L2CAP_CONF_FLUSH_TO:
3385 			if (olen != 2)
3386 				break;
3387 			chan->flush_to = val;
3388 			break;
3389 
3390 		case L2CAP_CONF_QOS:
3391 			break;
3392 
3393 		case L2CAP_CONF_RFC:
3394 			if (olen != sizeof(rfc))
3395 				break;
3396 			memcpy(&rfc, (void *) val, olen);
3397 			break;
3398 
3399 		case L2CAP_CONF_FCS:
3400 			if (olen != 1)
3401 				break;
3402 			if (val == L2CAP_FCS_NONE)
3403 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3404 			break;
3405 
3406 		case L2CAP_CONF_EFS:
3407 			if (olen != sizeof(efs))
3408 				break;
3409 			remote_efs = 1;
3410 			memcpy(&efs, (void *) val, olen);
3411 			break;
3412 
3413 		case L2CAP_CONF_EWS:
3414 			if (olen != 2)
3415 				break;
3416 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3417 				return -ECONNREFUSED;
3418 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3419 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3420 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3421 			chan->remote_tx_win = val;
3422 			break;
3423 
3424 		default:
3425 			if (hint)
3426 				break;
3427 			result = L2CAP_CONF_UNKNOWN;
3428 			*((u8 *) ptr++) = type;
3429 			break;
3430 		}
3431 	}
3432 
3433 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3434 		goto done;
3435 
3436 	switch (chan->mode) {
3437 	case L2CAP_MODE_STREAMING:
3438 	case L2CAP_MODE_ERTM:
3439 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3440 			chan->mode = l2cap_select_mode(rfc.mode,
3441 						       chan->conn->feat_mask);
3442 			break;
3443 		}
3444 
3445 		if (remote_efs) {
3446 			if (__l2cap_efs_supported(chan->conn))
3447 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3448 			else
3449 				return -ECONNREFUSED;
3450 		}
3451 
3452 		if (chan->mode != rfc.mode)
3453 			return -ECONNREFUSED;
3454 
3455 		break;
3456 	}
3457 
3458 done:
3459 	if (chan->mode != rfc.mode) {
3460 		result = L2CAP_CONF_UNACCEPT;
3461 		rfc.mode = chan->mode;
3462 
3463 		if (chan->num_conf_rsp == 1)
3464 			return -ECONNREFUSED;
3465 
3466 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3467 				   (unsigned long) &rfc, endptr - ptr);
3468 	}
3469 
3470 	if (result == L2CAP_CONF_SUCCESS) {
3471 		/* Configure output options and let the other side know
3472 		 * which ones we don't like. */
3473 
3474 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3475 			result = L2CAP_CONF_UNACCEPT;
3476 		else {
3477 			chan->omtu = mtu;
3478 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3479 		}
3480 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3481 
3482 		if (remote_efs) {
3483 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3484 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3485 			    efs.stype != chan->local_stype) {
3486 
3487 				result = L2CAP_CONF_UNACCEPT;
3488 
3489 				if (chan->num_conf_req >= 1)
3490 					return -ECONNREFUSED;
3491 
3492 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3493 						   sizeof(efs),
3494 						   (unsigned long) &efs, endptr - ptr);
3495 			} else {
3496 				/* Send PENDING Conf Rsp */
3497 				result = L2CAP_CONF_PENDING;
3498 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3499 			}
3500 		}
3501 
3502 		switch (rfc.mode) {
3503 		case L2CAP_MODE_BASIC:
3504 			chan->fcs = L2CAP_FCS_NONE;
3505 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3506 			break;
3507 
3508 		case L2CAP_MODE_ERTM:
3509 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3510 				chan->remote_tx_win = rfc.txwin_size;
3511 			else
3512 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3513 
3514 			chan->remote_max_tx = rfc.max_transmit;
3515 
3516 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3517 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3518 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3519 			rfc.max_pdu_size = cpu_to_le16(size);
3520 			chan->remote_mps = size;
3521 
3522 			__l2cap_set_ertm_timeouts(chan, &rfc);
3523 
3524 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3525 
3526 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3527 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3528 
3529 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3530 				chan->remote_id = efs.id;
3531 				chan->remote_stype = efs.stype;
3532 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3533 				chan->remote_flush_to =
3534 					le32_to_cpu(efs.flush_to);
3535 				chan->remote_acc_lat =
3536 					le32_to_cpu(efs.acc_lat);
3537 				chan->remote_sdu_itime =
3538 					le32_to_cpu(efs.sdu_itime);
3539 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3540 						   sizeof(efs),
3541 						   (unsigned long) &efs, endptr - ptr);
3542 			}
3543 			break;
3544 
3545 		case L2CAP_MODE_STREAMING:
3546 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3547 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3548 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3549 			rfc.max_pdu_size = cpu_to_le16(size);
3550 			chan->remote_mps = size;
3551 
3552 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3553 
3554 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3555 					   (unsigned long) &rfc, endptr - ptr);
3556 
3557 			break;
3558 
3559 		default:
3560 			result = L2CAP_CONF_UNACCEPT;
3561 
3562 			memset(&rfc, 0, sizeof(rfc));
3563 			rfc.mode = chan->mode;
3564 		}
3565 
3566 		if (result == L2CAP_CONF_SUCCESS)
3567 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3568 	}
3569 	rsp->scid   = cpu_to_le16(chan->dcid);
3570 	rsp->result = cpu_to_le16(result);
3571 	rsp->flags  = cpu_to_le16(0);
3572 
3573 	return ptr - data;
3574 }
3575 
3576 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3577 				void *data, size_t size, u16 *result)
3578 {
3579 	struct l2cap_conf_req *req = data;
3580 	void *ptr = req->data;
3581 	void *endptr = data + size;
3582 	int type, olen;
3583 	unsigned long val;
3584 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3585 	struct l2cap_conf_efs efs;
3586 
3587 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3588 
3589 	while (len >= L2CAP_CONF_OPT_SIZE) {
3590 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3591 		if (len < 0)
3592 			break;
3593 
3594 		switch (type) {
3595 		case L2CAP_CONF_MTU:
3596 			if (olen != 2)
3597 				break;
3598 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3599 				*result = L2CAP_CONF_UNACCEPT;
3600 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3601 			} else
3602 				chan->imtu = val;
3603 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3604 					   endptr - ptr);
3605 			break;
3606 
3607 		case L2CAP_CONF_FLUSH_TO:
3608 			if (olen != 2)
3609 				break;
3610 			chan->flush_to = val;
3611 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3612 					   chan->flush_to, endptr - ptr);
3613 			break;
3614 
3615 		case L2CAP_CONF_RFC:
3616 			if (olen != sizeof(rfc))
3617 				break;
3618 			memcpy(&rfc, (void *)val, olen);
3619 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3620 			    rfc.mode != chan->mode)
3621 				return -ECONNREFUSED;
3622 			chan->fcs = 0;
3623 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3624 					   (unsigned long) &rfc, endptr - ptr);
3625 			break;
3626 
3627 		case L2CAP_CONF_EWS:
3628 			if (olen != 2)
3629 				break;
3630 			chan->ack_win = min_t(u16, val, chan->ack_win);
3631 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3632 					   chan->tx_win, endptr - ptr);
3633 			break;
3634 
3635 		case L2CAP_CONF_EFS:
3636 			if (olen != sizeof(efs))
3637 				break;
3638 			memcpy(&efs, (void *)val, olen);
3639 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3640 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3641 			    efs.stype != chan->local_stype)
3642 				return -ECONNREFUSED;
3643 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3644 					   (unsigned long) &efs, endptr - ptr);
3645 			break;
3646 
3647 		case L2CAP_CONF_FCS:
3648 			if (olen != 1)
3649 				break;
3650 			if (*result == L2CAP_CONF_PENDING)
3651 				if (val == L2CAP_FCS_NONE)
3652 					set_bit(CONF_RECV_NO_FCS,
3653 						&chan->conf_state);
3654 			break;
3655 		}
3656 	}
3657 
3658 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3659 		return -ECONNREFUSED;
3660 
3661 	chan->mode = rfc.mode;
3662 
3663 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3664 		switch (rfc.mode) {
3665 		case L2CAP_MODE_ERTM:
3666 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3667 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3668 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3669 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3670 				chan->ack_win = min_t(u16, chan->ack_win,
3671 						      rfc.txwin_size);
3672 
3673 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3674 				chan->local_msdu = le16_to_cpu(efs.msdu);
3675 				chan->local_sdu_itime =
3676 					le32_to_cpu(efs.sdu_itime);
3677 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3678 				chan->local_flush_to =
3679 					le32_to_cpu(efs.flush_to);
3680 			}
3681 			break;
3682 
3683 		case L2CAP_MODE_STREAMING:
3684 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3685 		}
3686 	}
3687 
3688 	req->dcid   = cpu_to_le16(chan->dcid);
3689 	req->flags  = cpu_to_le16(0);
3690 
3691 	return ptr - data;
3692 }
3693 
3694 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3695 				u16 result, u16 flags)
3696 {
3697 	struct l2cap_conf_rsp *rsp = data;
3698 	void *ptr = rsp->data;
3699 
3700 	BT_DBG("chan %p", chan);
3701 
3702 	rsp->scid   = cpu_to_le16(chan->dcid);
3703 	rsp->result = cpu_to_le16(result);
3704 	rsp->flags  = cpu_to_le16(flags);
3705 
3706 	return ptr - data;
3707 }
3708 
3709 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3710 {
3711 	struct l2cap_le_conn_rsp rsp;
3712 	struct l2cap_conn *conn = chan->conn;
3713 
3714 	BT_DBG("chan %p", chan);
3715 
3716 	rsp.dcid    = cpu_to_le16(chan->scid);
3717 	rsp.mtu     = cpu_to_le16(chan->imtu);
3718 	rsp.mps     = cpu_to_le16(chan->mps);
3719 	rsp.credits = cpu_to_le16(chan->rx_credits);
3720 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3721 
3722 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3723 		       &rsp);
3724 }
3725 
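/* Send the deferred BR/EDR Connection Response (or Create Channel
 * Response when the channel sits on an AMP link) for a previously
 * deferred incoming connection, then start configuration if no
 * Configuration Request has been sent yet.
 */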
3726 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3727 {
3728 	struct l2cap_conn_rsp rsp;
3729 	struct l2cap_conn *conn = chan->conn;
3730 	u8 buf[128];
3731 	u8 rsp_code;
3732 
3733 	rsp.scid   = cpu_to_le16(chan->dcid);
3734 	rsp.dcid   = cpu_to_le16(chan->scid);
3735 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3736 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3737 
3738 	if (chan->hs_hcon)
3739 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3740 	else
3741 		rsp_code = L2CAP_CONN_RSP;
3742 
3743 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3744 
3745 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3746 
3747 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3748 		return;
3749 
3750 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3751 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3752 	chan->num_conf_req++;
3753 }
3754 
3755 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3756 {
3757 	int type, olen;
3758 	unsigned long val;
3759 	/* Use sane default values in case a misbehaving remote device
3760 	 * did not send an RFC or extended window size option.
3761 	 */
3762 	u16 txwin_ext = chan->ack_win;
3763 	struct l2cap_conf_rfc rfc = {
3764 		.mode = chan->mode,
3765 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3766 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3767 		.max_pdu_size = cpu_to_le16(chan->imtu),
3768 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3769 	};
3770 
3771 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3772 
3773 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3774 		return;
3775 
3776 	while (len >= L2CAP_CONF_OPT_SIZE) {
3777 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3778 		if (len < 0)
3779 			break;
3780 
3781 		switch (type) {
3782 		case L2CAP_CONF_RFC:
3783 			if (olen != sizeof(rfc))
3784 				break;
3785 			memcpy(&rfc, (void *)val, olen);
3786 			break;
3787 		case L2CAP_CONF_EWS:
3788 			if (olen != 2)
3789 				break;
3790 			txwin_ext = val;
3791 			break;
3792 		}
3793 	}
3794 
3795 	switch (rfc.mode) {
3796 	case L2CAP_MODE_ERTM:
3797 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3798 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3799 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3800 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3801 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3802 		else
3803 			chan->ack_win = min_t(u16, chan->ack_win,
3804 					      rfc.txwin_size);
3805 		break;
3806 	case L2CAP_MODE_STREAMING:
3807 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3808 	}
3809 }
3810 
3811 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3812 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3813 				    u8 *data)
3814 {
3815 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3816 
3817 	if (cmd_len < sizeof(*rej))
3818 		return -EPROTO;
3819 
3820 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3821 		return 0;
3822 
3823 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3824 	    cmd->ident == conn->info_ident) {
3825 		cancel_delayed_work(&conn->info_timer);
3826 
3827 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3828 		conn->info_ident = 0;
3829 
3830 		l2cap_conn_start(conn);
3831 	}
3832 
3833 	return 0;
3834 }
3835 
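/* Common handler for incoming Connection Request and Create Channel
 * Request PDUs: find a listening channel for the PSM, check link
 * security and the dynamic source CID, create the new channel and
 * send the response. When the remote feature mask is not yet known,
 * the result is left pending and an Information Request is sent.
 */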
3836 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3837 					struct l2cap_cmd_hdr *cmd,
3838 					u8 *data, u8 rsp_code, u8 amp_id)
3839 {
3840 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3841 	struct l2cap_conn_rsp rsp;
3842 	struct l2cap_chan *chan = NULL, *pchan;
3843 	int result, status = L2CAP_CS_NO_INFO;
3844 
3845 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3846 	__le16 psm = req->psm;
3847 
3848 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3849 
3850 	/* Check if we have a socket listening on this PSM */
3851 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3852 					 &conn->hcon->dst, ACL_LINK);
3853 	if (!pchan) {
3854 		result = L2CAP_CR_BAD_PSM;
3855 		goto sendresp;
3856 	}
3857 
3858 	mutex_lock(&conn->chan_lock);
3859 	l2cap_chan_lock(pchan);
3860 
3861 	/* Check if the ACL is secure enough (if not SDP) */
3862 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3863 	    !hci_conn_check_link_mode(conn->hcon)) {
3864 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3865 		result = L2CAP_CR_SEC_BLOCK;
3866 		goto response;
3867 	}
3868 
3869 	result = L2CAP_CR_NO_MEM;
3870 
3871 	/* Check for valid dynamic CID range (as per Erratum 3253) */
3872 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
3873 		result = L2CAP_CR_INVALID_SCID;
3874 		goto response;
3875 	}
3876 
3877 	/* Check if we already have a channel with that dcid */
3878 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
3879 		result = L2CAP_CR_SCID_IN_USE;
3880 		goto response;
3881 	}
3882 
3883 	chan = pchan->ops->new_connection(pchan);
3884 	if (!chan)
3885 		goto response;
3886 
3887 	/* For certain devices (e.g. a HID mouse), support for authentication,
3888 	 * pairing and bonding is optional. For such devices, in order to avoid
3889 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
3890 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3891 	 */
3892 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3893 
3894 	bacpy(&chan->src, &conn->hcon->src);
3895 	bacpy(&chan->dst, &conn->hcon->dst);
3896 	chan->src_type = bdaddr_src_type(conn->hcon);
3897 	chan->dst_type = bdaddr_dst_type(conn->hcon);
3898 	chan->psm  = psm;
3899 	chan->dcid = scid;
3900 	chan->local_amp_id = amp_id;
3901 
3902 	__l2cap_chan_add(conn, chan);
3903 
3904 	dcid = chan->scid;
3905 
3906 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3907 
3908 	chan->ident = cmd->ident;
3909 
3910 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3911 		if (l2cap_chan_check_security(chan, false)) {
3912 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3913 				l2cap_state_change(chan, BT_CONNECT2);
3914 				result = L2CAP_CR_PEND;
3915 				status = L2CAP_CS_AUTHOR_PEND;
3916 				chan->ops->defer(chan);
3917 			} else {
3918 				/* Force pending result for AMP controllers.
3919 				 * The connection will succeed after the
3920 				 * physical link is up.
3921 				 */
3922 				if (amp_id == AMP_ID_BREDR) {
3923 					l2cap_state_change(chan, BT_CONFIG);
3924 					result = L2CAP_CR_SUCCESS;
3925 				} else {
3926 					l2cap_state_change(chan, BT_CONNECT2);
3927 					result = L2CAP_CR_PEND;
3928 				}
3929 				status = L2CAP_CS_NO_INFO;
3930 			}
3931 		} else {
3932 			l2cap_state_change(chan, BT_CONNECT2);
3933 			result = L2CAP_CR_PEND;
3934 			status = L2CAP_CS_AUTHEN_PEND;
3935 		}
3936 	} else {
3937 		l2cap_state_change(chan, BT_CONNECT2);
3938 		result = L2CAP_CR_PEND;
3939 		status = L2CAP_CS_NO_INFO;
3940 	}
3941 
3942 response:
3943 	l2cap_chan_unlock(pchan);
3944 	mutex_unlock(&conn->chan_lock);
3945 	l2cap_chan_put(pchan);
3946 
3947 sendresp:
3948 	rsp.scid   = cpu_to_le16(scid);
3949 	rsp.dcid   = cpu_to_le16(dcid);
3950 	rsp.result = cpu_to_le16(result);
3951 	rsp.status = cpu_to_le16(status);
3952 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3953 
3954 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3955 		struct l2cap_info_req info;
3956 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3957 
3958 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3959 		conn->info_ident = l2cap_get_ident(conn);
3960 
3961 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3962 
3963 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3964 			       sizeof(info), &info);
3965 	}
3966 
3967 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3968 	    result == L2CAP_CR_SUCCESS) {
3969 		u8 buf[128];
3970 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3971 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3972 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3973 		chan->num_conf_req++;
3974 	}
3975 
3976 	return chan;
3977 }
3978 
3979 static int l2cap_connect_req(struct l2cap_conn *conn,
3980 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3981 {
3982 	struct hci_dev *hdev = conn->hcon->hdev;
3983 	struct hci_conn *hcon = conn->hcon;
3984 
3985 	if (cmd_len < sizeof(struct l2cap_conn_req))
3986 		return -EPROTO;
3987 
3988 	hci_dev_lock(hdev);
3989 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3990 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3991 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3992 	hci_dev_unlock(hdev);
3993 
3994 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3995 	return 0;
3996 }
3997 
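/* Handle a Connection Response or Create Channel Response: on success
 * move the channel to BT_CONFIG and send the first Configuration
 * Request, on a pending result just mark the connection as pending,
 * and on any other result tear the channel down.
 */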
3998 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3999 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4000 				    u8 *data)
4001 {
4002 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4003 	u16 scid, dcid, result, status;
4004 	struct l2cap_chan *chan;
4005 	u8 req[128];
4006 	int err;
4007 
4008 	if (cmd_len < sizeof(*rsp))
4009 		return -EPROTO;
4010 
4011 	scid   = __le16_to_cpu(rsp->scid);
4012 	dcid   = __le16_to_cpu(rsp->dcid);
4013 	result = __le16_to_cpu(rsp->result);
4014 	status = __le16_to_cpu(rsp->status);
4015 
4016 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4017 	       dcid, scid, result, status);
4018 
4019 	mutex_lock(&conn->chan_lock);
4020 
4021 	if (scid) {
4022 		chan = __l2cap_get_chan_by_scid(conn, scid);
4023 		if (!chan) {
4024 			err = -EBADSLT;
4025 			goto unlock;
4026 		}
4027 	} else {
4028 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4029 		if (!chan) {
4030 			err = -EBADSLT;
4031 			goto unlock;
4032 		}
4033 	}
4034 
4035 	err = 0;
4036 
4037 	l2cap_chan_lock(chan);
4038 
4039 	switch (result) {
4040 	case L2CAP_CR_SUCCESS:
4041 		l2cap_state_change(chan, BT_CONFIG);
4042 		chan->ident = 0;
4043 		chan->dcid = dcid;
4044 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4045 
4046 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4047 			break;
4048 
4049 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4050 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4051 		chan->num_conf_req++;
4052 		break;
4053 
4054 	case L2CAP_CR_PEND:
4055 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4056 		break;
4057 
4058 	default:
4059 		l2cap_chan_del(chan, ECONNREFUSED);
4060 		break;
4061 	}
4062 
4063 	l2cap_chan_unlock(chan);
4064 
4065 unlock:
4066 	mutex_unlock(&conn->chan_lock);
4067 
4068 	return err;
4069 }
4070 
4071 static inline void set_default_fcs(struct l2cap_chan *chan)
4072 {
4073 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4074 	 * sides request it.
4075 	 */
4076 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4077 		chan->fcs = L2CAP_FCS_NONE;
4078 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4079 		chan->fcs = L2CAP_FCS_CRC16;
4080 }
4081 
4082 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4083 				    u8 ident, u16 flags)
4084 {
4085 	struct l2cap_conn *conn = chan->conn;
4086 
4087 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4088 	       flags);
4089 
4090 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4091 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4092 
4093 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4094 		       l2cap_build_conf_rsp(chan, data,
4095 					    L2CAP_CONF_SUCCESS, flags), data);
4096 }
4097 
4098 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4099 				   u16 scid, u16 dcid)
4100 {
4101 	struct l2cap_cmd_rej_cid rej;
4102 
4103 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4104 	rej.scid = __cpu_to_le16(scid);
4105 	rej.dcid = __cpu_to_le16(dcid);
4106 
4107 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4108 }
4109 
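/* Handle a Configuration Request: accumulate (possibly fragmented)
 * options in chan->conf_req, parse them once the last fragment has
 * arrived, send the Configuration Response and, when both directions
 * are configured, finish channel setup (ERTM init / channel ready).
 */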
4110 static inline int l2cap_config_req(struct l2cap_conn *conn,
4111 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4112 				   u8 *data)
4113 {
4114 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4115 	u16 dcid, flags;
4116 	u8 rsp[64];
4117 	struct l2cap_chan *chan;
4118 	int len, err = 0;
4119 
4120 	if (cmd_len < sizeof(*req))
4121 		return -EPROTO;
4122 
4123 	dcid  = __le16_to_cpu(req->dcid);
4124 	flags = __le16_to_cpu(req->flags);
4125 
4126 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4127 
4128 	chan = l2cap_get_chan_by_scid(conn, dcid);
4129 	if (!chan) {
4130 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4131 		return 0;
4132 	}
4133 
4134 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4135 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4136 				       chan->dcid);
4137 		goto unlock;
4138 	}
4139 
4140 	/* Reject if config buffer is too small. */
4141 	len = cmd_len - sizeof(*req);
4142 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4143 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4144 			       l2cap_build_conf_rsp(chan, rsp,
4145 			       L2CAP_CONF_REJECT, flags), rsp);
4146 		goto unlock;
4147 	}
4148 
4149 	/* Store config. */
4150 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4151 	chan->conf_len += len;
4152 
4153 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4154 		/* Incomplete config. Send empty response. */
4155 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4156 			       l2cap_build_conf_rsp(chan, rsp,
4157 			       L2CAP_CONF_SUCCESS, flags), rsp);
4158 		goto unlock;
4159 	}
4160 
4161 	/* Complete config. */
4162 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4163 	if (len < 0) {
4164 		l2cap_send_disconn_req(chan, ECONNRESET);
4165 		goto unlock;
4166 	}
4167 
4168 	chan->ident = cmd->ident;
4169 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4170 	chan->num_conf_rsp++;
4171 
4172 	/* Reset config buffer. */
4173 	chan->conf_len = 0;
4174 
4175 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4176 		goto unlock;
4177 
4178 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4179 		set_default_fcs(chan);
4180 
4181 		if (chan->mode == L2CAP_MODE_ERTM ||
4182 		    chan->mode == L2CAP_MODE_STREAMING)
4183 			err = l2cap_ertm_init(chan);
4184 
4185 		if (err < 0)
4186 			l2cap_send_disconn_req(chan, -err);
4187 		else
4188 			l2cap_chan_ready(chan);
4189 
4190 		goto unlock;
4191 	}
4192 
4193 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4194 		u8 buf[64];
4195 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4196 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4197 		chan->num_conf_req++;
4198 	}
4199 
4200 	/* We got a Conf Rsp PENDING from the remote side and assume we also
4201 	   sent a Conf Rsp PENDING in the code above */
4202 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4203 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4204 
4205 		/* check compatibility */
4206 
4207 		/* Send rsp for BR/EDR channel */
4208 		if (!chan->hs_hcon)
4209 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4210 		else
4211 			chan->ident = cmd->ident;
4212 	}
4213 
4214 unlock:
4215 	l2cap_chan_unlock(chan);
4216 	return err;
4217 }
4218 
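/* Handle a Configuration Response: on success record the negotiated
 * RFC/EWS options, on a pending or unacceptable result renegotiate,
 * and on any other result disconnect. Channel setup completes once
 * both input and output configuration are done.
 */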
4219 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4220 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4221 				   u8 *data)
4222 {
4223 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4224 	u16 scid, flags, result;
4225 	struct l2cap_chan *chan;
4226 	int len = cmd_len - sizeof(*rsp);
4227 	int err = 0;
4228 
4229 	if (cmd_len < sizeof(*rsp))
4230 		return -EPROTO;
4231 
4232 	scid   = __le16_to_cpu(rsp->scid);
4233 	flags  = __le16_to_cpu(rsp->flags);
4234 	result = __le16_to_cpu(rsp->result);
4235 
4236 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4237 	       result, len);
4238 
4239 	chan = l2cap_get_chan_by_scid(conn, scid);
4240 	if (!chan)
4241 		return 0;
4242 
4243 	switch (result) {
4244 	case L2CAP_CONF_SUCCESS:
4245 		l2cap_conf_rfc_get(chan, rsp->data, len);
4246 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4247 		break;
4248 
4249 	case L2CAP_CONF_PENDING:
4250 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4251 
4252 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4253 			char buf[64];
4254 
4255 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4256 						   buf, sizeof(buf), &result);
4257 			if (len < 0) {
4258 				l2cap_send_disconn_req(chan, ECONNRESET);
4259 				goto done;
4260 			}
4261 
4262 			if (!chan->hs_hcon) {
4263 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4264 							0);
4265 			} else {
4266 				if (l2cap_check_efs(chan)) {
4267 					amp_create_logical_link(chan);
4268 					chan->ident = cmd->ident;
4269 				}
4270 			}
4271 		}
4272 		goto done;
4273 
4274 	case L2CAP_CONF_UNACCEPT:
4275 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4276 			char req[64];
4277 
4278 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4279 				l2cap_send_disconn_req(chan, ECONNRESET);
4280 				goto done;
4281 			}
4282 
4283 			/* throw out any old stored conf requests */
4284 			result = L2CAP_CONF_SUCCESS;
4285 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4286 						   req, sizeof(req), &result);
4287 			if (len < 0) {
4288 				l2cap_send_disconn_req(chan, ECONNRESET);
4289 				goto done;
4290 			}
4291 
4292 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4293 				       L2CAP_CONF_REQ, len, req);
4294 			chan->num_conf_req++;
4295 			if (result != L2CAP_CONF_SUCCESS)
4296 				goto done;
4297 			break;
4298 		}
4299 		/* fall through */
4300 
4301 	default:
4302 		l2cap_chan_set_err(chan, ECONNRESET);
4303 
4304 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4305 		l2cap_send_disconn_req(chan, ECONNRESET);
4306 		goto done;
4307 	}
4308 
4309 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4310 		goto done;
4311 
4312 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4313 
4314 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4315 		set_default_fcs(chan);
4316 
4317 		if (chan->mode == L2CAP_MODE_ERTM ||
4318 		    chan->mode == L2CAP_MODE_STREAMING)
4319 			err = l2cap_ertm_init(chan);
4320 
4321 		if (err < 0)
4322 			l2cap_send_disconn_req(chan, -err);
4323 		else
4324 			l2cap_chan_ready(chan);
4325 	}
4326 
4327 done:
4328 	l2cap_chan_unlock(chan);
4329 	return err;
4330 }
4331 
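/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response and tear down the matching channel.
 */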
4332 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4333 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4334 				       u8 *data)
4335 {
4336 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4337 	struct l2cap_disconn_rsp rsp;
4338 	u16 dcid, scid;
4339 	struct l2cap_chan *chan;
4340 
4341 	if (cmd_len != sizeof(*req))
4342 		return -EPROTO;
4343 
4344 	scid = __le16_to_cpu(req->scid);
4345 	dcid = __le16_to_cpu(req->dcid);
4346 
4347 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4348 
4349 	mutex_lock(&conn->chan_lock);
4350 
4351 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4352 	if (!chan) {
4353 		mutex_unlock(&conn->chan_lock);
4354 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4355 		return 0;
4356 	}
4357 
4358 	l2cap_chan_lock(chan);
4359 
4360 	rsp.dcid = cpu_to_le16(chan->scid);
4361 	rsp.scid = cpu_to_le16(chan->dcid);
4362 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4363 
4364 	chan->ops->set_shutdown(chan);
4365 
4366 	l2cap_chan_hold(chan);
4367 	l2cap_chan_del(chan, ECONNRESET);
4368 
4369 	l2cap_chan_unlock(chan);
4370 
4371 	chan->ops->close(chan);
4372 	l2cap_chan_put(chan);
4373 
4374 	mutex_unlock(&conn->chan_lock);
4375 
4376 	return 0;
4377 }
4378 
4379 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4380 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4381 				       u8 *data)
4382 {
4383 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4384 	u16 dcid, scid;
4385 	struct l2cap_chan *chan;
4386 
4387 	if (cmd_len != sizeof(*rsp))
4388 		return -EPROTO;
4389 
4390 	scid = __le16_to_cpu(rsp->scid);
4391 	dcid = __le16_to_cpu(rsp->dcid);
4392 
4393 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4394 
4395 	mutex_lock(&conn->chan_lock);
4396 
4397 	chan = __l2cap_get_chan_by_scid(conn, scid);
4398 	if (!chan) {
4399 		mutex_unlock(&conn->chan_lock);
4400 		return 0;
4401 	}
4402 
4403 	l2cap_chan_lock(chan);
4404 
4405 	if (chan->state != BT_DISCONN) {
4406 		l2cap_chan_unlock(chan);
4407 		mutex_unlock(&conn->chan_lock);
4408 		return 0;
4409 	}
4410 
4411 	l2cap_chan_hold(chan);
4412 	l2cap_chan_del(chan, 0);
4413 
4414 	l2cap_chan_unlock(chan);
4415 
4416 	chan->ops->close(chan);
4417 	l2cap_chan_put(chan);
4418 
4419 	mutex_unlock(&conn->chan_lock);
4420 
4421 	return 0;
4422 }
4423 
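/* Handle an Information Request by returning the local feature mask,
 * the fixed channels map, or a "not supported" result for any other
 * info type.
 */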
4424 static inline int l2cap_information_req(struct l2cap_conn *conn,
4425 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4426 					u8 *data)
4427 {
4428 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4429 	u16 type;
4430 
4431 	if (cmd_len != sizeof(*req))
4432 		return -EPROTO;
4433 
4434 	type = __le16_to_cpu(req->type);
4435 
4436 	BT_DBG("type 0x%4.4x", type);
4437 
4438 	if (type == L2CAP_IT_FEAT_MASK) {
4439 		u8 buf[8];
4440 		u32 feat_mask = l2cap_feat_mask;
4441 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4442 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4443 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4444 		if (!disable_ertm)
4445 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4446 				| L2CAP_FEAT_FCS;
4447 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4448 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4449 				| L2CAP_FEAT_EXT_WINDOW;
4450 
4451 		put_unaligned_le32(feat_mask, rsp->data);
4452 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4453 			       buf);
4454 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4455 		u8 buf[12];
4456 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4457 
4458 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4459 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4460 		rsp->data[0] = conn->local_fixed_chan;
4461 		memset(rsp->data + 1, 0, 7);
4462 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4463 			       buf);
4464 	} else {
4465 		struct l2cap_info_rsp rsp;
4466 		rsp.type   = cpu_to_le16(type);
4467 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4468 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4469 			       &rsp);
4470 	}
4471 
4472 	return 0;
4473 }
4474 
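/* Handle an Information Response to our outstanding request: store the
 * remote feature mask or fixed channels map and resume connection
 * setup for channels that were waiting on it.
 */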
4475 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4476 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4477 					u8 *data)
4478 {
4479 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4480 	u16 type, result;
4481 
4482 	if (cmd_len < sizeof(*rsp))
4483 		return -EPROTO;
4484 
4485 	type   = __le16_to_cpu(rsp->type);
4486 	result = __le16_to_cpu(rsp->result);
4487 
4488 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4489 
4490 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
4491 	if (cmd->ident != conn->info_ident ||
4492 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4493 		return 0;
4494 
4495 	cancel_delayed_work(&conn->info_timer);
4496 
4497 	if (result != L2CAP_IR_SUCCESS) {
4498 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4499 		conn->info_ident = 0;
4500 
4501 		l2cap_conn_start(conn);
4502 
4503 		return 0;
4504 	}
4505 
4506 	switch (type) {
4507 	case L2CAP_IT_FEAT_MASK:
4508 		conn->feat_mask = get_unaligned_le32(rsp->data);
4509 
4510 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4511 			struct l2cap_info_req req;
4512 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4513 
4514 			conn->info_ident = l2cap_get_ident(conn);
4515 
4516 			l2cap_send_cmd(conn, conn->info_ident,
4517 				       L2CAP_INFO_REQ, sizeof(req), &req);
4518 		} else {
4519 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4520 			conn->info_ident = 0;
4521 
4522 			l2cap_conn_start(conn);
4523 		}
4524 		break;
4525 
4526 	case L2CAP_IT_FIXED_CHAN:
4527 		conn->remote_fixed_chan = rsp->data[0];
4528 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4529 		conn->info_ident = 0;
4530 
4531 		l2cap_conn_start(conn);
4532 		break;
4533 	}
4534 
4535 	return 0;
4536 }
4537 
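/* Handle a Create Channel Request: controller id 0 falls back to a
 * plain BR/EDR connection, otherwise validate the requested AMP
 * controller and bind the new channel to the high-speed link.
 */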
4538 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4539 				    struct l2cap_cmd_hdr *cmd,
4540 				    u16 cmd_len, void *data)
4541 {
4542 	struct l2cap_create_chan_req *req = data;
4543 	struct l2cap_create_chan_rsp rsp;
4544 	struct l2cap_chan *chan;
4545 	struct hci_dev *hdev;
4546 	u16 psm, scid;
4547 
4548 	if (cmd_len != sizeof(*req))
4549 		return -EPROTO;
4550 
4551 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4552 		return -EINVAL;
4553 
4554 	psm = le16_to_cpu(req->psm);
4555 	scid = le16_to_cpu(req->scid);
4556 
4557 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4558 
4559 	/* For controller id 0, make a BR/EDR connection */
4560 	if (req->amp_id == AMP_ID_BREDR) {
4561 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4562 			      req->amp_id);
4563 		return 0;
4564 	}
4565 
4566 	/* Validate AMP controller id */
4567 	hdev = hci_dev_get(req->amp_id);
4568 	if (!hdev)
4569 		goto error;
4570 
4571 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4572 		hci_dev_put(hdev);
4573 		goto error;
4574 	}
4575 
4576 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4577 			     req->amp_id);
4578 	if (chan) {
4579 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4580 		struct hci_conn *hs_hcon;
4581 
4582 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4583 						  &conn->hcon->dst);
4584 		if (!hs_hcon) {
4585 			hci_dev_put(hdev);
4586 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4587 					       chan->dcid);
4588 			return 0;
4589 		}
4590 
4591 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4592 
4593 		mgr->bredr_chan = chan;
4594 		chan->hs_hcon = hs_hcon;
4595 		chan->fcs = L2CAP_FCS_NONE;
4596 		conn->mtu = hdev->block_mtu;
4597 	}
4598 
4599 	hci_dev_put(hdev);
4600 
4601 	return 0;
4602 
4603 error:
4604 	rsp.dcid = 0;
4605 	rsp.scid = cpu_to_le16(scid);
4606 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4607 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4608 
4609 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4610 		       sizeof(rsp), &rsp);
4611 
4612 	return 0;
4613 }
4614 
4615 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4616 {
4617 	struct l2cap_move_chan_req req;
4618 	u8 ident;
4619 
4620 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4621 
4622 	ident = l2cap_get_ident(chan->conn);
4623 	chan->ident = ident;
4624 
4625 	req.icid = cpu_to_le16(chan->scid);
4626 	req.dest_amp_id = dest_amp_id;
4627 
4628 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4629 		       &req);
4630 
4631 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4632 }
4633 
4634 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4635 {
4636 	struct l2cap_move_chan_rsp rsp;
4637 
4638 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4639 
4640 	rsp.icid = cpu_to_le16(chan->dcid);
4641 	rsp.result = cpu_to_le16(result);
4642 
4643 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4644 		       sizeof(rsp), &rsp);
4645 }
4646 
4647 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4648 {
4649 	struct l2cap_move_chan_cfm cfm;
4650 
4651 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4652 
4653 	chan->ident = l2cap_get_ident(chan->conn);
4654 
4655 	cfm.icid = cpu_to_le16(chan->scid);
4656 	cfm.result = cpu_to_le16(result);
4657 
4658 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4659 		       sizeof(cfm), &cfm);
4660 
4661 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4662 }
4663 
4664 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4665 {
4666 	struct l2cap_move_chan_cfm cfm;
4667 
4668 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4669 
4670 	cfm.icid = cpu_to_le16(icid);
4671 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4672 
4673 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4674 		       sizeof(cfm), &cfm);
4675 }
4676 
4677 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4678 					 u16 icid)
4679 {
4680 	struct l2cap_move_chan_cfm_rsp rsp;
4681 
4682 	BT_DBG("icid 0x%4.4x", icid);
4683 
4684 	rsp.icid = cpu_to_le16(icid);
4685 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4686 }
4687 
4688 static void __release_logical_link(struct l2cap_chan *chan)
4689 {
4690 	chan->hs_hchan = NULL;
4691 	chan->hs_hcon = NULL;
4692 
4693 	/* Placeholder - release the logical link */
4694 }
4695 
4696 static void l2cap_logical_fail(struct l2cap_chan *chan)
4697 {
4698 	/* Logical link setup failed */
4699 	if (chan->state != BT_CONNECTED) {
4700 		/* Create channel failure, disconnect */
4701 		l2cap_send_disconn_req(chan, ECONNRESET);
4702 		return;
4703 	}
4704 
4705 	switch (chan->move_role) {
4706 	case L2CAP_MOVE_ROLE_RESPONDER:
4707 		l2cap_move_done(chan);
4708 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4709 		break;
4710 	case L2CAP_MOVE_ROLE_INITIATOR:
4711 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4712 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4713 			/* Remote has only sent pending or
4714 			 * success responses, clean up
4715 			 */
4716 			l2cap_move_done(chan);
4717 		}
4718 
4719 		/* Other amp move states imply that the move
4720 		 * has already aborted
4721 		 */
4722 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4723 		break;
4724 	}
4725 }
4726 
4727 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4728 					struct hci_chan *hchan)
4729 {
4730 	struct l2cap_conf_rsp rsp;
4731 
4732 	chan->hs_hchan = hchan;
4733 	chan->hs_hcon->l2cap_data = chan->conn;
4734 
4735 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4736 
4737 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4738 		int err;
4739 
4740 		set_default_fcs(chan);
4741 
4742 		err = l2cap_ertm_init(chan);
4743 		if (err < 0)
4744 			l2cap_send_disconn_req(chan, -err);
4745 		else
4746 			l2cap_chan_ready(chan);
4747 	}
4748 }
4749 
4750 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4751 				      struct hci_chan *hchan)
4752 {
4753 	chan->hs_hcon = hchan->conn;
4754 	chan->hs_hcon->l2cap_data = chan->conn;
4755 
4756 	BT_DBG("move_state %d", chan->move_state);
4757 
4758 	switch (chan->move_state) {
4759 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4760 		/* Move confirm will be sent after a success
4761 		 * response is received
4762 		 */
4763 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4764 		break;
4765 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4766 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4767 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4768 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4769 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4770 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4771 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4772 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4773 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4774 		}
4775 		break;
4776 	default:
4777 		/* Move was not in expected state, free the channel */
4778 		__release_logical_link(chan);
4779 
4780 		chan->move_state = L2CAP_MOVE_STABLE;
4781 	}
4782 }
4783 
4784 /* Call with chan locked */
4785 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4786 		       u8 status)
4787 {
4788 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4789 
4790 	if (status) {
4791 		l2cap_logical_fail(chan);
4792 		__release_logical_link(chan);
4793 		return;
4794 	}
4795 
4796 	if (chan->state != BT_CONNECTED) {
4797 		/* Ignore logical link if channel is on BR/EDR */
4798 		if (chan->local_amp_id != AMP_ID_BREDR)
4799 			l2cap_logical_finish_create(chan, hchan);
4800 	} else {
4801 		l2cap_logical_finish_move(chan, hchan);
4802 	}
4803 }
4804 
4805 void l2cap_move_start(struct l2cap_chan *chan)
4806 {
4807 	BT_DBG("chan %p", chan);
4808 
4809 	if (chan->local_amp_id == AMP_ID_BREDR) {
4810 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4811 			return;
4812 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4813 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4814 		/* Placeholder - start physical link setup */
4815 	} else {
4816 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4817 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4818 		chan->move_id = 0;
4819 		l2cap_move_setup(chan);
4820 		l2cap_send_move_chan_req(chan, 0);
4821 	}
4822 }
4823 
4824 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4825 			    u8 local_amp_id, u8 remote_amp_id)
4826 {
4827 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4828 	       local_amp_id, remote_amp_id);
4829 
4830 	chan->fcs = L2CAP_FCS_NONE;
4831 
4832 	/* Outgoing channel on AMP */
4833 	if (chan->state == BT_CONNECT) {
4834 		if (result == L2CAP_CR_SUCCESS) {
4835 			chan->local_amp_id = local_amp_id;
4836 			l2cap_send_create_chan_req(chan, remote_amp_id);
4837 		} else {
4838 			/* Revert to BR/EDR connect */
4839 			l2cap_send_conn_req(chan);
4840 		}
4841 
4842 		return;
4843 	}
4844 
4845 	/* Incoming channel on AMP */
4846 	if (__l2cap_no_conn_pending(chan)) {
4847 		struct l2cap_conn_rsp rsp;
4848 		char buf[128];
4849 		rsp.scid = cpu_to_le16(chan->dcid);
4850 		rsp.dcid = cpu_to_le16(chan->scid);
4851 
4852 		if (result == L2CAP_CR_SUCCESS) {
4853 			/* Send successful response */
4854 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4855 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4856 		} else {
4857 			/* Send negative response */
4858 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4859 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4860 		}
4861 
4862 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4863 			       sizeof(rsp), &rsp);
4864 
4865 		if (result == L2CAP_CR_SUCCESS) {
4866 			l2cap_state_change(chan, BT_CONFIG);
4867 			set_bit(CONF_REQ_SENT, &chan->conf_state);
4868 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4869 				       L2CAP_CONF_REQ,
4870 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4871 			chan->num_conf_req++;
4872 		}
4873 	}
4874 }
4875 
4876 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4877 				   u8 remote_amp_id)
4878 {
4879 	l2cap_move_setup(chan);
4880 	chan->move_id = local_amp_id;
4881 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4882 
4883 	l2cap_send_move_chan_req(chan, remote_amp_id);
4884 }
4885 
4886 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4887 {
4888 	struct hci_chan *hchan = NULL;
4889 
4890 	/* Placeholder - get hci_chan for logical link */
4891 
4892 	if (hchan) {
4893 		if (hchan->state == BT_CONNECTED) {
4894 			/* Logical link is ready to go */
4895 			chan->hs_hcon = hchan->conn;
4896 			chan->hs_hcon->l2cap_data = chan->conn;
4897 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4898 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4899 
4900 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4901 		} else {
4902 			/* Wait for logical link to be ready */
4903 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4904 		}
4905 	} else {
4906 		/* Logical link not available */
4907 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4908 	}
4909 }
4910 
4911 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4912 {
4913 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4914 		u8 rsp_result;
4915 		if (result == -EINVAL)
4916 			rsp_result = L2CAP_MR_BAD_ID;
4917 		else
4918 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4919 
4920 		l2cap_send_move_chan_rsp(chan, rsp_result);
4921 	}
4922 
4923 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4924 	chan->move_state = L2CAP_MOVE_STABLE;
4925 
4926 	/* Restart data transmission */
4927 	l2cap_ertm_send(chan);
4928 }
4929 
4930 /* Invoke with locked chan */
4931 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4932 {
4933 	u8 local_amp_id = chan->local_amp_id;
4934 	u8 remote_amp_id = chan->remote_amp_id;
4935 
4936 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4937 	       chan, result, local_amp_id, remote_amp_id);
4938 
4939 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
4940 		return;
4941 
4942 	if (chan->state != BT_CONNECTED) {
4943 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4944 	} else if (result != L2CAP_MR_SUCCESS) {
4945 		l2cap_do_move_cancel(chan, result);
4946 	} else {
4947 		switch (chan->move_role) {
4948 		case L2CAP_MOVE_ROLE_INITIATOR:
4949 			l2cap_do_move_initiate(chan, local_amp_id,
4950 					       remote_amp_id);
4951 			break;
4952 		case L2CAP_MOVE_ROLE_RESPONDER:
4953 			l2cap_do_move_respond(chan, result);
4954 			break;
4955 		default:
4956 			l2cap_do_move_cancel(chan, result);
4957 			break;
4958 		}
4959 	}
4960 }
4961 
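/* Handle a Move Channel Request: validate the channel mode, the
 * destination controller and possible move collisions, then either
 * start acting as move responder or reject the move.
 */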
4962 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4963 					 struct l2cap_cmd_hdr *cmd,
4964 					 u16 cmd_len, void *data)
4965 {
4966 	struct l2cap_move_chan_req *req = data;
4967 	struct l2cap_move_chan_rsp rsp;
4968 	struct l2cap_chan *chan;
4969 	u16 icid = 0;
4970 	u16 result = L2CAP_MR_NOT_ALLOWED;
4971 
4972 	if (cmd_len != sizeof(*req))
4973 		return -EPROTO;
4974 
4975 	icid = le16_to_cpu(req->icid);
4976 
4977 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4978 
4979 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4980 		return -EINVAL;
4981 
4982 	chan = l2cap_get_chan_by_dcid(conn, icid);
4983 	if (!chan) {
4984 		rsp.icid = cpu_to_le16(icid);
4985 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4986 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4987 			       sizeof(rsp), &rsp);
4988 		return 0;
4989 	}
4990 
4991 	chan->ident = cmd->ident;
4992 
4993 	if (chan->scid < L2CAP_CID_DYN_START ||
4994 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4995 	    (chan->mode != L2CAP_MODE_ERTM &&
4996 	     chan->mode != L2CAP_MODE_STREAMING)) {
4997 		result = L2CAP_MR_NOT_ALLOWED;
4998 		goto send_move_response;
4999 	}
5000 
5001 	if (chan->local_amp_id == req->dest_amp_id) {
5002 		result = L2CAP_MR_SAME_ID;
5003 		goto send_move_response;
5004 	}
5005 
5006 	if (req->dest_amp_id != AMP_ID_BREDR) {
5007 		struct hci_dev *hdev;
5008 		hdev = hci_dev_get(req->dest_amp_id);
5009 		if (!hdev || hdev->dev_type != HCI_AMP ||
5010 		    !test_bit(HCI_UP, &hdev->flags)) {
5011 			if (hdev)
5012 				hci_dev_put(hdev);
5013 
5014 			result = L2CAP_MR_BAD_ID;
5015 			goto send_move_response;
5016 		}
5017 		hci_dev_put(hdev);
5018 	}
5019 
5020 	/* Detect a move collision.  Only send a collision response
5021 	 * if this side has "lost", otherwise proceed with the move.
5022 	 * The winner has the larger bd_addr.
5023 	 */
5024 	if ((__chan_is_moving(chan) ||
5025 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5026 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5027 		result = L2CAP_MR_COLLISION;
5028 		goto send_move_response;
5029 	}
5030 
5031 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5032 	l2cap_move_setup(chan);
5033 	chan->move_id = req->dest_amp_id;
5034 	icid = chan->dcid;
5035 
5036 	if (req->dest_amp_id == AMP_ID_BREDR) {
5037 		/* Moving to BR/EDR */
5038 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5039 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5040 			result = L2CAP_MR_PEND;
5041 		} else {
5042 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5043 			result = L2CAP_MR_SUCCESS;
5044 		}
5045 	} else {
5046 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5047 		/* Placeholder - uncomment when amp functions are available */
5048 		/*amp_accept_physical(chan, req->dest_amp_id);*/
5049 		result = L2CAP_MR_PEND;
5050 	}
5051 
5052 send_move_response:
5053 	l2cap_send_move_chan_rsp(chan, result);
5054 
5055 	l2cap_chan_unlock(chan);
5056 
5057 	return 0;
5058 }
5059 
5060 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5061 {
5062 	struct l2cap_chan *chan;
5063 	struct hci_chan *hchan = NULL;
5064 
5065 	chan = l2cap_get_chan_by_scid(conn, icid);
5066 	if (!chan) {
5067 		l2cap_send_move_chan_cfm_icid(conn, icid);
5068 		return;
5069 	}
5070 
5071 	__clear_chan_timer(chan);
5072 	if (result == L2CAP_MR_PEND)
5073 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5074 
5075 	switch (chan->move_state) {
5076 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5077 		/* Move confirm will be sent when logical link
5078 		 * is complete.
5079 		 */
5080 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5081 		break;
5082 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5083 		if (result == L2CAP_MR_PEND) {
5084 			break;
5085 		} else if (test_bit(CONN_LOCAL_BUSY,
5086 				    &chan->conn_state)) {
5087 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5088 		} else {
5089 			/* Logical link is up or moving to BR/EDR,
5090 			 * proceed with move
5091 			 */
5092 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5093 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5094 		}
5095 		break;
5096 	case L2CAP_MOVE_WAIT_RSP:
5097 		/* Moving to AMP */
5098 		if (result == L2CAP_MR_SUCCESS) {
5099 			/* Remote is ready, send confirm immediately
5100 			 * after logical link is ready
5101 			 */
5102 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5103 		} else {
5104 			/* Both logical link and move success
5105 			 * are required to confirm
5106 			 */
5107 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5108 		}
5109 
5110 		/* Placeholder - get hci_chan for logical link */
5111 		if (!hchan) {
5112 			/* Logical link not available */
5113 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5114 			break;
5115 		}
5116 
5117 		/* If the logical link is not yet connected, do not
5118 		 * send confirmation.
5119 		 */
5120 		if (hchan->state != BT_CONNECTED)
5121 			break;
5122 
5123 		/* Logical link is already ready to go */
5124 
5125 		chan->hs_hcon = hchan->conn;
5126 		chan->hs_hcon->l2cap_data = chan->conn;
5127 
5128 		if (result == L2CAP_MR_SUCCESS) {
5129 			/* Can confirm now */
5130 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5131 		} else {
5132 			/* Now only need move success
5133 			 * to confirm
5134 			 */
5135 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5136 		}
5137 
5138 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5139 		break;
5140 	default:
5141 		/* Any other amp move state means the move failed. */
5142 		chan->move_id = chan->local_amp_id;
5143 		l2cap_move_done(chan);
5144 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5145 	}
5146 
5147 	l2cap_chan_unlock(chan);
5148 }
5149 
5150 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5151 			    u16 result)
5152 {
5153 	struct l2cap_chan *chan;
5154 
5155 	chan = l2cap_get_chan_by_ident(conn, ident);
5156 	if (!chan) {
5157 		/* Could not locate channel, icid is best guess */
5158 		l2cap_send_move_chan_cfm_icid(conn, icid);
5159 		return;
5160 	}
5161 
5162 	__clear_chan_timer(chan);
5163 
5164 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5165 		if (result == L2CAP_MR_COLLISION) {
5166 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5167 		} else {
5168 			/* Cleanup - cancel move */
5169 			chan->move_id = chan->local_amp_id;
5170 			l2cap_move_done(chan);
5171 		}
5172 	}
5173 
5174 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5175 
5176 	l2cap_chan_unlock(chan);
5177 }
5178 
5179 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5180 				  struct l2cap_cmd_hdr *cmd,
5181 				  u16 cmd_len, void *data)
5182 {
5183 	struct l2cap_move_chan_rsp *rsp = data;
5184 	u16 icid, result;
5185 
5186 	if (cmd_len != sizeof(*rsp))
5187 		return -EPROTO;
5188 
5189 	icid = le16_to_cpu(rsp->icid);
5190 	result = le16_to_cpu(rsp->result);
5191 
5192 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5193 
5194 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5195 		l2cap_move_continue(conn, icid, result);
5196 	else
5197 		l2cap_move_fail(conn, cmd->ident, icid, result);
5198 
5199 	return 0;
5200 }
5201 
5202 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5203 				      struct l2cap_cmd_hdr *cmd,
5204 				      u16 cmd_len, void *data)
5205 {
5206 	struct l2cap_move_chan_cfm *cfm = data;
5207 	struct l2cap_chan *chan;
5208 	u16 icid, result;
5209 
5210 	if (cmd_len != sizeof(*cfm))
5211 		return -EPROTO;
5212 
5213 	icid = le16_to_cpu(cfm->icid);
5214 	result = le16_to_cpu(cfm->result);
5215 
5216 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5217 
5218 	chan = l2cap_get_chan_by_dcid(conn, icid);
5219 	if (!chan) {
5220 		/* Spec requires a response even if the icid was not found */
5221 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5222 		return 0;
5223 	}
5224 
5225 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5226 		if (result == L2CAP_MC_CONFIRMED) {
5227 			chan->local_amp_id = chan->move_id;
5228 			if (chan->local_amp_id == AMP_ID_BREDR)
5229 				__release_logical_link(chan);
5230 		} else {
5231 			chan->move_id = chan->local_amp_id;
5232 		}
5233 
5234 		l2cap_move_done(chan);
5235 	}
5236 
5237 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5238 
5239 	l2cap_chan_unlock(chan);
5240 
5241 	return 0;
5242 }
5243 
5244 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5245 						 struct l2cap_cmd_hdr *cmd,
5246 						 u16 cmd_len, void *data)
5247 {
5248 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5249 	struct l2cap_chan *chan;
5250 	u16 icid;
5251 
5252 	if (cmd_len != sizeof(*rsp))
5253 		return -EPROTO;
5254 
5255 	icid = le16_to_cpu(rsp->icid);
5256 
5257 	BT_DBG("icid 0x%4.4x", icid);
5258 
5259 	chan = l2cap_get_chan_by_scid(conn, icid);
5260 	if (!chan)
5261 		return 0;
5262 
5263 	__clear_chan_timer(chan);
5264 
5265 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5266 		chan->local_amp_id = chan->move_id;
5267 
5268 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5269 			__release_logical_link(chan);
5270 
5271 		l2cap_move_done(chan);
5272 	}
5273 
5274 	l2cap_chan_unlock(chan);
5275 
5276 	return 0;
5277 }
5278 
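/* Handle an LE Connection Parameter Update Request (central role
 * only): validate the proposed parameters, send the accept/reject
 * response and, if accepted, apply the new connection parameters.
 */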
5279 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5280 					      struct l2cap_cmd_hdr *cmd,
5281 					      u16 cmd_len, u8 *data)
5282 {
5283 	struct hci_conn *hcon = conn->hcon;
5284 	struct l2cap_conn_param_update_req *req;
5285 	struct l2cap_conn_param_update_rsp rsp;
5286 	u16 min, max, latency, to_multiplier;
5287 	int err;
5288 
5289 	if (hcon->role != HCI_ROLE_MASTER)
5290 		return -EINVAL;
5291 
5292 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5293 		return -EPROTO;
5294 
5295 	req = (struct l2cap_conn_param_update_req *) data;
5296 	min		= __le16_to_cpu(req->min);
5297 	max		= __le16_to_cpu(req->max);
5298 	latency		= __le16_to_cpu(req->latency);
5299 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5300 
5301 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5302 	       min, max, latency, to_multiplier);
5303 
5304 	memset(&rsp, 0, sizeof(rsp));
5305 
5306 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5307 	if (err)
5308 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5309 	else
5310 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5311 
5312 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5313 		       sizeof(rsp), &rsp);
5314 
5315 	if (!err) {
5316 		u8 store_hint;
5317 
5318 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5319 						to_multiplier);
5320 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5321 				    store_hint, min, max, latency,
5322 				    to_multiplier);
5323 
5324 	}
5325 
5326 	return 0;
5327 }
5328 
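/* Handle an LE Credit Based Connection Response: on success store the
 * remote MTU, MPS and initial credits and mark the channel ready; on
 * an authentication/encryption error raise the security level and
 * retry via SMP; otherwise delete the channel.
 */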
5329 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5330 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5331 				u8 *data)
5332 {
5333 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5334 	struct hci_conn *hcon = conn->hcon;
5335 	u16 dcid, mtu, mps, credits, result;
5336 	struct l2cap_chan *chan;
5337 	int err, sec_level;
5338 
5339 	if (cmd_len < sizeof(*rsp))
5340 		return -EPROTO;
5341 
5342 	dcid    = __le16_to_cpu(rsp->dcid);
5343 	mtu     = __le16_to_cpu(rsp->mtu);
5344 	mps     = __le16_to_cpu(rsp->mps);
5345 	credits = __le16_to_cpu(rsp->credits);
5346 	result  = __le16_to_cpu(rsp->result);
5347 
5348 	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5349 					   dcid < L2CAP_CID_DYN_START ||
5350 					   dcid > L2CAP_CID_LE_DYN_END))
5351 		return -EPROTO;
5352 
5353 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5354 	       dcid, mtu, mps, credits, result);
5355 
5356 	mutex_lock(&conn->chan_lock);
5357 
5358 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5359 	if (!chan) {
5360 		err = -EBADSLT;
5361 		goto unlock;
5362 	}
5363 
5364 	err = 0;
5365 
5366 	l2cap_chan_lock(chan);
5367 
5368 	switch (result) {
5369 	case L2CAP_CR_LE_SUCCESS:
5370 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5371 			err = -EBADSLT;
5372 			break;
5373 		}
5374 
5375 		chan->ident = 0;
5376 		chan->dcid = dcid;
5377 		chan->omtu = mtu;
5378 		chan->remote_mps = mps;
5379 		chan->tx_credits = credits;
5380 		l2cap_chan_ready(chan);
5381 		break;
5382 
5383 	case L2CAP_CR_LE_AUTHENTICATION:
5384 	case L2CAP_CR_LE_ENCRYPTION:
5385 		/* If we already have MITM protection we can't do
5386 		 * anything.
5387 		 */
5388 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5389 			l2cap_chan_del(chan, ECONNREFUSED);
5390 			break;
5391 		}
5392 
5393 		sec_level = hcon->sec_level + 1;
5394 		if (chan->sec_level < sec_level)
5395 			chan->sec_level = sec_level;
5396 
5397 		/* We'll need to send a new Connect Request */
5398 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5399 
5400 		smp_conn_security(hcon, chan->sec_level);
5401 		break;
5402 
5403 	default:
5404 		l2cap_chan_del(chan, ECONNREFUSED);
5405 		break;
5406 	}
5407 
5408 	l2cap_chan_unlock(chan);
5409 
5410 unlock:
5411 	mutex_unlock(&conn->chan_lock);
5412 
5413 	return err;
5414 }
5415 
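/* Dispatch a single BR/EDR signaling command to its handler. */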
5416 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5417 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5418 				      u8 *data)
5419 {
5420 	int err = 0;
5421 
5422 	switch (cmd->code) {
5423 	case L2CAP_COMMAND_REJ:
5424 		l2cap_command_rej(conn, cmd, cmd_len, data);
5425 		break;
5426 
5427 	case L2CAP_CONN_REQ:
5428 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5429 		break;
5430 
5431 	case L2CAP_CONN_RSP:
5432 	case L2CAP_CREATE_CHAN_RSP:
5433 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5434 		break;
5435 
5436 	case L2CAP_CONF_REQ:
5437 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5438 		break;
5439 
5440 	case L2CAP_CONF_RSP:
5441 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5442 		break;
5443 
5444 	case L2CAP_DISCONN_REQ:
5445 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5446 		break;
5447 
5448 	case L2CAP_DISCONN_RSP:
5449 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5450 		break;
5451 
5452 	case L2CAP_ECHO_REQ:
5453 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5454 		break;
5455 
5456 	case L2CAP_ECHO_RSP:
5457 		break;
5458 
5459 	case L2CAP_INFO_REQ:
5460 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5461 		break;
5462 
5463 	case L2CAP_INFO_RSP:
5464 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5465 		break;
5466 
5467 	case L2CAP_CREATE_CHAN_REQ:
5468 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5469 		break;
5470 
5471 	case L2CAP_MOVE_CHAN_REQ:
5472 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5473 		break;
5474 
5475 	case L2CAP_MOVE_CHAN_RSP:
5476 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5477 		break;
5478 
5479 	case L2CAP_MOVE_CHAN_CFM:
5480 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5481 		break;
5482 
5483 	case L2CAP_MOVE_CHAN_CFM_RSP:
5484 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5485 		break;
5486 
5487 	default:
5488 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5489 		err = -EINVAL;
5490 		break;
5491 	}
5492 
5493 	return err;
5494 }
5495 
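/* Handle an LE Credit Based Connection Request: check PSM, security
 * and CID constraints, create the new channel, initialize LE flow
 * control and send the response (or defer it when FLAG_DEFER_SETUP
 * is set on the new channel).
 */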
5496 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5497 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5498 				u8 *data)
5499 {
5500 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5501 	struct l2cap_le_conn_rsp rsp;
5502 	struct l2cap_chan *chan, *pchan;
5503 	u16 dcid, scid, credits, mtu, mps;
5504 	__le16 psm;
5505 	u8 result;
5506 
5507 	if (cmd_len != sizeof(*req))
5508 		return -EPROTO;
5509 
5510 	scid = __le16_to_cpu(req->scid);
5511 	mtu  = __le16_to_cpu(req->mtu);
5512 	mps  = __le16_to_cpu(req->mps);
5513 	psm  = req->psm;
5514 	dcid = 0;
5515 	credits = 0;
5516 
5517 	if (mtu < 23 || mps < 23)
5518 		return -EPROTO;
5519 
5520 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5521 	       scid, mtu, mps);
5522 
5523 	/* Check if we have a socket listening on this PSM */
5524 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5525 					 &conn->hcon->dst, LE_LINK);
5526 	if (!pchan) {
5527 		result = L2CAP_CR_LE_BAD_PSM;
5528 		chan = NULL;
5529 		goto response;
5530 	}
5531 
5532 	mutex_lock(&conn->chan_lock);
5533 	l2cap_chan_lock(pchan);
5534 
5535 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5536 				     SMP_ALLOW_STK)) {
5537 		result = L2CAP_CR_LE_AUTHENTICATION;
5538 		chan = NULL;
5539 		goto response_unlock;
5540 	}
5541 
5542 	/* Check for valid dynamic CID range */
5543 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5544 		result = L2CAP_CR_LE_INVALID_SCID;
5545 		chan = NULL;
5546 		goto response_unlock;
5547 	}
5548 
5549 	/* Check if we already have a channel with that dcid */
5550 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5551 		result = L2CAP_CR_LE_SCID_IN_USE;
5552 		chan = NULL;
5553 		goto response_unlock;
5554 	}
5555 
5556 	chan = pchan->ops->new_connection(pchan);
5557 	if (!chan) {
5558 		result = L2CAP_CR_LE_NO_MEM;
5559 		goto response_unlock;
5560 	}
5561 
5562 	bacpy(&chan->src, &conn->hcon->src);
5563 	bacpy(&chan->dst, &conn->hcon->dst);
5564 	chan->src_type = bdaddr_src_type(conn->hcon);
5565 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5566 	chan->psm  = psm;
5567 	chan->dcid = scid;
5568 	chan->omtu = mtu;
5569 	chan->remote_mps = mps;
5570 
5571 	__l2cap_chan_add(conn, chan);
5572 
5573 	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5574 
5575 	dcid = chan->scid;
5576 	credits = chan->rx_credits;
5577 
5578 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5579 
5580 	chan->ident = cmd->ident;
5581 
5582 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5583 		l2cap_state_change(chan, BT_CONNECT2);
5584 		/* The following result value is actually not defined
5585 		 * for LE CoC but we use it to let the function know
5586 		 * that it should bail out after doing its cleanup
5587 		 * instead of sending a response.
5588 		 */
5589 		result = L2CAP_CR_PEND;
5590 		chan->ops->defer(chan);
5591 	} else {
5592 		l2cap_chan_ready(chan);
5593 		result = L2CAP_CR_LE_SUCCESS;
5594 	}
5595 
5596 response_unlock:
5597 	l2cap_chan_unlock(pchan);
5598 	mutex_unlock(&conn->chan_lock);
5599 	l2cap_chan_put(pchan);
5600 
5601 	if (result == L2CAP_CR_PEND)
5602 		return 0;
5603 
5604 response:
5605 	if (chan) {
5606 		rsp.mtu = cpu_to_le16(chan->imtu);
5607 		rsp.mps = cpu_to_le16(chan->mps);
5608 	} else {
5609 		rsp.mtu = 0;
5610 		rsp.mps = 0;
5611 	}
5612 
5613 	rsp.dcid    = cpu_to_le16(dcid);
5614 	rsp.credits = cpu_to_le16(credits);
5615 	rsp.result  = cpu_to_le16(result);
5616 
5617 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5618 
5619 	return 0;
5620 }
5621 
5622 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5623 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5624 				   u8 *data)
5625 {
5626 	struct l2cap_le_credits *pkt;
5627 	struct l2cap_chan *chan;
5628 	u16 cid, credits, max_credits;
5629 
5630 	if (cmd_len != sizeof(*pkt))
5631 		return -EPROTO;
5632 
5633 	pkt = (struct l2cap_le_credits *) data;
5634 	cid	= __le16_to_cpu(pkt->cid);
5635 	credits	= __le16_to_cpu(pkt->credits);
5636 
5637 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5638 
5639 	chan = l2cap_get_chan_by_dcid(conn, cid);
5640 	if (!chan)
5641 		return -EBADSLT;
5642 
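	/* A channel's total credit count must never exceed
	 * LE_FLOWCTL_MAX_CREDITS (65535); treat anything larger as a
	 * protocol violation and disconnect.
	 */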
5643 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5644 	if (credits > max_credits) {
5645 		BT_ERR("LE credits overflow");
5646 		l2cap_send_disconn_req(chan, ECONNRESET);
5647 		l2cap_chan_unlock(chan);
5648 
5649 		/* Return 0 so that we don't trigger an unnecessary
5650 		 * command reject packet.
5651 		 */
5652 		return 0;
5653 	}
5654 
5655 	chan->tx_credits += credits;
5656 
5657 	/* Resume sending */
5658 	l2cap_le_flowctl_send(chan);
5659 
5660 	if (chan->tx_credits)
5661 		chan->ops->resume(chan);
5662 
5663 	l2cap_chan_unlock(chan);
5664 
5665 	return 0;
5666 }
5667 
5668 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5669 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5670 				       u8 *data)
5671 {
5672 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5673 	struct l2cap_chan *chan;
5674 
5675 	if (cmd_len < sizeof(*rej))
5676 		return -EPROTO;
5677 
5678 	mutex_lock(&conn->chan_lock);
5679 
5680 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5681 	if (!chan)
5682 		goto done;
5683 
5684 	l2cap_chan_lock(chan);
5685 	l2cap_chan_del(chan, ECONNREFUSED);
5686 	l2cap_chan_unlock(chan);
5687 
5688 done:
5689 	mutex_unlock(&conn->chan_lock);
5690 	return 0;
5691 }
5692 
5693 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5694 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5695 				   u8 *data)
5696 {
5697 	int err = 0;
5698 
5699 	switch (cmd->code) {
5700 	case L2CAP_COMMAND_REJ:
5701 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
5702 		break;
5703 
5704 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5705 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5706 		break;
5707 
5708 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5709 		break;
5710 
5711 	case L2CAP_LE_CONN_RSP:
5712 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5713 		break;
5714 
5715 	case L2CAP_LE_CONN_REQ:
5716 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5717 		break;
5718 
5719 	case L2CAP_LE_CREDITS:
5720 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
5721 		break;
5722 
5723 	case L2CAP_DISCONN_REQ:
5724 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5725 		break;
5726 
5727 	case L2CAP_DISCONN_RSP:
5728 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5729 		break;
5730 
5731 	default:
5732 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5733 		err = -EINVAL;
5734 		break;
5735 	}
5736 
5737 	return err;
5738 }
5739 
5740 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5741 					struct sk_buff *skb)
5742 {
5743 	struct hci_conn *hcon = conn->hcon;
5744 	struct l2cap_cmd_hdr *cmd;
5745 	u16 len;
5746 	int err;
5747 
5748 	if (hcon->type != LE_LINK)
5749 		goto drop;
5750 
5751 	if (skb->len < L2CAP_CMD_HDR_SIZE)
5752 		goto drop;
5753 
5754 	cmd = (void *) skb->data;
5755 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5756 
5757 	len = le16_to_cpu(cmd->len);
5758 
5759 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5760 
5761 	if (len != skb->len || !cmd->ident) {
5762 		BT_DBG("corrupted command");
5763 		goto drop;
5764 	}
5765 
5766 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5767 	if (err) {
5768 		struct l2cap_cmd_rej_unk rej;
5769 
5770 		BT_ERR("Wrong link type (%d)", err);
5771 
5772 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5773 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5774 			       sizeof(rej), &rej);
5775 	}
5776 
5777 drop:
5778 	kfree_skb(skb);
5779 }
5780 
5781 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5782 				     struct sk_buff *skb)
5783 {
5784 	struct hci_conn *hcon = conn->hcon;
5785 	u8 *data = skb->data;
5786 	int len = skb->len;
5787 	struct l2cap_cmd_hdr cmd;
5788 	int err;
5789 
5790 	l2cap_raw_recv(conn, skb);
5791 
5792 	if (hcon->type != ACL_LINK)
5793 		goto drop;
5794 
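	/* A single BR/EDR signaling PDU may carry several commands back to
	 * back; walk the buffer and dispatch each one in turn.
	 */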
5795 	while (len >= L2CAP_CMD_HDR_SIZE) {
5796 		u16 cmd_len;
5797 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5798 		data += L2CAP_CMD_HDR_SIZE;
5799 		len  -= L2CAP_CMD_HDR_SIZE;
5800 
5801 		cmd_len = le16_to_cpu(cmd.len);
5802 
5803 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5804 		       cmd.ident);
5805 
5806 		if (cmd_len > len || !cmd.ident) {
5807 			BT_DBG("corrupted command");
5808 			break;
5809 		}
5810 
5811 		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5812 		if (err) {
5813 			struct l2cap_cmd_rej_unk rej;
5814 
5815 			BT_ERR("Wrong link type (%d)", err);
5816 
5817 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5818 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5819 				       sizeof(rej), &rej);
5820 		}
5821 
5822 		data += cmd_len;
5823 		len  -= cmd_len;
5824 	}
5825 
5826 drop:
5827 	kfree_skb(skb);
5828 }
5829 
5830 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5831 {
5832 	u16 our_fcs, rcv_fcs;
5833 	int hdr_size;
5834 
5835 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5836 		hdr_size = L2CAP_EXT_HDR_SIZE;
5837 	else
5838 		hdr_size = L2CAP_ENH_HDR_SIZE;
5839 
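	/* The FCS is a CRC-16 over the L2CAP header and payload and is
	 * carried in the last two bytes of the frame; trim it off and
	 * compare it against a locally computed checksum.
	 */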
5840 	if (chan->fcs == L2CAP_FCS_CRC16) {
5841 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5842 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5843 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5844 
5845 		if (our_fcs != rcv_fcs)
5846 			return -EBADMSG;
5847 	}
5848 	return 0;
5849 }
5850 
5851 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5852 {
5853 	struct l2cap_ctrl control;
5854 
5855 	BT_DBG("chan %p", chan);
5856 
5857 	memset(&control, 0, sizeof(control));
5858 	control.sframe = 1;
5859 	control.final = 1;
5860 	control.reqseq = chan->buffer_seq;
5861 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5862 
5863 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5864 		control.super = L2CAP_SUPER_RNR;
5865 		l2cap_send_sframe(chan, &control);
5866 	}
5867 
5868 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5869 	    chan->unacked_frames > 0)
5870 		__set_retrans_timer(chan);
5871 
5872 	/* Send pending iframes */
5873 	l2cap_ertm_send(chan);
5874 
5875 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5876 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5877 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
5878 		 * send it now.
5879 		 */
5880 		control.super = L2CAP_SUPER_RR;
5881 		l2cap_send_sframe(chan, &control);
5882 	}
5883 }
5884 
5885 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5886 			    struct sk_buff **last_frag)
5887 {
5888 	/* skb->len reflects data in skb as well as all fragments
5889 	 * skb->data_len reflects only data in fragments
5890 	 */
5891 	if (!skb_has_frag_list(skb))
5892 		skb_shinfo(skb)->frag_list = new_frag;
5893 
5894 	new_frag->next = NULL;
5895 
5896 	(*last_frag)->next = new_frag;
5897 	*last_frag = new_frag;
5898 
5899 	skb->len += new_frag->len;
5900 	skb->data_len += new_frag->len;
5901 	skb->truesize += new_frag->truesize;
5902 }
5903 
5904 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5905 				struct l2cap_ctrl *control)
5906 {
5907 	int err = -EINVAL;
5908 
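	/* The SAR bits say whether this PDU is a complete SDU or the start,
	 * continuation, or end of a segmented one; the first segment also
	 * carries the total SDU length.
	 */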
5909 	switch (control->sar) {
5910 	case L2CAP_SAR_UNSEGMENTED:
5911 		if (chan->sdu)
5912 			break;
5913 
5914 		err = chan->ops->recv(chan, skb);
5915 		break;
5916 
5917 	case L2CAP_SAR_START:
5918 		if (chan->sdu)
5919 			break;
5920 
5921 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5922 			break;
5923 
5924 		chan->sdu_len = get_unaligned_le16(skb->data);
5925 		skb_pull(skb, L2CAP_SDULEN_SIZE);
5926 
5927 		if (chan->sdu_len > chan->imtu) {
5928 			err = -EMSGSIZE;
5929 			break;
5930 		}
5931 
5932 		if (skb->len >= chan->sdu_len)
5933 			break;
5934 
5935 		chan->sdu = skb;
5936 		chan->sdu_last_frag = skb;
5937 
5938 		skb = NULL;
5939 		err = 0;
5940 		break;
5941 
5942 	case L2CAP_SAR_CONTINUE:
5943 		if (!chan->sdu)
5944 			break;
5945 
5946 		append_skb_frag(chan->sdu, skb,
5947 				&chan->sdu_last_frag);
5948 		skb = NULL;
5949 
5950 		if (chan->sdu->len >= chan->sdu_len)
5951 			break;
5952 
5953 		err = 0;
5954 		break;
5955 
5956 	case L2CAP_SAR_END:
5957 		if (!chan->sdu)
5958 			break;
5959 
5960 		append_skb_frag(chan->sdu, skb,
5961 				&chan->sdu_last_frag);
5962 		skb = NULL;
5963 
5964 		if (chan->sdu->len != chan->sdu_len)
5965 			break;
5966 
5967 		err = chan->ops->recv(chan, chan->sdu);
5968 
5969 		if (!err) {
5970 			/* Reassembly complete */
5971 			chan->sdu = NULL;
5972 			chan->sdu_last_frag = NULL;
5973 			chan->sdu_len = 0;
5974 		}
5975 		break;
5976 	}
5977 
5978 	if (err) {
5979 		kfree_skb(skb);
5980 		kfree_skb(chan->sdu);
5981 		chan->sdu = NULL;
5982 		chan->sdu_last_frag = NULL;
5983 		chan->sdu_len = 0;
5984 	}
5985 
5986 	return err;
5987 }
5988 
5989 static int l2cap_resegment(struct l2cap_chan *chan)
5990 {
5991 	/* Placeholder */
5992 	return 0;
5993 }
5994 
5995 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5996 {
5997 	u8 event;
5998 
5999 	if (chan->mode != L2CAP_MODE_ERTM)
6000 		return;
6001 
6002 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6003 	l2cap_tx(chan, NULL, NULL, event);
6004 }
6005 
6006 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6007 {
6008 	int err = 0;
6009 	/* Pass sequential frames to l2cap_reassemble_sdu()
6010 	 * until a gap is encountered.
6011 	 */
6012 
6013 	BT_DBG("chan %p", chan);
6014 
6015 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6016 		struct sk_buff *skb;
6017 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
6018 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
6019 
6020 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6021 
6022 		if (!skb)
6023 			break;
6024 
6025 		skb_unlink(skb, &chan->srej_q);
6026 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6027 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6028 		if (err)
6029 			break;
6030 	}
6031 
6032 	if (skb_queue_empty(&chan->srej_q)) {
6033 		chan->rx_state = L2CAP_RX_STATE_RECV;
6034 		l2cap_send_ack(chan);
6035 	}
6036 
6037 	return err;
6038 }
6039 
6040 static void l2cap_handle_srej(struct l2cap_chan *chan,
6041 			      struct l2cap_ctrl *control)
6042 {
6043 	struct sk_buff *skb;
6044 
6045 	BT_DBG("chan %p, control %p", chan, control);
6046 
6047 	if (control->reqseq == chan->next_tx_seq) {
6048 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6049 		l2cap_send_disconn_req(chan, ECONNRESET);
6050 		return;
6051 	}
6052 
6053 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6054 
6055 	if (skb == NULL) {
6056 		BT_DBG("Seq %d not available for retransmission",
6057 		       control->reqseq);
6058 		return;
6059 	}
6060 
6061 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6062 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6063 		l2cap_send_disconn_req(chan, ECONNRESET);
6064 		return;
6065 	}
6066 
6067 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6068 
6069 	if (control->poll) {
6070 		l2cap_pass_to_tx(chan, control);
6071 
6072 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
6073 		l2cap_retransmit(chan, control);
6074 		l2cap_ertm_send(chan);
6075 
6076 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6077 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6078 			chan->srej_save_reqseq = control->reqseq;
6079 		}
6080 	} else {
6081 		l2cap_pass_to_tx_fbit(chan, control);
6082 
6083 		if (control->final) {
6084 			if (chan->srej_save_reqseq != control->reqseq ||
6085 			    !test_and_clear_bit(CONN_SREJ_ACT,
6086 						&chan->conn_state))
6087 				l2cap_retransmit(chan, control);
6088 		} else {
6089 			l2cap_retransmit(chan, control);
6090 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6091 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6092 				chan->srej_save_reqseq = control->reqseq;
6093 			}
6094 		}
6095 	}
6096 }
6097 
6098 static void l2cap_handle_rej(struct l2cap_chan *chan,
6099 			     struct l2cap_ctrl *control)
6100 {
6101 	struct sk_buff *skb;
6102 
6103 	BT_DBG("chan %p, control %p", chan, control);
6104 
6105 	if (control->reqseq == chan->next_tx_seq) {
6106 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6107 		l2cap_send_disconn_req(chan, ECONNRESET);
6108 		return;
6109 	}
6110 
6111 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6112 
6113 	if (chan->max_tx && skb &&
6114 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6115 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6116 		l2cap_send_disconn_req(chan, ECONNRESET);
6117 		return;
6118 	}
6119 
6120 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6121 
6122 	l2cap_pass_to_tx(chan, control);
6123 
6124 	if (control->final) {
6125 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6126 			l2cap_retransmit_all(chan, control);
6127 	} else {
6128 		l2cap_retransmit_all(chan, control);
6129 		l2cap_ertm_send(chan);
6130 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6131 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6132 	}
6133 }
6134 
6135 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6136 {
6137 	BT_DBG("chan %p, txseq %d", chan, txseq);
6138 
6139 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6140 	       chan->expected_tx_seq);
6141 
6142 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6143 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6144 		    chan->tx_win) {
6145 			/* See notes below regarding "double poll" and
6146 			 * invalid packets.
6147 			 */
6148 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6149 				BT_DBG("Invalid/Ignore - after SREJ");
6150 				return L2CAP_TXSEQ_INVALID_IGNORE;
6151 			} else {
6152 				BT_DBG("Invalid - in window after SREJ sent");
6153 				return L2CAP_TXSEQ_INVALID;
6154 			}
6155 		}
6156 
6157 		if (chan->srej_list.head == txseq) {
6158 			BT_DBG("Expected SREJ");
6159 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6160 		}
6161 
6162 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6163 			BT_DBG("Duplicate SREJ - txseq already stored");
6164 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6165 		}
6166 
6167 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6168 			BT_DBG("Unexpected SREJ - not requested");
6169 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6170 		}
6171 	}
6172 
6173 	if (chan->expected_tx_seq == txseq) {
6174 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6175 		    chan->tx_win) {
6176 			BT_DBG("Invalid - txseq outside tx window");
6177 			return L2CAP_TXSEQ_INVALID;
6178 		} else {
6179 			BT_DBG("Expected");
6180 			return L2CAP_TXSEQ_EXPECTED;
6181 		}
6182 	}
6183 
6184 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6185 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6186 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6187 		return L2CAP_TXSEQ_DUPLICATE;
6188 	}
6189 
6190 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6191 		/* A source of invalid packets is a "double poll" condition,
6192 		 * where delays cause us to send multiple poll packets.  If
6193 		 * the remote stack receives and processes both polls,
6194 		 * sequence numbers can wrap around in such a way that a
6195 		 * resent frame has a sequence number that looks like new data
6196 		 * with a sequence gap.  This would trigger an erroneous SREJ
6197 		 * request.
6198 		 *
6199 		 * Fortunately, this is impossible with a tx window that's
6200 		 * less than half of the maximum sequence number, which allows
6201 		 * invalid frames to be safely ignored.
6202 		 *
6203 		 * With tx window sizes greater than half of the tx window
6204 		 * maximum, the frame is invalid and cannot be ignored.  This
6205 		 * causes a disconnect.
6206 		 */
6207 
6208 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6209 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6210 			return L2CAP_TXSEQ_INVALID_IGNORE;
6211 		} else {
6212 			BT_DBG("Invalid - txseq outside tx window");
6213 			return L2CAP_TXSEQ_INVALID;
6214 		}
6215 	} else {
6216 		BT_DBG("Unexpected - txseq indicates missing frames");
6217 		return L2CAP_TXSEQ_UNEXPECTED;
6218 	}
6219 }
6220 
6221 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6222 			       struct l2cap_ctrl *control,
6223 			       struct sk_buff *skb, u8 event)
6224 {
6225 	int err = 0;
6226 	bool skb_in_use = false;
6227 
6228 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6229 	       event);
6230 
6231 	switch (event) {
6232 	case L2CAP_EV_RECV_IFRAME:
6233 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6234 		case L2CAP_TXSEQ_EXPECTED:
6235 			l2cap_pass_to_tx(chan, control);
6236 
6237 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6238 				BT_DBG("Busy, discarding expected seq %d",
6239 				       control->txseq);
6240 				break;
6241 			}
6242 
6243 			chan->expected_tx_seq = __next_seq(chan,
6244 							   control->txseq);
6245 
6246 			chan->buffer_seq = chan->expected_tx_seq;
6247 			skb_in_use = true;
6248 
6249 			err = l2cap_reassemble_sdu(chan, skb, control);
6250 			if (err)
6251 				break;
6252 
6253 			if (control->final) {
6254 				if (!test_and_clear_bit(CONN_REJ_ACT,
6255 							&chan->conn_state)) {
6256 					control->final = 0;
6257 					l2cap_retransmit_all(chan, control);
6258 					l2cap_ertm_send(chan);
6259 				}
6260 			}
6261 
6262 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6263 				l2cap_send_ack(chan);
6264 			break;
6265 		case L2CAP_TXSEQ_UNEXPECTED:
6266 			l2cap_pass_to_tx(chan, control);
6267 
6268 			/* Can't issue SREJ frames in the local busy state.
6269 			 * Drop this frame, it will be seen as missing
6270 			 * when local busy is exited.
6271 			 */
6272 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6273 				BT_DBG("Busy, discarding unexpected seq %d",
6274 				       control->txseq);
6275 				break;
6276 			}
6277 
6278 			/* There was a gap in the sequence, so an SREJ
6279 			 * must be sent for each missing frame.  The
6280 			 * current frame is stored for later use.
6281 			 */
6282 			skb_queue_tail(&chan->srej_q, skb);
6283 			skb_in_use = true;
6284 			BT_DBG("Queued %p (queue len %d)", skb,
6285 			       skb_queue_len(&chan->srej_q));
6286 
6287 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6288 			l2cap_seq_list_clear(&chan->srej_list);
6289 			l2cap_send_srej(chan, control->txseq);
6290 
6291 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6292 			break;
6293 		case L2CAP_TXSEQ_DUPLICATE:
6294 			l2cap_pass_to_tx(chan, control);
6295 			break;
6296 		case L2CAP_TXSEQ_INVALID_IGNORE:
6297 			break;
6298 		case L2CAP_TXSEQ_INVALID:
6299 		default:
6300 			l2cap_send_disconn_req(chan, ECONNRESET);
6301 			break;
6302 		}
6303 		break;
6304 	case L2CAP_EV_RECV_RR:
6305 		l2cap_pass_to_tx(chan, control);
6306 		if (control->final) {
6307 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6308 
6309 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6310 			    !__chan_is_moving(chan)) {
6311 				control->final = 0;
6312 				l2cap_retransmit_all(chan, control);
6313 			}
6314 
6315 			l2cap_ertm_send(chan);
6316 		} else if (control->poll) {
6317 			l2cap_send_i_or_rr_or_rnr(chan);
6318 		} else {
6319 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6320 					       &chan->conn_state) &&
6321 			    chan->unacked_frames)
6322 				__set_retrans_timer(chan);
6323 
6324 			l2cap_ertm_send(chan);
6325 		}
6326 		break;
6327 	case L2CAP_EV_RECV_RNR:
6328 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6329 		l2cap_pass_to_tx(chan, control);
6330 		if (control && control->poll) {
6331 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6332 			l2cap_send_rr_or_rnr(chan, 0);
6333 		}
6334 		__clear_retrans_timer(chan);
6335 		l2cap_seq_list_clear(&chan->retrans_list);
6336 		break;
6337 	case L2CAP_EV_RECV_REJ:
6338 		l2cap_handle_rej(chan, control);
6339 		break;
6340 	case L2CAP_EV_RECV_SREJ:
6341 		l2cap_handle_srej(chan, control);
6342 		break;
6343 	default:
6344 		break;
6345 	}
6346 
6347 	if (skb && !skb_in_use) {
6348 		BT_DBG("Freeing %p", skb);
6349 		kfree_skb(skb);
6350 	}
6351 
6352 	return err;
6353 }
6354 
6355 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6356 				    struct l2cap_ctrl *control,
6357 				    struct sk_buff *skb, u8 event)
6358 {
6359 	int err = 0;
6360 	u16 txseq = control->txseq;
6361 	bool skb_in_use = false;
6362 
6363 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6364 	       event);
6365 
6366 	switch (event) {
6367 	case L2CAP_EV_RECV_IFRAME:
6368 		switch (l2cap_classify_txseq(chan, txseq)) {
6369 		case L2CAP_TXSEQ_EXPECTED:
6370 			/* Keep frame for reassembly later */
6371 			l2cap_pass_to_tx(chan, control);
6372 			skb_queue_tail(&chan->srej_q, skb);
6373 			skb_in_use = true;
6374 			BT_DBG("Queued %p (queue len %d)", skb,
6375 			       skb_queue_len(&chan->srej_q));
6376 
6377 			chan->expected_tx_seq = __next_seq(chan, txseq);
6378 			break;
6379 		case L2CAP_TXSEQ_EXPECTED_SREJ:
6380 			l2cap_seq_list_pop(&chan->srej_list);
6381 
6382 			l2cap_pass_to_tx(chan, control);
6383 			skb_queue_tail(&chan->srej_q, skb);
6384 			skb_in_use = true;
6385 			BT_DBG("Queued %p (queue len %d)", skb,
6386 			       skb_queue_len(&chan->srej_q));
6387 
6388 			err = l2cap_rx_queued_iframes(chan);
6389 			if (err)
6390 				break;
6391 
6392 			break;
6393 		case L2CAP_TXSEQ_UNEXPECTED:
6394 			/* Got a frame that can't be reassembled yet.
6395 			 * Save it for later, and send SREJs to cover
6396 			 * the missing frames.
6397 			 */
6398 			skb_queue_tail(&chan->srej_q, skb);
6399 			skb_in_use = true;
6400 			BT_DBG("Queued %p (queue len %d)", skb,
6401 			       skb_queue_len(&chan->srej_q));
6402 
6403 			l2cap_pass_to_tx(chan, control);
6404 			l2cap_send_srej(chan, control->txseq);
6405 			break;
6406 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6407 			/* This frame was requested with an SREJ, but
6408 			 * some expected retransmitted frames are
6409 			 * missing.  Request retransmission of missing
6410 			 * SREJ'd frames.
6411 			 */
6412 			skb_queue_tail(&chan->srej_q, skb);
6413 			skb_in_use = true;
6414 			BT_DBG("Queued %p (queue len %d)", skb,
6415 			       skb_queue_len(&chan->srej_q));
6416 
6417 			l2cap_pass_to_tx(chan, control);
6418 			l2cap_send_srej_list(chan, control->txseq);
6419 			break;
6420 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
6421 			/* We've already queued this frame.  Drop this copy. */
6422 			l2cap_pass_to_tx(chan, control);
6423 			break;
6424 		case L2CAP_TXSEQ_DUPLICATE:
6425 			/* Expecting a later sequence number, so this frame
6426 			 * was already received.  Ignore it completely.
6427 			 */
6428 			break;
6429 		case L2CAP_TXSEQ_INVALID_IGNORE:
6430 			break;
6431 		case L2CAP_TXSEQ_INVALID:
6432 		default:
6433 			l2cap_send_disconn_req(chan, ECONNRESET);
6434 			break;
6435 		}
6436 		break;
6437 	case L2CAP_EV_RECV_RR:
6438 		l2cap_pass_to_tx(chan, control);
6439 		if (control->final) {
6440 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6441 
6442 			if (!test_and_clear_bit(CONN_REJ_ACT,
6443 						&chan->conn_state)) {
6444 				control->final = 0;
6445 				l2cap_retransmit_all(chan, control);
6446 			}
6447 
6448 			l2cap_ertm_send(chan);
6449 		} else if (control->poll) {
6450 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6451 					       &chan->conn_state) &&
6452 			    chan->unacked_frames) {
6453 				__set_retrans_timer(chan);
6454 			}
6455 
6456 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6457 			l2cap_send_srej_tail(chan);
6458 		} else {
6459 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6460 					       &chan->conn_state) &&
6461 			    chan->unacked_frames)
6462 				__set_retrans_timer(chan);
6463 
6464 			l2cap_send_ack(chan);
6465 		}
6466 		break;
6467 	case L2CAP_EV_RECV_RNR:
6468 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6469 		l2cap_pass_to_tx(chan, control);
6470 		if (control->poll) {
6471 			l2cap_send_srej_tail(chan);
6472 		} else {
6473 			struct l2cap_ctrl rr_control;
6474 			memset(&rr_control, 0, sizeof(rr_control));
6475 			rr_control.sframe = 1;
6476 			rr_control.super = L2CAP_SUPER_RR;
6477 			rr_control.reqseq = chan->buffer_seq;
6478 			l2cap_send_sframe(chan, &rr_control);
6479 		}
6480 
6481 		break;
6482 	case L2CAP_EV_RECV_REJ:
6483 		l2cap_handle_rej(chan, control);
6484 		break;
6485 	case L2CAP_EV_RECV_SREJ:
6486 		l2cap_handle_srej(chan, control);
6487 		break;
6488 	}
6489 
6490 	if (skb && !skb_in_use) {
6491 		BT_DBG("Freeing %p", skb);
6492 		kfree_skb(skb);
6493 	}
6494 
6495 	return err;
6496 }
6497 
6498 static int l2cap_finish_move(struct l2cap_chan *chan)
6499 {
6500 	BT_DBG("chan %p", chan);
6501 
6502 	chan->rx_state = L2CAP_RX_STATE_RECV;
6503 
6504 	if (chan->hs_hcon)
6505 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6506 	else
6507 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6508 
6509 	return l2cap_resegment(chan);
6510 }
6511 
6512 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6513 				 struct l2cap_ctrl *control,
6514 				 struct sk_buff *skb, u8 event)
6515 {
6516 	int err;
6517 
6518 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6519 	       event);
6520 
6521 	if (!control->poll)
6522 		return -EPROTO;
6523 
6524 	l2cap_process_reqseq(chan, control->reqseq);
6525 
6526 	if (!skb_queue_empty(&chan->tx_q))
6527 		chan->tx_send_head = skb_peek(&chan->tx_q);
6528 	else
6529 		chan->tx_send_head = NULL;
6530 
6531 	/* Rewind next_tx_seq to the point expected
6532 	 * by the receiver.
6533 	 */
6534 	chan->next_tx_seq = control->reqseq;
6535 	chan->unacked_frames = 0;
6536 
6537 	err = l2cap_finish_move(chan);
6538 	if (err)
6539 		return err;
6540 
6541 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6542 	l2cap_send_i_or_rr_or_rnr(chan);
6543 
6544 	if (event == L2CAP_EV_RECV_IFRAME)
6545 		return -EPROTO;
6546 
6547 	return l2cap_rx_state_recv(chan, control, NULL, event);
6548 }
6549 
6550 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6551 				 struct l2cap_ctrl *control,
6552 				 struct sk_buff *skb, u8 event)
6553 {
6554 	int err;
6555 
6556 	if (!control->final)
6557 		return -EPROTO;
6558 
6559 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6560 
6561 	chan->rx_state = L2CAP_RX_STATE_RECV;
6562 	l2cap_process_reqseq(chan, control->reqseq);
6563 
6564 	if (!skb_queue_empty(&chan->tx_q))
6565 		chan->tx_send_head = skb_peek(&chan->tx_q);
6566 	else
6567 		chan->tx_send_head = NULL;
6568 
6569 	/* Rewind next_tx_seq to the point expected
6570 	 * by the receiver.
6571 	 */
6572 	chan->next_tx_seq = control->reqseq;
6573 	chan->unacked_frames = 0;
6574 
6575 	if (chan->hs_hcon)
6576 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6577 	else
6578 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6579 
6580 	err = l2cap_resegment(chan);
6581 
6582 	if (!err)
6583 		err = l2cap_rx_state_recv(chan, control, skb, event);
6584 
6585 	return err;
6586 }
6587 
6588 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6589 {
6590 	/* Make sure reqseq is for a packet that has been sent but not acked */
6591 	u16 unacked;
6592 
6593 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6594 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6595 }
6596 
6597 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6598 		    struct sk_buff *skb, u8 event)
6599 {
6600 	int err = 0;
6601 
6602 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6603 	       control, skb, event, chan->rx_state);
6604 
6605 	if (__valid_reqseq(chan, control->reqseq)) {
6606 		switch (chan->rx_state) {
6607 		case L2CAP_RX_STATE_RECV:
6608 			err = l2cap_rx_state_recv(chan, control, skb, event);
6609 			break;
6610 		case L2CAP_RX_STATE_SREJ_SENT:
6611 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6612 						       event);
6613 			break;
6614 		case L2CAP_RX_STATE_WAIT_P:
6615 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6616 			break;
6617 		case L2CAP_RX_STATE_WAIT_F:
6618 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6619 			break;
6620 		default:
6621 			/* shut it down */
6622 			break;
6623 		}
6624 	} else {
6625 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6626 		       control->reqseq, chan->next_tx_seq,
6627 		       chan->expected_ack_seq);
6628 		l2cap_send_disconn_req(chan, ECONNRESET);
6629 	}
6630 
6631 	return err;
6632 }
6633 
6634 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6635 			   struct sk_buff *skb)
6636 {
6637 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6638 	       chan->rx_state);
6639 
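	/* Streaming mode has no retransmissions, so only the expected
	 * txseq is reassembled; anything else discards the partial SDU
	 * along with the frame itself.
	 */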
6640 	if (l2cap_classify_txseq(chan, control->txseq) ==
6641 	    L2CAP_TXSEQ_EXPECTED) {
6642 		l2cap_pass_to_tx(chan, control);
6643 
6644 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6645 		       __next_seq(chan, chan->buffer_seq));
6646 
6647 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6648 
6649 		l2cap_reassemble_sdu(chan, skb, control);
6650 	} else {
6651 		if (chan->sdu) {
6652 			kfree_skb(chan->sdu);
6653 			chan->sdu = NULL;
6654 		}
6655 		chan->sdu_last_frag = NULL;
6656 		chan->sdu_len = 0;
6657 
6658 		if (skb) {
6659 			BT_DBG("Freeing %p", skb);
6660 			kfree_skb(skb);
6661 		}
6662 	}
6663 
6664 	chan->last_acked_seq = control->txseq;
6665 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6666 
6667 	return 0;
6668 }
6669 
6670 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6671 {
6672 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6673 	u16 len;
6674 	u8 event;
6675 
6676 	__unpack_control(chan, skb);
6677 
6678 	len = skb->len;
6679 
6680 	/*
6681 	 * We can just drop the corrupted I-frame here.
6682 	 * Receiver will miss it and start proper recovery
6683 	 * procedures and ask for retransmission.
6684 	 */
6685 	if (l2cap_check_fcs(chan, skb))
6686 		goto drop;
6687 
6688 	if (!control->sframe && control->sar == L2CAP_SAR_START)
6689 		len -= L2CAP_SDULEN_SIZE;
6690 
6691 	if (chan->fcs == L2CAP_FCS_CRC16)
6692 		len -= L2CAP_FCS_SIZE;
6693 
6694 	if (len > chan->mps) {
6695 		l2cap_send_disconn_req(chan, ECONNRESET);
6696 		goto drop;
6697 	}
6698 
6699 	if ((chan->mode == L2CAP_MODE_ERTM ||
6700 	     chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
6701 		goto drop;
6702 
6703 	if (!control->sframe) {
6704 		int err;
6705 
6706 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6707 		       control->sar, control->reqseq, control->final,
6708 		       control->txseq);
6709 
6710 		/* Validate F-bit - F=0 always valid, F=1 only
6711 		 * valid in TX WAIT_F
6712 		 */
6713 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6714 			goto drop;
6715 
6716 		if (chan->mode != L2CAP_MODE_STREAMING) {
6717 			event = L2CAP_EV_RECV_IFRAME;
6718 			err = l2cap_rx(chan, control, skb, event);
6719 		} else {
6720 			err = l2cap_stream_rx(chan, control, skb);
6721 		}
6722 
6723 		if (err)
6724 			l2cap_send_disconn_req(chan, ECONNRESET);
6725 	} else {
6726 		const u8 rx_func_to_event[4] = {
6727 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6728 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6729 		};
6730 
6731 		/* Only I-frames are expected in streaming mode */
6732 		if (chan->mode == L2CAP_MODE_STREAMING)
6733 			goto drop;
6734 
6735 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6736 		       control->reqseq, control->final, control->poll,
6737 		       control->super);
6738 
6739 		if (len != 0) {
6740 			BT_ERR("Trailing bytes: %d in sframe", len);
6741 			l2cap_send_disconn_req(chan, ECONNRESET);
6742 			goto drop;
6743 		}
6744 
6745 		/* Validate F and P bits */
6746 		if (control->final && (control->poll ||
6747 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6748 			goto drop;
6749 
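		/* The 2-bit supervisory function field indexes directly into
		 * the event table above.
		 */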
6750 		event = rx_func_to_event[control->super];
6751 		if (l2cap_rx(chan, control, skb, event))
6752 			l2cap_send_disconn_req(chan, ECONNRESET);
6753 	}
6754 
6755 	return 0;
6756 
6757 drop:
6758 	kfree_skb(skb);
6759 	return 0;
6760 }
6761 
6762 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6763 {
6764 	struct l2cap_conn *conn = chan->conn;
6765 	struct l2cap_le_credits pkt;
6766 	u16 return_credits;
6767 
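	/* Top the peer back up to a full receive window: one credit per
	 * MPS-sized fragment of the MTU plus one for a partial fragment,
	 * minus whatever credits the peer still holds.
	 */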
6768 	return_credits = ((chan->imtu / chan->mps) + 1) - chan->rx_credits;
6769 
6770 	if (!return_credits)
6771 		return;
6772 
6773 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6774 
6775 	chan->rx_credits += return_credits;
6776 
6777 	pkt.cid     = cpu_to_le16(chan->scid);
6778 	pkt.credits = cpu_to_le16(return_credits);
6779 
6780 	chan->ident = l2cap_get_ident(conn);
6781 
6782 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6783 }
6784 
6785 static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)
6786 {
6787 	int err;
6788 
6789 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
6790 
6791 	/* Wait for recv to confirm reception before updating the credits */
6792 	err = chan->ops->recv(chan, skb);
6793 
6794 	/* Update credits whenever an SDU is received */
6795 	l2cap_chan_le_send_credits(chan);
6796 
6797 	return err;
6798 }
6799 
6800 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6801 {
6802 	int err;
6803 
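	/* LE credit-based flow control: every incoming PDU consumes one of
	 * the credits we previously granted, so a frame arriving with no
	 * credits left is a protocol violation.
	 */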
6804 	if (!chan->rx_credits) {
6805 		BT_ERR("No credits to receive LE L2CAP data");
6806 		l2cap_send_disconn_req(chan, ECONNRESET);
6807 		return -ENOBUFS;
6808 	}
6809 
6810 	if (chan->imtu < skb->len) {
6811 		BT_ERR("Too big LE L2CAP PDU");
6812 		return -ENOBUFS;
6813 	}
6814 
6815 	chan->rx_credits--;
6816 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6817 
6818 	/* Update if the remote had run out of credits; this should only happen
6819 	 * if the remote is not using the entire MPS.
6820 	 */
6821 	if (!chan->rx_credits)
6822 		l2cap_chan_le_send_credits(chan);
6823 
6824 	err = 0;
6825 
6826 	if (!chan->sdu) {
6827 		u16 sdu_len;
6828 
6829 		sdu_len = get_unaligned_le16(skb->data);
6830 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6831 
6832 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6833 		       sdu_len, skb->len, chan->imtu);
6834 
6835 		if (sdu_len > chan->imtu) {
6836 			BT_ERR("Too big LE L2CAP SDU length received");
6837 			err = -EMSGSIZE;
6838 			goto failed;
6839 		}
6840 
6841 		if (skb->len > sdu_len) {
6842 			BT_ERR("Too much LE L2CAP data received");
6843 			err = -EINVAL;
6844 			goto failed;
6845 		}
6846 
6847 		if (skb->len == sdu_len)
6848 			return l2cap_le_recv(chan, skb);
6849 
6850 		chan->sdu = skb;
6851 		chan->sdu_len = sdu_len;
6852 		chan->sdu_last_frag = skb;
6853 
6854 		/* Detect if remote is not able to use the selected MPS */
6855 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6856 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6857 
6858 			/* Adjust the number of credits */
6859 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6860 			chan->mps = mps_len;
6861 			l2cap_chan_le_send_credits(chan);
6862 		}
6863 
6864 		return 0;
6865 	}
6866 
6867 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6868 	       chan->sdu->len, skb->len, chan->sdu_len);
6869 
6870 	if (chan->sdu->len + skb->len > chan->sdu_len) {
6871 		BT_ERR("Too much LE L2CAP data received");
6872 		err = -EINVAL;
6873 		goto failed;
6874 	}
6875 
6876 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6877 	skb = NULL;
6878 
6879 	if (chan->sdu->len == chan->sdu_len) {
6880 		err = l2cap_le_recv(chan, chan->sdu);
6881 		if (!err) {
6882 			chan->sdu = NULL;
6883 			chan->sdu_last_frag = NULL;
6884 			chan->sdu_len = 0;
6885 		}
6886 	}
6887 
6888 failed:
6889 	if (err) {
6890 		kfree_skb(skb);
6891 		kfree_skb(chan->sdu);
6892 		chan->sdu = NULL;
6893 		chan->sdu_last_frag = NULL;
6894 		chan->sdu_len = 0;
6895 	}
6896 
6897 	/* We can't return an error here since we took care of the skb
6898 	 * freeing internally. An error return would cause the caller to
6899 	 * do a double-free of the skb.
6900 	 */
6901 	return 0;
6902 }
6903 
6904 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6905 			       struct sk_buff *skb)
6906 {
6907 	struct l2cap_chan *chan;
6908 
6909 	chan = l2cap_get_chan_by_scid(conn, cid);
6910 	if (!chan) {
6911 		if (cid == L2CAP_CID_A2MP) {
6912 			chan = a2mp_channel_create(conn, skb);
6913 			if (!chan) {
6914 				kfree_skb(skb);
6915 				return;
6916 			}
6917 
6918 			l2cap_chan_lock(chan);
6919 		} else {
6920 			BT_DBG("unknown cid 0x%4.4x", cid);
6921 			/* Drop packet and return */
6922 			kfree_skb(skb);
6923 			return;
6924 		}
6925 	}
6926 
6927 	BT_DBG("chan %p, len %d", chan, skb->len);
6928 
6929 	/* If we receive data on a fixed channel before the info req/rsp
6930 	 * procedure is done, simply assume that the channel is supported
6931 	 * and mark it as ready.
6932 	 */
6933 	if (chan->chan_type == L2CAP_CHAN_FIXED)
6934 		l2cap_chan_ready(chan);
6935 
6936 	if (chan->state != BT_CONNECTED)
6937 		goto drop;
6938 
6939 	switch (chan->mode) {
6940 	case L2CAP_MODE_LE_FLOWCTL:
6941 		if (l2cap_le_data_rcv(chan, skb) < 0)
6942 			goto drop;
6943 
6944 		goto done;
6945 
6946 	case L2CAP_MODE_BASIC:
6947 		/* If the socket receive buffer overflows we drop data here,
6948 		 * which is *bad* because L2CAP has to be reliable. But we
6949 		 * don't have any other choice: L2CAP doesn't provide a flow
6950 		 * control mechanism. */
6951 
6952 		if (chan->imtu < skb->len) {
6953 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
6954 			goto drop;
6955 		}
6956 
6957 		if (!chan->ops->recv(chan, skb))
6958 			goto done;
6959 		break;
6960 
6961 	case L2CAP_MODE_ERTM:
6962 	case L2CAP_MODE_STREAMING:
6963 		l2cap_data_rcv(chan, skb);
6964 		goto done;
6965 
6966 	default:
6967 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6968 		break;
6969 	}
6970 
6971 drop:
6972 	kfree_skb(skb);
6973 
6974 done:
6975 	l2cap_chan_unlock(chan);
6976 }
6977 
6978 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6979 				  struct sk_buff *skb)
6980 {
6981 	struct hci_conn *hcon = conn->hcon;
6982 	struct l2cap_chan *chan;
6983 
6984 	if (hcon->type != ACL_LINK)
6985 		goto free_skb;
6986 
6987 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6988 					ACL_LINK);
6989 	if (!chan)
6990 		goto free_skb;
6991 
6992 	BT_DBG("chan %p, len %d", chan, skb->len);
6993 
6994 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6995 		goto drop;
6996 
6997 	if (chan->imtu < skb->len)
6998 		goto drop;
6999 
7000 	/* Store remote BD_ADDR and PSM for msg_name */
7001 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7002 	bt_cb(skb)->l2cap.psm = psm;
7003 
7004 	if (!chan->ops->recv(chan, skb)) {
7005 		l2cap_chan_put(chan);
7006 		return;
7007 	}
7008 
7009 drop:
7010 	l2cap_chan_put(chan);
7011 free_skb:
7012 	kfree_skb(skb);
7013 }
7014 
7015 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7016 {
7017 	struct l2cap_hdr *lh = (void *) skb->data;
7018 	struct hci_conn *hcon = conn->hcon;
7019 	u16 cid, len;
7020 	__le16 psm;
7021 
7022 	if (hcon->state != BT_CONNECTED) {
7023 		BT_DBG("queueing pending rx skb");
7024 		skb_queue_tail(&conn->pending_rx, skb);
7025 		return;
7026 	}
7027 
7028 	skb_pull(skb, L2CAP_HDR_SIZE);
7029 	cid = __le16_to_cpu(lh->cid);
7030 	len = __le16_to_cpu(lh->len);
7031 
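	/* The basic header's length field must match the remaining payload
	 * exactly; otherwise the frame is malformed and is dropped.
	 */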
7032 	if (len != skb->len) {
7033 		kfree_skb(skb);
7034 		return;
7035 	}
7036 
7037 	/* Since we can't actively block incoming LE connections we must
7038 	 * at least ensure that we ignore incoming data from them.
7039 	 */
7040 	if (hcon->type == LE_LINK &&
7041 	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7042 				   bdaddr_dst_type(hcon))) {
7043 		kfree_skb(skb);
7044 		return;
7045 	}
7046 
7047 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7048 
7049 	switch (cid) {
7050 	case L2CAP_CID_SIGNALING:
7051 		l2cap_sig_channel(conn, skb);
7052 		break;
7053 
7054 	case L2CAP_CID_CONN_LESS:
7055 		psm = get_unaligned((__le16 *) skb->data);
7056 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7057 		l2cap_conless_channel(conn, psm, skb);
7058 		break;
7059 
7060 	case L2CAP_CID_LE_SIGNALING:
7061 		l2cap_le_sig_channel(conn, skb);
7062 		break;
7063 
7064 	default:
7065 		l2cap_data_channel(conn, cid, skb);
7066 		break;
7067 	}
7068 }
7069 
7070 static void process_pending_rx(struct work_struct *work)
7071 {
7072 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7073 					       pending_rx_work);
7074 	struct sk_buff *skb;
7075 
7076 	BT_DBG("");
7077 
7078 	while ((skb = skb_dequeue(&conn->pending_rx)))
7079 		l2cap_recv_frame(conn, skb);
7080 }
7081 
7082 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7083 {
7084 	struct l2cap_conn *conn = hcon->l2cap_data;
7085 	struct hci_chan *hchan;
7086 
7087 	if (conn)
7088 		return conn;
7089 
7090 	hchan = hci_chan_create(hcon);
7091 	if (!hchan)
7092 		return NULL;
7093 
7094 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7095 	if (!conn) {
7096 		hci_chan_del(hchan);
7097 		return NULL;
7098 	}
7099 
7100 	kref_init(&conn->ref);
7101 	hcon->l2cap_data = conn;
7102 	conn->hcon = hci_conn_get(hcon);
7103 	conn->hchan = hchan;
7104 
7105 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7106 
7107 	switch (hcon->type) {
7108 	case LE_LINK:
7109 		if (hcon->hdev->le_mtu) {
7110 			conn->mtu = hcon->hdev->le_mtu;
7111 			break;
7112 		}
7113 		/* fall through */
7114 	default:
7115 		conn->mtu = hcon->hdev->acl_mtu;
7116 		break;
7117 	}
7118 
7119 	conn->feat_mask = 0;
7120 
7121 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7122 
7123 	if (hcon->type == ACL_LINK &&
7124 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7125 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7126 
7127 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7128 	    (bredr_sc_enabled(hcon->hdev) ||
7129 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7130 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7131 
7132 	mutex_init(&conn->ident_lock);
7133 	mutex_init(&conn->chan_lock);
7134 
7135 	INIT_LIST_HEAD(&conn->chan_l);
7136 	INIT_LIST_HEAD(&conn->users);
7137 
7138 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7139 
7140 	skb_queue_head_init(&conn->pending_rx);
7141 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7142 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7143 
7144 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7145 
7146 	return conn;
7147 }
7148 
7149 static bool is_valid_psm(u16 psm, u8 dst_type) {
7150 	if (!psm)
7151 		return false;
7152 
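	/* Valid LE PSM values are restricted to the single-octet range */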
7153 	if (bdaddr_type_is_le(dst_type))
7154 		return (psm <= 0x00ff);
7155 
7156 	/* PSM must be odd and lsb of upper byte must be 0 */
7157 	return ((psm & 0x0101) == 0x0001);
7158 }
7159 
7160 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7161 		       bdaddr_t *dst, u8 dst_type)
7162 {
7163 	struct l2cap_conn *conn;
7164 	struct hci_conn *hcon;
7165 	struct hci_dev *hdev;
7166 	int err;
7167 
7168 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7169 	       dst_type, __le16_to_cpu(psm));
7170 
7171 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7172 	if (!hdev)
7173 		return -EHOSTUNREACH;
7174 
7175 	hci_dev_lock(hdev);
7176 
7177 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7178 	    chan->chan_type != L2CAP_CHAN_RAW) {
7179 		err = -EINVAL;
7180 		goto done;
7181 	}
7182 
7183 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7184 		err = -EINVAL;
7185 		goto done;
7186 	}
7187 
7188 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7189 		err = -EINVAL;
7190 		goto done;
7191 	}
7192 
7193 	switch (chan->mode) {
7194 	case L2CAP_MODE_BASIC:
7195 		break;
7196 	case L2CAP_MODE_LE_FLOWCTL:
7197 		break;
7198 	case L2CAP_MODE_ERTM:
7199 	case L2CAP_MODE_STREAMING:
7200 		if (!disable_ertm)
7201 			break;
7202 		/* fall through */
7203 	default:
7204 		err = -EOPNOTSUPP;
7205 		goto done;
7206 	}
7207 
7208 	switch (chan->state) {
7209 	case BT_CONNECT:
7210 	case BT_CONNECT2:
7211 	case BT_CONFIG:
7212 		/* Already connecting */
7213 		err = 0;
7214 		goto done;
7215 
7216 	case BT_CONNECTED:
7217 		/* Already connected */
7218 		err = -EISCONN;
7219 		goto done;
7220 
7221 	case BT_OPEN:
7222 	case BT_BOUND:
7223 		/* Can connect */
7224 		break;
7225 
7226 	default:
7227 		err = -EBADFD;
7228 		goto done;
7229 	}
7230 
7231 	/* Set destination address and psm */
7232 	bacpy(&chan->dst, dst);
7233 	chan->dst_type = dst_type;
7234 
7235 	chan->psm = psm;
7236 	chan->dcid = cid;
7237 
7238 	if (bdaddr_type_is_le(dst_type)) {
7239 		/* Convert from L2CAP channel address type to HCI address type
7240 		 */
7241 		if (dst_type == BDADDR_LE_PUBLIC)
7242 			dst_type = ADDR_LE_DEV_PUBLIC;
7243 		else
7244 			dst_type = ADDR_LE_DEV_RANDOM;
7245 
7246 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7247 			hcon = hci_connect_le(hdev, dst, dst_type,
7248 					      chan->sec_level,
7249 					      HCI_LE_CONN_TIMEOUT,
7250 					      HCI_ROLE_SLAVE, NULL);
7251 		else
7252 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
7253 						   chan->sec_level,
7254 						   HCI_LE_CONN_TIMEOUT);
7255 
7256 	} else {
7257 		u8 auth_type = l2cap_get_auth_type(chan);
7258 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7259 	}
7260 
7261 	if (IS_ERR(hcon)) {
7262 		err = PTR_ERR(hcon);
7263 		goto done;
7264 	}
7265 
7266 	conn = l2cap_conn_add(hcon);
7267 	if (!conn) {
7268 		hci_conn_drop(hcon);
7269 		err = -ENOMEM;
7270 		goto done;
7271 	}
7272 
7273 	mutex_lock(&conn->chan_lock);
7274 	l2cap_chan_lock(chan);
7275 
7276 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7277 		hci_conn_drop(hcon);
7278 		err = -EBUSY;
7279 		goto chan_unlock;
7280 	}
7281 
7282 	/* Update source addr of the socket */
7283 	bacpy(&chan->src, &hcon->src);
7284 	chan->src_type = bdaddr_src_type(hcon);
7285 
7286 	__l2cap_chan_add(conn, chan);
7287 
7288 	/* l2cap_chan_add takes its own ref so we can drop this one */
7289 	hci_conn_drop(hcon);
7290 
7291 	l2cap_state_change(chan, BT_CONNECT);
7292 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7293 
7294 	/* Release chan->sport so that it can be reused by other
7295 	 * sockets (as it's only used for listening sockets).
7296 	 */
7297 	write_lock(&chan_list_lock);
7298 	chan->sport = 0;
7299 	write_unlock(&chan_list_lock);
7300 
7301 	if (hcon->state == BT_CONNECTED) {
7302 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7303 			__clear_chan_timer(chan);
7304 			if (l2cap_chan_check_security(chan, true))
7305 				l2cap_state_change(chan, BT_CONNECTED);
7306 		} else
7307 			l2cap_do_start(chan);
7308 	}
7309 
7310 	err = 0;
7311 
7312 chan_unlock:
7313 	l2cap_chan_unlock(chan);
7314 	mutex_unlock(&conn->chan_lock);
7315 done:
7316 	hci_dev_unlock(hdev);
7317 	hci_dev_put(hdev);
7318 	return err;
7319 }
7320 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7321 
7322 /* ---- L2CAP interface with lower layer (HCI) ---- */
7323 
7324 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7325 {
7326 	int exact = 0, lm1 = 0, lm2 = 0;
7327 	struct l2cap_chan *c;
7328 
7329 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7330 
7331 	/* Find listening sockets and check their link_mode */
7332 	read_lock(&chan_list_lock);
7333 	list_for_each_entry(c, &chan_list, global_l) {
7334 		if (c->state != BT_LISTEN)
7335 			continue;
7336 
7337 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7338 			lm1 |= HCI_LM_ACCEPT;
7339 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7340 				lm1 |= HCI_LM_MASTER;
7341 			exact++;
7342 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7343 			lm2 |= HCI_LM_ACCEPT;
7344 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7345 				lm2 |= HCI_LM_MASTER;
7346 		}
7347 	}
7348 	read_unlock(&chan_list_lock);
7349 
7350 	return exact ? lm1 : lm2;
7351 }
7352 
7353 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7354  * from an existing channel in the list or from the beginning of the
7355  * global list (by passing NULL as first parameter).
7356  */
7357 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7358 						  struct hci_conn *hcon)
7359 {
7360 	u8 src_type = bdaddr_src_type(hcon);
7361 
7362 	read_lock(&chan_list_lock);
7363 
7364 	if (c)
7365 		c = list_next_entry(c, global_l);
7366 	else
7367 		c = list_entry(chan_list.next, typeof(*c), global_l);
7368 
7369 	list_for_each_entry_from(c, &chan_list, global_l) {
7370 		if (c->chan_type != L2CAP_CHAN_FIXED)
7371 			continue;
7372 		if (c->state != BT_LISTEN)
7373 			continue;
7374 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7375 			continue;
7376 		if (src_type != c->src_type)
7377 			continue;
7378 
7379 		l2cap_chan_hold(c);
7380 		read_unlock(&chan_list_lock);
7381 		return c;
7382 	}
7383 
7384 	read_unlock(&chan_list_lock);
7385 
7386 	return NULL;
7387 }
7388 
7389 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7390 {
7391 	struct hci_dev *hdev = hcon->hdev;
7392 	struct l2cap_conn *conn;
7393 	struct l2cap_chan *pchan;
7394 	u8 dst_type;
7395 
7396 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7397 		return;
7398 
7399 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7400 
7401 	if (status) {
7402 		l2cap_conn_del(hcon, bt_to_errno(status));
7403 		return;
7404 	}
7405 
7406 	conn = l2cap_conn_add(hcon);
7407 	if (!conn)
7408 		return;
7409 
7410 	dst_type = bdaddr_dst_type(hcon);
7411 
7412 	/* If device is blocked, do not create channels for it */
7413 	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7414 		return;
7415 
7416 	/* Find fixed channels and notify them of the new connection. We
7417 	 * use multiple individual lookups, continuing each time where
7418 	 * we left off, because the list lock would prevent calling the
7419 	 * potentially sleeping l2cap_chan_lock() function.
7420 	 */
7421 	pchan = l2cap_global_fixed_chan(NULL, hcon);
7422 	while (pchan) {
7423 		struct l2cap_chan *chan, *next;
7424 
7425 		/* Client fixed channels should override server ones */
7426 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7427 			goto next;
7428 
7429 		l2cap_chan_lock(pchan);
7430 		chan = pchan->ops->new_connection(pchan);
7431 		if (chan) {
7432 			bacpy(&chan->src, &hcon->src);
7433 			bacpy(&chan->dst, &hcon->dst);
7434 			chan->src_type = bdaddr_src_type(hcon);
7435 			chan->dst_type = dst_type;
7436 
7437 			__l2cap_chan_add(conn, chan);
7438 		}
7439 
7440 		l2cap_chan_unlock(pchan);
7441 next:
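		/* Look up the next fixed channel before dropping our
		 * reference to the current one.
		 */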
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

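	/* Fixed channels are set up; signal that the connection is ready
	 * so any channels waiting on it can proceed.
	 */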
	l2cap_conn_ready(conn);
}

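/* Tell the HCI layer which reason code to use when this link is torn
 * down; without any L2CAP state, default to "remote user terminated
 * connection".
 */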
int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (!conn)
		return HCI_ERROR_REMOTE_USER_TERM;
	return conn->disc_reason;
}

static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}

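/* React to encryption changes on connection-oriented channels: channels at
 * BT_SECURITY_MEDIUM get a grace timer when encryption is lost, while
 * BT_SECURITY_HIGH and BT_SECURITY_FIPS channels are closed immediately.
 */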
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		} else if (chan->sec_level == BT_SECURITY_HIGH ||
			   chan->sec_level == BT_SECURITY_FIPS)
			l2cap_chan_close(chan, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
	}
}

static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

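		/* A successful encryption change raises the channel's
		 * security level to that of the underlying link.
		 */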
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

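		/* Security completed while the channel is still being set
		 * up: proceed with the outgoing connect request, or answer
		 * a pending incoming one (BT_CONNECT2).
		 */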
		if (chan->state == BT_CONNECT) {
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}

void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* Do not create an L2CAP connection for AMP controllers */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

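	/* ACL data can arrive fragmented: the start fragment carries the
	 * basic L2CAP header with the total frame length, and continuation
	 * fragments are appended to rx_skb until the frame is complete.
	 */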
	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* A start fragment always begins with the basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb, so clear the
			 * connection's rx_skb pointer first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

drop:
	kfree_skb(skb);
}

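/* Hooks registered with the HCI core; invoked for ACL and LE link events */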
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};

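/* One line per channel: src (type) dst (type) state psm scid dcid
 * imtu omtu sec_level mode
 */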
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
			   &c->src, c->src_type, &c->dst, c->dst_type,
			   c->state, __le16_to_cpu(c->psm),
			   c->scid, c->dcid, c->imtu, c->omtu,
			   c->sec_level, c->mode);
	}

	read_unlock(&chan_list_lock);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

static struct dentry *l2cap_debugfs;

int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	hci_register_cb(&l2cap_cb);

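	/* The debugfs entry is optional; skip it when bt_debugfs was
	 * never created.
	 */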
	if (IS_ERR_OR_NULL(bt_debugfs))
		return 0;

	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
					    NULL, &l2cap_debugfs_fops);

	return 0;
}

void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}

module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");