1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 
40 #include "smp.h"
41 #include "a2mp.h"
42 #include "amp.h"
43 
44 #define LE_FLOWCTL_MAX_CREDITS 65535
45 
46 bool disable_ertm;
47 
48 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
49 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };
50 
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
53 
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
56 
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 				       u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
60 			   void *data);
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63 
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 		     struct sk_buff_head *skbs, u8 event);
66 
67 static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
68 {
69 	if (hcon->type == LE_LINK) {
70 		if (type == ADDR_LE_DEV_PUBLIC)
71 			return BDADDR_LE_PUBLIC;
72 		else
73 			return BDADDR_LE_RANDOM;
74 	}
75 
76 	return BDADDR_BREDR;
77 }
78 
79 /* ---- L2CAP channels ---- */
80 
81 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
82 						   u16 cid)
83 {
84 	struct l2cap_chan *c;
85 
86 	list_for_each_entry(c, &conn->chan_l, list) {
87 		if (c->dcid == cid)
88 			return c;
89 	}
90 	return NULL;
91 }
92 
93 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
94 						   u16 cid)
95 {
96 	struct l2cap_chan *c;
97 
98 	list_for_each_entry(c, &conn->chan_l, list) {
99 		if (c->scid == cid)
100 			return c;
101 	}
102 	return NULL;
103 }
104 
105 /* Find channel with given SCID.
106  * Returns locked channel. */
107 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
108 						 u16 cid)
109 {
110 	struct l2cap_chan *c;
111 
112 	mutex_lock(&conn->chan_lock);
113 	c = __l2cap_get_chan_by_scid(conn, cid);
114 	if (c)
115 		l2cap_chan_lock(c);
116 	mutex_unlock(&conn->chan_lock);
117 
118 	return c;
119 }
120 
121 /* Find channel with given DCID.
122  * Returns locked channel.
123  */
124 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
125 						 u16 cid)
126 {
127 	struct l2cap_chan *c;
128 
129 	mutex_lock(&conn->chan_lock);
130 	c = __l2cap_get_chan_by_dcid(conn, cid);
131 	if (c)
132 		l2cap_chan_lock(c);
133 	mutex_unlock(&conn->chan_lock);
134 
135 	return c;
136 }
137 
138 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
139 						    u8 ident)
140 {
141 	struct l2cap_chan *c;
142 
143 	list_for_each_entry(c, &conn->chan_l, list) {
144 		if (c->ident == ident)
145 			return c;
146 	}
147 	return NULL;
148 }
149 
150 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
151 						  u8 ident)
152 {
153 	struct l2cap_chan *c;
154 
155 	mutex_lock(&conn->chan_lock);
156 	c = __l2cap_get_chan_by_ident(conn, ident);
157 	if (c)
158 		l2cap_chan_lock(c);
159 	mutex_unlock(&conn->chan_lock);
160 
161 	return c;
162 }
163 
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
165 {
166 	struct l2cap_chan *c;
167 
168 	list_for_each_entry(c, &chan_list, global_l) {
169 		if (c->sport == psm && !bacmp(&c->src, src))
170 			return c;
171 	}
172 	return NULL;
173 }
174 
175 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
176 {
177 	int err;
178 
179 	write_lock(&chan_list_lock);
180 
181 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
182 		err = -EADDRINUSE;
183 		goto done;
184 	}
185 
186 	if (psm) {
187 		chan->psm = psm;
188 		chan->sport = psm;
189 		err = 0;
190 	} else {
191 		u16 p;
192 
193 		err = -EINVAL;
194 		for (p = 0x1001; p < 0x1100; p += 2)
195 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
196 				chan->psm   = cpu_to_le16(p);
197 				chan->sport = cpu_to_le16(p);
198 				err = 0;
199 				break;
200 			}
201 	}
202 
203 done:
204 	write_unlock(&chan_list_lock);
205 	return err;
206 }
207 EXPORT_SYMBOL_GPL(l2cap_add_psm);
208 
209 int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
210 {
211 	write_lock(&chan_list_lock);
212 
213 	/* Override the defaults (which are for conn-oriented) */
214 	chan->omtu = L2CAP_DEFAULT_MTU;
215 	chan->chan_type = L2CAP_CHAN_FIXED;
216 
217 	chan->scid = scid;
218 
219 	write_unlock(&chan_list_lock);
220 
221 	return 0;
222 }
223 
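/* Pick the first unused dynamic CID on this connection; the end of the
 * dynamic range differs between BR/EDR and LE links. Returns 0 (the
 * reserved null CID) if the whole dynamic range is already in use.
 */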
224 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
225 {
226 	u16 cid, dyn_end;
227 
228 	if (conn->hcon->type == LE_LINK)
229 		dyn_end = L2CAP_CID_LE_DYN_END;
230 	else
231 		dyn_end = L2CAP_CID_DYN_END;
232 
233 	for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
234 		if (!__l2cap_get_chan_by_scid(conn, cid))
235 			return cid;
236 	}
237 
238 	return 0;
239 }
240 
241 static void l2cap_state_change(struct l2cap_chan *chan, int state)
242 {
243 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
244 	       state_to_string(state));
245 
246 	chan->state = state;
247 	chan->ops->state_change(chan, state, 0);
248 }
249 
250 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
251 						int state, int err)
252 {
253 	chan->state = state;
254 	chan->ops->state_change(chan, chan->state, err);
255 }
256 
257 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
258 {
259 	chan->ops->state_change(chan, chan->state, err);
260 }
261 
262 static void __set_retrans_timer(struct l2cap_chan *chan)
263 {
264 	if (!delayed_work_pending(&chan->monitor_timer) &&
265 	    chan->retrans_timeout) {
266 		l2cap_set_timer(chan, &chan->retrans_timer,
267 				msecs_to_jiffies(chan->retrans_timeout));
268 	}
269 }
270 
271 static void __set_monitor_timer(struct l2cap_chan *chan)
272 {
273 	__clear_retrans_timer(chan);
274 	if (chan->monitor_timeout) {
275 		l2cap_set_timer(chan, &chan->monitor_timer,
276 				msecs_to_jiffies(chan->monitor_timeout));
277 	}
278 }
279 
280 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
281 					       u16 seq)
282 {
283 	struct sk_buff *skb;
284 
285 	skb_queue_walk(head, skb) {
286 		if (bt_cb(skb)->control.txseq == seq)
287 			return skb;
288 	}
289 
290 	return NULL;
291 }
292 
293 /* ---- L2CAP sequence number lists ---- */
294 
295 /* For ERTM, ordered lists of sequence numbers must be tracked for
296  * SREJ requests that are received and for frames that are to be
297  * retransmitted. These seq_list functions implement a singly-linked
298  * list in an array, where membership in the list can also be checked
299  * in constant time. Items can also be added to the tail of the list
300  * and removed from the head in constant time, without further memory
301  * allocs or frees.
302  */
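/* Worked example: appending 5 and then 9 to an empty list leaves
 * head = 5, tail = 9, list[5 & mask] = 9 and
 * list[9 & mask] = L2CAP_SEQ_LIST_TAIL; a subsequent pop returns 5 and
 * promotes 9 to the head by following list[5 & mask].
 */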
303 
304 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
305 {
306 	size_t alloc_size, i;
307 
308 	/* Allocated size is a power of 2 to map sequence numbers
309 	 * (which may be up to 14 bits) in to a smaller array that is
310 	 * sized for the negotiated ERTM transmit windows.
311 	 */
312 	alloc_size = roundup_pow_of_two(size);
313 
314 	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
315 	if (!seq_list->list)
316 		return -ENOMEM;
317 
318 	seq_list->mask = alloc_size - 1;
319 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
320 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
321 	for (i = 0; i < alloc_size; i++)
322 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
323 
324 	return 0;
325 }
326 
327 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
328 {
329 	kfree(seq_list->list);
330 }
331 
332 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
333 					   u16 seq)
334 {
335 	/* Constant-time check for list membership */
336 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
337 }
338 
339 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
340 {
341 	u16 seq = seq_list->head;
342 	u16 mask = seq_list->mask;
343 
344 	seq_list->head = seq_list->list[seq & mask];
345 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
346 
347 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
348 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
349 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
350 	}
351 
352 	return seq;
353 }
354 
355 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
356 {
357 	u16 i;
358 
359 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
360 		return;
361 
362 	for (i = 0; i <= seq_list->mask; i++)
363 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
364 
365 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
366 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
367 }
368 
369 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
370 {
371 	u16 mask = seq_list->mask;
372 
373 	/* All appends happen in constant time */
374 
375 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
376 		return;
377 
378 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
379 		seq_list->head = seq;
380 	else
381 		seq_list->list[seq_list->tail & mask] = seq;
382 
383 	seq_list->tail = seq;
384 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
385 }
386 
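/* Channel timer expiry: close the channel with ECONNREFUSED or
 * ETIMEDOUT depending on its state, then drop the channel reference
 * that was held for the timer.
 */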
387 static void l2cap_chan_timeout(struct work_struct *work)
388 {
389 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
390 					       chan_timer.work);
391 	struct l2cap_conn *conn = chan->conn;
392 	int reason;
393 
394 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
395 
396 	mutex_lock(&conn->chan_lock);
397 	l2cap_chan_lock(chan);
398 
399 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
400 		reason = ECONNREFUSED;
401 	else if (chan->state == BT_CONNECT &&
402 		 chan->sec_level != BT_SECURITY_SDP)
403 		reason = ECONNREFUSED;
404 	else
405 		reason = ETIMEDOUT;
406 
407 	l2cap_chan_close(chan, reason);
408 
409 	l2cap_chan_unlock(chan);
410 
411 	chan->ops->close(chan);
412 	mutex_unlock(&conn->chan_lock);
413 
414 	l2cap_chan_put(chan);
415 }
416 
417 struct l2cap_chan *l2cap_chan_create(void)
418 {
419 	struct l2cap_chan *chan;
420 
421 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
422 	if (!chan)
423 		return NULL;
424 
425 	mutex_init(&chan->lock);
426 
427 	write_lock(&chan_list_lock);
428 	list_add(&chan->global_l, &chan_list);
429 	write_unlock(&chan_list_lock);
430 
431 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
432 
433 	chan->state = BT_OPEN;
434 
435 	kref_init(&chan->kref);
436 
437 	/* This flag is cleared in l2cap_chan_ready() */
438 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
439 
440 	BT_DBG("chan %p", chan);
441 
442 	return chan;
443 }
444 EXPORT_SYMBOL_GPL(l2cap_chan_create);
445 
446 static void l2cap_chan_destroy(struct kref *kref)
447 {
448 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
449 
450 	BT_DBG("chan %p", chan);
451 
452 	write_lock(&chan_list_lock);
453 	list_del(&chan->global_l);
454 	write_unlock(&chan_list_lock);
455 
456 	kfree(chan);
457 }
458 
459 void l2cap_chan_hold(struct l2cap_chan *c)
460 {
461 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
462 
463 	kref_get(&c->kref);
464 }
465 
466 void l2cap_chan_put(struct l2cap_chan *c)
467 {
468 	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
469 
470 	kref_put(&c->kref, l2cap_chan_destroy);
471 }
472 EXPORT_SYMBOL_GPL(l2cap_chan_put);
473 
474 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
475 {
476 	chan->fcs  = L2CAP_FCS_CRC16;
477 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
478 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
479 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
480 	chan->remote_max_tx = chan->max_tx;
481 	chan->remote_tx_win = chan->tx_win;
482 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
483 	chan->sec_level = BT_SECURITY_LOW;
484 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
485 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
486 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
487 	chan->conf_state = 0;
488 
489 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
490 }
491 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
492 
493 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
494 {
495 	chan->sdu = NULL;
496 	chan->sdu_last_frag = NULL;
497 	chan->sdu_len = 0;
498 	chan->tx_credits = 0;
499 	chan->rx_credits = le_max_credits;
500 	chan->mps = min_t(u16, chan->imtu, le_default_mps);
501 
502 	skb_queue_head_init(&chan->tx_q);
503 }
504 
505 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
506 {
507 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
508 	       __le16_to_cpu(chan->psm), chan->dcid);
509 
510 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
511 
512 	chan->conn = conn;
513 
514 	switch (chan->chan_type) {
515 	case L2CAP_CHAN_CONN_ORIENTED:
516 		/* Alloc CID for connection-oriented socket */
517 		chan->scid = l2cap_alloc_cid(conn);
518 		if (conn->hcon->type == ACL_LINK)
519 			chan->omtu = L2CAP_DEFAULT_MTU;
520 		break;
521 
522 	case L2CAP_CHAN_CONN_LESS:
523 		/* Connectionless socket */
524 		chan->scid = L2CAP_CID_CONN_LESS;
525 		chan->dcid = L2CAP_CID_CONN_LESS;
526 		chan->omtu = L2CAP_DEFAULT_MTU;
527 		break;
528 
529 	case L2CAP_CHAN_FIXED:
530 		/* Caller will set CID and CID specific MTU values */
531 		break;
532 
533 	default:
534 		/* Raw socket can send/recv signalling messages only */
535 		chan->scid = L2CAP_CID_SIGNALING;
536 		chan->dcid = L2CAP_CID_SIGNALING;
537 		chan->omtu = L2CAP_DEFAULT_MTU;
538 	}
539 
540 	chan->local_id		= L2CAP_BESTEFFORT_ID;
541 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
542 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
543 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
544 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
545 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
546 
547 	l2cap_chan_hold(chan);
548 
549 	/* Only keep a reference for fixed channels if they requested it */
550 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
551 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
552 		hci_conn_hold(conn->hcon);
553 
554 	list_add(&chan->list, &conn->chan_l);
555 }
556 
557 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
558 {
559 	mutex_lock(&conn->chan_lock);
560 	__l2cap_chan_add(conn, chan);
561 	mutex_unlock(&conn->chan_lock);
562 }
563 
564 void l2cap_chan_del(struct l2cap_chan *chan, int err)
565 {
566 	struct l2cap_conn *conn = chan->conn;
567 
568 	__clear_chan_timer(chan);
569 
570 	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
571 
572 	chan->ops->teardown(chan, err);
573 
574 	if (conn) {
575 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
576 		/* Delete from channel list */
577 		list_del(&chan->list);
578 
579 		l2cap_chan_put(chan);
580 
581 		chan->conn = NULL;
582 
583 		/* Reference was only held for non-fixed channels or
584 		 * fixed channels that explicitly requested it using the
585 		 * FLAG_HOLD_HCI_CONN flag.
586 		 */
587 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
588 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
589 			hci_conn_drop(conn->hcon);
590 
591 		if (mgr && mgr->bredr_chan == chan)
592 			mgr->bredr_chan = NULL;
593 	}
594 
595 	if (chan->hs_hchan) {
596 		struct hci_chan *hs_hchan = chan->hs_hchan;
597 
598 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
599 		amp_disconnect_logical_link(hs_hchan);
600 	}
601 
602 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
603 		return;
604 
605 	switch(chan->mode) {
606 	case L2CAP_MODE_BASIC:
607 		break;
608 
609 	case L2CAP_MODE_LE_FLOWCTL:
610 		skb_queue_purge(&chan->tx_q);
611 		break;
612 
613 	case L2CAP_MODE_ERTM:
614 		__clear_retrans_timer(chan);
615 		__clear_monitor_timer(chan);
616 		__clear_ack_timer(chan);
617 
618 		skb_queue_purge(&chan->srej_q);
619 
620 		l2cap_seq_list_free(&chan->srej_list);
621 		l2cap_seq_list_free(&chan->retrans_list);
622 
623 		/* fall through */
624 
625 	case L2CAP_MODE_STREAMING:
626 		skb_queue_purge(&chan->tx_q);
627 		break;
628 	}
629 
630 	return;
631 }
632 EXPORT_SYMBOL_GPL(l2cap_chan_del);
633 
634 static void l2cap_conn_update_id_addr(struct work_struct *work)
635 {
636 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
637 					       id_addr_update_work);
638 	struct hci_conn *hcon = conn->hcon;
639 	struct l2cap_chan *chan;
640 
641 	mutex_lock(&conn->chan_lock);
642 
643 	list_for_each_entry(chan, &conn->chan_l, list) {
644 		l2cap_chan_lock(chan);
645 		bacpy(&chan->dst, &hcon->dst);
646 		chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
647 		l2cap_chan_unlock(chan);
648 	}
649 
650 	mutex_unlock(&conn->chan_lock);
651 }
652 
653 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
654 {
655 	struct l2cap_conn *conn = chan->conn;
656 	struct l2cap_le_conn_rsp rsp;
657 	u16 result;
658 
659 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
660 		result = L2CAP_CR_AUTHORIZATION;
661 	else
662 		result = L2CAP_CR_BAD_PSM;
663 
664 	l2cap_state_change(chan, BT_DISCONN);
665 
666 	rsp.dcid    = cpu_to_le16(chan->scid);
667 	rsp.mtu     = cpu_to_le16(chan->imtu);
668 	rsp.mps     = cpu_to_le16(chan->mps);
669 	rsp.credits = cpu_to_le16(chan->rx_credits);
670 	rsp.result  = cpu_to_le16(result);
671 
672 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
673 		       &rsp);
674 }
675 
676 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
677 {
678 	struct l2cap_conn *conn = chan->conn;
679 	struct l2cap_conn_rsp rsp;
680 	u16 result;
681 
682 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
683 		result = L2CAP_CR_SEC_BLOCK;
684 	else
685 		result = L2CAP_CR_BAD_PSM;
686 
687 	l2cap_state_change(chan, BT_DISCONN);
688 
689 	rsp.scid   = cpu_to_le16(chan->dcid);
690 	rsp.dcid   = cpu_to_le16(chan->scid);
691 	rsp.result = cpu_to_le16(result);
692 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
693 
694 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
695 }
696 
697 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
698 {
699 	struct l2cap_conn *conn = chan->conn;
700 
701 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
702 
703 	switch (chan->state) {
704 	case BT_LISTEN:
705 		chan->ops->teardown(chan, 0);
706 		break;
707 
708 	case BT_CONNECTED:
709 	case BT_CONFIG:
710 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
711 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
712 			l2cap_send_disconn_req(chan, reason);
713 		} else
714 			l2cap_chan_del(chan, reason);
715 		break;
716 
717 	case BT_CONNECT2:
718 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
719 			if (conn->hcon->type == ACL_LINK)
720 				l2cap_chan_connect_reject(chan);
721 			else if (conn->hcon->type == LE_LINK)
722 				l2cap_chan_le_connect_reject(chan);
723 		}
724 
725 		l2cap_chan_del(chan, reason);
726 		break;
727 
728 	case BT_CONNECT:
729 	case BT_DISCONN:
730 		l2cap_chan_del(chan, reason);
731 		break;
732 
733 	default:
734 		chan->ops->teardown(chan, 0);
735 		break;
736 	}
737 }
738 EXPORT_SYMBOL(l2cap_chan_close);
739 
740 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
741 {
742 	switch (chan->chan_type) {
743 	case L2CAP_CHAN_RAW:
744 		switch (chan->sec_level) {
745 		case BT_SECURITY_HIGH:
746 		case BT_SECURITY_FIPS:
747 			return HCI_AT_DEDICATED_BONDING_MITM;
748 		case BT_SECURITY_MEDIUM:
749 			return HCI_AT_DEDICATED_BONDING;
750 		default:
751 			return HCI_AT_NO_BONDING;
752 		}
753 		break;
754 	case L2CAP_CHAN_CONN_LESS:
755 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
756 			if (chan->sec_level == BT_SECURITY_LOW)
757 				chan->sec_level = BT_SECURITY_SDP;
758 		}
759 		if (chan->sec_level == BT_SECURITY_HIGH ||
760 		    chan->sec_level == BT_SECURITY_FIPS)
761 			return HCI_AT_NO_BONDING_MITM;
762 		else
763 			return HCI_AT_NO_BONDING;
764 		break;
765 	case L2CAP_CHAN_CONN_ORIENTED:
766 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
767 			if (chan->sec_level == BT_SECURITY_LOW)
768 				chan->sec_level = BT_SECURITY_SDP;
769 
770 			if (chan->sec_level == BT_SECURITY_HIGH ||
771 			    chan->sec_level == BT_SECURITY_FIPS)
772 				return HCI_AT_NO_BONDING_MITM;
773 			else
774 				return HCI_AT_NO_BONDING;
775 		}
776 		/* fall through */
777 	default:
778 		switch (chan->sec_level) {
779 		case BT_SECURITY_HIGH:
780 		case BT_SECURITY_FIPS:
781 			return HCI_AT_GENERAL_BONDING_MITM;
782 		case BT_SECURITY_MEDIUM:
783 			return HCI_AT_GENERAL_BONDING;
784 		default:
785 			return HCI_AT_NO_BONDING;
786 		}
787 		break;
788 	}
789 }
790 
791 /* Service level security */
792 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
793 {
794 	struct l2cap_conn *conn = chan->conn;
795 	__u8 auth_type;
796 
797 	if (conn->hcon->type == LE_LINK)
798 		return smp_conn_security(conn->hcon, chan->sec_level);
799 
800 	auth_type = l2cap_get_auth_type(chan);
801 
802 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
803 				 initiator);
804 }
805 
806 static u8 l2cap_get_ident(struct l2cap_conn *conn)
807 {
808 	u8 id;
809 
810 	/* Get next available identifier.
811 	 *    1 - 128 are used by kernel.
812 	 *  129 - 199 are reserved.
813 	 *  200 - 254 are used by utilities like l2ping, etc.
814 	 */
815 
816 	mutex_lock(&conn->ident_lock);
817 
818 	if (++conn->tx_ident > 128)
819 		conn->tx_ident = 1;
820 
821 	id = conn->tx_ident;
822 
823 	mutex_unlock(&conn->ident_lock);
824 
825 	return id;
826 }
827 
828 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
829 			   void *data)
830 {
831 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
832 	u8 flags;
833 
834 	BT_DBG("code 0x%2.2x", code);
835 
836 	if (!skb)
837 		return;
838 
839 	if (lmp_no_flush_capable(conn->hcon->hdev))
840 		flags = ACL_START_NO_FLUSH;
841 	else
842 		flags = ACL_START;
843 
844 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
845 	skb->priority = HCI_PRIO_MAX;
846 
847 	hci_send_acl(conn->hchan, skb, flags);
848 }
849 
850 static bool __chan_is_moving(struct l2cap_chan *chan)
851 {
852 	return chan->move_state != L2CAP_MOVE_STABLE &&
853 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
854 }
855 
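/* Transmit a single frame for this channel: use the AMP logical link
 * when the channel has moved to a high-speed controller and is not
 * mid-move, otherwise send over the ACL link, requesting
 * non-flushable buffers when allowed and supported.
 */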
856 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
857 {
858 	struct hci_conn *hcon = chan->conn->hcon;
859 	u16 flags;
860 
861 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
862 	       skb->priority);
863 
864 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
865 		if (chan->hs_hchan)
866 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
867 		else
868 			kfree_skb(skb);
869 
870 		return;
871 	}
872 
873 	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
874 	    lmp_no_flush_capable(hcon->hdev))
875 		flags = ACL_START_NO_FLUSH;
876 	else
877 		flags = ACL_START;
878 
879 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
880 	hci_send_acl(chan->conn->hchan, skb, flags);
881 }
882 
883 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
884 {
885 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
886 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
887 
888 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
889 		/* S-Frame */
890 		control->sframe = 1;
891 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
892 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
893 
894 		control->sar = 0;
895 		control->txseq = 0;
896 	} else {
897 		/* I-Frame */
898 		control->sframe = 0;
899 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
900 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
901 
902 		control->poll = 0;
903 		control->super = 0;
904 	}
905 }
906 
907 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
908 {
909 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
910 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
911 
912 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
913 		/* S-Frame */
914 		control->sframe = 1;
915 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
916 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
917 
918 		control->sar = 0;
919 		control->txseq = 0;
920 	} else {
921 		/* I-Frame */
922 		control->sframe = 0;
923 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
924 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
925 
926 		control->poll = 0;
927 		control->super = 0;
928 	}
929 }
930 
931 static inline void __unpack_control(struct l2cap_chan *chan,
932 				    struct sk_buff *skb)
933 {
934 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
935 		__unpack_extended_control(get_unaligned_le32(skb->data),
936 					  &bt_cb(skb)->control);
937 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
938 	} else {
939 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
940 					  &bt_cb(skb)->control);
941 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
942 	}
943 }
944 
945 static u32 __pack_extended_control(struct l2cap_ctrl *control)
946 {
947 	u32 packed;
948 
949 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
950 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
951 
952 	if (control->sframe) {
953 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
954 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
955 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
956 	} else {
957 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
958 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
959 	}
960 
961 	return packed;
962 }
963 
964 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
965 {
966 	u16 packed;
967 
968 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
969 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
970 
971 	if (control->sframe) {
972 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
973 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
974 		packed |= L2CAP_CTRL_FRAME_TYPE;
975 	} else {
976 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
977 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
978 	}
979 
980 	return packed;
981 }
982 
983 static inline void __pack_control(struct l2cap_chan *chan,
984 				  struct l2cap_ctrl *control,
985 				  struct sk_buff *skb)
986 {
987 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
988 		put_unaligned_le32(__pack_extended_control(control),
989 				   skb->data + L2CAP_HDR_SIZE);
990 	} else {
991 		put_unaligned_le16(__pack_enhanced_control(control),
992 				   skb->data + L2CAP_HDR_SIZE);
993 	}
994 }
995 
996 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
997 {
998 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
999 		return L2CAP_EXT_HDR_SIZE;
1000 	else
1001 		return L2CAP_ENH_HDR_SIZE;
1002 }
1003 
1004 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1005 					       u32 control)
1006 {
1007 	struct sk_buff *skb;
1008 	struct l2cap_hdr *lh;
1009 	int hlen = __ertm_hdr_size(chan);
1010 
1011 	if (chan->fcs == L2CAP_FCS_CRC16)
1012 		hlen += L2CAP_FCS_SIZE;
1013 
1014 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1015 
1016 	if (!skb)
1017 		return ERR_PTR(-ENOMEM);
1018 
1019 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1020 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1021 	lh->cid = cpu_to_le16(chan->dcid);
1022 
1023 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1024 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1025 	else
1026 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1027 
1028 	if (chan->fcs == L2CAP_FCS_CRC16) {
1029 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1030 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1031 	}
1032 
1033 	skb->priority = HCI_PRIO_MAX;
1034 	return skb;
1035 }
1036 
1037 static void l2cap_send_sframe(struct l2cap_chan *chan,
1038 			      struct l2cap_ctrl *control)
1039 {
1040 	struct sk_buff *skb;
1041 	u32 control_field;
1042 
1043 	BT_DBG("chan %p, control %p", chan, control);
1044 
1045 	if (!control->sframe)
1046 		return;
1047 
1048 	if (__chan_is_moving(chan))
1049 		return;
1050 
1051 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1052 	    !control->poll)
1053 		control->final = 1;
1054 
1055 	if (control->super == L2CAP_SUPER_RR)
1056 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1057 	else if (control->super == L2CAP_SUPER_RNR)
1058 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1059 
1060 	if (control->super != L2CAP_SUPER_SREJ) {
1061 		chan->last_acked_seq = control->reqseq;
1062 		__clear_ack_timer(chan);
1063 	}
1064 
1065 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1066 	       control->final, control->poll, control->super);
1067 
1068 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1069 		control_field = __pack_extended_control(control);
1070 	else
1071 		control_field = __pack_enhanced_control(control);
1072 
1073 	skb = l2cap_create_sframe_pdu(chan, control_field);
1074 	if (!IS_ERR(skb))
1075 		l2cap_do_send(chan, skb);
1076 }
1077 
1078 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1079 {
1080 	struct l2cap_ctrl control;
1081 
1082 	BT_DBG("chan %p, poll %d", chan, poll);
1083 
1084 	memset(&control, 0, sizeof(control));
1085 	control.sframe = 1;
1086 	control.poll = poll;
1087 
1088 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1089 		control.super = L2CAP_SUPER_RNR;
1090 	else
1091 		control.super = L2CAP_SUPER_RR;
1092 
1093 	control.reqseq = chan->buffer_seq;
1094 	l2cap_send_sframe(chan, &control);
1095 }
1096 
1097 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1098 {
1099 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1100 		return true;
1101 
1102 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1103 }
1104 
1105 static bool __amp_capable(struct l2cap_chan *chan)
1106 {
1107 	struct l2cap_conn *conn = chan->conn;
1108 	struct hci_dev *hdev;
1109 	bool amp_available = false;
1110 
1111 	if (!conn->hs_enabled)
1112 		return false;
1113 
1114 	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1115 		return false;
1116 
1117 	read_lock(&hci_dev_list_lock);
1118 	list_for_each_entry(hdev, &hci_dev_list, list) {
1119 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1120 		    test_bit(HCI_UP, &hdev->flags)) {
1121 			amp_available = true;
1122 			break;
1123 		}
1124 	}
1125 	read_unlock(&hci_dev_list_lock);
1126 
1127 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1128 		return amp_available;
1129 
1130 	return false;
1131 }
1132 
1133 static bool l2cap_check_efs(struct l2cap_chan *chan)
1134 {
1135 	/* Check EFS parameters */
1136 	return true;
1137 }
1138 
1139 void l2cap_send_conn_req(struct l2cap_chan *chan)
1140 {
1141 	struct l2cap_conn *conn = chan->conn;
1142 	struct l2cap_conn_req req;
1143 
1144 	req.scid = cpu_to_le16(chan->scid);
1145 	req.psm  = chan->psm;
1146 
1147 	chan->ident = l2cap_get_ident(conn);
1148 
1149 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1150 
1151 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1152 }
1153 
1154 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1155 {
1156 	struct l2cap_create_chan_req req;
1157 	req.scid = cpu_to_le16(chan->scid);
1158 	req.psm  = chan->psm;
1159 	req.amp_id = amp_id;
1160 
1161 	chan->ident = l2cap_get_ident(chan->conn);
1162 
1163 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1164 		       sizeof(req), &req);
1165 }
1166 
1167 static void l2cap_move_setup(struct l2cap_chan *chan)
1168 {
1169 	struct sk_buff *skb;
1170 
1171 	BT_DBG("chan %p", chan);
1172 
1173 	if (chan->mode != L2CAP_MODE_ERTM)
1174 		return;
1175 
1176 	__clear_retrans_timer(chan);
1177 	__clear_monitor_timer(chan);
1178 	__clear_ack_timer(chan);
1179 
1180 	chan->retry_count = 0;
1181 	skb_queue_walk(&chan->tx_q, skb) {
1182 		if (bt_cb(skb)->control.retries)
1183 			bt_cb(skb)->control.retries = 1;
1184 		else
1185 			break;
1186 	}
1187 
1188 	chan->expected_tx_seq = chan->buffer_seq;
1189 
1190 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1191 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1192 	l2cap_seq_list_clear(&chan->retrans_list);
1193 	l2cap_seq_list_clear(&chan->srej_list);
1194 	skb_queue_purge(&chan->srej_q);
1195 
1196 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1197 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1198 
1199 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1200 }
1201 
1202 static void l2cap_move_done(struct l2cap_chan *chan)
1203 {
1204 	u8 move_role = chan->move_role;
1205 	BT_DBG("chan %p", chan);
1206 
1207 	chan->move_state = L2CAP_MOVE_STABLE;
1208 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1209 
1210 	if (chan->mode != L2CAP_MODE_ERTM)
1211 		return;
1212 
1213 	switch (move_role) {
1214 	case L2CAP_MOVE_ROLE_INITIATOR:
1215 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1216 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1217 		break;
1218 	case L2CAP_MOVE_ROLE_RESPONDER:
1219 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1220 		break;
1221 	}
1222 }
1223 
1224 static void l2cap_chan_ready(struct l2cap_chan *chan)
1225 {
1226 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1227 	chan->conf_state = 0;
1228 	__clear_chan_timer(chan);
1229 
1230 	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1231 		chan->ops->suspend(chan);
1232 
1233 	chan->state = BT_CONNECTED;
1234 
1235 	chan->ops->ready(chan);
1236 }
1237 
1238 static void l2cap_le_connect(struct l2cap_chan *chan)
1239 {
1240 	struct l2cap_conn *conn = chan->conn;
1241 	struct l2cap_le_conn_req req;
1242 
1243 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1244 		return;
1245 
1246 	req.psm     = chan->psm;
1247 	req.scid    = cpu_to_le16(chan->scid);
1248 	req.mtu     = cpu_to_le16(chan->imtu);
1249 	req.mps     = cpu_to_le16(chan->mps);
1250 	req.credits = cpu_to_le16(chan->rx_credits);
1251 
1252 	chan->ident = l2cap_get_ident(conn);
1253 
1254 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1255 		       sizeof(req), &req);
1256 }
1257 
1258 static void l2cap_le_start(struct l2cap_chan *chan)
1259 {
1260 	struct l2cap_conn *conn = chan->conn;
1261 
1262 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1263 		return;
1264 
1265 	if (!chan->psm) {
1266 		l2cap_chan_ready(chan);
1267 		return;
1268 	}
1269 
1270 	if (chan->state == BT_CONNECT)
1271 		l2cap_le_connect(chan);
1272 }
1273 
1274 static void l2cap_start_connection(struct l2cap_chan *chan)
1275 {
1276 	if (__amp_capable(chan)) {
1277 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1278 		a2mp_discover_amp(chan);
1279 	} else if (chan->conn->hcon->type == LE_LINK) {
1280 		l2cap_le_start(chan);
1281 	} else {
1282 		l2cap_send_conn_req(chan);
1283 	}
1284 }
1285 
1286 static void l2cap_request_info(struct l2cap_conn *conn)
1287 {
1288 	struct l2cap_info_req req;
1289 
1290 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1291 		return;
1292 
1293 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1294 
1295 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1296 	conn->info_ident = l2cap_get_ident(conn);
1297 
1298 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1299 
1300 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1301 		       sizeof(req), &req);
1302 }
1303 
1304 static void l2cap_do_start(struct l2cap_chan *chan)
1305 {
1306 	struct l2cap_conn *conn = chan->conn;
1307 
1308 	if (conn->hcon->type == LE_LINK) {
1309 		l2cap_le_start(chan);
1310 		return;
1311 	}
1312 
1313 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1314 		l2cap_request_info(conn);
1315 		return;
1316 	}
1317 
1318 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1319 		return;
1320 
1321 	if (l2cap_chan_check_security(chan, true) &&
1322 	    __l2cap_no_conn_pending(chan))
1323 		l2cap_start_connection(chan);
1324 }
1325 
1326 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1327 {
1328 	u32 local_feat_mask = l2cap_feat_mask;
1329 	if (!disable_ertm)
1330 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1331 
1332 	switch (mode) {
1333 	case L2CAP_MODE_ERTM:
1334 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1335 	case L2CAP_MODE_STREAMING:
1336 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1337 	default:
1338 		return 0x00;
1339 	}
1340 }
1341 
1342 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1343 {
1344 	struct l2cap_conn *conn = chan->conn;
1345 	struct l2cap_disconn_req req;
1346 
1347 	if (!conn)
1348 		return;
1349 
1350 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1351 		__clear_retrans_timer(chan);
1352 		__clear_monitor_timer(chan);
1353 		__clear_ack_timer(chan);
1354 	}
1355 
1356 	if (chan->scid == L2CAP_CID_A2MP) {
1357 		l2cap_state_change(chan, BT_DISCONN);
1358 		return;
1359 	}
1360 
1361 	req.dcid = cpu_to_le16(chan->dcid);
1362 	req.scid = cpu_to_le16(chan->scid);
1363 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1364 		       sizeof(req), &req);
1365 
1366 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1367 }
1368 
1369 /* ---- L2CAP connections ---- */
1370 static void l2cap_conn_start(struct l2cap_conn *conn)
1371 {
1372 	struct l2cap_chan *chan, *tmp;
1373 
1374 	BT_DBG("conn %p", conn);
1375 
1376 	mutex_lock(&conn->chan_lock);
1377 
1378 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1379 		l2cap_chan_lock(chan);
1380 
1381 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1382 			l2cap_chan_ready(chan);
1383 			l2cap_chan_unlock(chan);
1384 			continue;
1385 		}
1386 
1387 		if (chan->state == BT_CONNECT) {
1388 			if (!l2cap_chan_check_security(chan, true) ||
1389 			    !__l2cap_no_conn_pending(chan)) {
1390 				l2cap_chan_unlock(chan);
1391 				continue;
1392 			}
1393 
1394 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1395 			    && test_bit(CONF_STATE2_DEVICE,
1396 					&chan->conf_state)) {
1397 				l2cap_chan_close(chan, ECONNRESET);
1398 				l2cap_chan_unlock(chan);
1399 				continue;
1400 			}
1401 
1402 			l2cap_start_connection(chan);
1403 
1404 		} else if (chan->state == BT_CONNECT2) {
1405 			struct l2cap_conn_rsp rsp;
1406 			char buf[128];
1407 			rsp.scid = cpu_to_le16(chan->dcid);
1408 			rsp.dcid = cpu_to_le16(chan->scid);
1409 
1410 			if (l2cap_chan_check_security(chan, false)) {
1411 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1412 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1413 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1414 					chan->ops->defer(chan);
1415 
1416 				} else {
1417 					l2cap_state_change(chan, BT_CONFIG);
1418 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1419 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1420 				}
1421 			} else {
1422 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1423 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1424 			}
1425 
1426 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1427 				       sizeof(rsp), &rsp);
1428 
1429 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1430 			    rsp.result != L2CAP_CR_SUCCESS) {
1431 				l2cap_chan_unlock(chan);
1432 				continue;
1433 			}
1434 
1435 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1436 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1437 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1438 			chan->num_conf_req++;
1439 		}
1440 
1441 		l2cap_chan_unlock(chan);
1442 	}
1443 
1444 	mutex_unlock(&conn->chan_lock);
1445 }
1446 
1447 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1448 {
1449 	struct hci_conn *hcon = conn->hcon;
1450 	struct hci_dev *hdev = hcon->hdev;
1451 
1452 	BT_DBG("%s conn %p", hdev->name, conn);
1453 
1454 	/* For outgoing pairing which doesn't necessarily have an
1455 	 * associated socket (e.g. mgmt_pair_device).
1456 	 */
1457 	if (hcon->out)
1458 		smp_conn_security(hcon, hcon->pending_sec_level);
1459 
1460 	/* For LE slave connections, make sure the connection interval
1461 	 * is in the range of the minimum and maximum interval that has
1462 	 * been configured for this connection. If not, then trigger
1463 	 * the connection update procedure.
1464 	 */
1465 	if (hcon->role == HCI_ROLE_SLAVE &&
1466 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1467 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1468 		struct l2cap_conn_param_update_req req;
1469 
1470 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1471 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1472 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1473 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1474 
1475 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1476 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1477 	}
1478 }
1479 
1480 static void l2cap_conn_ready(struct l2cap_conn *conn)
1481 {
1482 	struct l2cap_chan *chan;
1483 	struct hci_conn *hcon = conn->hcon;
1484 
1485 	BT_DBG("conn %p", conn);
1486 
1487 	if (hcon->type == ACL_LINK)
1488 		l2cap_request_info(conn);
1489 
1490 	mutex_lock(&conn->chan_lock);
1491 
1492 	list_for_each_entry(chan, &conn->chan_l, list) {
1493 
1494 		l2cap_chan_lock(chan);
1495 
1496 		if (chan->scid == L2CAP_CID_A2MP) {
1497 			l2cap_chan_unlock(chan);
1498 			continue;
1499 		}
1500 
1501 		if (hcon->type == LE_LINK) {
1502 			l2cap_le_start(chan);
1503 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1504 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1505 				l2cap_chan_ready(chan);
1506 		} else if (chan->state == BT_CONNECT) {
1507 			l2cap_do_start(chan);
1508 		}
1509 
1510 		l2cap_chan_unlock(chan);
1511 	}
1512 
1513 	mutex_unlock(&conn->chan_lock);
1514 
1515 	if (hcon->type == LE_LINK)
1516 		l2cap_le_conn_ready(conn);
1517 
1518 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1519 }
1520 
1521 /* Notify sockets that we cannot guarantee reliability anymore */
1522 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1523 {
1524 	struct l2cap_chan *chan;
1525 
1526 	BT_DBG("conn %p", conn);
1527 
1528 	mutex_lock(&conn->chan_lock);
1529 
1530 	list_for_each_entry(chan, &conn->chan_l, list) {
1531 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1532 			l2cap_chan_set_err(chan, err);
1533 	}
1534 
1535 	mutex_unlock(&conn->chan_lock);
1536 }
1537 
1538 static void l2cap_info_timeout(struct work_struct *work)
1539 {
1540 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1541 					       info_timer.work);
1542 
1543 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1544 	conn->info_ident = 0;
1545 
1546 	l2cap_conn_start(conn);
1547 }
1548 
1549 /*
1550  * l2cap_user
1551  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1552  * callback is called during registration. The ->remove callback is called
1553  * during unregistration.
1554  * An l2cap_user object can either be explicitly unregistered or when the
1555  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1556  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1557  * External modules must own a reference to the l2cap_conn object if they intend
1558  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1559  * any time if they don't.
1560  */
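/* Typical (hypothetical) caller: embed a struct l2cap_user with ->probe
 * and ->remove set, take a reference with l2cap_conn_get(), call
 * l2cap_register_user(), and later balance it with
 * l2cap_unregister_user() followed by l2cap_conn_put().
 */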
1561 
1562 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1563 {
1564 	struct hci_dev *hdev = conn->hcon->hdev;
1565 	int ret;
1566 
1567 	/* We need to check whether l2cap_conn is registered. If it is not, we
1568 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1569 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1570 	 * relies on the parent hci_conn object to be locked. This itself relies
1571 	 * on the hci_dev object to be locked. So we must lock the hci device
1572 	 * here, too. */
1573 
1574 	hci_dev_lock(hdev);
1575 
1576 	if (user->list.next || user->list.prev) {
1577 		ret = -EINVAL;
1578 		goto out_unlock;
1579 	}
1580 
1581 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1582 	if (!conn->hchan) {
1583 		ret = -ENODEV;
1584 		goto out_unlock;
1585 	}
1586 
1587 	ret = user->probe(conn, user);
1588 	if (ret)
1589 		goto out_unlock;
1590 
1591 	list_add(&user->list, &conn->users);
1592 	ret = 0;
1593 
1594 out_unlock:
1595 	hci_dev_unlock(hdev);
1596 	return ret;
1597 }
1598 EXPORT_SYMBOL(l2cap_register_user);
1599 
1600 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1601 {
1602 	struct hci_dev *hdev = conn->hcon->hdev;
1603 
1604 	hci_dev_lock(hdev);
1605 
1606 	if (!user->list.next || !user->list.prev)
1607 		goto out_unlock;
1608 
1609 	list_del(&user->list);
1610 	user->list.next = NULL;
1611 	user->list.prev = NULL;
1612 	user->remove(conn, user);
1613 
1614 out_unlock:
1615 	hci_dev_unlock(hdev);
1616 }
1617 EXPORT_SYMBOL(l2cap_unregister_user);
1618 
1619 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1620 {
1621 	struct l2cap_user *user;
1622 
1623 	while (!list_empty(&conn->users)) {
1624 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1625 		list_del(&user->list);
1626 		user->list.next = NULL;
1627 		user->list.prev = NULL;
1628 		user->remove(conn, user);
1629 	}
1630 }
1631 
1632 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1633 {
1634 	struct l2cap_conn *conn = hcon->l2cap_data;
1635 	struct l2cap_chan *chan, *l;
1636 
1637 	if (!conn)
1638 		return;
1639 
1640 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1641 
1642 	kfree_skb(conn->rx_skb);
1643 
1644 	skb_queue_purge(&conn->pending_rx);
1645 
1646 	/* We can not call flush_work(&conn->pending_rx_work) here since we
1647 	 * might block if we are running on a worker from the same workqueue
1648 	 * pending_rx_work is waiting on.
1649 	 */
1650 	if (work_pending(&conn->pending_rx_work))
1651 		cancel_work_sync(&conn->pending_rx_work);
1652 
1653 	if (work_pending(&conn->id_addr_update_work))
1654 		cancel_work_sync(&conn->id_addr_update_work);
1655 
1656 	l2cap_unregister_all_users(conn);
1657 
1658 	/* Force the connection to be immediately dropped */
1659 	hcon->disc_timeout = 0;
1660 
1661 	mutex_lock(&conn->chan_lock);
1662 
1663 	/* Kill channels */
1664 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1665 		l2cap_chan_hold(chan);
1666 		l2cap_chan_lock(chan);
1667 
1668 		l2cap_chan_del(chan, err);
1669 
1670 		l2cap_chan_unlock(chan);
1671 
1672 		chan->ops->close(chan);
1673 		l2cap_chan_put(chan);
1674 	}
1675 
1676 	mutex_unlock(&conn->chan_lock);
1677 
1678 	hci_chan_del(conn->hchan);
1679 
1680 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1681 		cancel_delayed_work_sync(&conn->info_timer);
1682 
1683 	hcon->l2cap_data = NULL;
1684 	conn->hchan = NULL;
1685 	l2cap_conn_put(conn);
1686 }
1687 
1688 static void l2cap_conn_free(struct kref *ref)
1689 {
1690 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1691 
1692 	hci_conn_put(conn->hcon);
1693 	kfree(conn);
1694 }
1695 
1696 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1697 {
1698 	kref_get(&conn->ref);
1699 	return conn;
1700 }
1701 EXPORT_SYMBOL(l2cap_conn_get);
1702 
1703 void l2cap_conn_put(struct l2cap_conn *conn)
1704 {
1705 	kref_put(&conn->ref, l2cap_conn_free);
1706 }
1707 EXPORT_SYMBOL(l2cap_conn_put);
1708 
1709 /* ---- Socket interface ---- */
1710 
1711 /* Find socket with psm and source / destination bdaddr.
1712  * Returns closest match.
1713  */
1714 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1715 						   bdaddr_t *src,
1716 						   bdaddr_t *dst,
1717 						   u8 link_type)
1718 {
1719 	struct l2cap_chan *c, *c1 = NULL;
1720 
1721 	read_lock(&chan_list_lock);
1722 
1723 	list_for_each_entry(c, &chan_list, global_l) {
1724 		if (state && c->state != state)
1725 			continue;
1726 
1727 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1728 			continue;
1729 
1730 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1731 			continue;
1732 
1733 		if (c->psm == psm) {
1734 			int src_match, dst_match;
1735 			int src_any, dst_any;
1736 
1737 			/* Exact match. */
1738 			src_match = !bacmp(&c->src, src);
1739 			dst_match = !bacmp(&c->dst, dst);
1740 			if (src_match && dst_match) {
1741 				l2cap_chan_hold(c);
1742 				read_unlock(&chan_list_lock);
1743 				return c;
1744 			}
1745 
1746 			/* Closest match */
1747 			src_any = !bacmp(&c->src, BDADDR_ANY);
1748 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1749 			if ((src_match && dst_any) || (src_any && dst_match) ||
1750 			    (src_any && dst_any))
1751 				c1 = c;
1752 		}
1753 	}
1754 
1755 	if (c1)
1756 		l2cap_chan_hold(c1);
1757 
1758 	read_unlock(&chan_list_lock);
1759 
1760 	return c1;
1761 }
1762 
1763 static void l2cap_monitor_timeout(struct work_struct *work)
1764 {
1765 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1766 					       monitor_timer.work);
1767 
1768 	BT_DBG("chan %p", chan);
1769 
1770 	l2cap_chan_lock(chan);
1771 
1772 	if (!chan->conn) {
1773 		l2cap_chan_unlock(chan);
1774 		l2cap_chan_put(chan);
1775 		return;
1776 	}
1777 
1778 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1779 
1780 	l2cap_chan_unlock(chan);
1781 	l2cap_chan_put(chan);
1782 }
1783 
1784 static void l2cap_retrans_timeout(struct work_struct *work)
1785 {
1786 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1787 					       retrans_timer.work);
1788 
1789 	BT_DBG("chan %p", chan);
1790 
1791 	l2cap_chan_lock(chan);
1792 
1793 	if (!chan->conn) {
1794 		l2cap_chan_unlock(chan);
1795 		l2cap_chan_put(chan);
1796 		return;
1797 	}
1798 
1799 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1800 	l2cap_chan_unlock(chan);
1801 	l2cap_chan_put(chan);
1802 }
1803 
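/* Streaming mode transmit: frames are sent immediately and never
 * retransmitted, so each skb is dequeued, stamped with the next TxSeq
 * and an optional FCS, and handed straight to the controller.
 */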
1804 static void l2cap_streaming_send(struct l2cap_chan *chan,
1805 				 struct sk_buff_head *skbs)
1806 {
1807 	struct sk_buff *skb;
1808 	struct l2cap_ctrl *control;
1809 
1810 	BT_DBG("chan %p, skbs %p", chan, skbs);
1811 
1812 	if (__chan_is_moving(chan))
1813 		return;
1814 
1815 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1816 
1817 	while (!skb_queue_empty(&chan->tx_q)) {
1818 
1819 		skb = skb_dequeue(&chan->tx_q);
1820 
1821 		bt_cb(skb)->control.retries = 1;
1822 		control = &bt_cb(skb)->control;
1823 
1824 		control->reqseq = 0;
1825 		control->txseq = chan->next_tx_seq;
1826 
1827 		__pack_control(chan, control, skb);
1828 
1829 		if (chan->fcs == L2CAP_FCS_CRC16) {
1830 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1831 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1832 		}
1833 
1834 		l2cap_do_send(chan, skb);
1835 
1836 		BT_DBG("Sent txseq %u", control->txseq);
1837 
1838 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1839 		chan->frames_sent++;
1840 	}
1841 }
1842 
1843 static int l2cap_ertm_send(struct l2cap_chan *chan)
1844 {
1845 	struct sk_buff *skb, *tx_skb;
1846 	struct l2cap_ctrl *control;
1847 	int sent = 0;
1848 
1849 	BT_DBG("chan %p", chan);
1850 
1851 	if (chan->state != BT_CONNECTED)
1852 		return -ENOTCONN;
1853 
1854 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1855 		return 0;
1856 
1857 	if (__chan_is_moving(chan))
1858 		return 0;
1859 
1860 	while (chan->tx_send_head &&
1861 	       chan->unacked_frames < chan->remote_tx_win &&
1862 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1863 
1864 		skb = chan->tx_send_head;
1865 
1866 		bt_cb(skb)->control.retries = 1;
1867 		control = &bt_cb(skb)->control;
1868 
1869 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1870 			control->final = 1;
1871 
1872 		control->reqseq = chan->buffer_seq;
1873 		chan->last_acked_seq = chan->buffer_seq;
1874 		control->txseq = chan->next_tx_seq;
1875 
1876 		__pack_control(chan, control, skb);
1877 
1878 		if (chan->fcs == L2CAP_FCS_CRC16) {
1879 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1880 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1881 		}
1882 
1883 		/* Clone after data has been modified. Data is assumed to be
1884 		 * read-only (for locking purposes) on cloned sk_buffs.
1885 		 */
1886 		tx_skb = skb_clone(skb, GFP_KERNEL);
1887 
1888 		if (!tx_skb)
1889 			break;
1890 
1891 		__set_retrans_timer(chan);
1892 
1893 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1894 		chan->unacked_frames++;
1895 		chan->frames_sent++;
1896 		sent++;
1897 
1898 		if (skb_queue_is_last(&chan->tx_q, skb))
1899 			chan->tx_send_head = NULL;
1900 		else
1901 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1902 
1903 		l2cap_do_send(chan, tx_skb);
1904 		BT_DBG("Sent txseq %u", control->txseq);
1905 	}
1906 
1907 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1908 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
1909 
1910 	return sent;
1911 }
1912 
1913 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1914 {
1915 	struct l2cap_ctrl control;
1916 	struct sk_buff *skb;
1917 	struct sk_buff *tx_skb;
1918 	u16 seq;
1919 
1920 	BT_DBG("chan %p", chan);
1921 
1922 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1923 		return;
1924 
1925 	if (__chan_is_moving(chan))
1926 		return;
1927 
1928 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1929 		seq = l2cap_seq_list_pop(&chan->retrans_list);
1930 
1931 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1932 		if (!skb) {
1933 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
1934 			       seq);
1935 			continue;
1936 		}
1937 
1938 		bt_cb(skb)->control.retries++;
1939 		control = bt_cb(skb)->control;
1940 
1941 		if (chan->max_tx != 0 &&
1942 		    bt_cb(skb)->control.retries > chan->max_tx) {
1943 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
1944 			l2cap_send_disconn_req(chan, ECONNRESET);
1945 			l2cap_seq_list_clear(&chan->retrans_list);
1946 			break;
1947 		}
1948 
1949 		control.reqseq = chan->buffer_seq;
1950 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1951 			control.final = 1;
1952 		else
1953 			control.final = 0;
1954 
1955 		if (skb_cloned(skb)) {
1956 			/* Cloned sk_buffs are read-only, so we need a
1957 			 * writeable copy
1958 			 */
1959 			tx_skb = skb_copy(skb, GFP_KERNEL);
1960 		} else {
1961 			tx_skb = skb_clone(skb, GFP_KERNEL);
1962 		}
1963 
1964 		if (!tx_skb) {
1965 			l2cap_seq_list_clear(&chan->retrans_list);
1966 			break;
1967 		}
1968 
1969 		/* Update skb contents */
1970 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1971 			put_unaligned_le32(__pack_extended_control(&control),
1972 					   tx_skb->data + L2CAP_HDR_SIZE);
1973 		} else {
1974 			put_unaligned_le16(__pack_enhanced_control(&control),
1975 					   tx_skb->data + L2CAP_HDR_SIZE);
1976 		}
1977 
1978 		/* Update FCS */
1979 		if (chan->fcs == L2CAP_FCS_CRC16) {
1980 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
1981 					tx_skb->len - L2CAP_FCS_SIZE);
1982 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
1983 						L2CAP_FCS_SIZE);
1984 		}
1985 
1986 		l2cap_do_send(chan, tx_skb);
1987 
1988 		BT_DBG("Resent txseq %d", control.txseq);
1989 
1990 		chan->last_acked_seq = chan->buffer_seq;
1991 	}
1992 }
1993 
1994 static void l2cap_retransmit(struct l2cap_chan *chan,
1995 			     struct l2cap_ctrl *control)
1996 {
1997 	BT_DBG("chan %p, control %p", chan, control);
1998 
1999 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2000 	l2cap_ertm_resend(chan);
2001 }
2002 
2003 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2004 				 struct l2cap_ctrl *control)
2005 {
2006 	struct sk_buff *skb;
2007 
2008 	BT_DBG("chan %p, control %p", chan, control);
2009 
2010 	if (control->poll)
2011 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2012 
2013 	l2cap_seq_list_clear(&chan->retrans_list);
2014 
2015 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2016 		return;
2017 
2018 	if (chan->unacked_frames) {
2019 		skb_queue_walk(&chan->tx_q, skb) {
2020 			if (bt_cb(skb)->control.txseq == control->reqseq ||
2021 			    skb == chan->tx_send_head)
2022 				break;
2023 		}
2024 
2025 		skb_queue_walk_from(&chan->tx_q, skb) {
2026 			if (skb == chan->tx_send_head)
2027 				break;
2028 
2029 			l2cap_seq_list_append(&chan->retrans_list,
2030 					      bt_cb(skb)->control.txseq);
2031 		}
2032 
2033 		l2cap_ertm_resend(chan);
2034 	}
2035 }
2036 
2037 static void l2cap_send_ack(struct l2cap_chan *chan)
2038 {
2039 	struct l2cap_ctrl control;
2040 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2041 					 chan->last_acked_seq);
2042 	int threshold;
2043 
2044 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2045 	       chan, chan->last_acked_seq, chan->buffer_seq);
2046 
2047 	memset(&control, 0, sizeof(control));
2048 	control.sframe = 1;
2049 
2050 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2051 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2052 		__clear_ack_timer(chan);
2053 		control.super = L2CAP_SUPER_RNR;
2054 		control.reqseq = chan->buffer_seq;
2055 		l2cap_send_sframe(chan, &control);
2056 	} else {
2057 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2058 			l2cap_ertm_send(chan);
2059 			/* If any i-frames were sent, they included an ack */
2060 			if (chan->buffer_seq == chan->last_acked_seq)
2061 				frames_to_ack = 0;
2062 		}
2063 
2064 		/* Ack now if the window is three-quarters full.
2065 		 * Calculated without multiply or divide.
2066 		 */
2067 		threshold = chan->ack_win;
2068 		threshold += threshold << 1;
2069 		threshold >>= 2;
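		/* Worked example (illustrative, not in the original source):
		 * with ack_win = 8, threshold becomes 8 + 16 = 24, and
		 * 24 >> 2 = 6, i.e. three-quarters of the window, computed
		 * with shifts and adds only.
		 */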
2070 
2071 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2072 		       threshold);
2073 
2074 		if (frames_to_ack >= threshold) {
2075 			__clear_ack_timer(chan);
2076 			control.super = L2CAP_SUPER_RR;
2077 			control.reqseq = chan->buffer_seq;
2078 			l2cap_send_sframe(chan, &control);
2079 			frames_to_ack = 0;
2080 		}
2081 
2082 		if (frames_to_ack)
2083 			__set_ack_timer(chan);
2084 	}
2085 }
2086 
2087 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2088 					 struct msghdr *msg, int len,
2089 					 int count, struct sk_buff *skb)
2090 {
2091 	struct l2cap_conn *conn = chan->conn;
2092 	struct sk_buff **frag;
2093 	int sent = 0;
2094 
2095 	if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
2096 					msg->msg_iov, count))
2097 		return -EFAULT;
2098 
2099 	sent += count;
2100 	len  -= count;
2101 
2102 	/* Continuation fragments (no L2CAP header) */
2103 	frag = &skb_shinfo(skb)->frag_list;
2104 	while (len) {
2105 		struct sk_buff *tmp;
2106 
2107 		count = min_t(unsigned int, conn->mtu, len);
2108 
2109 		tmp = chan->ops->alloc_skb(chan, 0, count,
2110 					   msg->msg_flags & MSG_DONTWAIT);
2111 		if (IS_ERR(tmp))
2112 			return PTR_ERR(tmp);
2113 
2114 		*frag = tmp;
2115 
2116 		if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
2117 						msg->msg_iov, count))
2118 			return -EFAULT;
2119 
2120 		sent += count;
2121 		len  -= count;
2122 
2123 		skb->len += (*frag)->len;
2124 		skb->data_len += (*frag)->len;
2125 
2126 		frag = &(*frag)->next;
2127 	}
2128 
2129 	return sent;
2130 }
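/* Note (editorial, not in the original source): data beyond the first
 * conn->mtu bytes is not copied into the linear buffer above but chained
 * through skb_shinfo(skb)->frag_list, with skb->len and skb->data_len
 * updated to account for each fragment.
 */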
2131 
2132 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2133 						 struct msghdr *msg, size_t len)
2134 {
2135 	struct l2cap_conn *conn = chan->conn;
2136 	struct sk_buff *skb;
2137 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2138 	struct l2cap_hdr *lh;
2139 
2140 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2141 	       __le16_to_cpu(chan->psm), len);
2142 
2143 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2144 
2145 	skb = chan->ops->alloc_skb(chan, hlen, count,
2146 				   msg->msg_flags & MSG_DONTWAIT);
2147 	if (IS_ERR(skb))
2148 		return skb;
2149 
2150 	/* Create L2CAP header */
2151 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2152 	lh->cid = cpu_to_le16(chan->dcid);
2153 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2154 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2155 
2156 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2157 	if (unlikely(err < 0)) {
2158 		kfree_skb(skb);
2159 		return ERR_PTR(err);
2160 	}
2161 	return skb;
2162 }
2163 
2164 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2165 					      struct msghdr *msg, size_t len)
2166 {
2167 	struct l2cap_conn *conn = chan->conn;
2168 	struct sk_buff *skb;
2169 	int err, count;
2170 	struct l2cap_hdr *lh;
2171 
2172 	BT_DBG("chan %p len %zu", chan, len);
2173 
2174 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2175 
2176 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2177 				   msg->msg_flags & MSG_DONTWAIT);
2178 	if (IS_ERR(skb))
2179 		return skb;
2180 
2181 	/* Create L2CAP header */
2182 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2183 	lh->cid = cpu_to_le16(chan->dcid);
2184 	lh->len = cpu_to_le16(len);
2185 
2186 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2187 	if (unlikely(err < 0)) {
2188 		kfree_skb(skb);
2189 		return ERR_PTR(err);
2190 	}
2191 	return skb;
2192 }
2193 
2194 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2195 					       struct msghdr *msg, size_t len,
2196 					       u16 sdulen)
2197 {
2198 	struct l2cap_conn *conn = chan->conn;
2199 	struct sk_buff *skb;
2200 	int err, count, hlen;
2201 	struct l2cap_hdr *lh;
2202 
2203 	BT_DBG("chan %p len %zu", chan, len);
2204 
2205 	if (!conn)
2206 		return ERR_PTR(-ENOTCONN);
2207 
2208 	hlen = __ertm_hdr_size(chan);
2209 
2210 	if (sdulen)
2211 		hlen += L2CAP_SDULEN_SIZE;
2212 
2213 	if (chan->fcs == L2CAP_FCS_CRC16)
2214 		hlen += L2CAP_FCS_SIZE;
2215 
2216 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2217 
2218 	skb = chan->ops->alloc_skb(chan, hlen, count,
2219 				   msg->msg_flags & MSG_DONTWAIT);
2220 	if (IS_ERR(skb))
2221 		return skb;
2222 
2223 	/* Create L2CAP header */
2224 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2225 	lh->cid = cpu_to_le16(chan->dcid);
2226 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2227 
2228 	/* Control header is populated later */
2229 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2230 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2231 	else
2232 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2233 
2234 	if (sdulen)
2235 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2236 
2237 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2238 	if (unlikely(err < 0)) {
2239 		kfree_skb(skb);
2240 		return ERR_PTR(err);
2241 	}
2242 
2243 	bt_cb(skb)->control.fcs = chan->fcs;
2244 	bt_cb(skb)->control.retries = 0;
2245 	return skb;
2246 }
2247 
2248 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2249 			     struct sk_buff_head *seg_queue,
2250 			     struct msghdr *msg, size_t len)
2251 {
2252 	struct sk_buff *skb;
2253 	u16 sdu_len;
2254 	size_t pdu_len;
2255 	u8 sar;
2256 
2257 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2258 
2259 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2260 	 * so fragmented skbs are not used.  The HCI layer's handling
2261 	 * of fragmented skbs is not compatible with ERTM's queueing.
2262 	 */
2263 
2264 	/* PDU size is derived from the HCI MTU */
2265 	pdu_len = chan->conn->mtu;
2266 
2267 	/* Constrain PDU size for BR/EDR connections */
2268 	if (!chan->hs_hcon)
2269 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2270 
2271 	/* Adjust for largest possible L2CAP overhead. */
2272 	if (chan->fcs)
2273 		pdu_len -= L2CAP_FCS_SIZE;
2274 
2275 	pdu_len -= __ertm_hdr_size(chan);
2276 
2277 	/* Remote device may have requested smaller PDUs */
2278 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2279 
2280 	if (len <= pdu_len) {
2281 		sar = L2CAP_SAR_UNSEGMENTED;
2282 		sdu_len = 0;
2283 		pdu_len = len;
2284 	} else {
2285 		sar = L2CAP_SAR_START;
2286 		sdu_len = len;
2287 	}
2288 
2289 	while (len > 0) {
2290 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2291 
2292 		if (IS_ERR(skb)) {
2293 			__skb_queue_purge(seg_queue);
2294 			return PTR_ERR(skb);
2295 		}
2296 
2297 		bt_cb(skb)->control.sar = sar;
2298 		__skb_queue_tail(seg_queue, skb);
2299 
2300 		len -= pdu_len;
2301 		if (sdu_len)
2302 			sdu_len = 0;
2303 
2304 		if (len <= pdu_len) {
2305 			sar = L2CAP_SAR_END;
2306 			pdu_len = len;
2307 		} else {
2308 			sar = L2CAP_SAR_CONTINUE;
2309 		}
2310 	}
2311 
2312 	return 0;
2313 }
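/* Illustrative example (values assumed, not from the original source): if the
 * computed pdu_len works out to 1000 bytes, a 2500-byte SDU is queued as a
 * SAR_START I-frame carrying the 16-bit SDU length, a SAR_CONTINUE I-frame and
 * a SAR_END I-frame, while an SDU no larger than pdu_len goes out as a single
 * SAR_UNSEGMENTED I-frame.
 */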
2314 
2315 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2316 						   struct msghdr *msg,
2317 						   size_t len, u16 sdulen)
2318 {
2319 	struct l2cap_conn *conn = chan->conn;
2320 	struct sk_buff *skb;
2321 	int err, count, hlen;
2322 	struct l2cap_hdr *lh;
2323 
2324 	BT_DBG("chan %p len %zu", chan, len);
2325 
2326 	if (!conn)
2327 		return ERR_PTR(-ENOTCONN);
2328 
2329 	hlen = L2CAP_HDR_SIZE;
2330 
2331 	if (sdulen)
2332 		hlen += L2CAP_SDULEN_SIZE;
2333 
2334 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2335 
2336 	skb = chan->ops->alloc_skb(chan, hlen, count,
2337 				   msg->msg_flags & MSG_DONTWAIT);
2338 	if (IS_ERR(skb))
2339 		return skb;
2340 
2341 	/* Create L2CAP header */
2342 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2343 	lh->cid = cpu_to_le16(chan->dcid);
2344 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2345 
2346 	if (sdulen)
2347 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2348 
2349 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2350 	if (unlikely(err < 0)) {
2351 		kfree_skb(skb);
2352 		return ERR_PTR(err);
2353 	}
2354 
2355 	return skb;
2356 }
2357 
2358 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2359 				struct sk_buff_head *seg_queue,
2360 				struct msghdr *msg, size_t len)
2361 {
2362 	struct sk_buff *skb;
2363 	size_t pdu_len;
2364 	u16 sdu_len;
2365 
2366 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2367 
2368 	sdu_len = len;
2369 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
2370 
2371 	while (len > 0) {
2372 		if (len <= pdu_len)
2373 			pdu_len = len;
2374 
2375 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2376 		if (IS_ERR(skb)) {
2377 			__skb_queue_purge(seg_queue);
2378 			return PTR_ERR(skb);
2379 		}
2380 
2381 		__skb_queue_tail(seg_queue, skb);
2382 
2383 		len -= pdu_len;
2384 
2385 		if (sdu_len) {
2386 			sdu_len = 0;
2387 			pdu_len += L2CAP_SDULEN_SIZE;
2388 		}
2389 	}
2390 
2391 	return 0;
2392 }
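/* Illustrative example (numbers assumed): with remote_mps = 230, the first
 * LE K-frame carries the 2-byte SDU length plus 228 bytes of payload; once
 * pdu_len is bumped by L2CAP_SDULEN_SIZE above, each following K-frame
 * carries up to 230 payload bytes.
 */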
2393 
2394 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2395 {
2396 	struct sk_buff *skb;
2397 	int err;
2398 	struct sk_buff_head seg_queue;
2399 
2400 	if (!chan->conn)
2401 		return -ENOTCONN;
2402 
2403 	/* Connectionless channel */
2404 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2405 		skb = l2cap_create_connless_pdu(chan, msg, len);
2406 		if (IS_ERR(skb))
2407 			return PTR_ERR(skb);
2408 
2409 		/* Channel lock is released before requesting new skb and then
2410 		 * reacquired, so we need to recheck the channel state.
2411 		 */
2412 		if (chan->state != BT_CONNECTED) {
2413 			kfree_skb(skb);
2414 			return -ENOTCONN;
2415 		}
2416 
2417 		l2cap_do_send(chan, skb);
2418 		return len;
2419 	}
2420 
2421 	switch (chan->mode) {
2422 	case L2CAP_MODE_LE_FLOWCTL:
2423 		/* Check outgoing MTU */
2424 		if (len > chan->omtu)
2425 			return -EMSGSIZE;
2426 
2427 		if (!chan->tx_credits)
2428 			return -EAGAIN;
2429 
2430 		__skb_queue_head_init(&seg_queue);
2431 
2432 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2433 
2434 		if (chan->state != BT_CONNECTED) {
2435 			__skb_queue_purge(&seg_queue);
2436 			err = -ENOTCONN;
2437 		}
2438 
2439 		if (err)
2440 			return err;
2441 
2442 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2443 
2444 		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2445 			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2446 			chan->tx_credits--;
2447 		}
2448 
2449 		if (!chan->tx_credits)
2450 			chan->ops->suspend(chan);
2451 
2452 		err = len;
2453 
2454 		break;
2455 
2456 	case L2CAP_MODE_BASIC:
2457 		/* Check outgoing MTU */
2458 		if (len > chan->omtu)
2459 			return -EMSGSIZE;
2460 
2461 		/* Create a basic PDU */
2462 		skb = l2cap_create_basic_pdu(chan, msg, len);
2463 		if (IS_ERR(skb))
2464 			return PTR_ERR(skb);
2465 
2466 		/* Channel lock is released before requesting new skb and then
2467 		 * reacquired, so we need to recheck the channel state.
2468 		 */
2469 		if (chan->state != BT_CONNECTED) {
2470 			kfree_skb(skb);
2471 			return -ENOTCONN;
2472 		}
2473 
2474 		l2cap_do_send(chan, skb);
2475 		err = len;
2476 		break;
2477 
2478 	case L2CAP_MODE_ERTM:
2479 	case L2CAP_MODE_STREAMING:
2480 		/* Check outgoing MTU */
2481 		if (len > chan->omtu) {
2482 			err = -EMSGSIZE;
2483 			break;
2484 		}
2485 
2486 		__skb_queue_head_init(&seg_queue);
2487 
2488 		/* Do segmentation before calling in to the state machine,
2489 		 * since it's possible to block while waiting for memory
2490 		 * allocation.
2491 		 */
2492 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2493 
2494 		/* The channel could have been closed while segmenting,
2495 		 * check that it is still connected.
2496 		 */
2497 		if (chan->state != BT_CONNECTED) {
2498 			__skb_queue_purge(&seg_queue);
2499 			err = -ENOTCONN;
2500 		}
2501 
2502 		if (err)
2503 			break;
2504 
2505 		if (chan->mode == L2CAP_MODE_ERTM)
2506 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2507 		else
2508 			l2cap_streaming_send(chan, &seg_queue);
2509 
2510 		err = len;
2511 
2512 		/* If the skbs were not queued for sending, they'll still be in
2513 		 * seg_queue and need to be purged.
2514 		 */
2515 		__skb_queue_purge(&seg_queue);
2516 		break;
2517 
2518 	default:
2519 		BT_DBG("bad mode 0x%1.1x", chan->mode);
2520 		err = -EBADFD;
2521 	}
2522 
2523 	return err;
2524 }
2525 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2526 
2527 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2528 {
2529 	struct l2cap_ctrl control;
2530 	u16 seq;
2531 
2532 	BT_DBG("chan %p, txseq %u", chan, txseq);
2533 
2534 	memset(&control, 0, sizeof(control));
2535 	control.sframe = 1;
2536 	control.super = L2CAP_SUPER_SREJ;
2537 
2538 	for (seq = chan->expected_tx_seq; seq != txseq;
2539 	     seq = __next_seq(chan, seq)) {
2540 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2541 			control.reqseq = seq;
2542 			l2cap_send_sframe(chan, &control);
2543 			l2cap_seq_list_append(&chan->srej_list, seq);
2544 		}
2545 	}
2546 
2547 	chan->expected_tx_seq = __next_seq(chan, txseq);
2548 }
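/* Illustrative example (assumed sequence numbers): if expected_tx_seq is 3
 * and an I-frame with txseq 6 arrives, SREJ S-frames are sent for 3, 4 and 5
 * (skipping any already buffered in srej_q) and expected_tx_seq advances
 * to 7.
 */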
2549 
2550 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2551 {
2552 	struct l2cap_ctrl control;
2553 
2554 	BT_DBG("chan %p", chan);
2555 
2556 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2557 		return;
2558 
2559 	memset(&control, 0, sizeof(control));
2560 	control.sframe = 1;
2561 	control.super = L2CAP_SUPER_SREJ;
2562 	control.reqseq = chan->srej_list.tail;
2563 	l2cap_send_sframe(chan, &control);
2564 }
2565 
2566 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2567 {
2568 	struct l2cap_ctrl control;
2569 	u16 initial_head;
2570 	u16 seq;
2571 
2572 	BT_DBG("chan %p, txseq %u", chan, txseq);
2573 
2574 	memset(&control, 0, sizeof(control));
2575 	control.sframe = 1;
2576 	control.super = L2CAP_SUPER_SREJ;
2577 
2578 	/* Capture initial list head to allow only one pass through the list. */
2579 	initial_head = chan->srej_list.head;
2580 
2581 	do {
2582 		seq = l2cap_seq_list_pop(&chan->srej_list);
2583 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2584 			break;
2585 
2586 		control.reqseq = seq;
2587 		l2cap_send_sframe(chan, &control);
2588 		l2cap_seq_list_append(&chan->srej_list, seq);
2589 	} while (chan->srej_list.head != initial_head);
2590 }
2591 
2592 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2593 {
2594 	struct sk_buff *acked_skb;
2595 	u16 ackseq;
2596 
2597 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2598 
2599 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2600 		return;
2601 
2602 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2603 	       chan->expected_ack_seq, chan->unacked_frames);
2604 
2605 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2606 	     ackseq = __next_seq(chan, ackseq)) {
2607 
2608 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2609 		if (acked_skb) {
2610 			skb_unlink(acked_skb, &chan->tx_q);
2611 			kfree_skb(acked_skb);
2612 			chan->unacked_frames--;
2613 		}
2614 	}
2615 
2616 	chan->expected_ack_seq = reqseq;
2617 
2618 	if (chan->unacked_frames == 0)
2619 		__clear_retrans_timer(chan);
2620 
2621 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2622 }
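/* Illustrative example (assumed sequence numbers): if expected_ack_seq is 5
 * and an acknowledgement arrives with reqseq 8, frames 5, 6 and 7 are
 * unlinked from tx_q and freed, unacked_frames drops by three and
 * expected_ack_seq becomes 8.
 */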
2623 
2624 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2625 {
2626 	BT_DBG("chan %p", chan);
2627 
2628 	chan->expected_tx_seq = chan->buffer_seq;
2629 	l2cap_seq_list_clear(&chan->srej_list);
2630 	skb_queue_purge(&chan->srej_q);
2631 	chan->rx_state = L2CAP_RX_STATE_RECV;
2632 }
2633 
2634 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2635 				struct l2cap_ctrl *control,
2636 				struct sk_buff_head *skbs, u8 event)
2637 {
2638 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2639 	       event);
2640 
2641 	switch (event) {
2642 	case L2CAP_EV_DATA_REQUEST:
2643 		if (chan->tx_send_head == NULL)
2644 			chan->tx_send_head = skb_peek(skbs);
2645 
2646 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2647 		l2cap_ertm_send(chan);
2648 		break;
2649 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2650 		BT_DBG("Enter LOCAL_BUSY");
2651 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2652 
2653 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2654 			/* The SREJ_SENT state must be aborted if we are to
2655 			 * enter the LOCAL_BUSY state.
2656 			 */
2657 			l2cap_abort_rx_srej_sent(chan);
2658 		}
2659 
2660 		l2cap_send_ack(chan);
2661 
2662 		break;
2663 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2664 		BT_DBG("Exit LOCAL_BUSY");
2665 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2666 
2667 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2668 			struct l2cap_ctrl local_control;
2669 
2670 			memset(&local_control, 0, sizeof(local_control));
2671 			local_control.sframe = 1;
2672 			local_control.super = L2CAP_SUPER_RR;
2673 			local_control.poll = 1;
2674 			local_control.reqseq = chan->buffer_seq;
2675 			l2cap_send_sframe(chan, &local_control);
2676 
2677 			chan->retry_count = 1;
2678 			__set_monitor_timer(chan);
2679 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2680 		}
2681 		break;
2682 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2683 		l2cap_process_reqseq(chan, control->reqseq);
2684 		break;
2685 	case L2CAP_EV_EXPLICIT_POLL:
2686 		l2cap_send_rr_or_rnr(chan, 1);
2687 		chan->retry_count = 1;
2688 		__set_monitor_timer(chan);
2689 		__clear_ack_timer(chan);
2690 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2691 		break;
2692 	case L2CAP_EV_RETRANS_TO:
2693 		l2cap_send_rr_or_rnr(chan, 1);
2694 		chan->retry_count = 1;
2695 		__set_monitor_timer(chan);
2696 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2697 		break;
2698 	case L2CAP_EV_RECV_FBIT:
2699 		/* Nothing to process */
2700 		break;
2701 	default:
2702 		break;
2703 	}
2704 }
2705 
2706 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2707 				  struct l2cap_ctrl *control,
2708 				  struct sk_buff_head *skbs, u8 event)
2709 {
2710 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2711 	       event);
2712 
2713 	switch (event) {
2714 	case L2CAP_EV_DATA_REQUEST:
2715 		if (chan->tx_send_head == NULL)
2716 			chan->tx_send_head = skb_peek(skbs);
2717 		/* Queue data, but don't send. */
2718 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2719 		break;
2720 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2721 		BT_DBG("Enter LOCAL_BUSY");
2722 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2723 
2724 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2725 			/* The SREJ_SENT state must be aborted if we are to
2726 			 * enter the LOCAL_BUSY state.
2727 			 */
2728 			l2cap_abort_rx_srej_sent(chan);
2729 		}
2730 
2731 		l2cap_send_ack(chan);
2732 
2733 		break;
2734 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2735 		BT_DBG("Exit LOCAL_BUSY");
2736 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2737 
2738 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2739 			struct l2cap_ctrl local_control;
2740 			memset(&local_control, 0, sizeof(local_control));
2741 			local_control.sframe = 1;
2742 			local_control.super = L2CAP_SUPER_RR;
2743 			local_control.poll = 1;
2744 			local_control.reqseq = chan->buffer_seq;
2745 			l2cap_send_sframe(chan, &local_control);
2746 
2747 			chan->retry_count = 1;
2748 			__set_monitor_timer(chan);
2749 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2750 		}
2751 		break;
2752 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2753 		l2cap_process_reqseq(chan, control->reqseq);
2754 
2755 		/* Fall through */
2756 
2757 	case L2CAP_EV_RECV_FBIT:
2758 		if (control && control->final) {
2759 			__clear_monitor_timer(chan);
2760 			if (chan->unacked_frames > 0)
2761 				__set_retrans_timer(chan);
2762 			chan->retry_count = 0;
2763 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2764 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2765 		}
2766 		break;
2767 	case L2CAP_EV_EXPLICIT_POLL:
2768 		/* Ignore */
2769 		break;
2770 	case L2CAP_EV_MONITOR_TO:
2771 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2772 			l2cap_send_rr_or_rnr(chan, 1);
2773 			__set_monitor_timer(chan);
2774 			chan->retry_count++;
2775 		} else {
2776 			l2cap_send_disconn_req(chan, ECONNABORTED);
2777 		}
2778 		break;
2779 	default:
2780 		break;
2781 	}
2782 }
2783 
2784 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2785 		     struct sk_buff_head *skbs, u8 event)
2786 {
2787 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2788 	       chan, control, skbs, event, chan->tx_state);
2789 
2790 	switch (chan->tx_state) {
2791 	case L2CAP_TX_STATE_XMIT:
2792 		l2cap_tx_state_xmit(chan, control, skbs, event);
2793 		break;
2794 	case L2CAP_TX_STATE_WAIT_F:
2795 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2796 		break;
2797 	default:
2798 		/* Ignore event */
2799 		break;
2800 	}
2801 }
2802 
2803 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2804 			     struct l2cap_ctrl *control)
2805 {
2806 	BT_DBG("chan %p, control %p", chan, control);
2807 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2808 }
2809 
2810 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2811 				  struct l2cap_ctrl *control)
2812 {
2813 	BT_DBG("chan %p, control %p", chan, control);
2814 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2815 }
2816 
2817 /* Copy frame to all raw sockets on that connection */
2818 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2819 {
2820 	struct sk_buff *nskb;
2821 	struct l2cap_chan *chan;
2822 
2823 	BT_DBG("conn %p", conn);
2824 
2825 	mutex_lock(&conn->chan_lock);
2826 
2827 	list_for_each_entry(chan, &conn->chan_l, list) {
2828 		if (chan->chan_type != L2CAP_CHAN_RAW)
2829 			continue;
2830 
2831 		/* Don't send frame to the channel it came from */
2832 		if (bt_cb(skb)->chan == chan)
2833 			continue;
2834 
2835 		nskb = skb_clone(skb, GFP_KERNEL);
2836 		if (!nskb)
2837 			continue;
2838 		if (chan->ops->recv(chan, nskb))
2839 			kfree_skb(nskb);
2840 	}
2841 
2842 	mutex_unlock(&conn->chan_lock);
2843 }
2844 
2845 /* ---- L2CAP signalling commands ---- */
2846 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2847 				       u8 ident, u16 dlen, void *data)
2848 {
2849 	struct sk_buff *skb, **frag;
2850 	struct l2cap_cmd_hdr *cmd;
2851 	struct l2cap_hdr *lh;
2852 	int len, count;
2853 
2854 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2855 	       conn, code, ident, dlen);
2856 
2857 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2858 		return NULL;
2859 
2860 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2861 	count = min_t(unsigned int, conn->mtu, len);
2862 
2863 	skb = bt_skb_alloc(count, GFP_KERNEL);
2864 	if (!skb)
2865 		return NULL;
2866 
2867 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2868 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2869 
2870 	if (conn->hcon->type == LE_LINK)
2871 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2872 	else
2873 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2874 
2875 	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2876 	cmd->code  = code;
2877 	cmd->ident = ident;
2878 	cmd->len   = cpu_to_le16(dlen);
2879 
2880 	if (dlen) {
2881 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2882 		memcpy(skb_put(skb, count), data, count);
2883 		data += count;
2884 	}
2885 
2886 	len -= skb->len;
2887 
2888 	/* Continuation fragments (no L2CAP header) */
2889 	frag = &skb_shinfo(skb)->frag_list;
2890 	while (len) {
2891 		count = min_t(unsigned int, conn->mtu, len);
2892 
2893 		*frag = bt_skb_alloc(count, GFP_KERNEL);
2894 		if (!*frag)
2895 			goto fail;
2896 
2897 		memcpy(skb_put(*frag, count), data, count);
2898 
2899 		len  -= count;
2900 		data += count;
2901 
2902 		frag = &(*frag)->next;
2903 	}
2904 
2905 	return skb;
2906 
2907 fail:
2908 	kfree_skb(skb);
2909 	return NULL;
2910 }
2911 
2912 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2913 				     unsigned long *val)
2914 {
2915 	struct l2cap_conf_opt *opt = *ptr;
2916 	int len;
2917 
2918 	len = L2CAP_CONF_OPT_SIZE + opt->len;
2919 	*ptr += len;
2920 
2921 	*type = opt->type;
2922 	*olen = opt->len;
2923 
2924 	switch (opt->len) {
2925 	case 1:
2926 		*val = *((u8 *) opt->val);
2927 		break;
2928 
2929 	case 2:
2930 		*val = get_unaligned_le16(opt->val);
2931 		break;
2932 
2933 	case 4:
2934 		*val = get_unaligned_le32(opt->val);
2935 		break;
2936 
2937 	default:
2938 		*val = (unsigned long) opt->val;
2939 		break;
2940 	}
2941 
2942 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
2943 	return len;
2944 }
2945 
2946 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
2947 {
2948 	struct l2cap_conf_opt *opt = *ptr;
2949 
2950 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
2951 
2952 	if (size < L2CAP_CONF_OPT_SIZE + len)
2953 		return;
2954 
2955 	opt->type = type;
2956 	opt->len  = len;
2957 
2958 	switch (len) {
2959 	case 1:
2960 		*((u8 *) opt->val)  = val;
2961 		break;
2962 
2963 	case 2:
2964 		put_unaligned_le16(val, opt->val);
2965 		break;
2966 
2967 	case 4:
2968 		put_unaligned_le32(val, opt->val);
2969 		break;
2970 
2971 	default:
2972 		memcpy(opt->val, (void *) val, len);
2973 		break;
2974 	}
2975 
2976 	*ptr += L2CAP_CONF_OPT_SIZE + len;
2977 }
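/* Illustrative wire layout (example values assumed, not from the original
 * source): an MTU option with value 672 is emitted as the four bytes
 * 0x01 0x02 0xa0 0x02, i.e. type L2CAP_CONF_MTU, length 2 and the
 * little-endian value 0x02a0.
 */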
2978 
2979 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
2980 {
2981 	struct l2cap_conf_efs efs;
2982 
2983 	switch (chan->mode) {
2984 	case L2CAP_MODE_ERTM:
2985 		efs.id		= chan->local_id;
2986 		efs.stype	= chan->local_stype;
2987 		efs.msdu	= cpu_to_le16(chan->local_msdu);
2988 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
2989 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2990 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
2991 		break;
2992 
2993 	case L2CAP_MODE_STREAMING:
2994 		efs.id		= 1;
2995 		efs.stype	= L2CAP_SERV_BESTEFFORT;
2996 		efs.msdu	= cpu_to_le16(chan->local_msdu);
2997 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
2998 		efs.acc_lat	= 0;
2999 		efs.flush_to	= 0;
3000 		break;
3001 
3002 	default:
3003 		return;
3004 	}
3005 
3006 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3007 			   (unsigned long) &efs, size);
3008 }
3009 
3010 static void l2cap_ack_timeout(struct work_struct *work)
3011 {
3012 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3013 					       ack_timer.work);
3014 	u16 frames_to_ack;
3015 
3016 	BT_DBG("chan %p", chan);
3017 
3018 	l2cap_chan_lock(chan);
3019 
3020 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3021 				     chan->last_acked_seq);
3022 
3023 	if (frames_to_ack)
3024 		l2cap_send_rr_or_rnr(chan, 0);
3025 
3026 	l2cap_chan_unlock(chan);
3027 	l2cap_chan_put(chan);
3028 }
3029 
3030 int l2cap_ertm_init(struct l2cap_chan *chan)
3031 {
3032 	int err;
3033 
3034 	chan->next_tx_seq = 0;
3035 	chan->expected_tx_seq = 0;
3036 	chan->expected_ack_seq = 0;
3037 	chan->unacked_frames = 0;
3038 	chan->buffer_seq = 0;
3039 	chan->frames_sent = 0;
3040 	chan->last_acked_seq = 0;
3041 	chan->sdu = NULL;
3042 	chan->sdu_last_frag = NULL;
3043 	chan->sdu_len = 0;
3044 
3045 	skb_queue_head_init(&chan->tx_q);
3046 
3047 	chan->local_amp_id = AMP_ID_BREDR;
3048 	chan->move_id = AMP_ID_BREDR;
3049 	chan->move_state = L2CAP_MOVE_STABLE;
3050 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3051 
3052 	if (chan->mode != L2CAP_MODE_ERTM)
3053 		return 0;
3054 
3055 	chan->rx_state = L2CAP_RX_STATE_RECV;
3056 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3057 
3058 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3059 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3060 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3061 
3062 	skb_queue_head_init(&chan->srej_q);
3063 
3064 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3065 	if (err < 0)
3066 		return err;
3067 
3068 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3069 	if (err < 0)
3070 		l2cap_seq_list_free(&chan->srej_list);
3071 
3072 	return err;
3073 }
3074 
3075 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3076 {
3077 	switch (mode) {
3078 	case L2CAP_MODE_STREAMING:
3079 	case L2CAP_MODE_ERTM:
3080 		if (l2cap_mode_supported(mode, remote_feat_mask))
3081 			return mode;
3082 		/* fall through */
3083 	default:
3084 		return L2CAP_MODE_BASIC;
3085 	}
3086 }
3087 
3088 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3089 {
3090 	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
3091 }
3092 
3093 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3094 {
3095 	return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
3096 }
3097 
3098 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3099 				      struct l2cap_conf_rfc *rfc)
3100 {
3101 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3102 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3103 
3104 		/* Class 1 devices must have ERTM timeouts
3105 		 * exceeding the Link Supervision Timeout.  The
3106 		 * default Link Supervision Timeout for AMP
3107 		 * controllers is 10 seconds.
3108 		 *
3109 		 * Class 1 devices use 0xffffffff for their
3110 		 * best-effort flush timeout, so the clamping logic
3111 		 * will result in a timeout that meets the above
3112 		 * requirement.  ERTM timeouts are 16-bit values, so
3113 		 * the maximum timeout is 65.535 seconds.
3114 		 */
3115 
3116 		/* Convert timeout to milliseconds and round */
3117 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3118 
3119 		/* This is the recommended formula for class 2 devices
3120 		 * that start ERTM timers when packets are sent to the
3121 		 * controller.
3122 		 */
3123 		ertm_to = 3 * ertm_to + 500;
3124 
3125 		if (ertm_to > 0xffff)
3126 			ertm_to = 0xffff;
3127 
3128 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3129 		rfc->monitor_timeout = rfc->retrans_timeout;
3130 	} else {
3131 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3132 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3133 	}
3134 }
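/* Worked example (illustrative; the flush timeout is taken here to be in
 * microseconds): an amp_be_flush_to of 2000000 rounds up to 2000 ms, giving
 * 3 * 2000 + 500 = 6500 ms for both retrans_timeout and monitor_timeout;
 * anything above 65535 ms is clamped to fit the 16-bit RFC fields.
 */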
3135 
3136 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3137 {
3138 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3139 	    __l2cap_ews_supported(chan->conn)) {
3140 		/* use extended control field */
3141 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3142 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3143 	} else {
3144 		chan->tx_win = min_t(u16, chan->tx_win,
3145 				     L2CAP_DEFAULT_TX_WINDOW);
3146 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3147 	}
3148 	chan->ack_win = chan->tx_win;
3149 }
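/* Example (illustrative): a requested tx_win of 200 keeps its size and
 * enables the extended control field when the peer advertises extended
 * window support; otherwise it is clamped to L2CAP_DEFAULT_TX_WINDOW (63)
 * and the enhanced control field is used.
 */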
3150 
3151 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3152 {
3153 	struct l2cap_conf_req *req = data;
3154 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3155 	void *ptr = req->data;
3156 	void *endptr = data + data_size;
3157 	u16 size;
3158 
3159 	BT_DBG("chan %p", chan);
3160 
3161 	if (chan->num_conf_req || chan->num_conf_rsp)
3162 		goto done;
3163 
3164 	switch (chan->mode) {
3165 	case L2CAP_MODE_STREAMING:
3166 	case L2CAP_MODE_ERTM:
3167 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3168 			break;
3169 
3170 		if (__l2cap_efs_supported(chan->conn))
3171 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3172 
3173 		/* fall through */
3174 	default:
3175 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3176 		break;
3177 	}
3178 
3179 done:
3180 	if (chan->imtu != L2CAP_DEFAULT_MTU)
3181 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3182 
3183 	switch (chan->mode) {
3184 	case L2CAP_MODE_BASIC:
3185 		if (disable_ertm)
3186 			break;
3187 
3188 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3189 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3190 			break;
3191 
3192 		rfc.mode            = L2CAP_MODE_BASIC;
3193 		rfc.txwin_size      = 0;
3194 		rfc.max_transmit    = 0;
3195 		rfc.retrans_timeout = 0;
3196 		rfc.monitor_timeout = 0;
3197 		rfc.max_pdu_size    = 0;
3198 
3199 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3200 				   (unsigned long) &rfc, endptr - ptr);
3201 		break;
3202 
3203 	case L2CAP_MODE_ERTM:
3204 		rfc.mode            = L2CAP_MODE_ERTM;
3205 		rfc.max_transmit    = chan->max_tx;
3206 
3207 		__l2cap_set_ertm_timeouts(chan, &rfc);
3208 
3209 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3210 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3211 			     L2CAP_FCS_SIZE);
3212 		rfc.max_pdu_size = cpu_to_le16(size);
3213 
3214 		l2cap_txwin_setup(chan);
3215 
3216 		rfc.txwin_size = min_t(u16, chan->tx_win,
3217 				       L2CAP_DEFAULT_TX_WINDOW);
3218 
3219 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3220 				   (unsigned long) &rfc, endptr - ptr);
3221 
3222 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3223 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3224 
3225 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3226 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3227 					   chan->tx_win, endptr - ptr);
3228 
3229 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3230 			if (chan->fcs == L2CAP_FCS_NONE ||
3231 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3232 				chan->fcs = L2CAP_FCS_NONE;
3233 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3234 						   chan->fcs, endptr - ptr);
3235 			}
3236 		break;
3237 
3238 	case L2CAP_MODE_STREAMING:
3239 		l2cap_txwin_setup(chan);
3240 		rfc.mode            = L2CAP_MODE_STREAMING;
3241 		rfc.txwin_size      = 0;
3242 		rfc.max_transmit    = 0;
3243 		rfc.retrans_timeout = 0;
3244 		rfc.monitor_timeout = 0;
3245 
3246 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3247 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3248 			     L2CAP_FCS_SIZE);
3249 		rfc.max_pdu_size = cpu_to_le16(size);
3250 
3251 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3252 				   (unsigned long) &rfc, endptr - ptr);
3253 
3254 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3255 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3256 
3257 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3258 			if (chan->fcs == L2CAP_FCS_NONE ||
3259 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3260 				chan->fcs = L2CAP_FCS_NONE;
3261 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3262 						   chan->fcs, endptr - ptr);
3263 			}
3264 		break;
3265 	}
3266 
3267 	req->dcid  = cpu_to_le16(chan->dcid);
3268 	req->flags = cpu_to_le16(0);
3269 
3270 	return ptr - data;
3271 }
3272 
3273 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3274 {
3275 	struct l2cap_conf_rsp *rsp = data;
3276 	void *ptr = rsp->data;
3277 	void *endptr = data + data_size;
3278 	void *req = chan->conf_req;
3279 	int len = chan->conf_len;
3280 	int type, hint, olen;
3281 	unsigned long val;
3282 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3283 	struct l2cap_conf_efs efs;
3284 	u8 remote_efs = 0;
3285 	u16 mtu = L2CAP_DEFAULT_MTU;
3286 	u16 result = L2CAP_CONF_SUCCESS;
3287 	u16 size;
3288 
3289 	BT_DBG("chan %p", chan);
3290 
3291 	while (len >= L2CAP_CONF_OPT_SIZE) {
3292 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3293 
3294 		hint  = type & L2CAP_CONF_HINT;
3295 		type &= L2CAP_CONF_MASK;
3296 
3297 		switch (type) {
3298 		case L2CAP_CONF_MTU:
3299 			mtu = val;
3300 			break;
3301 
3302 		case L2CAP_CONF_FLUSH_TO:
3303 			chan->flush_to = val;
3304 			break;
3305 
3306 		case L2CAP_CONF_QOS:
3307 			break;
3308 
3309 		case L2CAP_CONF_RFC:
3310 			if (olen == sizeof(rfc))
3311 				memcpy(&rfc, (void *) val, olen);
3312 			break;
3313 
3314 		case L2CAP_CONF_FCS:
3315 			if (val == L2CAP_FCS_NONE)
3316 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3317 			break;
3318 
3319 		case L2CAP_CONF_EFS:
3320 			if (olen == sizeof(efs)) {
3321 				remote_efs = 1;
3322 				memcpy(&efs, (void *) val, olen);
3323 			}
3324 			break;
3325 
3326 		case L2CAP_CONF_EWS:
3327 			if (!chan->conn->hs_enabled)
3328 				return -ECONNREFUSED;
3329 
3330 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3331 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3332 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3333 			chan->remote_tx_win = val;
3334 			break;
3335 
3336 		default:
3337 			if (hint)
3338 				break;
3339 
3340 			result = L2CAP_CONF_UNKNOWN;
3341 			*((u8 *) ptr++) = type;
3342 			break;
3343 		}
3344 	}
3345 
3346 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3347 		goto done;
3348 
3349 	switch (chan->mode) {
3350 	case L2CAP_MODE_STREAMING:
3351 	case L2CAP_MODE_ERTM:
3352 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3353 			chan->mode = l2cap_select_mode(rfc.mode,
3354 						       chan->conn->feat_mask);
3355 			break;
3356 		}
3357 
3358 		if (remote_efs) {
3359 			if (__l2cap_efs_supported(chan->conn))
3360 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3361 			else
3362 				return -ECONNREFUSED;
3363 		}
3364 
3365 		if (chan->mode != rfc.mode)
3366 			return -ECONNREFUSED;
3367 
3368 		break;
3369 	}
3370 
3371 done:
3372 	if (chan->mode != rfc.mode) {
3373 		result = L2CAP_CONF_UNACCEPT;
3374 		rfc.mode = chan->mode;
3375 
3376 		if (chan->num_conf_rsp == 1)
3377 			return -ECONNREFUSED;
3378 
3379 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3380 				   (unsigned long) &rfc, endptr - ptr);
3381 	}
3382 
3383 	if (result == L2CAP_CONF_SUCCESS) {
3384 		/* Configure output options and let the other side know
3385 		 * which ones we don't like. */
3386 
3387 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3388 			result = L2CAP_CONF_UNACCEPT;
3389 		else {
3390 			chan->omtu = mtu;
3391 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3392 		}
3393 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3394 
3395 		if (remote_efs) {
3396 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3397 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3398 			    efs.stype != chan->local_stype) {
3399 
3400 				result = L2CAP_CONF_UNACCEPT;
3401 
3402 				if (chan->num_conf_req >= 1)
3403 					return -ECONNREFUSED;
3404 
3405 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3406 						   sizeof(efs),
3407 						   (unsigned long) &efs, endptr - ptr);
3408 			} else {
3409 				/* Send PENDING Conf Rsp */
3410 				result = L2CAP_CONF_PENDING;
3411 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3412 			}
3413 		}
3414 
3415 		switch (rfc.mode) {
3416 		case L2CAP_MODE_BASIC:
3417 			chan->fcs = L2CAP_FCS_NONE;
3418 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3419 			break;
3420 
3421 		case L2CAP_MODE_ERTM:
3422 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3423 				chan->remote_tx_win = rfc.txwin_size;
3424 			else
3425 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3426 
3427 			chan->remote_max_tx = rfc.max_transmit;
3428 
3429 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3430 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3431 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3432 			rfc.max_pdu_size = cpu_to_le16(size);
3433 			chan->remote_mps = size;
3434 
3435 			__l2cap_set_ertm_timeouts(chan, &rfc);
3436 
3437 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3438 
3439 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3440 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3441 
3442 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3443 				chan->remote_id = efs.id;
3444 				chan->remote_stype = efs.stype;
3445 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3446 				chan->remote_flush_to =
3447 					le32_to_cpu(efs.flush_to);
3448 				chan->remote_acc_lat =
3449 					le32_to_cpu(efs.acc_lat);
3450 				chan->remote_sdu_itime =
3451 					le32_to_cpu(efs.sdu_itime);
3452 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3453 						   sizeof(efs),
3454 						   (unsigned long) &efs, endptr - ptr);
3455 			}
3456 			break;
3457 
3458 		case L2CAP_MODE_STREAMING:
3459 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3460 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3461 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3462 			rfc.max_pdu_size = cpu_to_le16(size);
3463 			chan->remote_mps = size;
3464 
3465 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3466 
3467 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3468 					   (unsigned long) &rfc, endptr - ptr);
3469 
3470 			break;
3471 
3472 		default:
3473 			result = L2CAP_CONF_UNACCEPT;
3474 
3475 			memset(&rfc, 0, sizeof(rfc));
3476 			rfc.mode = chan->mode;
3477 		}
3478 
3479 		if (result == L2CAP_CONF_SUCCESS)
3480 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3481 	}
3482 	rsp->scid   = cpu_to_le16(chan->dcid);
3483 	rsp->result = cpu_to_le16(result);
3484 	rsp->flags  = cpu_to_le16(0);
3485 
3486 	return ptr - data;
3487 }
3488 
3489 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3490 				void *data, size_t size, u16 *result)
3491 {
3492 	struct l2cap_conf_req *req = data;
3493 	void *ptr = req->data;
3494 	void *endptr = data + size;
3495 	int type, olen;
3496 	unsigned long val;
3497 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3498 	struct l2cap_conf_efs efs;
3499 
3500 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3501 
3502 	while (len >= L2CAP_CONF_OPT_SIZE) {
3503 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3504 
3505 		switch (type) {
3506 		case L2CAP_CONF_MTU:
3507 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3508 				*result = L2CAP_CONF_UNACCEPT;
3509 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3510 			} else
3511 				chan->imtu = val;
3512 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3513 			break;
3514 
3515 		case L2CAP_CONF_FLUSH_TO:
3516 			chan->flush_to = val;
3517 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
3518 					   2, chan->flush_to, endptr - ptr);
3519 			break;
3520 
3521 		case L2CAP_CONF_RFC:
3522 			if (olen == sizeof(rfc))
3523 				memcpy(&rfc, (void *)val, olen);
3524 
3525 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3526 			    rfc.mode != chan->mode)
3527 				return -ECONNREFUSED;
3528 
3529 			chan->fcs = 0;
3530 
3531 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3532 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3533 			break;
3534 
3535 		case L2CAP_CONF_EWS:
3536 			chan->ack_win = min_t(u16, val, chan->ack_win);
3537 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3538 					   chan->tx_win, endptr - ptr);
3539 			break;
3540 
3541 		case L2CAP_CONF_EFS:
3542 			if (olen == sizeof(efs)) {
3543 				memcpy(&efs, (void *)val, olen);
3544 
3545 				if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3546 				    efs.stype != L2CAP_SERV_NOTRAFIC &&
3547 				    efs.stype != chan->local_stype)
3548 					return -ECONNREFUSED;
3549 
3550 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3551 						   (unsigned long) &efs, endptr - ptr);
3552 			}
3553 			break;
3554 
3555 		case L2CAP_CONF_FCS:
3556 			if (*result == L2CAP_CONF_PENDING)
3557 				if (val == L2CAP_FCS_NONE)
3558 					set_bit(CONF_RECV_NO_FCS,
3559 						&chan->conf_state);
3560 			break;
3561 		}
3562 	}
3563 
3564 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3565 		return -ECONNREFUSED;
3566 
3567 	chan->mode = rfc.mode;
3568 
3569 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3570 		switch (rfc.mode) {
3571 		case L2CAP_MODE_ERTM:
3572 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3573 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3574 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3575 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3576 				chan->ack_win = min_t(u16, chan->ack_win,
3577 						      rfc.txwin_size);
3578 
3579 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3580 				chan->local_msdu = le16_to_cpu(efs.msdu);
3581 				chan->local_sdu_itime =
3582 					le32_to_cpu(efs.sdu_itime);
3583 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3584 				chan->local_flush_to =
3585 					le32_to_cpu(efs.flush_to);
3586 			}
3587 			break;
3588 
3589 		case L2CAP_MODE_STREAMING:
3590 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3591 		}
3592 	}
3593 
3594 	req->dcid   = cpu_to_le16(chan->dcid);
3595 	req->flags  = cpu_to_le16(0);
3596 
3597 	return ptr - data;
3598 }
3599 
3600 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3601 				u16 result, u16 flags)
3602 {
3603 	struct l2cap_conf_rsp *rsp = data;
3604 	void *ptr = rsp->data;
3605 
3606 	BT_DBG("chan %p", chan);
3607 
3608 	rsp->scid   = cpu_to_le16(chan->dcid);
3609 	rsp->result = cpu_to_le16(result);
3610 	rsp->flags  = cpu_to_le16(flags);
3611 
3612 	return ptr - data;
3613 }
3614 
3615 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3616 {
3617 	struct l2cap_le_conn_rsp rsp;
3618 	struct l2cap_conn *conn = chan->conn;
3619 
3620 	BT_DBG("chan %p", chan);
3621 
3622 	rsp.dcid    = cpu_to_le16(chan->scid);
3623 	rsp.mtu     = cpu_to_le16(chan->imtu);
3624 	rsp.mps     = cpu_to_le16(chan->mps);
3625 	rsp.credits = cpu_to_le16(chan->rx_credits);
3626 	rsp.result  = cpu_to_le16(L2CAP_CR_SUCCESS);
3627 
3628 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3629 		       &rsp);
3630 }
3631 
3632 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3633 {
3634 	struct l2cap_conn_rsp rsp;
3635 	struct l2cap_conn *conn = chan->conn;
3636 	u8 buf[128];
3637 	u8 rsp_code;
3638 
3639 	rsp.scid   = cpu_to_le16(chan->dcid);
3640 	rsp.dcid   = cpu_to_le16(chan->scid);
3641 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3642 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3643 
3644 	if (chan->hs_hcon)
3645 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3646 	else
3647 		rsp_code = L2CAP_CONN_RSP;
3648 
3649 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3650 
3651 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3652 
3653 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3654 		return;
3655 
3656 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3657 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3658 	chan->num_conf_req++;
3659 }
3660 
3661 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3662 {
3663 	int type, olen;
3664 	unsigned long val;
3665 	/* Use sane default values in case a misbehaving remote device
3666 	 * did not send an RFC or extended window size option.
3667 	 */
3668 	u16 txwin_ext = chan->ack_win;
3669 	struct l2cap_conf_rfc rfc = {
3670 		.mode = chan->mode,
3671 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3672 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3673 		.max_pdu_size = cpu_to_le16(chan->imtu),
3674 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3675 	};
3676 
3677 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3678 
3679 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3680 		return;
3681 
3682 	while (len >= L2CAP_CONF_OPT_SIZE) {
3683 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3684 
3685 		switch (type) {
3686 		case L2CAP_CONF_RFC:
3687 			if (olen == sizeof(rfc))
3688 				memcpy(&rfc, (void *)val, olen);
3689 			break;
3690 		case L2CAP_CONF_EWS:
3691 			txwin_ext = val;
3692 			break;
3693 		}
3694 	}
3695 
3696 	switch (rfc.mode) {
3697 	case L2CAP_MODE_ERTM:
3698 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3699 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3700 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3701 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3702 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3703 		else
3704 			chan->ack_win = min_t(u16, chan->ack_win,
3705 					      rfc.txwin_size);
3706 		break;
3707 	case L2CAP_MODE_STREAMING:
3708 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3709 	}
3710 }
3711 
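/* Handle a Command Reject. If it answers our outstanding Information
 * Request, stop the info timer and continue connection setup without the
 * remote's feature information.
 */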
3712 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3713 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3714 				    u8 *data)
3715 {
3716 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3717 
3718 	if (cmd_len < sizeof(*rej))
3719 		return -EPROTO;
3720 
3721 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3722 		return 0;
3723 
3724 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3725 	    cmd->ident == conn->info_ident) {
3726 		cancel_delayed_work(&conn->info_timer);
3727 
3728 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3729 		conn->info_ident = 0;
3730 
3731 		l2cap_conn_start(conn);
3732 	}
3733 
3734 	return 0;
3735 }
3736 
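/* Handle an incoming Connection Request or Create Channel Request: find the
 * listening channel for the PSM, check link security, create the new channel
 * and send the response (plus an Information Request if features are still
 * unknown).
 */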
3737 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3738 					struct l2cap_cmd_hdr *cmd,
3739 					u8 *data, u8 rsp_code, u8 amp_id)
3740 {
3741 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3742 	struct l2cap_conn_rsp rsp;
3743 	struct l2cap_chan *chan = NULL, *pchan;
3744 	int result, status = L2CAP_CS_NO_INFO;
3745 
3746 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3747 	__le16 psm = req->psm;
3748 
3749 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3750 
3751 	/* Check if we have socket listening on psm */
3752 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3753 					 &conn->hcon->dst, ACL_LINK);
3754 	if (!pchan) {
3755 		result = L2CAP_CR_BAD_PSM;
3756 		goto sendresp;
3757 	}
3758 
3759 	mutex_lock(&conn->chan_lock);
3760 	l2cap_chan_lock(pchan);
3761 
3762 	/* Check if the ACL is secure enough (if not SDP) */
3763 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3764 	    !hci_conn_check_link_mode(conn->hcon)) {
3765 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3766 		result = L2CAP_CR_SEC_BLOCK;
3767 		goto response;
3768 	}
3769 
3770 	result = L2CAP_CR_NO_MEM;
3771 
3772 	/* Check if we already have channel with that dcid */
3773 	if (__l2cap_get_chan_by_dcid(conn, scid))
3774 		goto response;
3775 
3776 	chan = pchan->ops->new_connection(pchan);
3777 	if (!chan)
3778 		goto response;
3779 
3780 	/* For certain devices (ex: HID mouse), support for authentication,
3781 	 * pairing and bonding is optional. For such devices, in order to avoid
3782 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
3783 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3784 	 */
3785 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3786 
3787 	bacpy(&chan->src, &conn->hcon->src);
3788 	bacpy(&chan->dst, &conn->hcon->dst);
3789 	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3790 	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
3791 	chan->psm  = psm;
3792 	chan->dcid = scid;
3793 	chan->local_amp_id = amp_id;
3794 
3795 	__l2cap_chan_add(conn, chan);
3796 
3797 	dcid = chan->scid;
3798 
3799 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3800 
3801 	chan->ident = cmd->ident;
3802 
3803 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3804 		if (l2cap_chan_check_security(chan, false)) {
3805 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3806 				l2cap_state_change(chan, BT_CONNECT2);
3807 				result = L2CAP_CR_PEND;
3808 				status = L2CAP_CS_AUTHOR_PEND;
3809 				chan->ops->defer(chan);
3810 			} else {
3811 				/* Force pending result for AMP controllers.
3812 				 * The connection will succeed after the
3813 				 * physical link is up.
3814 				 */
3815 				if (amp_id == AMP_ID_BREDR) {
3816 					l2cap_state_change(chan, BT_CONFIG);
3817 					result = L2CAP_CR_SUCCESS;
3818 				} else {
3819 					l2cap_state_change(chan, BT_CONNECT2);
3820 					result = L2CAP_CR_PEND;
3821 				}
3822 				status = L2CAP_CS_NO_INFO;
3823 			}
3824 		} else {
3825 			l2cap_state_change(chan, BT_CONNECT2);
3826 			result = L2CAP_CR_PEND;
3827 			status = L2CAP_CS_AUTHEN_PEND;
3828 		}
3829 	} else {
3830 		l2cap_state_change(chan, BT_CONNECT2);
3831 		result = L2CAP_CR_PEND;
3832 		status = L2CAP_CS_NO_INFO;
3833 	}
3834 
3835 response:
3836 	l2cap_chan_unlock(pchan);
3837 	mutex_unlock(&conn->chan_lock);
3838 	l2cap_chan_put(pchan);
3839 
3840 sendresp:
3841 	rsp.scid   = cpu_to_le16(scid);
3842 	rsp.dcid   = cpu_to_le16(dcid);
3843 	rsp.result = cpu_to_le16(result);
3844 	rsp.status = cpu_to_le16(status);
3845 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3846 
3847 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3848 		struct l2cap_info_req info;
3849 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3850 
3851 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3852 		conn->info_ident = l2cap_get_ident(conn);
3853 
3854 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3855 
3856 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3857 			       sizeof(info), &info);
3858 	}
3859 
3860 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3861 	    result == L2CAP_CR_SUCCESS) {
3862 		u8 buf[128];
3863 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3864 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3865 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3866 		chan->num_conf_req++;
3867 	}
3868 
3869 	return chan;
3870 }
3871 
3872 static int l2cap_connect_req(struct l2cap_conn *conn,
3873 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3874 {
3875 	struct hci_dev *hdev = conn->hcon->hdev;
3876 	struct hci_conn *hcon = conn->hcon;
3877 
3878 	if (cmd_len < sizeof(struct l2cap_conn_req))
3879 		return -EPROTO;
3880 
3881 	hci_dev_lock(hdev);
3882 	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3883 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3884 		mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3885 				      hcon->dst_type, 0, NULL, 0,
3886 				      hcon->dev_class);
3887 	hci_dev_unlock(hdev);
3888 
3889 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3890 	return 0;
3891 }
3892 
3893 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3894 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3895 				    u8 *data)
3896 {
3897 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3898 	u16 scid, dcid, result, status;
3899 	struct l2cap_chan *chan;
3900 	u8 req[128];
3901 	int err;
3902 
3903 	if (cmd_len < sizeof(*rsp))
3904 		return -EPROTO;
3905 
3906 	scid   = __le16_to_cpu(rsp->scid);
3907 	dcid   = __le16_to_cpu(rsp->dcid);
3908 	result = __le16_to_cpu(rsp->result);
3909 	status = __le16_to_cpu(rsp->status);
3910 
3911 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3912 	       dcid, scid, result, status);
3913 
3914 	mutex_lock(&conn->chan_lock);
3915 
3916 	if (scid) {
3917 		chan = __l2cap_get_chan_by_scid(conn, scid);
3918 		if (!chan) {
3919 			err = -EBADSLT;
3920 			goto unlock;
3921 		}
3922 	} else {
3923 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3924 		if (!chan) {
3925 			err = -EBADSLT;
3926 			goto unlock;
3927 		}
3928 	}
3929 
3930 	err = 0;
3931 
3932 	l2cap_chan_lock(chan);
3933 
3934 	switch (result) {
3935 	case L2CAP_CR_SUCCESS:
3936 		l2cap_state_change(chan, BT_CONFIG);
3937 		chan->ident = 0;
3938 		chan->dcid = dcid;
3939 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
3940 
3941 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3942 			break;
3943 
3944 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3945 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
3946 		chan->num_conf_req++;
3947 		break;
3948 
3949 	case L2CAP_CR_PEND:
3950 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
3951 		break;
3952 
3953 	default:
3954 		l2cap_chan_del(chan, ECONNREFUSED);
3955 		break;
3956 	}
3957 
3958 	l2cap_chan_unlock(chan);
3959 
3960 unlock:
3961 	mutex_unlock(&conn->chan_lock);
3962 
3963 	return err;
3964 }
3965 
3966 static inline void set_default_fcs(struct l2cap_chan *chan)
3967 {
3968 	/* FCS is enabled only in ERTM or streaming mode, if one or both
3969 	 * sides request it.
3970 	 */
3971 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
3972 		chan->fcs = L2CAP_FCS_NONE;
3973 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
3974 		chan->fcs = L2CAP_FCS_CRC16;
3975 }
3976 
3977 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3978 				    u8 ident, u16 flags)
3979 {
3980 	struct l2cap_conn *conn = chan->conn;
3981 
3982 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3983 	       flags);
3984 
3985 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3986 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3987 
3988 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3989 		       l2cap_build_conf_rsp(chan, data,
3990 					    L2CAP_CONF_SUCCESS, flags), data);
3991 }
3992 
3993 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3994 				   u16 scid, u16 dcid)
3995 {
3996 	struct l2cap_cmd_rej_cid rej;
3997 
3998 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
3999 	rej.scid = __cpu_to_le16(scid);
4000 	rej.dcid = __cpu_to_le16(dcid);
4001 
4002 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4003 }
4004 
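/* Handle a Configure Request. Options are accumulated in chan->conf_req
 * across continuation packets; once complete, the request is parsed and a
 * Configure Response is sent.
 */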
4005 static inline int l2cap_config_req(struct l2cap_conn *conn,
4006 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4007 				   u8 *data)
4008 {
4009 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4010 	u16 dcid, flags;
4011 	u8 rsp[64];
4012 	struct l2cap_chan *chan;
4013 	int len, err = 0;
4014 
4015 	if (cmd_len < sizeof(*req))
4016 		return -EPROTO;
4017 
4018 	dcid  = __le16_to_cpu(req->dcid);
4019 	flags = __le16_to_cpu(req->flags);
4020 
4021 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4022 
4023 	chan = l2cap_get_chan_by_scid(conn, dcid);
4024 	if (!chan) {
4025 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4026 		return 0;
4027 	}
4028 
4029 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
4030 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4031 				       chan->dcid);
4032 		goto unlock;
4033 	}
4034 
4035 	/* Reject if config buffer is too small. */
4036 	len = cmd_len - sizeof(*req);
4037 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4038 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4039 			       l2cap_build_conf_rsp(chan, rsp,
4040 			       L2CAP_CONF_REJECT, flags), rsp);
4041 		goto unlock;
4042 	}
4043 
4044 	/* Store config. */
4045 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4046 	chan->conf_len += len;
4047 
4048 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4049 		/* Incomplete config. Send empty response. */
4050 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4051 			       l2cap_build_conf_rsp(chan, rsp,
4052 			       L2CAP_CONF_SUCCESS, flags), rsp);
4053 		goto unlock;
4054 	}
4055 
4056 	/* Complete config. */
4057 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4058 	if (len < 0) {
4059 		l2cap_send_disconn_req(chan, ECONNRESET);
4060 		goto unlock;
4061 	}
4062 
4063 	chan->ident = cmd->ident;
4064 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4065 	chan->num_conf_rsp++;
4066 
4067 	/* Reset config buffer. */
4068 	chan->conf_len = 0;
4069 
4070 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4071 		goto unlock;
4072 
4073 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4074 		set_default_fcs(chan);
4075 
4076 		if (chan->mode == L2CAP_MODE_ERTM ||
4077 		    chan->mode == L2CAP_MODE_STREAMING)
4078 			err = l2cap_ertm_init(chan);
4079 
4080 		if (err < 0)
4081 			l2cap_send_disconn_req(chan, -err);
4082 		else
4083 			l2cap_chan_ready(chan);
4084 
4085 		goto unlock;
4086 	}
4087 
4088 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4089 		u8 buf[64];
4090 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4091 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4092 		chan->num_conf_req++;
4093 	}
4094 
4095 	/* Got Conf Rsp PENDING from remote side and assume we sent
4096 	 * Conf Rsp PENDING in the code above */
4097 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4098 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4099 
4100 		/* check compatibility */
4101 
4102 		/* Send rsp for BR/EDR channel */
4103 		if (!chan->hs_hcon)
4104 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4105 		else
4106 			chan->ident = cmd->ident;
4107 	}
4108 
4109 unlock:
4110 	l2cap_chan_unlock(chan);
4111 	return err;
4112 }
4113 
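/* Handle a Configure Response. Success records the remote's parameters, a
 * pending result defers completion, unacceptable parameters trigger a new
 * Configure Request, and any other result tears the channel down.
 */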
4114 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4115 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4116 				   u8 *data)
4117 {
4118 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4119 	u16 scid, flags, result;
4120 	struct l2cap_chan *chan;
4121 	int len = cmd_len - sizeof(*rsp);
4122 	int err = 0;
4123 
4124 	if (cmd_len < sizeof(*rsp))
4125 		return -EPROTO;
4126 
4127 	scid   = __le16_to_cpu(rsp->scid);
4128 	flags  = __le16_to_cpu(rsp->flags);
4129 	result = __le16_to_cpu(rsp->result);
4130 
4131 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4132 	       result, len);
4133 
4134 	chan = l2cap_get_chan_by_scid(conn, scid);
4135 	if (!chan)
4136 		return 0;
4137 
4138 	switch (result) {
4139 	case L2CAP_CONF_SUCCESS:
4140 		l2cap_conf_rfc_get(chan, rsp->data, len);
4141 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4142 		break;
4143 
4144 	case L2CAP_CONF_PENDING:
4145 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4146 
4147 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4148 			char buf[64];
4149 
4150 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4151 						   buf, sizeof(buf), &result);
4152 			if (len < 0) {
4153 				l2cap_send_disconn_req(chan, ECONNRESET);
4154 				goto done;
4155 			}
4156 
4157 			if (!chan->hs_hcon) {
4158 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4159 							0);
4160 			} else {
4161 				if (l2cap_check_efs(chan)) {
4162 					amp_create_logical_link(chan);
4163 					chan->ident = cmd->ident;
4164 				}
4165 			}
4166 		}
4167 		goto done;
4168 
4169 	case L2CAP_CONF_UNACCEPT:
4170 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4171 			char req[64];
4172 
4173 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4174 				l2cap_send_disconn_req(chan, ECONNRESET);
4175 				goto done;
4176 			}
4177 
4178 			/* throw out any old stored conf requests */
4179 			result = L2CAP_CONF_SUCCESS;
4180 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4181 						   req, sizeof(req), &result);
4182 			if (len < 0) {
4183 				l2cap_send_disconn_req(chan, ECONNRESET);
4184 				goto done;
4185 			}
4186 
4187 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4188 				       L2CAP_CONF_REQ, len, req);
4189 			chan->num_conf_req++;
4190 			if (result != L2CAP_CONF_SUCCESS)
4191 				goto done;
4192 			break;
4193 		}
4194 
4195 	default:
4196 		l2cap_chan_set_err(chan, ECONNRESET);
4197 
4198 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4199 		l2cap_send_disconn_req(chan, ECONNRESET);
4200 		goto done;
4201 	}
4202 
4203 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4204 		goto done;
4205 
4206 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4207 
4208 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4209 		set_default_fcs(chan);
4210 
4211 		if (chan->mode == L2CAP_MODE_ERTM ||
4212 		    chan->mode == L2CAP_MODE_STREAMING)
4213 			err = l2cap_ertm_init(chan);
4214 
4215 		if (err < 0)
4216 			l2cap_send_disconn_req(chan, -err);
4217 		else
4218 			l2cap_chan_ready(chan);
4219 	}
4220 
4221 done:
4222 	l2cap_chan_unlock(chan);
4223 	return err;
4224 }
4225 
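/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response and tear down the matching channel.
 */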
4226 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4227 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4228 				       u8 *data)
4229 {
4230 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4231 	struct l2cap_disconn_rsp rsp;
4232 	u16 dcid, scid;
4233 	struct l2cap_chan *chan;
4234 
4235 	if (cmd_len != sizeof(*req))
4236 		return -EPROTO;
4237 
4238 	scid = __le16_to_cpu(req->scid);
4239 	dcid = __le16_to_cpu(req->dcid);
4240 
4241 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4242 
4243 	mutex_lock(&conn->chan_lock);
4244 
4245 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4246 	if (!chan) {
4247 		mutex_unlock(&conn->chan_lock);
4248 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4249 		return 0;
4250 	}
4251 
4252 	l2cap_chan_lock(chan);
4253 
4254 	rsp.dcid = cpu_to_le16(chan->scid);
4255 	rsp.scid = cpu_to_le16(chan->dcid);
4256 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4257 
4258 	chan->ops->set_shutdown(chan);
4259 
4260 	l2cap_chan_hold(chan);
4261 	l2cap_chan_del(chan, ECONNRESET);
4262 
4263 	l2cap_chan_unlock(chan);
4264 
4265 	chan->ops->close(chan);
4266 	l2cap_chan_put(chan);
4267 
4268 	mutex_unlock(&conn->chan_lock);
4269 
4270 	return 0;
4271 }
4272 
4273 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4274 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4275 				       u8 *data)
4276 {
4277 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4278 	u16 dcid, scid;
4279 	struct l2cap_chan *chan;
4280 
4281 	if (cmd_len != sizeof(*rsp))
4282 		return -EPROTO;
4283 
4284 	scid = __le16_to_cpu(rsp->scid);
4285 	dcid = __le16_to_cpu(rsp->dcid);
4286 
4287 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4288 
4289 	mutex_lock(&conn->chan_lock);
4290 
4291 	chan = __l2cap_get_chan_by_scid(conn, scid);
4292 	if (!chan) {
4293 		mutex_unlock(&conn->chan_lock);
4294 		return 0;
4295 	}
4296 
4297 	l2cap_chan_lock(chan);
4298 
4299 	l2cap_chan_hold(chan);
4300 	l2cap_chan_del(chan, 0);
4301 
4302 	l2cap_chan_unlock(chan);
4303 
4304 	chan->ops->close(chan);
4305 	l2cap_chan_put(chan);
4306 
4307 	mutex_unlock(&conn->chan_lock);
4308 
4309 	return 0;
4310 }
4311 
4312 static inline int l2cap_information_req(struct l2cap_conn *conn,
4313 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4314 					u8 *data)
4315 {
4316 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4317 	u16 type;
4318 
4319 	if (cmd_len != sizeof(*req))
4320 		return -EPROTO;
4321 
4322 	type = __le16_to_cpu(req->type);
4323 
4324 	BT_DBG("type 0x%4.4x", type);
4325 
4326 	if (type == L2CAP_IT_FEAT_MASK) {
4327 		u8 buf[8];
4328 		u32 feat_mask = l2cap_feat_mask;
4329 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4330 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4331 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4332 		if (!disable_ertm)
4333 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4334 				| L2CAP_FEAT_FCS;
4335 		if (conn->hs_enabled)
4336 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4337 				| L2CAP_FEAT_EXT_WINDOW;
4338 
4339 		put_unaligned_le32(feat_mask, rsp->data);
4340 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4341 			       buf);
4342 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4343 		u8 buf[12];
4344 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4345 
4346 		if (conn->hs_enabled)
4347 			l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4348 		else
4349 			l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4350 
4351 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4352 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4353 		memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
4354 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4355 			       buf);
4356 	} else {
4357 		struct l2cap_info_rsp rsp;
4358 		rsp.type   = cpu_to_le16(type);
4359 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4360 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4361 			       &rsp);
4362 	}
4363 
4364 	return 0;
4365 }
4366 
4367 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4368 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4369 					u8 *data)
4370 {
4371 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4372 	u16 type, result;
4373 
4374 	if (cmd_len < sizeof(*rsp))
4375 		return -EPROTO;
4376 
4377 	type   = __le16_to_cpu(rsp->type);
4378 	result = __le16_to_cpu(rsp->result);
4379 
4380 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4381 
4382 	/* L2CAP Info req/rsp are not bound to a channel, so add extra checks */
4383 	if (cmd->ident != conn->info_ident ||
4384 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4385 		return 0;
4386 
4387 	cancel_delayed_work(&conn->info_timer);
4388 
4389 	if (result != L2CAP_IR_SUCCESS) {
4390 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4391 		conn->info_ident = 0;
4392 
4393 		l2cap_conn_start(conn);
4394 
4395 		return 0;
4396 	}
4397 
4398 	switch (type) {
4399 	case L2CAP_IT_FEAT_MASK:
4400 		conn->feat_mask = get_unaligned_le32(rsp->data);
4401 
4402 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4403 			struct l2cap_info_req req;
4404 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4405 
4406 			conn->info_ident = l2cap_get_ident(conn);
4407 
4408 			l2cap_send_cmd(conn, conn->info_ident,
4409 				       L2CAP_INFO_REQ, sizeof(req), &req);
4410 		} else {
4411 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4412 			conn->info_ident = 0;
4413 
4414 			l2cap_conn_start(conn);
4415 		}
4416 		break;
4417 
4418 	case L2CAP_IT_FIXED_CHAN:
4419 		conn->fixed_chan_mask = rsp->data[0];
4420 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4421 		conn->info_ident = 0;
4422 
4423 		l2cap_conn_start(conn);
4424 		break;
4425 	}
4426 
4427 	return 0;
4428 }
4429 
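/* Handle a Create Channel Request. Controller id 0 falls back to a plain
 * BR/EDR connection; otherwise the AMP controller id is validated and the
 * new channel is bound to the high speed link.
 */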
4430 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4431 				    struct l2cap_cmd_hdr *cmd,
4432 				    u16 cmd_len, void *data)
4433 {
4434 	struct l2cap_create_chan_req *req = data;
4435 	struct l2cap_create_chan_rsp rsp;
4436 	struct l2cap_chan *chan;
4437 	struct hci_dev *hdev;
4438 	u16 psm, scid;
4439 
4440 	if (cmd_len != sizeof(*req))
4441 		return -EPROTO;
4442 
4443 	if (!conn->hs_enabled)
4444 		return -EINVAL;
4445 
4446 	psm = le16_to_cpu(req->psm);
4447 	scid = le16_to_cpu(req->scid);
4448 
4449 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4450 
4451 	/* For controller id 0 make BR/EDR connection */
4452 	if (req->amp_id == AMP_ID_BREDR) {
4453 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4454 			      req->amp_id);
4455 		return 0;
4456 	}
4457 
4458 	/* Validate AMP controller id */
4459 	hdev = hci_dev_get(req->amp_id);
4460 	if (!hdev)
4461 		goto error;
4462 
4463 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4464 		hci_dev_put(hdev);
4465 		goto error;
4466 	}
4467 
4468 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4469 			     req->amp_id);
4470 	if (chan) {
4471 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4472 		struct hci_conn *hs_hcon;
4473 
4474 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4475 						  &conn->hcon->dst);
4476 		if (!hs_hcon) {
4477 			hci_dev_put(hdev);
4478 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4479 					       chan->dcid);
4480 			return 0;
4481 		}
4482 
4483 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4484 
4485 		mgr->bredr_chan = chan;
4486 		chan->hs_hcon = hs_hcon;
4487 		chan->fcs = L2CAP_FCS_NONE;
4488 		conn->mtu = hdev->block_mtu;
4489 	}
4490 
4491 	hci_dev_put(hdev);
4492 
4493 	return 0;
4494 
4495 error:
4496 	rsp.dcid = 0;
4497 	rsp.scid = cpu_to_le16(scid);
4498 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4499 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4500 
4501 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4502 		       sizeof(rsp), &rsp);
4503 
4504 	return 0;
4505 }
4506 
4507 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4508 {
4509 	struct l2cap_move_chan_req req;
4510 	u8 ident;
4511 
4512 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4513 
4514 	ident = l2cap_get_ident(chan->conn);
4515 	chan->ident = ident;
4516 
4517 	req.icid = cpu_to_le16(chan->scid);
4518 	req.dest_amp_id = dest_amp_id;
4519 
4520 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4521 		       &req);
4522 
4523 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4524 }
4525 
4526 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4527 {
4528 	struct l2cap_move_chan_rsp rsp;
4529 
4530 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4531 
4532 	rsp.icid = cpu_to_le16(chan->dcid);
4533 	rsp.result = cpu_to_le16(result);
4534 
4535 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4536 		       sizeof(rsp), &rsp);
4537 }
4538 
4539 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4540 {
4541 	struct l2cap_move_chan_cfm cfm;
4542 
4543 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4544 
4545 	chan->ident = l2cap_get_ident(chan->conn);
4546 
4547 	cfm.icid = cpu_to_le16(chan->scid);
4548 	cfm.result = cpu_to_le16(result);
4549 
4550 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4551 		       sizeof(cfm), &cfm);
4552 
4553 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4554 }
4555 
4556 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4557 {
4558 	struct l2cap_move_chan_cfm cfm;
4559 
4560 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4561 
4562 	cfm.icid = cpu_to_le16(icid);
4563 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4564 
4565 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4566 		       sizeof(cfm), &cfm);
4567 }
4568 
4569 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4570 					 u16 icid)
4571 {
4572 	struct l2cap_move_chan_cfm_rsp rsp;
4573 
4574 	BT_DBG("icid 0x%4.4x", icid);
4575 
4576 	rsp.icid = cpu_to_le16(icid);
4577 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4578 }
4579 
4580 static void __release_logical_link(struct l2cap_chan *chan)
4581 {
4582 	chan->hs_hchan = NULL;
4583 	chan->hs_hcon = NULL;
4584 
4585 	/* Placeholder - release the logical link */
4586 }
4587 
4588 static void l2cap_logical_fail(struct l2cap_chan *chan)
4589 {
4590 	/* Logical link setup failed */
4591 	if (chan->state != BT_CONNECTED) {
4592 		/* Create channel failure, disconnect */
4593 		l2cap_send_disconn_req(chan, ECONNRESET);
4594 		return;
4595 	}
4596 
4597 	switch (chan->move_role) {
4598 	case L2CAP_MOVE_ROLE_RESPONDER:
4599 		l2cap_move_done(chan);
4600 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4601 		break;
4602 	case L2CAP_MOVE_ROLE_INITIATOR:
4603 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4604 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4605 			/* Remote has only sent pending or
4606 			 * success responses, clean up
4607 			 */
4608 			l2cap_move_done(chan);
4609 		}
4610 
4611 		/* Other amp move states imply that the move
4612 		 * has already aborted
4613 		 */
4614 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4615 		break;
4616 	}
4617 }
4618 
4619 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4620 					struct hci_chan *hchan)
4621 {
4622 	struct l2cap_conf_rsp rsp;
4623 
4624 	chan->hs_hchan = hchan;
4625 	chan->hs_hcon->l2cap_data = chan->conn;
4626 
4627 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4628 
4629 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4630 		int err;
4631 
4632 		set_default_fcs(chan);
4633 
4634 		err = l2cap_ertm_init(chan);
4635 		if (err < 0)
4636 			l2cap_send_disconn_req(chan, -err);
4637 		else
4638 			l2cap_chan_ready(chan);
4639 	}
4640 }
4641 
4642 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4643 				      struct hci_chan *hchan)
4644 {
4645 	chan->hs_hcon = hchan->conn;
4646 	chan->hs_hcon->l2cap_data = chan->conn;
4647 
4648 	BT_DBG("move_state %d", chan->move_state);
4649 
4650 	switch (chan->move_state) {
4651 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4652 		/* Move confirm will be sent after a success
4653 		 * response is received
4654 		 */
4655 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4656 		break;
4657 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4658 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4659 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4660 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4661 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4662 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4663 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4664 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4665 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4666 		}
4667 		break;
4668 	default:
4669 		/* Move was not in expected state, free the channel */
4670 		__release_logical_link(chan);
4671 
4672 		chan->move_state = L2CAP_MOVE_STABLE;
4673 	}
4674 }
4675 
4676 /* Call with chan locked */
4677 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4678 		       u8 status)
4679 {
4680 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4681 
4682 	if (status) {
4683 		l2cap_logical_fail(chan);
4684 		__release_logical_link(chan);
4685 		return;
4686 	}
4687 
4688 	if (chan->state != BT_CONNECTED) {
4689 		/* Ignore logical link if channel is on BR/EDR */
4690 		if (chan->local_amp_id != AMP_ID_BREDR)
4691 			l2cap_logical_finish_create(chan, hchan);
4692 	} else {
4693 		l2cap_logical_finish_move(chan, hchan);
4694 	}
4695 }
4696 
4697 void l2cap_move_start(struct l2cap_chan *chan)
4698 {
4699 	BT_DBG("chan %p", chan);
4700 
4701 	if (chan->local_amp_id == AMP_ID_BREDR) {
4702 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4703 			return;
4704 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4705 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4706 		/* Placeholder - start physical link setup */
4707 	} else {
4708 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4709 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4710 		chan->move_id = 0;
4711 		l2cap_move_setup(chan);
4712 		l2cap_send_move_chan_req(chan, 0);
4713 	}
4714 }
4715 
4716 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4717 			    u8 local_amp_id, u8 remote_amp_id)
4718 {
4719 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4720 	       local_amp_id, remote_amp_id);
4721 
4722 	chan->fcs = L2CAP_FCS_NONE;
4723 
4724 	/* Outgoing channel on AMP */
4725 	if (chan->state == BT_CONNECT) {
4726 		if (result == L2CAP_CR_SUCCESS) {
4727 			chan->local_amp_id = local_amp_id;
4728 			l2cap_send_create_chan_req(chan, remote_amp_id);
4729 		} else {
4730 			/* Revert to BR/EDR connect */
4731 			l2cap_send_conn_req(chan);
4732 		}
4733 
4734 		return;
4735 	}
4736 
4737 	/* Incoming channel on AMP */
4738 	if (__l2cap_no_conn_pending(chan)) {
4739 		struct l2cap_conn_rsp rsp;
4740 		char buf[128];
4741 		rsp.scid = cpu_to_le16(chan->dcid);
4742 		rsp.dcid = cpu_to_le16(chan->scid);
4743 
4744 		if (result == L2CAP_CR_SUCCESS) {
4745 			/* Send successful response */
4746 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4747 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4748 		} else {
4749 			/* Send negative response */
4750 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4751 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4752 		}
4753 
4754 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4755 			       sizeof(rsp), &rsp);
4756 
4757 		if (result == L2CAP_CR_SUCCESS) {
4758 			l2cap_state_change(chan, BT_CONFIG);
4759 			set_bit(CONF_REQ_SENT, &chan->conf_state);
4760 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4761 				       L2CAP_CONF_REQ,
4762 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4763 			chan->num_conf_req++;
4764 		}
4765 	}
4766 }
4767 
4768 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4769 				   u8 remote_amp_id)
4770 {
4771 	l2cap_move_setup(chan);
4772 	chan->move_id = local_amp_id;
4773 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4774 
4775 	l2cap_send_move_chan_req(chan, remote_amp_id);
4776 }
4777 
4778 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4779 {
4780 	struct hci_chan *hchan = NULL;
4781 
4782 	/* Placeholder - get hci_chan for logical link */
4783 
4784 	if (hchan) {
4785 		if (hchan->state == BT_CONNECTED) {
4786 			/* Logical link is ready to go */
4787 			chan->hs_hcon = hchan->conn;
4788 			chan->hs_hcon->l2cap_data = chan->conn;
4789 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4790 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4791 
4792 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4793 		} else {
4794 			/* Wait for logical link to be ready */
4795 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4796 		}
4797 	} else {
4798 		/* Logical link not available */
4799 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4800 	}
4801 }
4802 
4803 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4804 {
4805 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4806 		u8 rsp_result;
4807 		if (result == -EINVAL)
4808 			rsp_result = L2CAP_MR_BAD_ID;
4809 		else
4810 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4811 
4812 		l2cap_send_move_chan_rsp(chan, rsp_result);
4813 	}
4814 
4815 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4816 	chan->move_state = L2CAP_MOVE_STABLE;
4817 
4818 	/* Restart data transmission */
4819 	l2cap_ertm_send(chan);
4820 }
4821 
4822 /* Invoke with locked chan */
4823 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4824 {
4825 	u8 local_amp_id = chan->local_amp_id;
4826 	u8 remote_amp_id = chan->remote_amp_id;
4827 
4828 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4829 	       chan, result, local_amp_id, remote_amp_id);
4830 
4831 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4832 		l2cap_chan_unlock(chan);
4833 		return;
4834 	}
4835 
4836 	if (chan->state != BT_CONNECTED) {
4837 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4838 	} else if (result != L2CAP_MR_SUCCESS) {
4839 		l2cap_do_move_cancel(chan, result);
4840 	} else {
4841 		switch (chan->move_role) {
4842 		case L2CAP_MOVE_ROLE_INITIATOR:
4843 			l2cap_do_move_initiate(chan, local_amp_id,
4844 					       remote_amp_id);
4845 			break;
4846 		case L2CAP_MOVE_ROLE_RESPONDER:
4847 			l2cap_do_move_respond(chan, result);
4848 			break;
4849 		default:
4850 			l2cap_do_move_cancel(chan, result);
4851 			break;
4852 		}
4853 	}
4854 }
4855 
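/* Handle a Move Channel Request: validate the destination controller, detect
 * move collisions and respond with the appropriate result.
 */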
4856 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4857 					 struct l2cap_cmd_hdr *cmd,
4858 					 u16 cmd_len, void *data)
4859 {
4860 	struct l2cap_move_chan_req *req = data;
4861 	struct l2cap_move_chan_rsp rsp;
4862 	struct l2cap_chan *chan;
4863 	u16 icid = 0;
4864 	u16 result = L2CAP_MR_NOT_ALLOWED;
4865 
4866 	if (cmd_len != sizeof(*req))
4867 		return -EPROTO;
4868 
4869 	icid = le16_to_cpu(req->icid);
4870 
4871 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4872 
4873 	if (!conn->hs_enabled)
4874 		return -EINVAL;
4875 
4876 	chan = l2cap_get_chan_by_dcid(conn, icid);
4877 	if (!chan) {
4878 		rsp.icid = cpu_to_le16(icid);
4879 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4880 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4881 			       sizeof(rsp), &rsp);
4882 		return 0;
4883 	}
4884 
4885 	chan->ident = cmd->ident;
4886 
4887 	if (chan->scid < L2CAP_CID_DYN_START ||
4888 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4889 	    (chan->mode != L2CAP_MODE_ERTM &&
4890 	     chan->mode != L2CAP_MODE_STREAMING)) {
4891 		result = L2CAP_MR_NOT_ALLOWED;
4892 		goto send_move_response;
4893 	}
4894 
4895 	if (chan->local_amp_id == req->dest_amp_id) {
4896 		result = L2CAP_MR_SAME_ID;
4897 		goto send_move_response;
4898 	}
4899 
4900 	if (req->dest_amp_id != AMP_ID_BREDR) {
4901 		struct hci_dev *hdev;
4902 		hdev = hci_dev_get(req->dest_amp_id);
4903 		if (!hdev || hdev->dev_type != HCI_AMP ||
4904 		    !test_bit(HCI_UP, &hdev->flags)) {
4905 			if (hdev)
4906 				hci_dev_put(hdev);
4907 
4908 			result = L2CAP_MR_BAD_ID;
4909 			goto send_move_response;
4910 		}
4911 		hci_dev_put(hdev);
4912 	}
4913 
4914 	/* Detect a move collision.  Only send a collision response
4915 	 * if this side has "lost", otherwise proceed with the move.
4916 	 * The winner has the larger bd_addr.
4917 	 */
4918 	if ((__chan_is_moving(chan) ||
4919 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
4920 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
4921 		result = L2CAP_MR_COLLISION;
4922 		goto send_move_response;
4923 	}
4924 
4925 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4926 	l2cap_move_setup(chan);
4927 	chan->move_id = req->dest_amp_id;
4928 	icid = chan->dcid;
4929 
4930 	if (req->dest_amp_id == AMP_ID_BREDR) {
4931 		/* Moving to BR/EDR */
4932 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4933 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4934 			result = L2CAP_MR_PEND;
4935 		} else {
4936 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4937 			result = L2CAP_MR_SUCCESS;
4938 		}
4939 	} else {
4940 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4941 		/* Placeholder - uncomment when amp functions are available */
4942 		/*amp_accept_physical(chan, req->dest_amp_id);*/
4943 		result = L2CAP_MR_PEND;
4944 	}
4945 
4946 send_move_response:
4947 	l2cap_send_move_chan_rsp(chan, result);
4948 
4949 	l2cap_chan_unlock(chan);
4950 
4951 	return 0;
4952 }
4953 
4954 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4955 {
4956 	struct l2cap_chan *chan;
4957 	struct hci_chan *hchan = NULL;
4958 
4959 	chan = l2cap_get_chan_by_scid(conn, icid);
4960 	if (!chan) {
4961 		l2cap_send_move_chan_cfm_icid(conn, icid);
4962 		return;
4963 	}
4964 
4965 	__clear_chan_timer(chan);
4966 	if (result == L2CAP_MR_PEND)
4967 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4968 
4969 	switch (chan->move_state) {
4970 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4971 		/* Move confirm will be sent when logical link
4972 		 * is complete.
4973 		 */
4974 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4975 		break;
4976 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4977 		if (result == L2CAP_MR_PEND) {
4978 			break;
4979 		} else if (test_bit(CONN_LOCAL_BUSY,
4980 				    &chan->conn_state)) {
4981 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4982 		} else {
4983 			/* Logical link is up or moving to BR/EDR,
4984 			 * proceed with move
4985 			 */
4986 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4987 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4988 		}
4989 		break;
4990 	case L2CAP_MOVE_WAIT_RSP:
4991 		/* Moving to AMP */
4992 		if (result == L2CAP_MR_SUCCESS) {
4993 			/* Remote is ready, send confirm immediately
4994 			 * after logical link is ready
4995 			 */
4996 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4997 		} else {
4998 			/* Both logical link and move success
4999 			 * are required to confirm
5000 			 */
5001 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5002 		}
5003 
5004 		/* Placeholder - get hci_chan for logical link */
5005 		if (!hchan) {
5006 			/* Logical link not available */
5007 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5008 			break;
5009 		}
5010 
5011 		/* If the logical link is not yet connected, do not
5012 		 * send confirmation.
5013 		 */
5014 		if (hchan->state != BT_CONNECTED)
5015 			break;
5016 
5017 		/* Logical link is already ready to go */
5018 
5019 		chan->hs_hcon = hchan->conn;
5020 		chan->hs_hcon->l2cap_data = chan->conn;
5021 
5022 		if (result == L2CAP_MR_SUCCESS) {
5023 			/* Can confirm now */
5024 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5025 		} else {
5026 			/* Now only need move success
5027 			 * to confirm
5028 			 */
5029 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5030 		}
5031 
5032 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5033 		break;
5034 	default:
5035 		/* Any other amp move state means the move failed. */
5036 		chan->move_id = chan->local_amp_id;
5037 		l2cap_move_done(chan);
5038 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5039 	}
5040 
5041 	l2cap_chan_unlock(chan);
5042 }
5043 
5044 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5045 			    u16 result)
5046 {
5047 	struct l2cap_chan *chan;
5048 
5049 	chan = l2cap_get_chan_by_ident(conn, ident);
5050 	if (!chan) {
5051 		/* Could not locate channel, icid is best guess */
5052 		l2cap_send_move_chan_cfm_icid(conn, icid);
5053 		return;
5054 	}
5055 
5056 	__clear_chan_timer(chan);
5057 
5058 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5059 		if (result == L2CAP_MR_COLLISION) {
5060 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5061 		} else {
5062 			/* Cleanup - cancel move */
5063 			chan->move_id = chan->local_amp_id;
5064 			l2cap_move_done(chan);
5065 		}
5066 	}
5067 
5068 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5069 
5070 	l2cap_chan_unlock(chan);
5071 }
5072 
5073 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5074 				  struct l2cap_cmd_hdr *cmd,
5075 				  u16 cmd_len, void *data)
5076 {
5077 	struct l2cap_move_chan_rsp *rsp = data;
5078 	u16 icid, result;
5079 
5080 	if (cmd_len != sizeof(*rsp))
5081 		return -EPROTO;
5082 
5083 	icid = le16_to_cpu(rsp->icid);
5084 	result = le16_to_cpu(rsp->result);
5085 
5086 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5087 
5088 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5089 		l2cap_move_continue(conn, icid, result);
5090 	else
5091 		l2cap_move_fail(conn, cmd->ident, icid, result);
5092 
5093 	return 0;
5094 }
5095 
5096 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5097 				      struct l2cap_cmd_hdr *cmd,
5098 				      u16 cmd_len, void *data)
5099 {
5100 	struct l2cap_move_chan_cfm *cfm = data;
5101 	struct l2cap_chan *chan;
5102 	u16 icid, result;
5103 
5104 	if (cmd_len != sizeof(*cfm))
5105 		return -EPROTO;
5106 
5107 	icid = le16_to_cpu(cfm->icid);
5108 	result = le16_to_cpu(cfm->result);
5109 
5110 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5111 
5112 	chan = l2cap_get_chan_by_dcid(conn, icid);
5113 	if (!chan) {
5114 		/* Spec requires a response even if the icid was not found */
5115 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5116 		return 0;
5117 	}
5118 
5119 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5120 		if (result == L2CAP_MC_CONFIRMED) {
5121 			chan->local_amp_id = chan->move_id;
5122 			if (chan->local_amp_id == AMP_ID_BREDR)
5123 				__release_logical_link(chan);
5124 		} else {
5125 			chan->move_id = chan->local_amp_id;
5126 		}
5127 
5128 		l2cap_move_done(chan);
5129 	}
5130 
5131 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5132 
5133 	l2cap_chan_unlock(chan);
5134 
5135 	return 0;
5136 }
5137 
5138 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5139 						 struct l2cap_cmd_hdr *cmd,
5140 						 u16 cmd_len, void *data)
5141 {
5142 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5143 	struct l2cap_chan *chan;
5144 	u16 icid;
5145 
5146 	if (cmd_len != sizeof(*rsp))
5147 		return -EPROTO;
5148 
5149 	icid = le16_to_cpu(rsp->icid);
5150 
5151 	BT_DBG("icid 0x%4.4x", icid);
5152 
5153 	chan = l2cap_get_chan_by_scid(conn, icid);
5154 	if (!chan)
5155 		return 0;
5156 
5157 	__clear_chan_timer(chan);
5158 
5159 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5160 		chan->local_amp_id = chan->move_id;
5161 
5162 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5163 			__release_logical_link(chan);
5164 
5165 		l2cap_move_done(chan);
5166 	}
5167 
5168 	l2cap_chan_unlock(chan);
5169 
5170 	return 0;
5171 }
5172 
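/* Handle an LE Connection Parameter Update Request. Only valid when we are
 * master; the parameters are validated and, if acceptable, applied to the
 * connection and reported to the management interface.
 */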
5173 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5174 					      struct l2cap_cmd_hdr *cmd,
5175 					      u16 cmd_len, u8 *data)
5176 {
5177 	struct hci_conn *hcon = conn->hcon;
5178 	struct l2cap_conn_param_update_req *req;
5179 	struct l2cap_conn_param_update_rsp rsp;
5180 	u16 min, max, latency, to_multiplier;
5181 	int err;
5182 
5183 	if (hcon->role != HCI_ROLE_MASTER)
5184 		return -EINVAL;
5185 
5186 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5187 		return -EPROTO;
5188 
5189 	req = (struct l2cap_conn_param_update_req *) data;
5190 	min		= __le16_to_cpu(req->min);
5191 	max		= __le16_to_cpu(req->max);
5192 	latency		= __le16_to_cpu(req->latency);
5193 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5194 
5195 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5196 	       min, max, latency, to_multiplier);
5197 
5198 	memset(&rsp, 0, sizeof(rsp));
5199 
5200 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5201 	if (err)
5202 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5203 	else
5204 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5205 
5206 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5207 		       sizeof(rsp), &rsp);
5208 
5209 	if (!err) {
5210 		u8 store_hint;
5211 
5212 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5213 						to_multiplier);
5214 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5215 				    store_hint, min, max, latency,
5216 				    to_multiplier);
5217 
5218 	}
5219 
5220 	return 0;
5221 }
5222 
5223 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5224 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5225 				u8 *data)
5226 {
5227 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5228 	u16 dcid, mtu, mps, credits, result;
5229 	struct l2cap_chan *chan;
5230 	int err;
5231 
5232 	if (cmd_len < sizeof(*rsp))
5233 		return -EPROTO;
5234 
5235 	dcid    = __le16_to_cpu(rsp->dcid);
5236 	mtu     = __le16_to_cpu(rsp->mtu);
5237 	mps     = __le16_to_cpu(rsp->mps);
5238 	credits = __le16_to_cpu(rsp->credits);
5239 	result  = __le16_to_cpu(rsp->result);
5240 
5241 	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5242 		return -EPROTO;
5243 
5244 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5245 	       dcid, mtu, mps, credits, result);
5246 
5247 	mutex_lock(&conn->chan_lock);
5248 
5249 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5250 	if (!chan) {
5251 		err = -EBADSLT;
5252 		goto unlock;
5253 	}
5254 
5255 	err = 0;
5256 
5257 	l2cap_chan_lock(chan);
5258 
5259 	switch (result) {
5260 	case L2CAP_CR_SUCCESS:
5261 		chan->ident = 0;
5262 		chan->dcid = dcid;
5263 		chan->omtu = mtu;
5264 		chan->remote_mps = mps;
5265 		chan->tx_credits = credits;
5266 		l2cap_chan_ready(chan);
5267 		break;
5268 
5269 	default:
5270 		l2cap_chan_del(chan, ECONNREFUSED);
5271 		break;
5272 	}
5273 
5274 	l2cap_chan_unlock(chan);
5275 
5276 unlock:
5277 	mutex_unlock(&conn->chan_lock);
5278 
5279 	return err;
5280 }
5281 
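/* Dispatch a single BR/EDR signaling command to its handler. */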
5282 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5283 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5284 				      u8 *data)
5285 {
5286 	int err = 0;
5287 
5288 	switch (cmd->code) {
5289 	case L2CAP_COMMAND_REJ:
5290 		l2cap_command_rej(conn, cmd, cmd_len, data);
5291 		break;
5292 
5293 	case L2CAP_CONN_REQ:
5294 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5295 		break;
5296 
5297 	case L2CAP_CONN_RSP:
5298 	case L2CAP_CREATE_CHAN_RSP:
5299 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5300 		break;
5301 
5302 	case L2CAP_CONF_REQ:
5303 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5304 		break;
5305 
5306 	case L2CAP_CONF_RSP:
5307 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5308 		break;
5309 
5310 	case L2CAP_DISCONN_REQ:
5311 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5312 		break;
5313 
5314 	case L2CAP_DISCONN_RSP:
5315 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5316 		break;
5317 
5318 	case L2CAP_ECHO_REQ:
5319 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5320 		break;
5321 
5322 	case L2CAP_ECHO_RSP:
5323 		break;
5324 
5325 	case L2CAP_INFO_REQ:
5326 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5327 		break;
5328 
5329 	case L2CAP_INFO_RSP:
5330 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5331 		break;
5332 
5333 	case L2CAP_CREATE_CHAN_REQ:
5334 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5335 		break;
5336 
5337 	case L2CAP_MOVE_CHAN_REQ:
5338 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5339 		break;
5340 
5341 	case L2CAP_MOVE_CHAN_RSP:
5342 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5343 		break;
5344 
5345 	case L2CAP_MOVE_CHAN_CFM:
5346 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5347 		break;
5348 
5349 	case L2CAP_MOVE_CHAN_CFM_RSP:
5350 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5351 		break;
5352 
5353 	default:
5354 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5355 		err = -EINVAL;
5356 		break;
5357 	}
5358 
5359 	return err;
5360 }
5361 
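/* Handle an LE Credit Based Connection Request: validate MTU/MPS, check the
 * listening channel's security requirements, create the channel and send the
 * LE Connection Response.
 */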
5362 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5363 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5364 				u8 *data)
5365 {
5366 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5367 	struct l2cap_le_conn_rsp rsp;
5368 	struct l2cap_chan *chan, *pchan;
5369 	u16 dcid, scid, credits, mtu, mps;
5370 	__le16 psm;
5371 	u8 result;
5372 
5373 	if (cmd_len != sizeof(*req))
5374 		return -EPROTO;
5375 
5376 	scid = __le16_to_cpu(req->scid);
5377 	mtu  = __le16_to_cpu(req->mtu);
5378 	mps  = __le16_to_cpu(req->mps);
5379 	psm  = req->psm;
5380 	dcid = 0;
5381 	credits = 0;
5382 
5383 	if (mtu < 23 || mps < 23)
5384 		return -EPROTO;
5385 
5386 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5387 	       scid, mtu, mps);
5388 
5389 	/* Check if we have socket listening on psm */
5390 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5391 					 &conn->hcon->dst, LE_LINK);
5392 	if (!pchan) {
5393 		result = L2CAP_CR_BAD_PSM;
5394 		chan = NULL;
5395 		goto response;
5396 	}
5397 
5398 	mutex_lock(&conn->chan_lock);
5399 	l2cap_chan_lock(pchan);
5400 
5401 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5402 		result = L2CAP_CR_AUTHENTICATION;
5403 		chan = NULL;
5404 		goto response_unlock;
5405 	}
5406 
5407 	/* Check if we already have channel with that dcid */
5408 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5409 		result = L2CAP_CR_NO_MEM;
5410 		chan = NULL;
5411 		goto response_unlock;
5412 	}
5413 
5414 	chan = pchan->ops->new_connection(pchan);
5415 	if (!chan) {
5416 		result = L2CAP_CR_NO_MEM;
5417 		goto response_unlock;
5418 	}
5419 
5420 	l2cap_le_flowctl_init(chan);
5421 
5422 	bacpy(&chan->src, &conn->hcon->src);
5423 	bacpy(&chan->dst, &conn->hcon->dst);
5424 	chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5425 	chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5426 	chan->psm  = psm;
5427 	chan->dcid = scid;
5428 	chan->omtu = mtu;
5429 	chan->remote_mps = mps;
5430 	chan->tx_credits = __le16_to_cpu(req->credits);
5431 
5432 	__l2cap_chan_add(conn, chan);
5433 	dcid = chan->scid;
5434 	credits = chan->rx_credits;
5435 
5436 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5437 
5438 	chan->ident = cmd->ident;
5439 
5440 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5441 		l2cap_state_change(chan, BT_CONNECT2);
5442 		/* The following result value is actually not defined
5443 		 * for LE CoC but we use it to let the function know
5444 		 * that it should bail out after doing its cleanup
5445 		 * instead of sending a response.
5446 		 */
5447 		result = L2CAP_CR_PEND;
5448 		chan->ops->defer(chan);
5449 	} else {
5450 		l2cap_chan_ready(chan);
5451 		result = L2CAP_CR_SUCCESS;
5452 	}
5453 
5454 response_unlock:
5455 	l2cap_chan_unlock(pchan);
5456 	mutex_unlock(&conn->chan_lock);
5457 	l2cap_chan_put(pchan);
5458 
5459 	if (result == L2CAP_CR_PEND)
5460 		return 0;
5461 
5462 response:
5463 	if (chan) {
5464 		rsp.mtu = cpu_to_le16(chan->imtu);
5465 		rsp.mps = cpu_to_le16(chan->mps);
5466 	} else {
5467 		rsp.mtu = 0;
5468 		rsp.mps = 0;
5469 	}
5470 
5471 	rsp.dcid    = cpu_to_le16(dcid);
5472 	rsp.credits = cpu_to_le16(credits);
5473 	rsp.result  = cpu_to_le16(result);
5474 
5475 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5476 
5477 	return 0;
5478 }
5479 
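/* Handle an LE Flow Control Credit packet: add the credits to the channel,
 * guarding against credit overflow, and resume any queued transmissions.
 */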
5480 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5481 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5482 				   u8 *data)
5483 {
5484 	struct l2cap_le_credits *pkt;
5485 	struct l2cap_chan *chan;
5486 	u16 cid, credits, max_credits;
5487 
5488 	if (cmd_len != sizeof(*pkt))
5489 		return -EPROTO;
5490 
5491 	pkt = (struct l2cap_le_credits *) data;
5492 	cid	= __le16_to_cpu(pkt->cid);
5493 	credits	= __le16_to_cpu(pkt->credits);
5494 
5495 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5496 
5497 	chan = l2cap_get_chan_by_dcid(conn, cid);
5498 	if (!chan)
5499 		return -EBADSLT;
5500 
5501 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5502 	if (credits > max_credits) {
5503 		BT_ERR("LE credits overflow");
5504 		l2cap_send_disconn_req(chan, ECONNRESET);
5505 
5506 		/* Return 0 so that we don't trigger an unnecessary
5507 		 * command reject packet.
5508 		 */
5509 		return 0;
5510 	}
5511 
5512 	chan->tx_credits += credits;
5513 
5514 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5515 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5516 		chan->tx_credits--;
5517 	}
5518 
5519 	if (chan->tx_credits)
5520 		chan->ops->resume(chan);
5521 
5522 	l2cap_chan_unlock(chan);
5523 
5524 	return 0;
5525 }
5526 
5527 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5528 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5529 				       u8 *data)
5530 {
5531 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5532 	struct l2cap_chan *chan;
5533 
5534 	if (cmd_len < sizeof(*rej))
5535 		return -EPROTO;
5536 
5537 	mutex_lock(&conn->chan_lock);
5538 
5539 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5540 	if (!chan)
5541 		goto done;
5542 
5543 	l2cap_chan_lock(chan);
5544 	l2cap_chan_del(chan, ECONNREFUSED);
5545 	l2cap_chan_unlock(chan);
5546 
5547 done:
5548 	mutex_unlock(&conn->chan_lock);
5549 	return 0;
5550 }
5551 
5552 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5553 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5554 				   u8 *data)
5555 {
5556 	int err = 0;
5557 
5558 	switch (cmd->code) {
5559 	case L2CAP_COMMAND_REJ:
5560 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
5561 		break;
5562 
5563 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5564 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5565 		break;
5566 
5567 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5568 		break;
5569 
5570 	case L2CAP_LE_CONN_RSP:
5571 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5572 		break;
5573 
5574 	case L2CAP_LE_CONN_REQ:
5575 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5576 		break;
5577 
5578 	case L2CAP_LE_CREDITS:
5579 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
5580 		break;
5581 
5582 	case L2CAP_DISCONN_REQ:
5583 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5584 		break;
5585 
5586 	case L2CAP_DISCONN_RSP:
5587 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5588 		break;
5589 
5590 	default:
5591 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5592 		err = -EINVAL;
5593 		break;
5594 	}
5595 
5596 	return err;
5597 }
5598 
5599 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5600 					struct sk_buff *skb)
5601 {
5602 	struct hci_conn *hcon = conn->hcon;
5603 	struct l2cap_cmd_hdr *cmd;
5604 	u16 len;
5605 	int err;
5606 
5607 	if (hcon->type != LE_LINK)
5608 		goto drop;
5609 
5610 	if (skb->len < L2CAP_CMD_HDR_SIZE)
5611 		goto drop;
5612 
5613 	cmd = (void *) skb->data;
5614 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5615 
5616 	len = le16_to_cpu(cmd->len);
5617 
5618 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5619 
5620 	if (len != skb->len || !cmd->ident) {
5621 		BT_DBG("corrupted command");
5622 		goto drop;
5623 	}
5624 
5625 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5626 	if (err) {
5627 		struct l2cap_cmd_rej_unk rej;
5628 
5629 		BT_ERR("Failed to handle LE signaling command (%d)", err);
5630 
5631 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5632 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5633 			       sizeof(rej), &rej);
5634 	}
5635 
5636 drop:
5637 	kfree_skb(skb);
5638 }
5639 
5640 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5641 				     struct sk_buff *skb)
5642 {
5643 	struct hci_conn *hcon = conn->hcon;
5644 	u8 *data = skb->data;
5645 	int len = skb->len;
5646 	struct l2cap_cmd_hdr cmd;
5647 	int err;
5648 
5649 	l2cap_raw_recv(conn, skb);
5650 
5651 	if (hcon->type != ACL_LINK)
5652 		goto drop;
5653 
5654 	while (len >= L2CAP_CMD_HDR_SIZE) {
5655 		u16 cmd_len;
5656 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5657 		data += L2CAP_CMD_HDR_SIZE;
5658 		len  -= L2CAP_CMD_HDR_SIZE;
5659 
5660 		cmd_len = le16_to_cpu(cmd.len);
5661 
5662 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5663 		       cmd.ident);
5664 
5665 		if (cmd_len > len || !cmd.ident) {
5666 			BT_DBG("corrupted command");
5667 			break;
5668 		}
5669 
5670 		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5671 		if (err) {
5672 			struct l2cap_cmd_rej_unk rej;
5673 
5674 			BT_ERR("Failed to handle signaling command (%d)", err);
5675 
5676 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5677 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5678 				       sizeof(rej), &rej);
5679 		}
5680 
5681 		data += cmd_len;
5682 		len  -= cmd_len;
5683 	}
5684 
5685 drop:
5686 	kfree_skb(skb);
5687 }
5688 
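/* Editor's note on the FCS check below: with L2CAP_FCS_CRC16 the last two
 * octets of an ERTM/streaming PDU carry a CRC-16 computed over the basic
 * L2CAP header, the (enhanced or extended) control field and the payload.
 * Both the header and the control field have already been pulled off the
 * skb by the time we get here, hence the crc16() call starting at
 * skb->data - hdr_size and the received FCS being read from the trimmed
 * tail of the buffer.
 */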
5689 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
5690 {
5691 	u16 our_fcs, rcv_fcs;
5692 	int hdr_size;
5693 
5694 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5695 		hdr_size = L2CAP_EXT_HDR_SIZE;
5696 	else
5697 		hdr_size = L2CAP_ENH_HDR_SIZE;
5698 
5699 	if (chan->fcs == L2CAP_FCS_CRC16) {
5700 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5701 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5702 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5703 
5704 		if (our_fcs != rcv_fcs)
5705 			return -EBADMSG;
5706 	}
5707 	return 0;
5708 }
5709 
5710 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5711 {
5712 	struct l2cap_ctrl control;
5713 
5714 	BT_DBG("chan %p", chan);
5715 
5716 	memset(&control, 0, sizeof(control));
5717 	control.sframe = 1;
5718 	control.final = 1;
5719 	control.reqseq = chan->buffer_seq;
5720 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5721 
5722 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5723 		control.super = L2CAP_SUPER_RNR;
5724 		l2cap_send_sframe(chan, &control);
5725 	}
5726 
5727 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5728 	    chan->unacked_frames > 0)
5729 		__set_retrans_timer(chan);
5730 
5731 	/* Send pending iframes */
5732 	l2cap_ertm_send(chan);
5733 
5734 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5735 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5736 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
5737 		 * send it now.
5738 		 */
5739 		control.super = L2CAP_SUPER_RR;
5740 		l2cap_send_sframe(chan, &control);
5741 	}
5742 }
5743 
5744 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5745 			    struct sk_buff **last_frag)
5746 {
5747 	/* skb->len reflects data in skb as well as all fragments
5748 	 * skb->data_len reflects only data in fragments
5749 	 */
5750 	if (!skb_has_frag_list(skb))
5751 		skb_shinfo(skb)->frag_list = new_frag;
5752 
5753 	new_frag->next = NULL;
5754 
5755 	(*last_frag)->next = new_frag;
5756 	*last_frag = new_frag;
5757 
5758 	skb->len += new_frag->len;
5759 	skb->data_len += new_frag->len;
5760 	skb->truesize += new_frag->truesize;
5761 }
5762 
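/* Editor's note: the reassembly below is driven by the SAR bits of each
 * ERTM/streaming I-frame.  Illustrative example of a segmented SDU (sizes
 * are made up):
 *
 *   SAR_START    - carries the 2 byte SDU length plus the first payload
 *                  bytes; the skb is parked in chan->sdu
 *   SAR_CONTINUE - appended to chan->sdu via append_skb_frag()
 *   SAR_END      - appended; once chan->sdu->len equals chan->sdu_len the
 *                  complete SDU is handed to chan->ops->recv()
 *
 * Unsegmented SDUs bypass the buffering entirely, and any violation of this
 * ordering drops the partial SDU and returns an error to the caller.
 */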
5763 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5764 				struct l2cap_ctrl *control)
5765 {
5766 	int err = -EINVAL;
5767 
5768 	switch (control->sar) {
5769 	case L2CAP_SAR_UNSEGMENTED:
5770 		if (chan->sdu)
5771 			break;
5772 
5773 		err = chan->ops->recv(chan, skb);
5774 		break;
5775 
5776 	case L2CAP_SAR_START:
5777 		if (chan->sdu)
5778 			break;
5779 
5780 		chan->sdu_len = get_unaligned_le16(skb->data);
5781 		skb_pull(skb, L2CAP_SDULEN_SIZE);
5782 
5783 		if (chan->sdu_len > chan->imtu) {
5784 			err = -EMSGSIZE;
5785 			break;
5786 		}
5787 
5788 		if (skb->len >= chan->sdu_len)
5789 			break;
5790 
5791 		chan->sdu = skb;
5792 		chan->sdu_last_frag = skb;
5793 
5794 		skb = NULL;
5795 		err = 0;
5796 		break;
5797 
5798 	case L2CAP_SAR_CONTINUE:
5799 		if (!chan->sdu)
5800 			break;
5801 
5802 		append_skb_frag(chan->sdu, skb,
5803 				&chan->sdu_last_frag);
5804 		skb = NULL;
5805 
5806 		if (chan->sdu->len >= chan->sdu_len)
5807 			break;
5808 
5809 		err = 0;
5810 		break;
5811 
5812 	case L2CAP_SAR_END:
5813 		if (!chan->sdu)
5814 			break;
5815 
5816 		append_skb_frag(chan->sdu, skb,
5817 				&chan->sdu_last_frag);
5818 		skb = NULL;
5819 
5820 		if (chan->sdu->len != chan->sdu_len)
5821 			break;
5822 
5823 		err = chan->ops->recv(chan, chan->sdu);
5824 
5825 		if (!err) {
5826 			/* Reassembly complete */
5827 			chan->sdu = NULL;
5828 			chan->sdu_last_frag = NULL;
5829 			chan->sdu_len = 0;
5830 		}
5831 		break;
5832 	}
5833 
5834 	if (err) {
5835 		kfree_skb(skb);
5836 		kfree_skb(chan->sdu);
5837 		chan->sdu = NULL;
5838 		chan->sdu_last_frag = NULL;
5839 		chan->sdu_len = 0;
5840 	}
5841 
5842 	return err;
5843 }
5844 
5845 static int l2cap_resegment(struct l2cap_chan *chan)
5846 {
5847 	/* Placeholder */
5848 	return 0;
5849 }
5850 
5851 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5852 {
5853 	u8 event;
5854 
5855 	if (chan->mode != L2CAP_MODE_ERTM)
5856 		return;
5857 
5858 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5859 	l2cap_tx(chan, NULL, NULL, event);
5860 }
5861 
5862 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5863 {
5864 	int err = 0;
5865 	/* Pass sequential frames to l2cap_reassemble_sdu()
5866 	 * until a gap is encountered.
5867 	 */
5868 
5869 	BT_DBG("chan %p", chan);
5870 
5871 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5872 		struct sk_buff *skb;
5873 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
5874 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
5875 
5876 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5877 
5878 		if (!skb)
5879 			break;
5880 
5881 		skb_unlink(skb, &chan->srej_q);
5882 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5883 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5884 		if (err)
5885 			break;
5886 	}
5887 
5888 	if (skb_queue_empty(&chan->srej_q)) {
5889 		chan->rx_state = L2CAP_RX_STATE_RECV;
5890 		l2cap_send_ack(chan);
5891 	}
5892 
5893 	return err;
5894 }
5895 
5896 static void l2cap_handle_srej(struct l2cap_chan *chan,
5897 			      struct l2cap_ctrl *control)
5898 {
5899 	struct sk_buff *skb;
5900 
5901 	BT_DBG("chan %p, control %p", chan, control);
5902 
5903 	if (control->reqseq == chan->next_tx_seq) {
5904 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5905 		l2cap_send_disconn_req(chan, ECONNRESET);
5906 		return;
5907 	}
5908 
5909 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5910 
5911 	if (skb == NULL) {
5912 		BT_DBG("Seq %d not available for retransmission",
5913 		       control->reqseq);
5914 		return;
5915 	}
5916 
5917 	if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5918 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5919 		l2cap_send_disconn_req(chan, ECONNRESET);
5920 		return;
5921 	}
5922 
5923 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5924 
5925 	if (control->poll) {
5926 		l2cap_pass_to_tx(chan, control);
5927 
5928 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
5929 		l2cap_retransmit(chan, control);
5930 		l2cap_ertm_send(chan);
5931 
5932 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5933 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
5934 			chan->srej_save_reqseq = control->reqseq;
5935 		}
5936 	} else {
5937 		l2cap_pass_to_tx_fbit(chan, control);
5938 
5939 		if (control->final) {
5940 			if (chan->srej_save_reqseq != control->reqseq ||
5941 			    !test_and_clear_bit(CONN_SREJ_ACT,
5942 						&chan->conn_state))
5943 				l2cap_retransmit(chan, control);
5944 		} else {
5945 			l2cap_retransmit(chan, control);
5946 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5947 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
5948 				chan->srej_save_reqseq = control->reqseq;
5949 			}
5950 		}
5951 	}
5952 }
5953 
5954 static void l2cap_handle_rej(struct l2cap_chan *chan,
5955 			     struct l2cap_ctrl *control)
5956 {
5957 	struct sk_buff *skb;
5958 
5959 	BT_DBG("chan %p, control %p", chan, control);
5960 
5961 	if (control->reqseq == chan->next_tx_seq) {
5962 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
5963 		l2cap_send_disconn_req(chan, ECONNRESET);
5964 		return;
5965 	}
5966 
5967 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5968 
5969 	if (chan->max_tx && skb &&
5970 	    bt_cb(skb)->control.retries >= chan->max_tx) {
5971 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
5972 		l2cap_send_disconn_req(chan, ECONNRESET);
5973 		return;
5974 	}
5975 
5976 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5977 
5978 	l2cap_pass_to_tx(chan, control);
5979 
5980 	if (control->final) {
5981 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5982 			l2cap_retransmit_all(chan, control);
5983 	} else {
5984 		l2cap_retransmit_all(chan, control);
5985 		l2cap_ertm_send(chan);
5986 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5987 			set_bit(CONN_REJ_ACT, &chan->conn_state);
5988 	}
5989 }
5990 
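/* Editor's note: a small worked example for the classification below,
 * assuming the non-extended control field's 6-bit sequence space
 * (tx_win_max = 63), tx_win = 10, rx_state RECV, last_acked_seq = 60 and
 * expected_tx_seq = 62 (all values illustrative):
 *
 *   txseq 62 -> offset 2  (< tx_win)               -> EXPECTED
 *   txseq 61 -> offset 1  (< offset of expected=2) -> DUPLICATE
 *   txseq  5 -> offset 9  (< tx_win, but a gap)    -> UNEXPECTED (SREJ)
 *   txseq 20 -> offset 24 (>= tx_win)              -> INVALID_IGNORE, since
 *               tx_win <= (tx_win_max + 1) / 2 ("double poll" case below)
 *
 * Offsets are taken modulo the sequence space by __seq_offset().
 */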
5991 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5992 {
5993 	BT_DBG("chan %p, txseq %d", chan, txseq);
5994 
5995 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5996 	       chan->expected_tx_seq);
5997 
5998 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5999 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6000 		    chan->tx_win) {
6001 			/* See notes below regarding "double poll" and
6002 			 * invalid packets.
6003 			 */
6004 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6005 				BT_DBG("Invalid/Ignore - after SREJ");
6006 				return L2CAP_TXSEQ_INVALID_IGNORE;
6007 			} else {
6008 				BT_DBG("Invalid - in window after SREJ sent");
6009 				return L2CAP_TXSEQ_INVALID;
6010 			}
6011 		}
6012 
6013 		if (chan->srej_list.head == txseq) {
6014 			BT_DBG("Expected SREJ");
6015 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6016 		}
6017 
6018 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6019 			BT_DBG("Duplicate SREJ - txseq already stored");
6020 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6021 		}
6022 
6023 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6024 			BT_DBG("Unexpected SREJ - not requested");
6025 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6026 		}
6027 	}
6028 
6029 	if (chan->expected_tx_seq == txseq) {
6030 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6031 		    chan->tx_win) {
6032 			BT_DBG("Invalid - txseq outside tx window");
6033 			return L2CAP_TXSEQ_INVALID;
6034 		} else {
6035 			BT_DBG("Expected");
6036 			return L2CAP_TXSEQ_EXPECTED;
6037 		}
6038 	}
6039 
6040 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6041 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6042 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6043 		return L2CAP_TXSEQ_DUPLICATE;
6044 	}
6045 
6046 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6047 		/* A source of invalid packets is a "double poll" condition,
6048 		 * where delays cause us to send multiple poll packets.  If
6049 		 * the remote stack receives and processes both polls,
6050 		 * sequence numbers can wrap around in such a way that a
6051 		 * resent frame has a sequence number that looks like new data
6052 		 * with a sequence gap.  This would trigger an erroneous SREJ
6053 		 * request.
6054 		 *
6055 		 * Fortunately, this is impossible with a tx window that's
6056 		 * less than half of the maximum sequence number, which allows
6057 		 * invalid frames to be safely ignored.
6058 		 *
6059 		 * With tx window sizes greater than half of the tx window
6060 		 * maximum, the frame is invalid and cannot be ignored.  This
6061 		 * causes a disconnect.
6062 		 */
6063 
6064 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6065 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6066 			return L2CAP_TXSEQ_INVALID_IGNORE;
6067 		} else {
6068 			BT_DBG("Invalid - txseq outside tx window");
6069 			return L2CAP_TXSEQ_INVALID;
6070 		}
6071 	} else {
6072 		BT_DBG("Unexpected - txseq indicates missing frames");
6073 		return L2CAP_TXSEQ_UNEXPECTED;
6074 	}
6075 }
6076 
6077 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6078 			       struct l2cap_ctrl *control,
6079 			       struct sk_buff *skb, u8 event)
6080 {
6081 	int err = 0;
6082 	bool skb_in_use = false;
6083 
6084 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6085 	       event);
6086 
6087 	switch (event) {
6088 	case L2CAP_EV_RECV_IFRAME:
6089 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6090 		case L2CAP_TXSEQ_EXPECTED:
6091 			l2cap_pass_to_tx(chan, control);
6092 
6093 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6094 				BT_DBG("Busy, discarding expected seq %d",
6095 				       control->txseq);
6096 				break;
6097 			}
6098 
6099 			chan->expected_tx_seq = __next_seq(chan,
6100 							   control->txseq);
6101 
6102 			chan->buffer_seq = chan->expected_tx_seq;
6103 			skb_in_use = true;
6104 
6105 			err = l2cap_reassemble_sdu(chan, skb, control);
6106 			if (err)
6107 				break;
6108 
6109 			if (control->final) {
6110 				if (!test_and_clear_bit(CONN_REJ_ACT,
6111 							&chan->conn_state)) {
6112 					control->final = 0;
6113 					l2cap_retransmit_all(chan, control);
6114 					l2cap_ertm_send(chan);
6115 				}
6116 			}
6117 
6118 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6119 				l2cap_send_ack(chan);
6120 			break;
6121 		case L2CAP_TXSEQ_UNEXPECTED:
6122 			l2cap_pass_to_tx(chan, control);
6123 
6124 			/* Can't issue SREJ frames in the local busy state.
6125 			 * Drop this frame; it will be seen as missing
6126 			 * when local busy is exited.
6127 			 */
6128 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6129 				BT_DBG("Busy, discarding unexpected seq %d",
6130 				       control->txseq);
6131 				break;
6132 			}
6133 
6134 			/* There was a gap in the sequence, so an SREJ
6135 			 * must be sent for each missing frame.  The
6136 			 * current frame is stored for later use.
6137 			 */
6138 			skb_queue_tail(&chan->srej_q, skb);
6139 			skb_in_use = true;
6140 			BT_DBG("Queued %p (queue len %d)", skb,
6141 			       skb_queue_len(&chan->srej_q));
6142 
6143 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6144 			l2cap_seq_list_clear(&chan->srej_list);
6145 			l2cap_send_srej(chan, control->txseq);
6146 
6147 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6148 			break;
6149 		case L2CAP_TXSEQ_DUPLICATE:
6150 			l2cap_pass_to_tx(chan, control);
6151 			break;
6152 		case L2CAP_TXSEQ_INVALID_IGNORE:
6153 			break;
6154 		case L2CAP_TXSEQ_INVALID:
6155 		default:
6156 			l2cap_send_disconn_req(chan, ECONNRESET);
6157 			break;
6158 		}
6159 		break;
6160 	case L2CAP_EV_RECV_RR:
6161 		l2cap_pass_to_tx(chan, control);
6162 		if (control->final) {
6163 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6164 
6165 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6166 			    !__chan_is_moving(chan)) {
6167 				control->final = 0;
6168 				l2cap_retransmit_all(chan, control);
6169 			}
6170 
6171 			l2cap_ertm_send(chan);
6172 		} else if (control->poll) {
6173 			l2cap_send_i_or_rr_or_rnr(chan);
6174 		} else {
6175 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6176 					       &chan->conn_state) &&
6177 			    chan->unacked_frames)
6178 				__set_retrans_timer(chan);
6179 
6180 			l2cap_ertm_send(chan);
6181 		}
6182 		break;
6183 	case L2CAP_EV_RECV_RNR:
6184 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6185 		l2cap_pass_to_tx(chan, control);
6186 		if (control && control->poll) {
6187 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6188 			l2cap_send_rr_or_rnr(chan, 0);
6189 		}
6190 		__clear_retrans_timer(chan);
6191 		l2cap_seq_list_clear(&chan->retrans_list);
6192 		break;
6193 	case L2CAP_EV_RECV_REJ:
6194 		l2cap_handle_rej(chan, control);
6195 		break;
6196 	case L2CAP_EV_RECV_SREJ:
6197 		l2cap_handle_srej(chan, control);
6198 		break;
6199 	default:
6200 		break;
6201 	}
6202 
6203 	if (skb && !skb_in_use) {
6204 		BT_DBG("Freeing %p", skb);
6205 		kfree_skb(skb);
6206 	}
6207 
6208 	return err;
6209 }
6210 
6211 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6212 				    struct l2cap_ctrl *control,
6213 				    struct sk_buff *skb, u8 event)
6214 {
6215 	int err = 0;
6216 	u16 txseq = control->txseq;
6217 	bool skb_in_use = false;
6218 
6219 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6220 	       event);
6221 
6222 	switch (event) {
6223 	case L2CAP_EV_RECV_IFRAME:
6224 		switch (l2cap_classify_txseq(chan, txseq)) {
6225 		case L2CAP_TXSEQ_EXPECTED:
6226 			/* Keep frame for reassembly later */
6227 			l2cap_pass_to_tx(chan, control);
6228 			skb_queue_tail(&chan->srej_q, skb);
6229 			skb_in_use = true;
6230 			BT_DBG("Queued %p (queue len %d)", skb,
6231 			       skb_queue_len(&chan->srej_q));
6232 
6233 			chan->expected_tx_seq = __next_seq(chan, txseq);
6234 			break;
6235 		case L2CAP_TXSEQ_EXPECTED_SREJ:
6236 			l2cap_seq_list_pop(&chan->srej_list);
6237 
6238 			l2cap_pass_to_tx(chan, control);
6239 			skb_queue_tail(&chan->srej_q, skb);
6240 			skb_in_use = true;
6241 			BT_DBG("Queued %p (queue len %d)", skb,
6242 			       skb_queue_len(&chan->srej_q));
6243 
6244 			err = l2cap_rx_queued_iframes(chan);
6245 			if (err)
6246 				break;
6247 
6248 			break;
6249 		case L2CAP_TXSEQ_UNEXPECTED:
6250 			/* Got a frame that can't be reassembled yet.
6251 			 * Save it for later, and send SREJs to cover
6252 			 * the missing frames.
6253 			 */
6254 			skb_queue_tail(&chan->srej_q, skb);
6255 			skb_in_use = true;
6256 			BT_DBG("Queued %p (queue len %d)", skb,
6257 			       skb_queue_len(&chan->srej_q));
6258 
6259 			l2cap_pass_to_tx(chan, control);
6260 			l2cap_send_srej(chan, control->txseq);
6261 			break;
6262 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6263 			/* This frame was requested with an SREJ, but
6264 			 * some expected retransmitted frames are
6265 			 * missing.  Request retransmission of missing
6266 			 * SREJ'd frames.
6267 			 */
6268 			skb_queue_tail(&chan->srej_q, skb);
6269 			skb_in_use = true;
6270 			BT_DBG("Queued %p (queue len %d)", skb,
6271 			       skb_queue_len(&chan->srej_q));
6272 
6273 			l2cap_pass_to_tx(chan, control);
6274 			l2cap_send_srej_list(chan, control->txseq);
6275 			break;
6276 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
6277 			/* We've already queued this frame.  Drop this copy. */
6278 			l2cap_pass_to_tx(chan, control);
6279 			break;
6280 		case L2CAP_TXSEQ_DUPLICATE:
6281 			/* Expecting a later sequence number, so this frame
6282 			 * was already received.  Ignore it completely.
6283 			 */
6284 			break;
6285 		case L2CAP_TXSEQ_INVALID_IGNORE:
6286 			break;
6287 		case L2CAP_TXSEQ_INVALID:
6288 		default:
6289 			l2cap_send_disconn_req(chan, ECONNRESET);
6290 			break;
6291 		}
6292 		break;
6293 	case L2CAP_EV_RECV_RR:
6294 		l2cap_pass_to_tx(chan, control);
6295 		if (control->final) {
6296 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6297 
6298 			if (!test_and_clear_bit(CONN_REJ_ACT,
6299 						&chan->conn_state)) {
6300 				control->final = 0;
6301 				l2cap_retransmit_all(chan, control);
6302 			}
6303 
6304 			l2cap_ertm_send(chan);
6305 		} else if (control->poll) {
6306 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6307 					       &chan->conn_state) &&
6308 			    chan->unacked_frames) {
6309 				__set_retrans_timer(chan);
6310 			}
6311 
6312 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6313 			l2cap_send_srej_tail(chan);
6314 		} else {
6315 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6316 					       &chan->conn_state) &&
6317 			    chan->unacked_frames)
6318 				__set_retrans_timer(chan);
6319 
6320 			l2cap_send_ack(chan);
6321 		}
6322 		break;
6323 	case L2CAP_EV_RECV_RNR:
6324 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6325 		l2cap_pass_to_tx(chan, control);
6326 		if (control->poll) {
6327 			l2cap_send_srej_tail(chan);
6328 		} else {
6329 			struct l2cap_ctrl rr_control;
6330 			memset(&rr_control, 0, sizeof(rr_control));
6331 			rr_control.sframe = 1;
6332 			rr_control.super = L2CAP_SUPER_RR;
6333 			rr_control.reqseq = chan->buffer_seq;
6334 			l2cap_send_sframe(chan, &rr_control);
6335 		}
6336 
6337 		break;
6338 	case L2CAP_EV_RECV_REJ:
6339 		l2cap_handle_rej(chan, control);
6340 		break;
6341 	case L2CAP_EV_RECV_SREJ:
6342 		l2cap_handle_srej(chan, control);
6343 		break;
6344 	}
6345 
6346 	if (skb && !skb_in_use) {
6347 		BT_DBG("Freeing %p", skb);
6348 		kfree_skb(skb);
6349 	}
6350 
6351 	return err;
6352 }
6353 
6354 static int l2cap_finish_move(struct l2cap_chan *chan)
6355 {
6356 	BT_DBG("chan %p", chan);
6357 
6358 	chan->rx_state = L2CAP_RX_STATE_RECV;
6359 
6360 	if (chan->hs_hcon)
6361 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6362 	else
6363 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6364 
6365 	return l2cap_resegment(chan);
6366 }
6367 
6368 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6369 				 struct l2cap_ctrl *control,
6370 				 struct sk_buff *skb, u8 event)
6371 {
6372 	int err;
6373 
6374 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6375 	       event);
6376 
6377 	if (!control->poll)
6378 		return -EPROTO;
6379 
6380 	l2cap_process_reqseq(chan, control->reqseq);
6381 
6382 	if (!skb_queue_empty(&chan->tx_q))
6383 		chan->tx_send_head = skb_peek(&chan->tx_q);
6384 	else
6385 		chan->tx_send_head = NULL;
6386 
6387 	/* Rewind next_tx_seq to the point expected
6388 	 * by the receiver.
6389 	 */
6390 	chan->next_tx_seq = control->reqseq;
6391 	chan->unacked_frames = 0;
6392 
6393 	err = l2cap_finish_move(chan);
6394 	if (err)
6395 		return err;
6396 
6397 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6398 	l2cap_send_i_or_rr_or_rnr(chan);
6399 
6400 	if (event == L2CAP_EV_RECV_IFRAME)
6401 		return -EPROTO;
6402 
6403 	return l2cap_rx_state_recv(chan, control, NULL, event);
6404 }
6405 
6406 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6407 				 struct l2cap_ctrl *control,
6408 				 struct sk_buff *skb, u8 event)
6409 {
6410 	int err;
6411 
6412 	if (!control->final)
6413 		return -EPROTO;
6414 
6415 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6416 
6417 	chan->rx_state = L2CAP_RX_STATE_RECV;
6418 	l2cap_process_reqseq(chan, control->reqseq);
6419 
6420 	if (!skb_queue_empty(&chan->tx_q))
6421 		chan->tx_send_head = skb_peek(&chan->tx_q);
6422 	else
6423 		chan->tx_send_head = NULL;
6424 
6425 	/* Rewind next_tx_seq to the point expected
6426 	 * by the receiver.
6427 	 */
6428 	chan->next_tx_seq = control->reqseq;
6429 	chan->unacked_frames = 0;
6430 
6431 	if (chan->hs_hcon)
6432 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6433 	else
6434 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6435 
6436 	err = l2cap_resegment(chan);
6437 
6438 	if (!err)
6439 		err = l2cap_rx_state_recv(chan, control, skb, event);
6440 
6441 	return err;
6442 }
6443 
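/* Editor's note, illustrating the reqseq check below (numbers assume a
 * 6-bit sequence space): with next_tx_seq = 5 and expected_ack_seq = 62
 * there are (5 - 62) mod 64 = 7 unacked frames, so any reqseq whose
 * offset (5 - reqseq) mod 64 is <= 7, i.e. 62..63 and 0..5, is accepted;
 * anything else is treated as an invalid acknowledgement.
 */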
6444 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6445 {
6446 	/* Make sure reqseq is for a packet that has been sent but not acked */
6447 	u16 unacked;
6448 
6449 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6450 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6451 }
6452 
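/* Editor's note: l2cap_rx() below is the entry point of the ERTM receive
 * state machine.  After validating reqseq it dispatches on chan->rx_state:
 * RECV is the normal receive state, SREJ_SENT is entered after a sequence
 * gap while SREJ recovery is in progress, and WAIT_P/WAIT_F resynchronise
 * the window with a poll/final exchange (used e.g. after a channel move,
 * cf. l2cap_finish_move()).  A non-zero return from any handler makes
 * l2cap_data_rcv() tear the channel down with ECONNRESET.
 */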
6453 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6454 		    struct sk_buff *skb, u8 event)
6455 {
6456 	int err = 0;
6457 
6458 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6459 	       control, skb, event, chan->rx_state);
6460 
6461 	if (__valid_reqseq(chan, control->reqseq)) {
6462 		switch (chan->rx_state) {
6463 		case L2CAP_RX_STATE_RECV:
6464 			err = l2cap_rx_state_recv(chan, control, skb, event);
6465 			break;
6466 		case L2CAP_RX_STATE_SREJ_SENT:
6467 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6468 						       event);
6469 			break;
6470 		case L2CAP_RX_STATE_WAIT_P:
6471 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6472 			break;
6473 		case L2CAP_RX_STATE_WAIT_F:
6474 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6475 			break;
6476 		default:
6477 			/* shut it down */
6478 			break;
6479 		}
6480 	} else {
6481 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6482 		       control->reqseq, chan->next_tx_seq,
6483 		       chan->expected_ack_seq);
6484 		l2cap_send_disconn_req(chan, ECONNRESET);
6485 	}
6486 
6487 	return err;
6488 }
6489 
6490 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6491 			   struct sk_buff *skb)
6492 {
6493 	int err = 0;
6494 
6495 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6496 	       chan->rx_state);
6497 
6498 	if (l2cap_classify_txseq(chan, control->txseq) ==
6499 	    L2CAP_TXSEQ_EXPECTED) {
6500 		l2cap_pass_to_tx(chan, control);
6501 
6502 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6503 		       __next_seq(chan, chan->buffer_seq));
6504 
6505 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6506 
6507 		l2cap_reassemble_sdu(chan, skb, control);
6508 	} else {
6509 		if (chan->sdu) {
6510 			kfree_skb(chan->sdu);
6511 			chan->sdu = NULL;
6512 		}
6513 		chan->sdu_last_frag = NULL;
6514 		chan->sdu_len = 0;
6515 
6516 		if (skb) {
6517 			BT_DBG("Freeing %p", skb);
6518 			kfree_skb(skb);
6519 		}
6520 	}
6521 
6522 	chan->last_acked_seq = control->txseq;
6523 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6524 
6525 	return err;
6526 }
6527 
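/* Editor's note on the ERTM/streaming frame parsing below: after
 * __unpack_control() strips the control field, the remaining length must
 * not exceed the negotiated MPS once the optional 2 byte SDU length
 * (I-frames with SAR_START) and the 2 byte FCS are accounted for.
 * I-frames feed the receive state machine via l2cap_rx() (or
 * l2cap_stream_rx() in streaming mode); S-frames are mapped to
 * RR/REJ/RNR/SREJ events through rx_func_to_event[] and must carry no
 * payload.
 */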
6528 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6529 {
6530 	struct l2cap_ctrl *control = &bt_cb(skb)->control;
6531 	u16 len;
6532 	u8 event;
6533 
6534 	__unpack_control(chan, skb);
6535 
6536 	len = skb->len;
6537 
6538 	/*
6539 	 * We can just drop the corrupted I-frame here.
6540 	 * The receiver will treat it as missing, start the proper
6541 	 * recovery procedure and ask for retransmission.
6542 	 */
6543 	if (l2cap_check_fcs(chan, skb))
6544 		goto drop;
6545 
6546 	if (!control->sframe && control->sar == L2CAP_SAR_START)
6547 		len -= L2CAP_SDULEN_SIZE;
6548 
6549 	if (chan->fcs == L2CAP_FCS_CRC16)
6550 		len -= L2CAP_FCS_SIZE;
6551 
6552 	if (len > chan->mps) {
6553 		l2cap_send_disconn_req(chan, ECONNRESET);
6554 		goto drop;
6555 	}
6556 
6557 	if (!control->sframe) {
6558 		int err;
6559 
6560 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6561 		       control->sar, control->reqseq, control->final,
6562 		       control->txseq);
6563 
6564 		/* Validate F-bit - F=0 always valid, F=1 only
6565 		 * valid in TX WAIT_F
6566 		 */
6567 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6568 			goto drop;
6569 
6570 		if (chan->mode != L2CAP_MODE_STREAMING) {
6571 			event = L2CAP_EV_RECV_IFRAME;
6572 			err = l2cap_rx(chan, control, skb, event);
6573 		} else {
6574 			err = l2cap_stream_rx(chan, control, skb);
6575 		}
6576 
6577 		if (err)
6578 			l2cap_send_disconn_req(chan, ECONNRESET);
6579 	} else {
6580 		const u8 rx_func_to_event[4] = {
6581 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6582 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6583 		};
6584 
6585 		/* Only I-frames are expected in streaming mode */
6586 		if (chan->mode == L2CAP_MODE_STREAMING)
6587 			goto drop;
6588 
6589 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6590 		       control->reqseq, control->final, control->poll,
6591 		       control->super);
6592 
6593 		if (len != 0) {
6594 			BT_ERR("Trailing bytes: %d in sframe", len);
6595 			l2cap_send_disconn_req(chan, ECONNRESET);
6596 			goto drop;
6597 		}
6598 
6599 		/* Validate F and P bits */
6600 		if (control->final && (control->poll ||
6601 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6602 			goto drop;
6603 
6604 		event = rx_func_to_event[control->super];
6605 		if (l2cap_rx(chan, control, skb, event))
6606 			l2cap_send_disconn_req(chan, ECONNRESET);
6607 	}
6608 
6609 	return 0;
6610 
6611 drop:
6612 	kfree_skb(skb);
6613 	return 0;
6614 }
6615 
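/* Editor's note: a worked example for the credit return logic below,
 * assuming le_max_credits is 10 (illustrative; the actual default comes
 * from L2CAP_LE_MAX_CREDITS and is tunable via debugfs).  The threshold
 * (le_max_credits + 1) / 2 is then 5, so nothing is sent while rx_credits
 * is 5 or more.  Once rx_credits drops to 4, return_credits = 10 - 4 = 6
 * and an L2CAP_LE_CREDITS packet tops the sender back up to 10.
 */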
6616 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6617 {
6618 	struct l2cap_conn *conn = chan->conn;
6619 	struct l2cap_le_credits pkt;
6620 	u16 return_credits;
6621 
6622 	/* We return more credits to the sender only after the amount of
6623 	 * credits falls below half of the initial amount.
6624 	 */
6625 	if (chan->rx_credits >= (le_max_credits + 1) / 2)
6626 		return;
6627 
6628 	return_credits = le_max_credits - chan->rx_credits;
6629 
6630 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6631 
6632 	chan->rx_credits += return_credits;
6633 
6634 	pkt.cid     = cpu_to_le16(chan->scid);
6635 	pkt.credits = cpu_to_le16(return_credits);
6636 
6637 	chan->ident = l2cap_get_ident(conn);
6638 
6639 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6640 }
6641 
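/* Editor's note: LE CoC reassembly below differs from ERTM SAR in that only
 * the first PDU of an SDU carries a 2 byte SDU length and there are no SAR
 * bits; subsequent PDUs are simply appended until chan->sdu->len reaches
 * chan->sdu_len.  Illustrative example, assuming an MPS of 23: a 50 byte
 * SDU arrives as a first PDU whose 23 byte payload is the 2 byte SDU length
 * plus 21 data bytes, followed by continuation PDUs of 23 and 6 bytes, each
 * PDU consuming one rx credit.
 */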
6642 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6643 {
6644 	int err;
6645 
6646 	if (!chan->rx_credits) {
6647 		BT_ERR("No credits to receive LE L2CAP data");
6648 		l2cap_send_disconn_req(chan, ECONNRESET);
6649 		return -ENOBUFS;
6650 	}
6651 
6652 	if (chan->imtu < skb->len) {
6653 		BT_ERR("Too big LE L2CAP PDU");
6654 		return -ENOBUFS;
6655 	}
6656 
6657 	chan->rx_credits--;
6658 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6659 
6660 	l2cap_chan_le_send_credits(chan);
6661 
6662 	err = 0;
6663 
6664 	if (!chan->sdu) {
6665 		u16 sdu_len;
6666 
6667 		sdu_len = get_unaligned_le16(skb->data);
6668 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6669 
6670 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6671 		       sdu_len, skb->len, chan->imtu);
6672 
6673 		if (sdu_len > chan->imtu) {
6674 			BT_ERR("Too big LE L2CAP SDU length received");
6675 			err = -EMSGSIZE;
6676 			goto failed;
6677 		}
6678 
6679 		if (skb->len > sdu_len) {
6680 			BT_ERR("Too much LE L2CAP data received");
6681 			err = -EINVAL;
6682 			goto failed;
6683 		}
6684 
6685 		if (skb->len == sdu_len)
6686 			return chan->ops->recv(chan, skb);
6687 
6688 		chan->sdu = skb;
6689 		chan->sdu_len = sdu_len;
6690 		chan->sdu_last_frag = skb;
6691 
6692 		return 0;
6693 	}
6694 
6695 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6696 	       chan->sdu->len, skb->len, chan->sdu_len);
6697 
6698 	if (chan->sdu->len + skb->len > chan->sdu_len) {
6699 		BT_ERR("Too much LE L2CAP data received");
6700 		err = -EINVAL;
6701 		goto failed;
6702 	}
6703 
6704 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6705 	skb = NULL;
6706 
6707 	if (chan->sdu->len == chan->sdu_len) {
6708 		err = chan->ops->recv(chan, chan->sdu);
6709 		if (!err) {
6710 			chan->sdu = NULL;
6711 			chan->sdu_last_frag = NULL;
6712 			chan->sdu_len = 0;
6713 		}
6714 	}
6715 
6716 failed:
6717 	if (err) {
6718 		kfree_skb(skb);
6719 		kfree_skb(chan->sdu);
6720 		chan->sdu = NULL;
6721 		chan->sdu_last_frag = NULL;
6722 		chan->sdu_len = 0;
6723 	}
6724 
6725 	/* We can't return an error here since we took care of the skb
6726 	 * freeing internally. An error return would cause the caller to
6727 	 * do a double-free of the skb.
6728 	 */
6729 	return 0;
6730 }
6731 
6732 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6733 			       struct sk_buff *skb)
6734 {
6735 	struct l2cap_chan *chan;
6736 
6737 	chan = l2cap_get_chan_by_scid(conn, cid);
6738 	if (!chan) {
6739 		if (cid == L2CAP_CID_A2MP) {
6740 			chan = a2mp_channel_create(conn, skb);
6741 			if (!chan) {
6742 				kfree_skb(skb);
6743 				return;
6744 			}
6745 
6746 			l2cap_chan_lock(chan);
6747 		} else {
6748 			BT_DBG("unknown cid 0x%4.4x", cid);
6749 			/* Drop packet and return */
6750 			kfree_skb(skb);
6751 			return;
6752 		}
6753 	}
6754 
6755 	BT_DBG("chan %p, len %d", chan, skb->len);
6756 
6757 	if (chan->state != BT_CONNECTED)
6758 		goto drop;
6759 
6760 	switch (chan->mode) {
6761 	case L2CAP_MODE_LE_FLOWCTL:
6762 		if (l2cap_le_data_rcv(chan, skb) < 0)
6763 			goto drop;
6764 
6765 		goto done;
6766 
6767 	case L2CAP_MODE_BASIC:
6768 		/* If the socket receive buffer overflows we drop data here,
6769 		 * which is *bad* because L2CAP has to be reliable.
6770 		 * But we don't have any other choice: Basic mode L2CAP
6771 		 * doesn't provide a flow control mechanism. */
6772 
6773 		if (chan->imtu < skb->len) {
6774 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
6775 			goto drop;
6776 		}
6777 
6778 		if (!chan->ops->recv(chan, skb))
6779 			goto done;
6780 		break;
6781 
6782 	case L2CAP_MODE_ERTM:
6783 	case L2CAP_MODE_STREAMING:
6784 		l2cap_data_rcv(chan, skb);
6785 		goto done;
6786 
6787 	default:
6788 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6789 		break;
6790 	}
6791 
6792 drop:
6793 	kfree_skb(skb);
6794 
6795 done:
6796 	l2cap_chan_unlock(chan);
6797 }
6798 
6799 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6800 				  struct sk_buff *skb)
6801 {
6802 	struct hci_conn *hcon = conn->hcon;
6803 	struct l2cap_chan *chan;
6804 
6805 	if (hcon->type != ACL_LINK)
6806 		goto free_skb;
6807 
6808 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6809 					ACL_LINK);
6810 	if (!chan)
6811 		goto free_skb;
6812 
6813 	BT_DBG("chan %p, len %d", chan, skb->len);
6814 
6815 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6816 		goto drop;
6817 
6818 	if (chan->imtu < skb->len)
6819 		goto drop;
6820 
6821 	/* Store remote BD_ADDR and PSM for msg_name */
6822 	bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
6823 	bt_cb(skb)->psm = psm;
6824 
6825 	if (!chan->ops->recv(chan, skb)) {
6826 		l2cap_chan_put(chan);
6827 		return;
6828 	}
6829 
6830 drop:
6831 	l2cap_chan_put(chan);
6832 free_skb:
6833 	kfree_skb(skb);
6834 }
6835 
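/* Editor's note: l2cap_recv_frame() below demultiplexes a complete L2CAP
 * frame by CID.  Signalling traffic (L2CAP_CID_SIGNALING on BR/EDR,
 * L2CAP_CID_LE_SIGNALING on LE) and connectionless data
 * (L2CAP_CID_CONN_LESS, whose payload is prefixed with a PSM) go to
 * dedicated helpers; everything else is treated as a dynamically allocated
 * or fixed channel and routed via l2cap_data_channel().  Frames arriving
 * before the HCI link reaches BT_CONNECTED are queued on conn->pending_rx
 * and replayed later by process_pending_rx().
 */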
6836 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6837 {
6838 	struct l2cap_hdr *lh = (void *) skb->data;
6839 	struct hci_conn *hcon = conn->hcon;
6840 	u16 cid, len;
6841 	__le16 psm;
6842 
6843 	if (hcon->state != BT_CONNECTED) {
6844 		BT_DBG("queueing pending rx skb");
6845 		skb_queue_tail(&conn->pending_rx, skb);
6846 		return;
6847 	}
6848 
6849 	skb_pull(skb, L2CAP_HDR_SIZE);
6850 	cid = __le16_to_cpu(lh->cid);
6851 	len = __le16_to_cpu(lh->len);
6852 
6853 	if (len != skb->len) {
6854 		kfree_skb(skb);
6855 		return;
6856 	}
6857 
6858 	/* Since we can't actively block incoming LE connections we must
6859 	 * at least ensure that we ignore incoming data from them.
6860 	 */
6861 	if (hcon->type == LE_LINK &&
6862 	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6863 				   bdaddr_type(hcon, hcon->dst_type))) {
6864 		kfree_skb(skb);
6865 		return;
6866 	}
6867 
6868 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
6869 
6870 	switch (cid) {
6871 	case L2CAP_CID_SIGNALING:
6872 		l2cap_sig_channel(conn, skb);
6873 		break;
6874 
6875 	case L2CAP_CID_CONN_LESS:
6876 		psm = get_unaligned((__le16 *) skb->data);
6877 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
6878 		l2cap_conless_channel(conn, psm, skb);
6879 		break;
6880 
6881 	case L2CAP_CID_LE_SIGNALING:
6882 		l2cap_le_sig_channel(conn, skb);
6883 		break;
6884 
6885 	default:
6886 		l2cap_data_channel(conn, cid, skb);
6887 		break;
6888 	}
6889 }
6890 
6891 static void process_pending_rx(struct work_struct *work)
6892 {
6893 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6894 					       pending_rx_work);
6895 	struct sk_buff *skb;
6896 
6897 	BT_DBG("");
6898 
6899 	while ((skb = skb_dequeue(&conn->pending_rx)))
6900 		l2cap_recv_frame(conn, skb);
6901 }
6902 
6903 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6904 {
6905 	struct l2cap_conn *conn = hcon->l2cap_data;
6906 	struct hci_chan *hchan;
6907 
6908 	if (conn)
6909 		return conn;
6910 
6911 	hchan = hci_chan_create(hcon);
6912 	if (!hchan)
6913 		return NULL;
6914 
6915 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
6916 	if (!conn) {
6917 		hci_chan_del(hchan);
6918 		return NULL;
6919 	}
6920 
6921 	kref_init(&conn->ref);
6922 	hcon->l2cap_data = conn;
6923 	conn->hcon = hci_conn_get(hcon);
6924 	conn->hchan = hchan;
6925 
6926 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6927 
6928 	switch (hcon->type) {
6929 	case LE_LINK:
6930 		if (hcon->hdev->le_mtu) {
6931 			conn->mtu = hcon->hdev->le_mtu;
6932 			break;
6933 		}
6934 		/* fall through */
6935 	default:
6936 		conn->mtu = hcon->hdev->acl_mtu;
6937 		break;
6938 	}
6939 
6940 	conn->feat_mask = 0;
6941 
6942 	if (hcon->type == ACL_LINK)
6943 		conn->hs_enabled = test_bit(HCI_HS_ENABLED,
6944 					    &hcon->hdev->dev_flags);
6945 
6946 	mutex_init(&conn->ident_lock);
6947 	mutex_init(&conn->chan_lock);
6948 
6949 	INIT_LIST_HEAD(&conn->chan_l);
6950 	INIT_LIST_HEAD(&conn->users);
6951 
6952 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
6953 
6954 	skb_queue_head_init(&conn->pending_rx);
6955 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
6956 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
6957 
6958 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
6959 
6960 	return conn;
6961 }
6962 
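/* Editor's note, with examples for the check below: on LE links any
 * non-zero PSM up to 0x00ff is accepted, while on BR/EDR the PSM must be
 * odd with the least significant bit of the upper byte clear; e.g. 0x0001
 * and 0x1003 pass, 0x0002 and 0x0101 do not.
 */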
6963 static bool is_valid_psm(u16 psm, u8 dst_type) {
6964 	if (!psm)
6965 		return false;
6966 
6967 	if (bdaddr_type_is_le(dst_type))
6968 		return (psm <= 0x00ff);
6969 
6970 	/* PSM must be odd and lsb of upper byte must be 0 */
6971 	return ((psm & 0x0101) == 0x0001);
6972 }
6973 
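/* Editor's note: l2cap_chan_connect() is the exported entry point used by
 * the socket layer and in-kernel users to initiate an outgoing channel.
 * A minimal, illustrative sketch only (error handling omitted; the
 * callback struct name is hypothetical and the setup is not taken from
 * this file):
 *
 *	chan = l2cap_chan_create();
 *	chan->mode = L2CAP_MODE_LE_FLOWCTL;
 *	chan->ops  = &my_chan_ops;
 *	err = l2cap_chan_connect(chan, cpu_to_le16(psm), 0,
 *				 &peer_bdaddr, BDADDR_LE_PUBLIC);
 *
 * The cid argument matters for fixed channels; PSM-based connections
 * typically pass 0, as the checks at the top of the function show.
 */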
6974 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
6975 		       bdaddr_t *dst, u8 dst_type)
6976 {
6977 	struct l2cap_conn *conn;
6978 	struct hci_conn *hcon;
6979 	struct hci_dev *hdev;
6980 	int err;
6981 
6982 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
6983 	       dst_type, __le16_to_cpu(psm));
6984 
6985 	hdev = hci_get_route(dst, &chan->src);
6986 	if (!hdev)
6987 		return -EHOSTUNREACH;
6988 
6989 	hci_dev_lock(hdev);
6990 
6991 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
6992 	    chan->chan_type != L2CAP_CHAN_RAW) {
6993 		err = -EINVAL;
6994 		goto done;
6995 	}
6996 
6997 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
6998 		err = -EINVAL;
6999 		goto done;
7000 	}
7001 
7002 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7003 		err = -EINVAL;
7004 		goto done;
7005 	}
7006 
7007 	switch (chan->mode) {
7008 	case L2CAP_MODE_BASIC:
7009 		break;
7010 	case L2CAP_MODE_LE_FLOWCTL:
7011 		l2cap_le_flowctl_init(chan);
7012 		break;
7013 	case L2CAP_MODE_ERTM:
7014 	case L2CAP_MODE_STREAMING:
7015 		if (!disable_ertm)
7016 			break;
7017 		/* fall through */
7018 	default:
7019 		err = -EOPNOTSUPP;
7020 		goto done;
7021 	}
7022 
7023 	switch (chan->state) {
7024 	case BT_CONNECT:
7025 	case BT_CONNECT2:
7026 	case BT_CONFIG:
7027 		/* Already connecting */
7028 		err = 0;
7029 		goto done;
7030 
7031 	case BT_CONNECTED:
7032 		/* Already connected */
7033 		err = -EISCONN;
7034 		goto done;
7035 
7036 	case BT_OPEN:
7037 	case BT_BOUND:
7038 		/* Can connect */
7039 		break;
7040 
7041 	default:
7042 		err = -EBADFD;
7043 		goto done;
7044 	}
7045 
7046 	/* Set destination address and psm */
7047 	bacpy(&chan->dst, dst);
7048 	chan->dst_type = dst_type;
7049 
7050 	chan->psm = psm;
7051 	chan->dcid = cid;
7052 
7053 	if (bdaddr_type_is_le(dst_type)) {
7054 		u8 role;
7055 
7056 		/* Convert from L2CAP channel address type to HCI address type
7057 		 */
7058 		if (dst_type == BDADDR_LE_PUBLIC)
7059 			dst_type = ADDR_LE_DEV_PUBLIC;
7060 		else
7061 			dst_type = ADDR_LE_DEV_RANDOM;
7062 
7063 		if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7064 			role = HCI_ROLE_SLAVE;
7065 		else
7066 			role = HCI_ROLE_MASTER;
7067 
7068 		hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
7069 				      HCI_LE_CONN_TIMEOUT, role);
7070 	} else {
7071 		u8 auth_type = l2cap_get_auth_type(chan);
7072 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7073 	}
7074 
7075 	if (IS_ERR(hcon)) {
7076 		err = PTR_ERR(hcon);
7077 		goto done;
7078 	}
7079 
7080 	conn = l2cap_conn_add(hcon);
7081 	if (!conn) {
7082 		hci_conn_drop(hcon);
7083 		err = -ENOMEM;
7084 		goto done;
7085 	}
7086 
7087 	mutex_lock(&conn->chan_lock);
7088 	l2cap_chan_lock(chan);
7089 
7090 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7091 		hci_conn_drop(hcon);
7092 		err = -EBUSY;
7093 		goto chan_unlock;
7094 	}
7095 
7096 	/* Update source addr of the socket */
7097 	bacpy(&chan->src, &hcon->src);
7098 	chan->src_type = bdaddr_type(hcon, hcon->src_type);
7099 
7100 	__l2cap_chan_add(conn, chan);
7101 
7102 	/* l2cap_chan_add takes its own ref so we can drop this one */
7103 	hci_conn_drop(hcon);
7104 
7105 	l2cap_state_change(chan, BT_CONNECT);
7106 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7107 
7108 	/* Release chan->sport so that it can be reused by other
7109 	 * sockets (as it's only used for listening sockets).
7110 	 */
7111 	write_lock(&chan_list_lock);
7112 	chan->sport = 0;
7113 	write_unlock(&chan_list_lock);
7114 
7115 	if (hcon->state == BT_CONNECTED) {
7116 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7117 			__clear_chan_timer(chan);
7118 			if (l2cap_chan_check_security(chan, true))
7119 				l2cap_state_change(chan, BT_CONNECTED);
7120 		} else
7121 			l2cap_do_start(chan);
7122 	}
7123 
7124 	err = 0;
7125 
7126 chan_unlock:
7127 	l2cap_chan_unlock(chan);
7128 	mutex_unlock(&conn->chan_lock);
7129 done:
7130 	hci_dev_unlock(hdev);
7131 	hci_dev_put(hdev);
7132 	return err;
7133 }
7134 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7135 
7136 /* ---- L2CAP interface with lower layer (HCI) ---- */
7137 
7138 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7139 {
7140 	int exact = 0, lm1 = 0, lm2 = 0;
7141 	struct l2cap_chan *c;
7142 
7143 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7144 
7145 	/* Find listening sockets and check their link_mode */
7146 	read_lock(&chan_list_lock);
7147 	list_for_each_entry(c, &chan_list, global_l) {
7148 		if (c->state != BT_LISTEN)
7149 			continue;
7150 
7151 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7152 			lm1 |= HCI_LM_ACCEPT;
7153 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7154 				lm1 |= HCI_LM_MASTER;
7155 			exact++;
7156 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7157 			lm2 |= HCI_LM_ACCEPT;
7158 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7159 				lm2 |= HCI_LM_MASTER;
7160 		}
7161 	}
7162 	read_unlock(&chan_list_lock);
7163 
7164 	return exact ? lm1 : lm2;
7165 }
7166 
7167 /* Find the next fixed channel in BT_LISTEN state, continuing iteration
7168  * either from an existing channel in the list or from the beginning of
7169  * the global list (when NULL is passed as the first parameter).
7170  */
7171 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7172 						  bdaddr_t *src, u8 link_type)
7173 {
7174 	read_lock(&chan_list_lock);
7175 
7176 	if (c)
7177 		c = list_next_entry(c, global_l);
7178 	else
7179 		c = list_entry(chan_list.next, typeof(*c), global_l);
7180 
7181 	list_for_each_entry_from(c, &chan_list, global_l) {
7182 		if (c->chan_type != L2CAP_CHAN_FIXED)
7183 			continue;
7184 		if (c->state != BT_LISTEN)
7185 			continue;
7186 		if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
7187 			continue;
7188 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
7189 			continue;
7190 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
7191 			continue;
7192 
7193 		l2cap_chan_hold(c);
7194 		read_unlock(&chan_list_lock);
7195 		return c;
7196 	}
7197 
7198 	read_unlock(&chan_list_lock);
7199 
7200 	return NULL;
7201 }
7202 
7203 void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7204 {
7205 	struct hci_dev *hdev = hcon->hdev;
7206 	struct l2cap_conn *conn;
7207 	struct l2cap_chan *pchan;
7208 	u8 dst_type;
7209 
7210 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7211 
7212 	if (status) {
7213 		l2cap_conn_del(hcon, bt_to_errno(status));
7214 		return;
7215 	}
7216 
7217 	conn = l2cap_conn_add(hcon);
7218 	if (!conn)
7219 		return;
7220 
7221 	dst_type = bdaddr_type(hcon, hcon->dst_type);
7222 
7223 	/* If device is blocked, do not create channels for it */
7224 	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7225 		return;
7226 
7227 	/* Find fixed channels and notify them of the new connection. We
7228 	 * use multiple individual lookups, continuing each time where
7229 	 * we left off, because the list lock would prevent calling the
7230 	 * potentially sleeping l2cap_chan_lock() function.
7231 	 */
7232 	pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
7233 	while (pchan) {
7234 		struct l2cap_chan *chan, *next;
7235 
7236 		/* Client fixed channels should override server ones */
7237 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7238 			goto next;
7239 
7240 		l2cap_chan_lock(pchan);
7241 		chan = pchan->ops->new_connection(pchan);
7242 		if (chan) {
7243 			bacpy(&chan->src, &hcon->src);
7244 			bacpy(&chan->dst, &hcon->dst);
7245 			chan->src_type = bdaddr_type(hcon, hcon->src_type);
7246 			chan->dst_type = dst_type;
7247 
7248 			__l2cap_chan_add(conn, chan);
7249 		}
7250 
7251 		l2cap_chan_unlock(pchan);
7252 next:
7253 		next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
7254 					       hcon->type);
7255 		l2cap_chan_put(pchan);
7256 		pchan = next;
7257 	}
7258 
7259 	l2cap_conn_ready(conn);
7260 }
7261 
7262 int l2cap_disconn_ind(struct hci_conn *hcon)
7263 {
7264 	struct l2cap_conn *conn = hcon->l2cap_data;
7265 
7266 	BT_DBG("hcon %p", hcon);
7267 
7268 	if (!conn)
7269 		return HCI_ERROR_REMOTE_USER_TERM;
7270 	return conn->disc_reason;
7271 }
7272 
7273 void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7274 {
7275 	BT_DBG("hcon %p reason %d", hcon, reason);
7276 
7277 	l2cap_conn_del(hcon, bt_to_errno(reason));
7278 }
7279 
7280 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7281 {
7282 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7283 		return;
7284 
7285 	if (encrypt == 0x00) {
7286 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7287 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7288 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7289 			   chan->sec_level == BT_SECURITY_FIPS)
7290 			l2cap_chan_close(chan, ECONNREFUSED);
7291 	} else {
7292 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7293 			__clear_chan_timer(chan);
7294 	}
7295 }
7296 
7297 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7298 {
7299 	struct l2cap_conn *conn = hcon->l2cap_data;
7300 	struct l2cap_chan *chan;
7301 
7302 	if (!conn)
7303 		return 0;
7304 
7305 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7306 
7307 	mutex_lock(&conn->chan_lock);
7308 
7309 	list_for_each_entry(chan, &conn->chan_l, list) {
7310 		l2cap_chan_lock(chan);
7311 
7312 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7313 		       state_to_string(chan->state));
7314 
7315 		if (chan->scid == L2CAP_CID_A2MP) {
7316 			l2cap_chan_unlock(chan);
7317 			continue;
7318 		}
7319 
7320 		if (!status && encrypt)
7321 			chan->sec_level = hcon->sec_level;
7322 
7323 		if (!__l2cap_no_conn_pending(chan)) {
7324 			l2cap_chan_unlock(chan);
7325 			continue;
7326 		}
7327 
7328 		if (!status && (chan->state == BT_CONNECTED ||
7329 				chan->state == BT_CONFIG)) {
7330 			chan->ops->resume(chan);
7331 			l2cap_check_encryption(chan, encrypt);
7332 			l2cap_chan_unlock(chan);
7333 			continue;
7334 		}
7335 
7336 		if (chan->state == BT_CONNECT) {
7337 			if (!status)
7338 				l2cap_start_connection(chan);
7339 			else
7340 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7341 		} else if (chan->state == BT_CONNECT2) {
7342 			struct l2cap_conn_rsp rsp;
7343 			__u16 res, stat;
7344 
7345 			if (!status) {
7346 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7347 					res = L2CAP_CR_PEND;
7348 					stat = L2CAP_CS_AUTHOR_PEND;
7349 					chan->ops->defer(chan);
7350 				} else {
7351 					l2cap_state_change(chan, BT_CONFIG);
7352 					res = L2CAP_CR_SUCCESS;
7353 					stat = L2CAP_CS_NO_INFO;
7354 				}
7355 			} else {
7356 				l2cap_state_change(chan, BT_DISCONN);
7357 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7358 				res = L2CAP_CR_SEC_BLOCK;
7359 				stat = L2CAP_CS_NO_INFO;
7360 			}
7361 
7362 			rsp.scid   = cpu_to_le16(chan->dcid);
7363 			rsp.dcid   = cpu_to_le16(chan->scid);
7364 			rsp.result = cpu_to_le16(res);
7365 			rsp.status = cpu_to_le16(stat);
7366 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7367 				       sizeof(rsp), &rsp);
7368 
7369 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7370 			    res == L2CAP_CR_SUCCESS) {
7371 				char buf[128];
7372 				set_bit(CONF_REQ_SENT, &chan->conf_state);
7373 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
7374 					       L2CAP_CONF_REQ,
7375 					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
7376 					       buf);
7377 				chan->num_conf_req++;
7378 			}
7379 		}
7380 
7381 		l2cap_chan_unlock(chan);
7382 	}
7383 
7384 	mutex_unlock(&conn->chan_lock);
7385 
7386 	return 0;
7387 }
7388 
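/* Editor's note: a short example of the ACL reassembly handled below
 * (sizes are illustrative).  Suppose a 400 byte L2CAP frame (hdr->len 396
 * plus the 4 byte basic header) arrives over a link that delivers 192 byte
 * fragments: the ACL_START fragment allocates conn->rx_skb for the full
 * 400 bytes, copies the first 192 and leaves rx_len = 208; two ACL_CONT
 * fragments of 192 and 16 bytes then fill it, and once rx_len reaches 0
 * the complete frame is handed to l2cap_recv_frame().  Any length mismatch
 * resets the partial frame and marks the connection unreliable via
 * l2cap_conn_unreliable(ECOMM).
 */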
7389 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7390 {
7391 	struct l2cap_conn *conn = hcon->l2cap_data;
7392 	struct l2cap_hdr *hdr;
7393 	int len;
7394 
7395 	/* For an AMP controller, do not create an l2cap conn */
7396 	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7397 		goto drop;
7398 
7399 	if (!conn)
7400 		conn = l2cap_conn_add(hcon);
7401 
7402 	if (!conn)
7403 		goto drop;
7404 
7405 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7406 
7407 	switch (flags) {
7408 	case ACL_START:
7409 	case ACL_START_NO_FLUSH:
7410 	case ACL_COMPLETE:
7411 		if (conn->rx_len) {
7412 			BT_ERR("Unexpected start frame (len %d)", skb->len);
7413 			kfree_skb(conn->rx_skb);
7414 			conn->rx_skb = NULL;
7415 			conn->rx_len = 0;
7416 			l2cap_conn_unreliable(conn, ECOMM);
7417 		}
7418 
7419 		/* A start fragment always begins with the Basic L2CAP header */
7420 		if (skb->len < L2CAP_HDR_SIZE) {
7421 			BT_ERR("Frame is too short (len %d)", skb->len);
7422 			l2cap_conn_unreliable(conn, ECOMM);
7423 			goto drop;
7424 		}
7425 
7426 		hdr = (struct l2cap_hdr *) skb->data;
7427 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7428 
7429 		if (len == skb->len) {
7430 			/* Complete frame received */
7431 			l2cap_recv_frame(conn, skb);
7432 			return 0;
7433 		}
7434 
7435 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7436 
7437 		if (skb->len > len) {
7438 			BT_ERR("Frame is too long (len %d, expected len %d)",
7439 			       skb->len, len);
7440 			l2cap_conn_unreliable(conn, ECOMM);
7441 			goto drop;
7442 		}
7443 
7444 		/* Allocate skb for the complete frame (with header) */
7445 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7446 		if (!conn->rx_skb)
7447 			goto drop;
7448 
7449 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7450 					  skb->len);
7451 		conn->rx_len = len - skb->len;
7452 		break;
7453 
7454 	case ACL_CONT:
7455 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7456 
7457 		if (!conn->rx_len) {
7458 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7459 			l2cap_conn_unreliable(conn, ECOMM);
7460 			goto drop;
7461 		}
7462 
7463 		if (skb->len > conn->rx_len) {
7464 			BT_ERR("Fragment is too long (len %d, expected %d)",
7465 			       skb->len, conn->rx_len);
7466 			kfree_skb(conn->rx_skb);
7467 			conn->rx_skb = NULL;
7468 			conn->rx_len = 0;
7469 			l2cap_conn_unreliable(conn, ECOMM);
7470 			goto drop;
7471 		}
7472 
7473 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7474 					  skb->len);
7475 		conn->rx_len -= skb->len;
7476 
7477 		if (!conn->rx_len) {
7478 			/* Complete frame received. l2cap_recv_frame
7479 			 * takes ownership of the skb, so clear the
7480 			 * connection's rx_skb pointer first.
7481 			 */
7482 			struct sk_buff *rx_skb = conn->rx_skb;
7483 			conn->rx_skb = NULL;
7484 			l2cap_recv_frame(conn, rx_skb);
7485 		}
7486 		break;
7487 	}
7488 
7489 drop:
7490 	kfree_skb(skb);
7491 	return 0;
7492 }
7493 
7494 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7495 {
7496 	struct l2cap_chan *c;
7497 
7498 	read_lock(&chan_list_lock);
7499 
7500 	list_for_each_entry(c, &chan_list, global_l) {
7501 		seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7502 			   &c->src, &c->dst,
7503 			   c->state, __le16_to_cpu(c->psm),
7504 			   c->scid, c->dcid, c->imtu, c->omtu,
7505 			   c->sec_level, c->mode);
7506 	}
7507 
7508 	read_unlock(&chan_list_lock);
7509 
7510 	return 0;
7511 }
7512 
7513 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7514 {
7515 	return single_open(file, l2cap_debugfs_show, inode->i_private);
7516 }
7517 
7518 static const struct file_operations l2cap_debugfs_fops = {
7519 	.open		= l2cap_debugfs_open,
7520 	.read		= seq_read,
7521 	.llseek		= seq_lseek,
7522 	.release	= single_release,
7523 };
7524 
7525 static struct dentry *l2cap_debugfs;
7526 
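/* Editor's note: besides the channel list dump exposed through the "l2cap"
 * debugfs file, l2cap_init() below registers le_max_credits and
 * le_default_mps as writable u16 debugfs entries ("l2cap_le_max_credits"
 * and "l2cap_le_default_mps"), so the LE flow control defaults can be
 * tuned at runtime (typically under /sys/kernel/debug/bluetooth/ when
 * debugfs is mounted there; the mount path is an assumption, not taken
 * from this file).
 */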
7527 int __init l2cap_init(void)
7528 {
7529 	int err;
7530 
7531 	err = l2cap_init_sockets();
7532 	if (err < 0)
7533 		return err;
7534 
7535 	if (IS_ERR_OR_NULL(bt_debugfs))
7536 		return 0;
7537 
7538 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7539 					    NULL, &l2cap_debugfs_fops);
7540 
7541 	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7542 			   &le_max_credits);
7543 	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7544 			   &le_default_mps);
7545 
7546 	return 0;
7547 }
7548 
7549 void l2cap_exit(void)
7550 {
7551 	debugfs_remove(l2cap_debugfs);
7552 	l2cap_cleanup_sockets();
7553 }
7554 
7555 module_param(disable_ertm, bool, 0644);
7556 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
7557