1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 
49 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
50 
51 static LIST_HEAD(chan_list);
52 static DEFINE_RWLOCK(chan_list_lock);
53 
54 static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55 static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
56 
57 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
58 				       u8 code, u8 ident, u16 dlen, void *data);
59 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
60 			   void *data);
61 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
62 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
63 
64 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
65 		     struct sk_buff_head *skbs, u8 event);
66 
67 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
68 {
69 	if (link_type == LE_LINK) {
70 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
71 			return BDADDR_LE_PUBLIC;
72 		else
73 			return BDADDR_LE_RANDOM;
74 	}
75 
76 	return BDADDR_BREDR;
77 }
78 
79 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
80 {
81 	return bdaddr_type(hcon->type, hcon->src_type);
82 }
83 
84 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
85 {
86 	return bdaddr_type(hcon->type, hcon->dst_type);
87 }
88 
89 /* ---- L2CAP channels ---- */
90 
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
92 						   u16 cid)
93 {
94 	struct l2cap_chan *c;
95 
96 	list_for_each_entry(c, &conn->chan_l, list) {
97 		if (c->dcid == cid)
98 			return c;
99 	}
100 	return NULL;
101 }
102 
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
104 						   u16 cid)
105 {
106 	struct l2cap_chan *c;
107 
108 	list_for_each_entry(c, &conn->chan_l, list) {
109 		if (c->scid == cid)
110 			return c;
111 	}
112 	return NULL;
113 }
114 
115 /* Find channel with given SCID.
116  * Returns locked channel. */
117 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
118 						 u16 cid)
119 {
120 	struct l2cap_chan *c;
121 
122 	mutex_lock(&conn->chan_lock);
123 	c = __l2cap_get_chan_by_scid(conn, cid);
124 	if (c)
125 		l2cap_chan_lock(c);
126 	mutex_unlock(&conn->chan_lock);
127 
128 	return c;
129 }
130 
131 /* Find channel with given DCID.
132  * Returns locked channel.
133  */
134 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
135 						 u16 cid)
136 {
137 	struct l2cap_chan *c;
138 
139 	mutex_lock(&conn->chan_lock);
140 	c = __l2cap_get_chan_by_dcid(conn, cid);
141 	if (c)
142 		l2cap_chan_lock(c);
143 	mutex_unlock(&conn->chan_lock);
144 
145 	return c;
146 }
147 
148 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
149 						    u8 ident)
150 {
151 	struct l2cap_chan *c;
152 
153 	list_for_each_entry(c, &conn->chan_l, list) {
154 		if (c->ident == ident)
155 			return c;
156 	}
157 	return NULL;
158 }
159 
160 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
161 						  u8 ident)
162 {
163 	struct l2cap_chan *c;
164 
165 	mutex_lock(&conn->chan_lock);
166 	c = __l2cap_get_chan_by_ident(conn, ident);
167 	if (c)
168 		l2cap_chan_lock(c);
169 	mutex_unlock(&conn->chan_lock);
170 
171 	return c;
172 }
173 
174 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
175 {
176 	struct l2cap_chan *c;
177 
178 	list_for_each_entry(c, &chan_list, global_l) {
179 		if (c->sport == psm && !bacmp(&c->src, src))
180 			return c;
181 	}
182 	return NULL;
183 }
184 
185 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
186 {
187 	int err;
188 
189 	write_lock(&chan_list_lock);
190 
191 	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
192 		err = -EADDRINUSE;
193 		goto done;
194 	}
195 
196 	if (psm) {
197 		chan->psm = psm;
198 		chan->sport = psm;
199 		err = 0;
200 	} else {
201 		u16 p, start, end, incr;
202 
203 		if (chan->src_type == BDADDR_BREDR) {
204 			start = L2CAP_PSM_DYN_START;
205 			end = L2CAP_PSM_AUTO_END;
206 			incr = 2;
207 		} else {
208 			start = L2CAP_PSM_LE_DYN_START;
209 			end = L2CAP_PSM_LE_DYN_END;
210 			incr = 1;
211 		}
212 
213 		err = -EINVAL;
214 		for (p = start; p <= end; p += incr)
215 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
216 				chan->psm   = cpu_to_le16(p);
217 				chan->sport = cpu_to_le16(p);
218 				err = 0;
219 				break;
220 			}
221 	}
222 
223 done:
224 	write_unlock(&chan_list_lock);
225 	return err;
226 }
227 EXPORT_SYMBOL_GPL(l2cap_add_psm);
228 
229 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
230 {
231 	write_lock(&chan_list_lock);
232 
233 	/* Override the defaults (which are for conn-oriented) */
234 	chan->omtu = L2CAP_DEFAULT_MTU;
235 	chan->chan_type = L2CAP_CHAN_FIXED;
236 
237 	chan->scid = scid;
238 
239 	write_unlock(&chan_list_lock);
240 
241 	return 0;
242 }
243 
244 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
245 {
246 	u16 cid, dyn_end;
247 
248 	if (conn->hcon->type == LE_LINK)
249 		dyn_end = L2CAP_CID_LE_DYN_END;
250 	else
251 		dyn_end = L2CAP_CID_DYN_END;
252 
253 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
254 		if (!__l2cap_get_chan_by_scid(conn, cid))
255 			return cid;
256 	}
257 
258 	return 0;
259 }
260 
261 static void l2cap_state_change(struct l2cap_chan *chan, int state)
262 {
263 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
264 	       state_to_string(state));
265 
266 	chan->state = state;
267 	chan->ops->state_change(chan, state, 0);
268 }
269 
270 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
271 						int state, int err)
272 {
273 	chan->state = state;
274 	chan->ops->state_change(chan, chan->state, err);
275 }
276 
277 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
278 {
279 	chan->ops->state_change(chan, chan->state, err);
280 }
281 
282 static void __set_retrans_timer(struct l2cap_chan *chan)
283 {
284 	if (!delayed_work_pending(&chan->monitor_timer) &&
285 	    chan->retrans_timeout) {
286 		l2cap_set_timer(chan, &chan->retrans_timer,
287 				msecs_to_jiffies(chan->retrans_timeout));
288 	}
289 }
290 
291 static void __set_monitor_timer(struct l2cap_chan *chan)
292 {
293 	__clear_retrans_timer(chan);
294 	if (chan->monitor_timeout) {
295 		l2cap_set_timer(chan, &chan->monitor_timer,
296 				msecs_to_jiffies(chan->monitor_timeout));
297 	}
298 }
299 
300 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
301 					       u16 seq)
302 {
303 	struct sk_buff *skb;
304 
305 	skb_queue_walk(head, skb) {
306 		if (bt_cb(skb)->l2cap.txseq == seq)
307 			return skb;
308 	}
309 
310 	return NULL;
311 }
312 
313 /* ---- L2CAP sequence number lists ---- */
314 
315 /* For ERTM, ordered lists of sequence numbers must be tracked for
316  * SREJ requests that are received and for frames that are to be
317  * retransmitted. These seq_list functions implement a singly-linked
318  * list in an array, where membership in the list can also be checked
319  * in constant time. Items can also be added to the tail of the list
320  * and removed from the head in constant time, without further memory
321  * allocs or frees.
322  */
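/* Illustrative walk-through (added for clarity, not part of the original
 * source): assuming a list allocated so that mask == 0x07, appending seq 3
 * to an empty list sets head = tail = 3 and list[3] = L2CAP_SEQ_LIST_TAIL.
 * Appending seq 9 then sets list[3] = 9, tail = 9 and
 * list[9 & 0x07] = L2CAP_SEQ_LIST_TAIL. A later pop returns 3, advances
 * head to list[3] = 9 and clears the old slot, all without any allocation.
 */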
323 
324 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
325 {
326 	size_t alloc_size, i;
327 
328 	/* Allocated size is a power of 2 to map sequence numbers
329 	 * (which may be up to 14 bits) into a smaller array that is
330 	 * sized for the negotiated ERTM transmit windows.
331 	 */
332 	alloc_size = roundup_pow_of_two(size);
333 
334 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
335 	if (!seq_list->list)
336 		return -ENOMEM;
337 
338 	seq_list->mask = alloc_size - 1;
339 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 	for (i = 0; i < alloc_size; i++)
342 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
343 
344 	return 0;
345 }
346 
347 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
348 {
349 	kfree(seq_list->list);
350 }
351 
352 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
353 					   u16 seq)
354 {
355 	/* Constant-time check for list membership */
356 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
357 }
358 
359 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360 {
361 	u16 seq = seq_list->head;
362 	u16 mask = seq_list->mask;
363 
364 	seq_list->head = seq_list->list[seq & mask];
365 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
366 
367 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
368 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
369 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
370 	}
371 
372 	return seq;
373 }
374 
375 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
376 {
377 	u16 i;
378 
379 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
380 		return;
381 
382 	for (i = 0; i <= seq_list->mask; i++)
383 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
384 
385 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
386 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
387 }
388 
389 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
390 {
391 	u16 mask = seq_list->mask;
392 
393 	/* All appends happen in constant time */
394 
395 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
396 		return;
397 
398 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
399 		seq_list->head = seq;
400 	else
401 		seq_list->list[seq_list->tail & mask] = seq;
402 
403 	seq_list->tail = seq;
404 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
405 }
406 
407 static void l2cap_chan_timeout(struct work_struct *work)
408 {
409 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
410 					       chan_timer.work);
411 	struct l2cap_conn *conn = chan->conn;
412 	int reason;
413 
414 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
415 
416 	mutex_lock(&conn->chan_lock);
417 	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
418 	 * this work. No need to call l2cap_chan_hold(chan) here again.
419 	 */
420 	l2cap_chan_lock(chan);
421 
422 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
423 		reason = ECONNREFUSED;
424 	else if (chan->state == BT_CONNECT &&
425 		 chan->sec_level != BT_SECURITY_SDP)
426 		reason = ECONNREFUSED;
427 	else
428 		reason = ETIMEDOUT;
429 
430 	l2cap_chan_close(chan, reason);
431 
432 	chan->ops->close(chan);
433 
434 	l2cap_chan_unlock(chan);
435 	l2cap_chan_put(chan);
436 
437 	mutex_unlock(&conn->chan_lock);
438 }
439 
440 struct l2cap_chan *l2cap_chan_create(void)
441 {
442 	struct l2cap_chan *chan;
443 
444 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
445 	if (!chan)
446 		return NULL;
447 
448 	mutex_init(&chan->lock);
449 
450 	/* Set default lock nesting level */
451 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
452 
453 	write_lock(&chan_list_lock);
454 	list_add(&chan->global_l, &chan_list);
455 	write_unlock(&chan_list_lock);
456 
457 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
458 
459 	chan->state = BT_OPEN;
460 
461 	kref_init(&chan->kref);
462 
463 	/* This flag is cleared in l2cap_chan_ready() */
464 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
465 
466 	BT_DBG("chan %p", chan);
467 
468 	return chan;
469 }
470 EXPORT_SYMBOL_GPL(l2cap_chan_create);
471 
472 static void l2cap_chan_destroy(struct kref *kref)
473 {
474 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
475 
476 	BT_DBG("chan %p", chan);
477 
478 	write_lock(&chan_list_lock);
479 	list_del(&chan->global_l);
480 	write_unlock(&chan_list_lock);
481 
482 	kfree(chan);
483 }
484 
485 void l2cap_chan_hold(struct l2cap_chan *c)
486 {
487 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
488 
489 	kref_get(&c->kref);
490 }
491 
492 void l2cap_chan_put(struct l2cap_chan *c)
493 {
494 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
495 
496 	kref_put(&c->kref, l2cap_chan_destroy);
497 }
498 EXPORT_SYMBOL_GPL(l2cap_chan_put);
499 
500 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
501 {
502 	chan->fcs  = L2CAP_FCS_CRC16;
503 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
504 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
505 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
506 	chan->remote_max_tx = chan->max_tx;
507 	chan->remote_tx_win = chan->tx_win;
508 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
509 	chan->sec_level = BT_SECURITY_LOW;
510 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
511 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
512 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
513 	chan->conf_state = 0;
514 
515 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
516 }
517 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
518 
519 static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
520 {
521 	chan->sdu = NULL;
522 	chan->sdu_last_frag = NULL;
523 	chan->sdu_len = 0;
524 	chan->tx_credits = 0;
525 	chan->rx_credits = le_max_credits;
526 	chan->mps = min_t(u16, chan->imtu, le_default_mps);
527 
528 	skb_queue_head_init(&chan->tx_q);
529 }
530 
531 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
532 {
533 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
534 	       __le16_to_cpu(chan->psm), chan->dcid);
535 
536 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
537 
538 	chan->conn = conn;
539 
540 	switch (chan->chan_type) {
541 	case L2CAP_CHAN_CONN_ORIENTED:
542 		/* Alloc CID for connection-oriented socket */
543 		chan->scid = l2cap_alloc_cid(conn);
544 		if (conn->hcon->type == ACL_LINK)
545 			chan->omtu = L2CAP_DEFAULT_MTU;
546 		break;
547 
548 	case L2CAP_CHAN_CONN_LESS:
549 		/* Connectionless socket */
550 		chan->scid = L2CAP_CID_CONN_LESS;
551 		chan->dcid = L2CAP_CID_CONN_LESS;
552 		chan->omtu = L2CAP_DEFAULT_MTU;
553 		break;
554 
555 	case L2CAP_CHAN_FIXED:
556 		/* Caller will set CID and CID specific MTU values */
557 		break;
558 
559 	default:
560 		/* Raw socket can send/recv signalling messages only */
561 		chan->scid = L2CAP_CID_SIGNALING;
562 		chan->dcid = L2CAP_CID_SIGNALING;
563 		chan->omtu = L2CAP_DEFAULT_MTU;
564 	}
565 
566 	chan->local_id		= L2CAP_BESTEFFORT_ID;
567 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
568 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
569 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
570 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
571 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
572 
573 	l2cap_chan_hold(chan);
574 
575 	/* Only keep a reference for fixed channels if they requested it */
576 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
577 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
578 		hci_conn_hold(conn->hcon);
579 
580 	list_add(&chan->list, &conn->chan_l);
581 }
582 
583 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
584 {
585 	mutex_lock(&conn->chan_lock);
586 	__l2cap_chan_add(conn, chan);
587 	mutex_unlock(&conn->chan_lock);
588 }
589 
590 void l2cap_chan_del(struct l2cap_chan *chan, int err)
591 {
592 	struct l2cap_conn *conn = chan->conn;
593 
594 	__clear_chan_timer(chan);
595 
596 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
597 	       state_to_string(chan->state));
598 
599 	chan->ops->teardown(chan, err);
600 
601 	if (conn) {
602 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
603 		/* Delete from channel list */
604 		list_del(&chan->list);
605 
606 		l2cap_chan_put(chan);
607 
608 		chan->conn = NULL;
609 
610 		/* Reference was only held for non-fixed channels or
611 		 * fixed channels that explicitly requested it using the
612 		 * FLAG_HOLD_HCI_CONN flag.
613 		 */
614 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
615 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
616 			hci_conn_drop(conn->hcon);
617 
618 		if (mgr && mgr->bredr_chan == chan)
619 			mgr->bredr_chan = NULL;
620 	}
621 
622 	if (chan->hs_hchan) {
623 		struct hci_chan *hs_hchan = chan->hs_hchan;
624 
625 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
626 		amp_disconnect_logical_link(hs_hchan);
627 	}
628 
629 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
630 		return;
631 
632 	switch (chan->mode) {
633 	case L2CAP_MODE_BASIC:
634 		break;
635 
636 	case L2CAP_MODE_LE_FLOWCTL:
637 		skb_queue_purge(&chan->tx_q);
638 		break;
639 
640 	case L2CAP_MODE_ERTM:
641 		__clear_retrans_timer(chan);
642 		__clear_monitor_timer(chan);
643 		__clear_ack_timer(chan);
644 
645 		skb_queue_purge(&chan->srej_q);
646 
647 		l2cap_seq_list_free(&chan->srej_list);
648 		l2cap_seq_list_free(&chan->retrans_list);
649 
650 		/* fall through */
651 
652 	case L2CAP_MODE_STREAMING:
653 		skb_queue_purge(&chan->tx_q);
654 		break;
655 	}
656 
657 	return;
658 }
659 EXPORT_SYMBOL_GPL(l2cap_chan_del);
660 
661 static void l2cap_conn_update_id_addr(struct work_struct *work)
662 {
663 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
664 					       id_addr_update_work);
665 	struct hci_conn *hcon = conn->hcon;
666 	struct l2cap_chan *chan;
667 
668 	mutex_lock(&conn->chan_lock);
669 
670 	list_for_each_entry(chan, &conn->chan_l, list) {
671 		l2cap_chan_lock(chan);
672 		bacpy(&chan->dst, &hcon->dst);
673 		chan->dst_type = bdaddr_dst_type(hcon);
674 		l2cap_chan_unlock(chan);
675 	}
676 
677 	mutex_unlock(&conn->chan_lock);
678 }
679 
680 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
681 {
682 	struct l2cap_conn *conn = chan->conn;
683 	struct l2cap_le_conn_rsp rsp;
684 	u16 result;
685 
686 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
687 		result = L2CAP_CR_AUTHORIZATION;
688 	else
689 		result = L2CAP_CR_BAD_PSM;
690 
691 	l2cap_state_change(chan, BT_DISCONN);
692 
693 	rsp.dcid    = cpu_to_le16(chan->scid);
694 	rsp.mtu     = cpu_to_le16(chan->imtu);
695 	rsp.mps     = cpu_to_le16(chan->mps);
696 	rsp.credits = cpu_to_le16(chan->rx_credits);
697 	rsp.result  = cpu_to_le16(result);
698 
699 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
700 		       &rsp);
701 }
702 
703 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
704 {
705 	struct l2cap_conn *conn = chan->conn;
706 	struct l2cap_conn_rsp rsp;
707 	u16 result;
708 
709 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
710 		result = L2CAP_CR_SEC_BLOCK;
711 	else
712 		result = L2CAP_CR_BAD_PSM;
713 
714 	l2cap_state_change(chan, BT_DISCONN);
715 
716 	rsp.scid   = cpu_to_le16(chan->dcid);
717 	rsp.dcid   = cpu_to_le16(chan->scid);
718 	rsp.result = cpu_to_le16(result);
719 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
720 
721 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
722 }
723 
724 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
725 {
726 	struct l2cap_conn *conn = chan->conn;
727 
728 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
729 
730 	switch (chan->state) {
731 	case BT_LISTEN:
732 		chan->ops->teardown(chan, 0);
733 		break;
734 
735 	case BT_CONNECTED:
736 	case BT_CONFIG:
737 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
738 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
739 			l2cap_send_disconn_req(chan, reason);
740 		} else
741 			l2cap_chan_del(chan, reason);
742 		break;
743 
744 	case BT_CONNECT2:
745 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
746 			if (conn->hcon->type == ACL_LINK)
747 				l2cap_chan_connect_reject(chan);
748 			else if (conn->hcon->type == LE_LINK)
749 				l2cap_chan_le_connect_reject(chan);
750 		}
751 
752 		l2cap_chan_del(chan, reason);
753 		break;
754 
755 	case BT_CONNECT:
756 	case BT_DISCONN:
757 		l2cap_chan_del(chan, reason);
758 		break;
759 
760 	default:
761 		chan->ops->teardown(chan, 0);
762 		break;
763 	}
764 }
765 EXPORT_SYMBOL(l2cap_chan_close);
766 
767 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
768 {
769 	switch (chan->chan_type) {
770 	case L2CAP_CHAN_RAW:
771 		switch (chan->sec_level) {
772 		case BT_SECURITY_HIGH:
773 		case BT_SECURITY_FIPS:
774 			return HCI_AT_DEDICATED_BONDING_MITM;
775 		case BT_SECURITY_MEDIUM:
776 			return HCI_AT_DEDICATED_BONDING;
777 		default:
778 			return HCI_AT_NO_BONDING;
779 		}
780 		break;
781 	case L2CAP_CHAN_CONN_LESS:
782 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
783 			if (chan->sec_level == BT_SECURITY_LOW)
784 				chan->sec_level = BT_SECURITY_SDP;
785 		}
786 		if (chan->sec_level == BT_SECURITY_HIGH ||
787 		    chan->sec_level == BT_SECURITY_FIPS)
788 			return HCI_AT_NO_BONDING_MITM;
789 		else
790 			return HCI_AT_NO_BONDING;
791 		break;
792 	case L2CAP_CHAN_CONN_ORIENTED:
793 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
794 			if (chan->sec_level == BT_SECURITY_LOW)
795 				chan->sec_level = BT_SECURITY_SDP;
796 
797 			if (chan->sec_level == BT_SECURITY_HIGH ||
798 			    chan->sec_level == BT_SECURITY_FIPS)
799 				return HCI_AT_NO_BONDING_MITM;
800 			else
801 				return HCI_AT_NO_BONDING;
802 		}
803 		/* fall through */
804 	default:
805 		switch (chan->sec_level) {
806 		case BT_SECURITY_HIGH:
807 		case BT_SECURITY_FIPS:
808 			return HCI_AT_GENERAL_BONDING_MITM;
809 		case BT_SECURITY_MEDIUM:
810 			return HCI_AT_GENERAL_BONDING;
811 		default:
812 			return HCI_AT_NO_BONDING;
813 		}
814 		break;
815 	}
816 }
817 
818 /* Service level security */
819 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
820 {
821 	struct l2cap_conn *conn = chan->conn;
822 	__u8 auth_type;
823 
824 	if (conn->hcon->type == LE_LINK)
825 		return smp_conn_security(conn->hcon, chan->sec_level);
826 
827 	auth_type = l2cap_get_auth_type(chan);
828 
829 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
830 				 initiator);
831 }
832 
833 static u8 l2cap_get_ident(struct l2cap_conn *conn)
834 {
835 	u8 id;
836 
837 	/* Get next available identifier.
838 	 *    1 - 128 are used by kernel.
839 	 *  129 - 199 are reserved.
840 	 *  200 - 254 are used by utilities like l2ping, etc.
841 	 */
842 
843 	mutex_lock(&conn->ident_lock);
844 
845 	if (++conn->tx_ident > 128)
846 		conn->tx_ident = 1;
847 
848 	id = conn->tx_ident;
849 
850 	mutex_unlock(&conn->ident_lock);
851 
852 	return id;
853 }
854 
855 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
856 			   void *data)
857 {
858 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
859 	u8 flags;
860 
861 	BT_DBG("code 0x%2.2x", code);
862 
863 	if (!skb)
864 		return;
865 
866 	/* Use NO_FLUSH if supported or we have an LE link (which does
867 	 * not support auto-flushing packets) */
868 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
869 	    conn->hcon->type == LE_LINK)
870 		flags = ACL_START_NO_FLUSH;
871 	else
872 		flags = ACL_START;
873 
874 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
875 	skb->priority = HCI_PRIO_MAX;
876 
877 	hci_send_acl(conn->hchan, skb, flags);
878 }
879 
880 static bool __chan_is_moving(struct l2cap_chan *chan)
881 {
882 	return chan->move_state != L2CAP_MOVE_STABLE &&
883 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
884 }
885 
886 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
887 {
888 	struct hci_conn *hcon = chan->conn->hcon;
889 	u16 flags;
890 
891 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
892 	       skb->priority);
893 
894 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
895 		if (chan->hs_hchan)
896 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
897 		else
898 			kfree_skb(skb);
899 
900 		return;
901 	}
902 
903 	/* Use NO_FLUSH for LE links (where this is the only option) or
904 	 * if the BR/EDR link supports it and flushing has not been
905 	 * explicitly requested (through FLAG_FLUSHABLE).
906 	 */
907 	if (hcon->type == LE_LINK ||
908 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
909 	     lmp_no_flush_capable(hcon->hdev)))
910 		flags = ACL_START_NO_FLUSH;
911 	else
912 		flags = ACL_START;
913 
914 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
915 	hci_send_acl(chan->conn->hchan, skb, flags);
916 }
917 
918 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
919 {
920 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
921 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
922 
923 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
924 		/* S-Frame */
925 		control->sframe = 1;
926 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
927 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
928 
929 		control->sar = 0;
930 		control->txseq = 0;
931 	} else {
932 		/* I-Frame */
933 		control->sframe = 0;
934 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
935 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
936 
937 		control->poll = 0;
938 		control->super = 0;
939 	}
940 }
941 
942 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
943 {
944 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
945 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
946 
947 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
948 		/* S-Frame */
949 		control->sframe = 1;
950 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
951 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
952 
953 		control->sar = 0;
954 		control->txseq = 0;
955 	} else {
956 		/* I-Frame */
957 		control->sframe = 0;
958 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
959 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
960 
961 		control->poll = 0;
962 		control->super = 0;
963 	}
964 }
965 
966 static inline void __unpack_control(struct l2cap_chan *chan,
967 				    struct sk_buff *skb)
968 {
969 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
970 		__unpack_extended_control(get_unaligned_le32(skb->data),
971 					  &bt_cb(skb)->l2cap);
972 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
973 	} else {
974 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
975 					  &bt_cb(skb)->l2cap);
976 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
977 	}
978 }
979 
980 static u32 __pack_extended_control(struct l2cap_ctrl *control)
981 {
982 	u32 packed;
983 
984 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
985 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
986 
987 	if (control->sframe) {
988 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
989 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
990 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
991 	} else {
992 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
993 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
994 	}
995 
996 	return packed;
997 }
998 
999 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1000 {
1001 	u16 packed;
1002 
1003 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1004 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1005 
1006 	if (control->sframe) {
1007 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1008 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1009 		packed |= L2CAP_CTRL_FRAME_TYPE;
1010 	} else {
1011 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1012 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1013 	}
1014 
1015 	return packed;
1016 }
1017 
1018 static inline void __pack_control(struct l2cap_chan *chan,
1019 				  struct l2cap_ctrl *control,
1020 				  struct sk_buff *skb)
1021 {
1022 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1023 		put_unaligned_le32(__pack_extended_control(control),
1024 				   skb->data + L2CAP_HDR_SIZE);
1025 	} else {
1026 		put_unaligned_le16(__pack_enhanced_control(control),
1027 				   skb->data + L2CAP_HDR_SIZE);
1028 	}
1029 }
1030 
1031 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1032 {
1033 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1034 		return L2CAP_EXT_HDR_SIZE;
1035 	else
1036 		return L2CAP_ENH_HDR_SIZE;
1037 }
1038 
1039 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1040 					       u32 control)
1041 {
1042 	struct sk_buff *skb;
1043 	struct l2cap_hdr *lh;
1044 	int hlen = __ertm_hdr_size(chan);
1045 
1046 	if (chan->fcs == L2CAP_FCS_CRC16)
1047 		hlen += L2CAP_FCS_SIZE;
1048 
1049 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1050 
1051 	if (!skb)
1052 		return ERR_PTR(-ENOMEM);
1053 
1054 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1055 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1056 	lh->cid = cpu_to_le16(chan->dcid);
1057 
1058 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1059 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1060 	else
1061 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1062 
1063 	if (chan->fcs == L2CAP_FCS_CRC16) {
1064 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1065 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1066 	}
1067 
1068 	skb->priority = HCI_PRIO_MAX;
1069 	return skb;
1070 }
1071 
1072 static void l2cap_send_sframe(struct l2cap_chan *chan,
1073 			      struct l2cap_ctrl *control)
1074 {
1075 	struct sk_buff *skb;
1076 	u32 control_field;
1077 
1078 	BT_DBG("chan %p, control %p", chan, control);
1079 
1080 	if (!control->sframe)
1081 		return;
1082 
1083 	if (__chan_is_moving(chan))
1084 		return;
1085 
1086 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1087 	    !control->poll)
1088 		control->final = 1;
1089 
1090 	if (control->super == L2CAP_SUPER_RR)
1091 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1092 	else if (control->super == L2CAP_SUPER_RNR)
1093 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1094 
1095 	if (control->super != L2CAP_SUPER_SREJ) {
1096 		chan->last_acked_seq = control->reqseq;
1097 		__clear_ack_timer(chan);
1098 	}
1099 
1100 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1101 	       control->final, control->poll, control->super);
1102 
1103 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1104 		control_field = __pack_extended_control(control);
1105 	else
1106 		control_field = __pack_enhanced_control(control);
1107 
1108 	skb = l2cap_create_sframe_pdu(chan, control_field);
1109 	if (!IS_ERR(skb))
1110 		l2cap_do_send(chan, skb);
1111 }
1112 
1113 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1114 {
1115 	struct l2cap_ctrl control;
1116 
1117 	BT_DBG("chan %p, poll %d", chan, poll);
1118 
1119 	memset(&control, 0, sizeof(control));
1120 	control.sframe = 1;
1121 	control.poll = poll;
1122 
1123 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1124 		control.super = L2CAP_SUPER_RNR;
1125 	else
1126 		control.super = L2CAP_SUPER_RR;
1127 
1128 	control.reqseq = chan->buffer_seq;
1129 	l2cap_send_sframe(chan, &control);
1130 }
1131 
1132 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1133 {
1134 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1135 		return true;
1136 
1137 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1138 }
1139 
1140 static bool __amp_capable(struct l2cap_chan *chan)
1141 {
1142 	struct l2cap_conn *conn = chan->conn;
1143 	struct hci_dev *hdev;
1144 	bool amp_available = false;
1145 
1146 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1147 		return false;
1148 
1149 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1150 		return false;
1151 
1152 	read_lock(&hci_dev_list_lock);
1153 	list_for_each_entry(hdev, &hci_dev_list, list) {
1154 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1155 		    test_bit(HCI_UP, &hdev->flags)) {
1156 			amp_available = true;
1157 			break;
1158 		}
1159 	}
1160 	read_unlock(&hci_dev_list_lock);
1161 
1162 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1163 		return amp_available;
1164 
1165 	return false;
1166 }
1167 
1168 static bool l2cap_check_efs(struct l2cap_chan *chan)
1169 {
1170 	/* Check EFS parameters */
1171 	return true;
1172 }
1173 
1174 void l2cap_send_conn_req(struct l2cap_chan *chan)
1175 {
1176 	struct l2cap_conn *conn = chan->conn;
1177 	struct l2cap_conn_req req;
1178 
1179 	req.scid = cpu_to_le16(chan->scid);
1180 	req.psm  = chan->psm;
1181 
1182 	chan->ident = l2cap_get_ident(conn);
1183 
1184 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1185 
1186 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1187 }
1188 
1189 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1190 {
1191 	struct l2cap_create_chan_req req;
1192 	req.scid = cpu_to_le16(chan->scid);
1193 	req.psm  = chan->psm;
1194 	req.amp_id = amp_id;
1195 
1196 	chan->ident = l2cap_get_ident(chan->conn);
1197 
1198 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1199 		       sizeof(req), &req);
1200 }
1201 
1202 static void l2cap_move_setup(struct l2cap_chan *chan)
1203 {
1204 	struct sk_buff *skb;
1205 
1206 	BT_DBG("chan %p", chan);
1207 
1208 	if (chan->mode != L2CAP_MODE_ERTM)
1209 		return;
1210 
1211 	__clear_retrans_timer(chan);
1212 	__clear_monitor_timer(chan);
1213 	__clear_ack_timer(chan);
1214 
1215 	chan->retry_count = 0;
1216 	skb_queue_walk(&chan->tx_q, skb) {
1217 		if (bt_cb(skb)->l2cap.retries)
1218 			bt_cb(skb)->l2cap.retries = 1;
1219 		else
1220 			break;
1221 	}
1222 
1223 	chan->expected_tx_seq = chan->buffer_seq;
1224 
1225 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1226 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1227 	l2cap_seq_list_clear(&chan->retrans_list);
1228 	l2cap_seq_list_clear(&chan->srej_list);
1229 	skb_queue_purge(&chan->srej_q);
1230 
1231 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1232 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1233 
1234 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1235 }
1236 
1237 static void l2cap_move_done(struct l2cap_chan *chan)
1238 {
1239 	u8 move_role = chan->move_role;
1240 	BT_DBG("chan %p", chan);
1241 
1242 	chan->move_state = L2CAP_MOVE_STABLE;
1243 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1244 
1245 	if (chan->mode != L2CAP_MODE_ERTM)
1246 		return;
1247 
1248 	switch (move_role) {
1249 	case L2CAP_MOVE_ROLE_INITIATOR:
1250 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1251 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1252 		break;
1253 	case L2CAP_MOVE_ROLE_RESPONDER:
1254 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1255 		break;
1256 	}
1257 }
1258 
1259 static void l2cap_chan_ready(struct l2cap_chan *chan)
1260 {
1261 	/* The channel may have already been flagged as connected in
1262 	 * case of receiving data before the L2CAP info req/rsp
1263 	 * procedure is complete.
1264 	 */
1265 	if (chan->state == BT_CONNECTED)
1266 		return;
1267 
1268 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1269 	chan->conf_state = 0;
1270 	__clear_chan_timer(chan);
1271 
1272 	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1273 		chan->ops->suspend(chan);
1274 
1275 	chan->state = BT_CONNECTED;
1276 
1277 	chan->ops->ready(chan);
1278 }
1279 
1280 static void l2cap_le_connect(struct l2cap_chan *chan)
1281 {
1282 	struct l2cap_conn *conn = chan->conn;
1283 	struct l2cap_le_conn_req req;
1284 
1285 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1286 		return;
1287 
1288 	req.psm     = chan->psm;
1289 	req.scid    = cpu_to_le16(chan->scid);
1290 	req.mtu     = cpu_to_le16(chan->imtu);
1291 	req.mps     = cpu_to_le16(chan->mps);
1292 	req.credits = cpu_to_le16(chan->rx_credits);
1293 
1294 	chan->ident = l2cap_get_ident(conn);
1295 
1296 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1297 		       sizeof(req), &req);
1298 }
1299 
1300 static void l2cap_le_start(struct l2cap_chan *chan)
1301 {
1302 	struct l2cap_conn *conn = chan->conn;
1303 
1304 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1305 		return;
1306 
1307 	if (!chan->psm) {
1308 		l2cap_chan_ready(chan);
1309 		return;
1310 	}
1311 
1312 	if (chan->state == BT_CONNECT)
1313 		l2cap_le_connect(chan);
1314 }
1315 
1316 static void l2cap_start_connection(struct l2cap_chan *chan)
1317 {
1318 	if (__amp_capable(chan)) {
1319 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1320 		a2mp_discover_amp(chan);
1321 	} else if (chan->conn->hcon->type == LE_LINK) {
1322 		l2cap_le_start(chan);
1323 	} else {
1324 		l2cap_send_conn_req(chan);
1325 	}
1326 }
1327 
1328 static void l2cap_request_info(struct l2cap_conn *conn)
1329 {
1330 	struct l2cap_info_req req;
1331 
1332 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1333 		return;
1334 
1335 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1336 
1337 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1338 	conn->info_ident = l2cap_get_ident(conn);
1339 
1340 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1341 
1342 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1343 		       sizeof(req), &req);
1344 }
1345 
1346 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1347 {
1348 	/* The minimum encryption key size needs to be enforced by the
1349 	 * host stack before establishing any L2CAP connections. The
1350 	 * specification in theory allows a minimum of 1, but to align
1351 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1352 	 *
1353 	 * This check might also be called for unencrypted connections
1354 	 * that have no key size requirements. Ensure that the link is
1355 	 * actually encrypted before enforcing a key size.
1356 	 */
1357 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1358 		hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
1359 }
1360 
1361 static void l2cap_do_start(struct l2cap_chan *chan)
1362 {
1363 	struct l2cap_conn *conn = chan->conn;
1364 
1365 	if (conn->hcon->type == LE_LINK) {
1366 		l2cap_le_start(chan);
1367 		return;
1368 	}
1369 
1370 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1371 		l2cap_request_info(conn);
1372 		return;
1373 	}
1374 
1375 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1376 		return;
1377 
1378 	if (!l2cap_chan_check_security(chan, true) ||
1379 	    !__l2cap_no_conn_pending(chan))
1380 		return;
1381 
1382 	if (l2cap_check_enc_key_size(conn->hcon))
1383 		l2cap_start_connection(chan);
1384 	else
1385 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1386 }
1387 
1388 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1389 {
1390 	u32 local_feat_mask = l2cap_feat_mask;
1391 	if (!disable_ertm)
1392 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1393 
1394 	switch (mode) {
1395 	case L2CAP_MODE_ERTM:
1396 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1397 	case L2CAP_MODE_STREAMING:
1398 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1399 	default:
1400 		return 0x00;
1401 	}
1402 }
1403 
1404 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1405 {
1406 	struct l2cap_conn *conn = chan->conn;
1407 	struct l2cap_disconn_req req;
1408 
1409 	if (!conn)
1410 		return;
1411 
1412 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1413 		__clear_retrans_timer(chan);
1414 		__clear_monitor_timer(chan);
1415 		__clear_ack_timer(chan);
1416 	}
1417 
1418 	if (chan->scid == L2CAP_CID_A2MP) {
1419 		l2cap_state_change(chan, BT_DISCONN);
1420 		return;
1421 	}
1422 
1423 	req.dcid = cpu_to_le16(chan->dcid);
1424 	req.scid = cpu_to_le16(chan->scid);
1425 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1426 		       sizeof(req), &req);
1427 
1428 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1429 }
1430 
1431 /* ---- L2CAP connections ---- */
1432 static void l2cap_conn_start(struct l2cap_conn *conn)
1433 {
1434 	struct l2cap_chan *chan, *tmp;
1435 
1436 	BT_DBG("conn %p", conn);
1437 
1438 	mutex_lock(&conn->chan_lock);
1439 
1440 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1441 		l2cap_chan_lock(chan);
1442 
1443 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1444 			l2cap_chan_ready(chan);
1445 			l2cap_chan_unlock(chan);
1446 			continue;
1447 		}
1448 
1449 		if (chan->state == BT_CONNECT) {
1450 			if (!l2cap_chan_check_security(chan, true) ||
1451 			    !__l2cap_no_conn_pending(chan)) {
1452 				l2cap_chan_unlock(chan);
1453 				continue;
1454 			}
1455 
1456 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1457 			    && test_bit(CONF_STATE2_DEVICE,
1458 					&chan->conf_state)) {
1459 				l2cap_chan_close(chan, ECONNRESET);
1460 				l2cap_chan_unlock(chan);
1461 				continue;
1462 			}
1463 
1464 			if (l2cap_check_enc_key_size(conn->hcon))
1465 				l2cap_start_connection(chan);
1466 			else
1467 				l2cap_chan_close(chan, ECONNREFUSED);
1468 
1469 		} else if (chan->state == BT_CONNECT2) {
1470 			struct l2cap_conn_rsp rsp;
1471 			char buf[128];
1472 			rsp.scid = cpu_to_le16(chan->dcid);
1473 			rsp.dcid = cpu_to_le16(chan->scid);
1474 
1475 			if (l2cap_chan_check_security(chan, false)) {
1476 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1477 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1478 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1479 					chan->ops->defer(chan);
1480 
1481 				} else {
1482 					l2cap_state_change(chan, BT_CONFIG);
1483 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1484 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1485 				}
1486 			} else {
1487 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1488 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1489 			}
1490 
1491 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1492 				       sizeof(rsp), &rsp);
1493 
1494 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1495 			    rsp.result != L2CAP_CR_SUCCESS) {
1496 				l2cap_chan_unlock(chan);
1497 				continue;
1498 			}
1499 
1500 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1501 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1502 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1503 			chan->num_conf_req++;
1504 		}
1505 
1506 		l2cap_chan_unlock(chan);
1507 	}
1508 
1509 	mutex_unlock(&conn->chan_lock);
1510 }
1511 
1512 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1513 {
1514 	struct hci_conn *hcon = conn->hcon;
1515 	struct hci_dev *hdev = hcon->hdev;
1516 
1517 	BT_DBG("%s conn %p", hdev->name, conn);
1518 
1519 	/* For outgoing pairing which doesn't necessarily have an
1520 	 * associated socket (e.g. mgmt_pair_device).
1521 	 */
1522 	if (hcon->out)
1523 		smp_conn_security(hcon, hcon->pending_sec_level);
1524 
1525 	/* For LE slave connections, make sure the connection interval
1526 	 * is in the range of the minimum and maximum interval that has
1527 	 * been configured for this connection. If not, then trigger
1528 	 * the connection update procedure.
1529 	 */
1530 	if (hcon->role == HCI_ROLE_SLAVE &&
1531 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1532 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1533 		struct l2cap_conn_param_update_req req;
1534 
1535 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1536 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1537 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1538 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1539 
1540 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1541 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1542 	}
1543 }
1544 
1545 static void l2cap_conn_ready(struct l2cap_conn *conn)
1546 {
1547 	struct l2cap_chan *chan;
1548 	struct hci_conn *hcon = conn->hcon;
1549 
1550 	BT_DBG("conn %p", conn);
1551 
1552 	if (hcon->type == ACL_LINK)
1553 		l2cap_request_info(conn);
1554 
1555 	mutex_lock(&conn->chan_lock);
1556 
1557 	list_for_each_entry(chan, &conn->chan_l, list) {
1558 
1559 		l2cap_chan_lock(chan);
1560 
1561 		if (chan->scid == L2CAP_CID_A2MP) {
1562 			l2cap_chan_unlock(chan);
1563 			continue;
1564 		}
1565 
1566 		if (hcon->type == LE_LINK) {
1567 			l2cap_le_start(chan);
1568 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1569 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1570 				l2cap_chan_ready(chan);
1571 		} else if (chan->state == BT_CONNECT) {
1572 			l2cap_do_start(chan);
1573 		}
1574 
1575 		l2cap_chan_unlock(chan);
1576 	}
1577 
1578 	mutex_unlock(&conn->chan_lock);
1579 
1580 	if (hcon->type == LE_LINK)
1581 		l2cap_le_conn_ready(conn);
1582 
1583 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1584 }
1585 
1586 /* Notify sockets that we cannot guarantee reliability anymore */
1587 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1588 {
1589 	struct l2cap_chan *chan;
1590 
1591 	BT_DBG("conn %p", conn);
1592 
1593 	mutex_lock(&conn->chan_lock);
1594 
1595 	list_for_each_entry(chan, &conn->chan_l, list) {
1596 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1597 			l2cap_chan_set_err(chan, err);
1598 	}
1599 
1600 	mutex_unlock(&conn->chan_lock);
1601 }
1602 
1603 static void l2cap_info_timeout(struct work_struct *work)
1604 {
1605 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1606 					       info_timer.work);
1607 
1608 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1609 	conn->info_ident = 0;
1610 
1611 	l2cap_conn_start(conn);
1612 }
1613 
1614 /*
1615  * l2cap_user
1616  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1617  * callback is called during registration. The ->remove callback is called
1618  * during unregistration.
1619  * An l2cap_user object can either be explicitly unregistered or when the
1620  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1621  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1622  * External modules must own a reference to the l2cap_conn object if they intend
1623  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1624  * any time if they don't.
1625  */
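/* Hypothetical usage sketch (added for illustration; my_user and the error
 * handling below are not defined in this file): an external module typically
 * pins the connection around its registration, e.g.
 *
 *	l2cap_conn_get(conn);
 *	err = l2cap_register_user(conn, &my_user);
 *	if (err)
 *		l2cap_conn_put(conn);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 *	l2cap_conn_put(conn);
 *
 * where my_user supplies the ->probe and ->remove callbacks described above.
 */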
1626 
1627 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1628 {
1629 	struct hci_dev *hdev = conn->hcon->hdev;
1630 	int ret;
1631 
1632 	/* We need to check whether l2cap_conn is registered. If it is not, we
1633 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1634 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1635 	 * relies on the parent hci_conn object to be locked. This itself relies
1636 	 * on the hci_dev object to be locked. So we must lock the hci device
1637 	 * here, too. */
1638 
1639 	hci_dev_lock(hdev);
1640 
1641 	if (!list_empty(&user->list)) {
1642 		ret = -EINVAL;
1643 		goto out_unlock;
1644 	}
1645 
1646 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1647 	if (!conn->hchan) {
1648 		ret = -ENODEV;
1649 		goto out_unlock;
1650 	}
1651 
1652 	ret = user->probe(conn, user);
1653 	if (ret)
1654 		goto out_unlock;
1655 
1656 	list_add(&user->list, &conn->users);
1657 	ret = 0;
1658 
1659 out_unlock:
1660 	hci_dev_unlock(hdev);
1661 	return ret;
1662 }
1663 EXPORT_SYMBOL(l2cap_register_user);
1664 
1665 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1666 {
1667 	struct hci_dev *hdev = conn->hcon->hdev;
1668 
1669 	hci_dev_lock(hdev);
1670 
1671 	if (list_empty(&user->list))
1672 		goto out_unlock;
1673 
1674 	list_del_init(&user->list);
1675 	user->remove(conn, user);
1676 
1677 out_unlock:
1678 	hci_dev_unlock(hdev);
1679 }
1680 EXPORT_SYMBOL(l2cap_unregister_user);
1681 
1682 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1683 {
1684 	struct l2cap_user *user;
1685 
1686 	while (!list_empty(&conn->users)) {
1687 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1688 		list_del_init(&user->list);
1689 		user->remove(conn, user);
1690 	}
1691 }
1692 
1693 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1694 {
1695 	struct l2cap_conn *conn = hcon->l2cap_data;
1696 	struct l2cap_chan *chan, *l;
1697 
1698 	if (!conn)
1699 		return;
1700 
1701 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1702 
1703 	kfree_skb(conn->rx_skb);
1704 
1705 	skb_queue_purge(&conn->pending_rx);
1706 
1707 	/* We can not call flush_work(&conn->pending_rx_work) here since we
1708 	 * might block if we are running on a worker from the same workqueue
1709 	 * pending_rx_work is waiting on.
1710 	 */
1711 	if (work_pending(&conn->pending_rx_work))
1712 		cancel_work_sync(&conn->pending_rx_work);
1713 
1714 	if (work_pending(&conn->id_addr_update_work))
1715 		cancel_work_sync(&conn->id_addr_update_work);
1716 
1717 	l2cap_unregister_all_users(conn);
1718 
1719 	/* Force the connection to be immediately dropped */
1720 	hcon->disc_timeout = 0;
1721 
1722 	mutex_lock(&conn->chan_lock);
1723 
1724 	/* Kill channels */
1725 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1726 		l2cap_chan_hold(chan);
1727 		l2cap_chan_lock(chan);
1728 
1729 		l2cap_chan_del(chan, err);
1730 
1731 		chan->ops->close(chan);
1732 
1733 		l2cap_chan_unlock(chan);
1734 		l2cap_chan_put(chan);
1735 	}
1736 
1737 	mutex_unlock(&conn->chan_lock);
1738 
1739 	hci_chan_del(conn->hchan);
1740 
1741 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1742 		cancel_delayed_work_sync(&conn->info_timer);
1743 
1744 	hcon->l2cap_data = NULL;
1745 	conn->hchan = NULL;
1746 	l2cap_conn_put(conn);
1747 }
1748 
1749 static void l2cap_conn_free(struct kref *ref)
1750 {
1751 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1752 
1753 	hci_conn_put(conn->hcon);
1754 	kfree(conn);
1755 }
1756 
1757 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1758 {
1759 	kref_get(&conn->ref);
1760 	return conn;
1761 }
1762 EXPORT_SYMBOL(l2cap_conn_get);
1763 
1764 void l2cap_conn_put(struct l2cap_conn *conn)
1765 {
1766 	kref_put(&conn->ref, l2cap_conn_free);
1767 }
1768 EXPORT_SYMBOL(l2cap_conn_put);
1769 
1770 /* ---- Socket interface ---- */
1771 
1772 /* Find socket with psm and source / destination bdaddr.
1773  * Returns closest match.
1774  */
1775 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1776 						   bdaddr_t *src,
1777 						   bdaddr_t *dst,
1778 						   u8 link_type)
1779 {
1780 	struct l2cap_chan *c, *c1 = NULL;
1781 
1782 	read_lock(&chan_list_lock);
1783 
1784 	list_for_each_entry(c, &chan_list, global_l) {
1785 		if (state && c->state != state)
1786 			continue;
1787 
1788 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1789 			continue;
1790 
1791 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1792 			continue;
1793 
1794 		if (c->psm == psm) {
1795 			int src_match, dst_match;
1796 			int src_any, dst_any;
1797 
1798 			/* Exact match. */
1799 			src_match = !bacmp(&c->src, src);
1800 			dst_match = !bacmp(&c->dst, dst);
1801 			if (src_match && dst_match) {
1802 				l2cap_chan_hold(c);
1803 				read_unlock(&chan_list_lock);
1804 				return c;
1805 			}
1806 
1807 			/* Closest match */
1808 			src_any = !bacmp(&c->src, BDADDR_ANY);
1809 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
1810 			if ((src_match && dst_any) || (src_any && dst_match) ||
1811 			    (src_any && dst_any))
1812 				c1 = c;
1813 		}
1814 	}
1815 
1816 	if (c1)
1817 		l2cap_chan_hold(c1);
1818 
1819 	read_unlock(&chan_list_lock);
1820 
1821 	return c1;
1822 }
1823 
1824 static void l2cap_monitor_timeout(struct work_struct *work)
1825 {
1826 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1827 					       monitor_timer.work);
1828 
1829 	BT_DBG("chan %p", chan);
1830 
1831 	l2cap_chan_lock(chan);
1832 
1833 	if (!chan->conn) {
1834 		l2cap_chan_unlock(chan);
1835 		l2cap_chan_put(chan);
1836 		return;
1837 	}
1838 
1839 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
1840 
1841 	l2cap_chan_unlock(chan);
1842 	l2cap_chan_put(chan);
1843 }
1844 
1845 static void l2cap_retrans_timeout(struct work_struct *work)
1846 {
1847 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1848 					       retrans_timer.work);
1849 
1850 	BT_DBG("chan %p", chan);
1851 
1852 	l2cap_chan_lock(chan);
1853 
1854 	if (!chan->conn) {
1855 		l2cap_chan_unlock(chan);
1856 		l2cap_chan_put(chan);
1857 		return;
1858 	}
1859 
1860 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
1861 	l2cap_chan_unlock(chan);
1862 	l2cap_chan_put(chan);
1863 }
1864 
1865 static void l2cap_streaming_send(struct l2cap_chan *chan,
1866 				 struct sk_buff_head *skbs)
1867 {
1868 	struct sk_buff *skb;
1869 	struct l2cap_ctrl *control;
1870 
1871 	BT_DBG("chan %p, skbs %p", chan, skbs);
1872 
1873 	if (__chan_is_moving(chan))
1874 		return;
1875 
1876 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
1877 
1878 	while (!skb_queue_empty(&chan->tx_q)) {
1879 
1880 		skb = skb_dequeue(&chan->tx_q);
1881 
1882 		bt_cb(skb)->l2cap.retries = 1;
1883 		control = &bt_cb(skb)->l2cap;
1884 
1885 		control->reqseq = 0;
1886 		control->txseq = chan->next_tx_seq;
1887 
1888 		__pack_control(chan, control, skb);
1889 
1890 		if (chan->fcs == L2CAP_FCS_CRC16) {
1891 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1892 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1893 		}
1894 
1895 		l2cap_do_send(chan, skb);
1896 
1897 		BT_DBG("Sent txseq %u", control->txseq);
1898 
1899 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1900 		chan->frames_sent++;
1901 	}
1902 }
1903 
1904 static int l2cap_ertm_send(struct l2cap_chan *chan)
1905 {
1906 	struct sk_buff *skb, *tx_skb;
1907 	struct l2cap_ctrl *control;
1908 	int sent = 0;
1909 
1910 	BT_DBG("chan %p", chan);
1911 
1912 	if (chan->state != BT_CONNECTED)
1913 		return -ENOTCONN;
1914 
1915 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1916 		return 0;
1917 
1918 	if (__chan_is_moving(chan))
1919 		return 0;
1920 
1921 	while (chan->tx_send_head &&
1922 	       chan->unacked_frames < chan->remote_tx_win &&
1923 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
1924 
1925 		skb = chan->tx_send_head;
1926 
1927 		bt_cb(skb)->l2cap.retries = 1;
1928 		control = &bt_cb(skb)->l2cap;
1929 
1930 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1931 			control->final = 1;
1932 
1933 		control->reqseq = chan->buffer_seq;
1934 		chan->last_acked_seq = chan->buffer_seq;
1935 		control->txseq = chan->next_tx_seq;
1936 
1937 		__pack_control(chan, control, skb);
1938 
1939 		if (chan->fcs == L2CAP_FCS_CRC16) {
1940 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1941 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1942 		}
1943 
1944 		/* Clone after data has been modified. Data is assumed to be
1945 		 * read-only (for locking purposes) on cloned sk_buffs.
1946 		 */
1947 		tx_skb = skb_clone(skb, GFP_KERNEL);
1948 
1949 		if (!tx_skb)
1950 			break;
1951 
1952 		__set_retrans_timer(chan);
1953 
1954 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1955 		chan->unacked_frames++;
1956 		chan->frames_sent++;
1957 		sent++;
1958 
1959 		if (skb_queue_is_last(&chan->tx_q, skb))
1960 			chan->tx_send_head = NULL;
1961 		else
1962 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1963 
1964 		l2cap_do_send(chan, tx_skb);
1965 		BT_DBG("Sent txseq %u", control->txseq);
1966 	}
1967 
1968 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1969 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
1970 
1971 	return sent;
1972 }
1973 
1974 static void l2cap_ertm_resend(struct l2cap_chan *chan)
1975 {
1976 	struct l2cap_ctrl control;
1977 	struct sk_buff *skb;
1978 	struct sk_buff *tx_skb;
1979 	u16 seq;
1980 
1981 	BT_DBG("chan %p", chan);
1982 
1983 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1984 		return;
1985 
1986 	if (__chan_is_moving(chan))
1987 		return;
1988 
1989 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1990 		seq = l2cap_seq_list_pop(&chan->retrans_list);
1991 
1992 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1993 		if (!skb) {
1994 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
1995 			       seq);
1996 			continue;
1997 		}
1998 
1999 		bt_cb(skb)->l2cap.retries++;
2000 		control = bt_cb(skb)->l2cap;
2001 
2002 		if (chan->max_tx != 0 &&
2003 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
2004 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2005 			l2cap_send_disconn_req(chan, ECONNRESET);
2006 			l2cap_seq_list_clear(&chan->retrans_list);
2007 			break;
2008 		}
2009 
2010 		control.reqseq = chan->buffer_seq;
2011 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2012 			control.final = 1;
2013 		else
2014 			control.final = 0;
2015 
2016 		if (skb_cloned(skb)) {
2017 			/* Cloned sk_buffs are read-only, so we need a
2018 			 * writeable copy
2019 			 */
2020 			tx_skb = skb_copy(skb, GFP_KERNEL);
2021 		} else {
2022 			tx_skb = skb_clone(skb, GFP_KERNEL);
2023 		}
2024 
2025 		if (!tx_skb) {
2026 			l2cap_seq_list_clear(&chan->retrans_list);
2027 			break;
2028 		}
2029 
2030 		/* Update skb contents */
2031 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2032 			put_unaligned_le32(__pack_extended_control(&control),
2033 					   tx_skb->data + L2CAP_HDR_SIZE);
2034 		} else {
2035 			put_unaligned_le16(__pack_enhanced_control(&control),
2036 					   tx_skb->data + L2CAP_HDR_SIZE);
2037 		}
2038 
2039 		/* Update FCS */
2040 		if (chan->fcs == L2CAP_FCS_CRC16) {
2041 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2042 					tx_skb->len - L2CAP_FCS_SIZE);
2043 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2044 						L2CAP_FCS_SIZE);
2045 		}
2046 
2047 		l2cap_do_send(chan, tx_skb);
2048 
2049 		BT_DBG("Resent txseq %d", control.txseq);
2050 
2051 		chan->last_acked_seq = chan->buffer_seq;
2052 	}
2053 }
2054 
2055 static void l2cap_retransmit(struct l2cap_chan *chan,
2056 			     struct l2cap_ctrl *control)
2057 {
2058 	BT_DBG("chan %p, control %p", chan, control);
2059 
2060 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2061 	l2cap_ertm_resend(chan);
2062 }
2063 
2064 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2065 				 struct l2cap_ctrl *control)
2066 {
2067 	struct sk_buff *skb;
2068 
2069 	BT_DBG("chan %p, control %p", chan, control);
2070 
2071 	if (control->poll)
2072 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2073 
2074 	l2cap_seq_list_clear(&chan->retrans_list);
2075 
2076 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2077 		return;
2078 
2079 	if (chan->unacked_frames) {
2080 		skb_queue_walk(&chan->tx_q, skb) {
2081 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2082 			    skb == chan->tx_send_head)
2083 				break;
2084 		}
2085 
2086 		skb_queue_walk_from(&chan->tx_q, skb) {
2087 			if (skb == chan->tx_send_head)
2088 				break;
2089 
2090 			l2cap_seq_list_append(&chan->retrans_list,
2091 					      bt_cb(skb)->l2cap.txseq);
2092 		}
2093 
2094 		l2cap_ertm_resend(chan);
2095 	}
2096 }
2097 
2098 static void l2cap_send_ack(struct l2cap_chan *chan)
2099 {
2100 	struct l2cap_ctrl control;
2101 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2102 					 chan->last_acked_seq);
2103 	int threshold;
2104 
2105 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2106 	       chan, chan->last_acked_seq, chan->buffer_seq);
2107 
2108 	memset(&control, 0, sizeof(control));
2109 	control.sframe = 1;
2110 
2111 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2112 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2113 		__clear_ack_timer(chan);
2114 		control.super = L2CAP_SUPER_RNR;
2115 		control.reqseq = chan->buffer_seq;
2116 		l2cap_send_sframe(chan, &control);
2117 	} else {
2118 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2119 			l2cap_ertm_send(chan);
2120 			/* If any i-frames were sent, they included an ack */
2121 			if (chan->buffer_seq == chan->last_acked_seq)
2122 				frames_to_ack = 0;
2123 		}
2124 
2125 		/* Ack now if the window is 3/4ths full.
2126 		 * Calculate without mul or div
2127 		 */
2128 		threshold = chan->ack_win;
2129 		threshold += threshold << 1;
2130 		threshold >>= 2;
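		/* The shift arithmetic above computes (3 * ack_win) / 4 with
		 * integer math; e.g. with the default 63-frame tx window the
		 * threshold works out to 47 outstanding frames before an
		 * acknowledgement is forced (illustrative example).
		 */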
2131 
2132 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2133 		       threshold);
2134 
2135 		if (frames_to_ack >= threshold) {
2136 			__clear_ack_timer(chan);
2137 			control.super = L2CAP_SUPER_RR;
2138 			control.reqseq = chan->buffer_seq;
2139 			l2cap_send_sframe(chan, &control);
2140 			frames_to_ack = 0;
2141 		}
2142 
2143 		if (frames_to_ack)
2144 			__set_ack_timer(chan);
2145 	}
2146 }
2147 
2148 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2149 					 struct msghdr *msg, int len,
2150 					 int count, struct sk_buff *skb)
2151 {
2152 	struct l2cap_conn *conn = chan->conn;
2153 	struct sk_buff **frag;
2154 	int sent = 0;
2155 
2156 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2157 		return -EFAULT;
2158 
2159 	sent += count;
2160 	len  -= count;
2161 
2162 	/* Continuation fragments (no L2CAP header) */
2163 	frag = &skb_shinfo(skb)->frag_list;
2164 	while (len) {
2165 		struct sk_buff *tmp;
2166 
2167 		count = min_t(unsigned int, conn->mtu, len);
2168 
2169 		tmp = chan->ops->alloc_skb(chan, 0, count,
2170 					   msg->msg_flags & MSG_DONTWAIT);
2171 		if (IS_ERR(tmp))
2172 			return PTR_ERR(tmp);
2173 
2174 		*frag = tmp;
2175 
2176 		if (!copy_from_iter_full(skb_put(*frag, count), count,
2177 				   &msg->msg_iter))
2178 			return -EFAULT;
2179 
2180 		sent += count;
2181 		len  -= count;
2182 
2183 		skb->len += (*frag)->len;
2184 		skb->data_len += (*frag)->len;
2185 
2186 		frag = &(*frag)->next;
2187 	}
2188 
2189 	return sent;
2190 }
2191 
2192 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2193 						 struct msghdr *msg, size_t len)
2194 {
2195 	struct l2cap_conn *conn = chan->conn;
2196 	struct sk_buff *skb;
2197 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2198 	struct l2cap_hdr *lh;
2199 
2200 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2201 	       __le16_to_cpu(chan->psm), len);
2202 
2203 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2204 
2205 	skb = chan->ops->alloc_skb(chan, hlen, count,
2206 				   msg->msg_flags & MSG_DONTWAIT);
2207 	if (IS_ERR(skb))
2208 		return skb;
2209 
2210 	/* Create L2CAP header */
2211 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2212 	lh->cid = cpu_to_le16(chan->dcid);
2213 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2214 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2215 
2216 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2217 	if (unlikely(err < 0)) {
2218 		kfree_skb(skb);
2219 		return ERR_PTR(err);
2220 	}
2221 	return skb;
2222 }
2223 
2224 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2225 					      struct msghdr *msg, size_t len)
2226 {
2227 	struct l2cap_conn *conn = chan->conn;
2228 	struct sk_buff *skb;
2229 	int err, count;
2230 	struct l2cap_hdr *lh;
2231 
2232 	BT_DBG("chan %p len %zu", chan, len);
2233 
2234 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2235 
2236 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2237 				   msg->msg_flags & MSG_DONTWAIT);
2238 	if (IS_ERR(skb))
2239 		return skb;
2240 
2241 	/* Create L2CAP header */
2242 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2243 	lh->cid = cpu_to_le16(chan->dcid);
2244 	lh->len = cpu_to_le16(len);
2245 
2246 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2247 	if (unlikely(err < 0)) {
2248 		kfree_skb(skb);
2249 		return ERR_PTR(err);
2250 	}
2251 	return skb;
2252 }
2253 
2254 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2255 					       struct msghdr *msg, size_t len,
2256 					       u16 sdulen)
2257 {
2258 	struct l2cap_conn *conn = chan->conn;
2259 	struct sk_buff *skb;
2260 	int err, count, hlen;
2261 	struct l2cap_hdr *lh;
2262 
2263 	BT_DBG("chan %p len %zu", chan, len);
2264 
2265 	if (!conn)
2266 		return ERR_PTR(-ENOTCONN);
2267 
2268 	hlen = __ertm_hdr_size(chan);
2269 
2270 	if (sdulen)
2271 		hlen += L2CAP_SDULEN_SIZE;
2272 
2273 	if (chan->fcs == L2CAP_FCS_CRC16)
2274 		hlen += L2CAP_FCS_SIZE;
2275 
2276 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2277 
2278 	skb = chan->ops->alloc_skb(chan, hlen, count,
2279 				   msg->msg_flags & MSG_DONTWAIT);
2280 	if (IS_ERR(skb))
2281 		return skb;
2282 
2283 	/* Create L2CAP header */
2284 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2285 	lh->cid = cpu_to_le16(chan->dcid);
2286 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2287 
2288 	/* Control header is populated later */
2289 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2290 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2291 	else
2292 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2293 
2294 	if (sdulen)
2295 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2296 
2297 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2298 	if (unlikely(err < 0)) {
2299 		kfree_skb(skb);
2300 		return ERR_PTR(err);
2301 	}
2302 
2303 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2304 	bt_cb(skb)->l2cap.retries = 0;
2305 	return skb;
2306 }
2307 
2308 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2309 			     struct sk_buff_head *seg_queue,
2310 			     struct msghdr *msg, size_t len)
2311 {
2312 	struct sk_buff *skb;
2313 	u16 sdu_len;
2314 	size_t pdu_len;
2315 	u8 sar;
2316 
2317 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2318 
2319 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2320 	 * so fragmented skbs are not used.  The HCI layer's handling
2321 	 * of fragmented skbs is not compatible with ERTM's queueing.
2322 	 */
2323 
2324 	/* PDU size is derived from the HCI MTU */
2325 	pdu_len = chan->conn->mtu;
2326 
2327 	/* Constrain PDU size for BR/EDR connections */
2328 	if (!chan->hs_hcon)
2329 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2330 
2331 	/* Adjust for largest possible L2CAP overhead. */
2332 	if (chan->fcs)
2333 		pdu_len -= L2CAP_FCS_SIZE;
2334 
2335 	pdu_len -= __ertm_hdr_size(chan);
2336 
2337 	/* Remote device may have requested smaller PDUs */
2338 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
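	/* Rough worked example with hypothetical numbers: a 672 byte
	 * conn->mtu, CRC16 FCS and 2 byte enhanced control fields leave
	 * 672 - 2 (FCS) - 6 (basic header + control) = 664 bytes of
	 * payload per PDU, further capped by the remote MPS above.
	 */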
2339 
2340 	if (len <= pdu_len) {
2341 		sar = L2CAP_SAR_UNSEGMENTED;
2342 		sdu_len = 0;
2343 		pdu_len = len;
2344 	} else {
2345 		sar = L2CAP_SAR_START;
2346 		sdu_len = len;
2347 	}
2348 
2349 	while (len > 0) {
2350 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2351 
2352 		if (IS_ERR(skb)) {
2353 			__skb_queue_purge(seg_queue);
2354 			return PTR_ERR(skb);
2355 		}
2356 
2357 		bt_cb(skb)->l2cap.sar = sar;
2358 		__skb_queue_tail(seg_queue, skb);
2359 
2360 		len -= pdu_len;
2361 		if (sdu_len)
2362 			sdu_len = 0;
2363 
2364 		if (len <= pdu_len) {
2365 			sar = L2CAP_SAR_END;
2366 			pdu_len = len;
2367 		} else {
2368 			sar = L2CAP_SAR_CONTINUE;
2369 		}
2370 	}
2371 
2372 	return 0;
2373 }
2374 
2375 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2376 						   struct msghdr *msg,
2377 						   size_t len, u16 sdulen)
2378 {
2379 	struct l2cap_conn *conn = chan->conn;
2380 	struct sk_buff *skb;
2381 	int err, count, hlen;
2382 	struct l2cap_hdr *lh;
2383 
2384 	BT_DBG("chan %p len %zu", chan, len);
2385 
2386 	if (!conn)
2387 		return ERR_PTR(-ENOTCONN);
2388 
2389 	hlen = L2CAP_HDR_SIZE;
2390 
2391 	if (sdulen)
2392 		hlen += L2CAP_SDULEN_SIZE;
2393 
2394 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2395 
2396 	skb = chan->ops->alloc_skb(chan, hlen, count,
2397 				   msg->msg_flags & MSG_DONTWAIT);
2398 	if (IS_ERR(skb))
2399 		return skb;
2400 
2401 	/* Create L2CAP header */
2402 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2403 	lh->cid = cpu_to_le16(chan->dcid);
2404 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2405 
2406 	if (sdulen)
2407 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2408 
2409 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2410 	if (unlikely(err < 0)) {
2411 		kfree_skb(skb);
2412 		return ERR_PTR(err);
2413 	}
2414 
2415 	return skb;
2416 }
2417 
2418 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2419 				struct sk_buff_head *seg_queue,
2420 				struct msghdr *msg, size_t len)
2421 {
2422 	struct sk_buff *skb;
2423 	size_t pdu_len;
2424 	u16 sdu_len;
2425 
2426 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2427 
2428 	sdu_len = len;
2429 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
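	/* Example with hypothetical values: remote_mps = 230 and a 500
	 * byte SDU give a first PDU of a 2 byte SDU length field plus 228
	 * bytes of data; once sdu_len is cleared, pdu_len grows back to
	 * 230, so the rest goes out as 230 and 42 byte payloads.
	 */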
2430 
2431 	while (len > 0) {
2432 		if (len <= pdu_len)
2433 			pdu_len = len;
2434 
2435 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2436 		if (IS_ERR(skb)) {
2437 			__skb_queue_purge(seg_queue);
2438 			return PTR_ERR(skb);
2439 		}
2440 
2441 		__skb_queue_tail(seg_queue, skb);
2442 
2443 		len -= pdu_len;
2444 
2445 		if (sdu_len) {
2446 			sdu_len = 0;
2447 			pdu_len += L2CAP_SDULEN_SIZE;
2448 		}
2449 	}
2450 
2451 	return 0;
2452 }
2453 
2454 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2455 {
2456 	int sent = 0;
2457 
2458 	BT_DBG("chan %p", chan);
2459 
2460 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2461 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2462 		chan->tx_credits--;
2463 		sent++;
2464 	}
2465 
2466 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2467 	       skb_queue_len(&chan->tx_q));
2468 }
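/* Illustrative summary, not part of the original source: every PDU sent
 * above consumes one tx credit, and l2cap_chan_send() suspends the
 * channel once the credits reach zero until the peer grants more.
 */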
2469 
2470 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2471 {
2472 	struct sk_buff *skb;
2473 	int err;
2474 	struct sk_buff_head seg_queue;
2475 
2476 	if (!chan->conn)
2477 		return -ENOTCONN;
2478 
2479 	/* Connectionless channel */
2480 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2481 		skb = l2cap_create_connless_pdu(chan, msg, len);
2482 		if (IS_ERR(skb))
2483 			return PTR_ERR(skb);
2484 
2485 		/* Channel lock is released before requesting new skb and then
2486 		 * reacquired thus we need to recheck channel state.
2487 		 */
2488 		if (chan->state != BT_CONNECTED) {
2489 			kfree_skb(skb);
2490 			return -ENOTCONN;
2491 		}
2492 
2493 		l2cap_do_send(chan, skb);
2494 		return len;
2495 	}
2496 
2497 	switch (chan->mode) {
2498 	case L2CAP_MODE_LE_FLOWCTL:
2499 		/* Check outgoing MTU */
2500 		if (len > chan->omtu)
2501 			return -EMSGSIZE;
2502 
2503 		__skb_queue_head_init(&seg_queue);
2504 
2505 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2506 
2507 		if (chan->state != BT_CONNECTED) {
2508 			__skb_queue_purge(&seg_queue);
2509 			err = -ENOTCONN;
2510 		}
2511 
2512 		if (err)
2513 			return err;
2514 
2515 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2516 
2517 		l2cap_le_flowctl_send(chan);
2518 
2519 		if (!chan->tx_credits)
2520 			chan->ops->suspend(chan);
2521 
2522 		err = len;
2523 
2524 		break;
2525 
2526 	case L2CAP_MODE_BASIC:
2527 		/* Check outgoing MTU */
2528 		if (len > chan->omtu)
2529 			return -EMSGSIZE;
2530 
2531 		/* Create a basic PDU */
2532 		skb = l2cap_create_basic_pdu(chan, msg, len);
2533 		if (IS_ERR(skb))
2534 			return PTR_ERR(skb);
2535 
2536 		/* Channel lock is released before requesting new skb and then
2537 		 * reacquired thus we need to recheck channel state.
2538 		 */
2539 		if (chan->state != BT_CONNECTED) {
2540 			kfree_skb(skb);
2541 			return -ENOTCONN;
2542 		}
2543 
2544 		l2cap_do_send(chan, skb);
2545 		err = len;
2546 		break;
2547 
2548 	case L2CAP_MODE_ERTM:
2549 	case L2CAP_MODE_STREAMING:
2550 		/* Check outgoing MTU */
2551 		if (len > chan->omtu) {
2552 			err = -EMSGSIZE;
2553 			break;
2554 		}
2555 
2556 		__skb_queue_head_init(&seg_queue);
2557 
2558 		/* Do segmentation before calling in to the state machine,
2559 		 * since it's possible to block while waiting for memory
2560 		 * allocation.
2561 		 */
2562 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2563 
2564 		/* The channel could have been closed while segmenting,
2565 		 * check that it is still connected.
2566 		 */
2567 		if (chan->state != BT_CONNECTED) {
2568 			__skb_queue_purge(&seg_queue);
2569 			err = -ENOTCONN;
2570 		}
2571 
2572 		if (err)
2573 			break;
2574 
2575 		if (chan->mode == L2CAP_MODE_ERTM)
2576 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2577 		else
2578 			l2cap_streaming_send(chan, &seg_queue);
2579 
2580 		err = len;
2581 
2582 		/* If the skbs were not queued for sending, they'll still be in
2583 		 * seg_queue and need to be purged.
2584 		 */
2585 		__skb_queue_purge(&seg_queue);
2586 		break;
2587 
2588 	default:
2589 		BT_DBG("bad mode 0x%2.2x", chan->mode);
2590 		err = -EBADFD;
2591 	}
2592 
2593 	return err;
2594 }
2595 EXPORT_SYMBOL_GPL(l2cap_chan_send);
2596 
2597 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2598 {
2599 	struct l2cap_ctrl control;
2600 	u16 seq;
2601 
2602 	BT_DBG("chan %p, txseq %u", chan, txseq);
2603 
2604 	memset(&control, 0, sizeof(control));
2605 	control.sframe = 1;
2606 	control.super = L2CAP_SUPER_SREJ;
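	/* For illustration: if expected_tx_seq is 5 and an I-frame with
	 * txseq 8 arrives, the loop below sends SREJ frames for 5, 6 and 7
	 * (skipping any already buffered in srej_q) and records each
	 * requested sequence number in srej_list.
	 */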
2607 
2608 	for (seq = chan->expected_tx_seq; seq != txseq;
2609 	     seq = __next_seq(chan, seq)) {
2610 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2611 			control.reqseq = seq;
2612 			l2cap_send_sframe(chan, &control);
2613 			l2cap_seq_list_append(&chan->srej_list, seq);
2614 		}
2615 	}
2616 
2617 	chan->expected_tx_seq = __next_seq(chan, txseq);
2618 }
2619 
2620 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2621 {
2622 	struct l2cap_ctrl control;
2623 
2624 	BT_DBG("chan %p", chan);
2625 
2626 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2627 		return;
2628 
2629 	memset(&control, 0, sizeof(control));
2630 	control.sframe = 1;
2631 	control.super = L2CAP_SUPER_SREJ;
2632 	control.reqseq = chan->srej_list.tail;
2633 	l2cap_send_sframe(chan, &control);
2634 }
2635 
2636 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2637 {
2638 	struct l2cap_ctrl control;
2639 	u16 initial_head;
2640 	u16 seq;
2641 
2642 	BT_DBG("chan %p, txseq %u", chan, txseq);
2643 
2644 	memset(&control, 0, sizeof(control));
2645 	control.sframe = 1;
2646 	control.super = L2CAP_SUPER_SREJ;
2647 
2648 	/* Capture initial list head to allow only one pass through the list. */
2649 	initial_head = chan->srej_list.head;
2650 
2651 	do {
2652 		seq = l2cap_seq_list_pop(&chan->srej_list);
2653 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2654 			break;
2655 
2656 		control.reqseq = seq;
2657 		l2cap_send_sframe(chan, &control);
2658 		l2cap_seq_list_append(&chan->srej_list, seq);
2659 	} while (chan->srej_list.head != initial_head);
2660 }
2661 
2662 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2663 {
2664 	struct sk_buff *acked_skb;
2665 	u16 ackseq;
2666 
2667 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2668 
2669 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2670 		return;
2671 
2672 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2673 	       chan->expected_ack_seq, chan->unacked_frames);
2674 
2675 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2676 	     ackseq = __next_seq(chan, ackseq)) {
2677 
2678 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2679 		if (acked_skb) {
2680 			skb_unlink(acked_skb, &chan->tx_q);
2681 			kfree_skb(acked_skb);
2682 			chan->unacked_frames--;
2683 		}
2684 	}
2685 
2686 	chan->expected_ack_seq = reqseq;
2687 
2688 	if (chan->unacked_frames == 0)
2689 		__clear_retrans_timer(chan);
2690 
2691 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2692 }
2693 
2694 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2695 {
2696 	BT_DBG("chan %p", chan);
2697 
2698 	chan->expected_tx_seq = chan->buffer_seq;
2699 	l2cap_seq_list_clear(&chan->srej_list);
2700 	skb_queue_purge(&chan->srej_q);
2701 	chan->rx_state = L2CAP_RX_STATE_RECV;
2702 }
2703 
2704 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2705 				struct l2cap_ctrl *control,
2706 				struct sk_buff_head *skbs, u8 event)
2707 {
2708 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2709 	       event);
2710 
2711 	switch (event) {
2712 	case L2CAP_EV_DATA_REQUEST:
2713 		if (chan->tx_send_head == NULL)
2714 			chan->tx_send_head = skb_peek(skbs);
2715 
2716 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2717 		l2cap_ertm_send(chan);
2718 		break;
2719 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2720 		BT_DBG("Enter LOCAL_BUSY");
2721 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2722 
2723 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2724 			/* The SREJ_SENT state must be aborted if we are to
2725 			 * enter the LOCAL_BUSY state.
2726 			 */
2727 			l2cap_abort_rx_srej_sent(chan);
2728 		}
2729 
2730 		l2cap_send_ack(chan);
2731 
2732 		break;
2733 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2734 		BT_DBG("Exit LOCAL_BUSY");
2735 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2736 
2737 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2738 			struct l2cap_ctrl local_control;
2739 
2740 			memset(&local_control, 0, sizeof(local_control));
2741 			local_control.sframe = 1;
2742 			local_control.super = L2CAP_SUPER_RR;
2743 			local_control.poll = 1;
2744 			local_control.reqseq = chan->buffer_seq;
2745 			l2cap_send_sframe(chan, &local_control);
2746 
2747 			chan->retry_count = 1;
2748 			__set_monitor_timer(chan);
2749 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2750 		}
2751 		break;
2752 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2753 		l2cap_process_reqseq(chan, control->reqseq);
2754 		break;
2755 	case L2CAP_EV_EXPLICIT_POLL:
2756 		l2cap_send_rr_or_rnr(chan, 1);
2757 		chan->retry_count = 1;
2758 		__set_monitor_timer(chan);
2759 		__clear_ack_timer(chan);
2760 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2761 		break;
2762 	case L2CAP_EV_RETRANS_TO:
2763 		l2cap_send_rr_or_rnr(chan, 1);
2764 		chan->retry_count = 1;
2765 		__set_monitor_timer(chan);
2766 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2767 		break;
2768 	case L2CAP_EV_RECV_FBIT:
2769 		/* Nothing to process */
2770 		break;
2771 	default:
2772 		break;
2773 	}
2774 }
2775 
2776 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2777 				  struct l2cap_ctrl *control,
2778 				  struct sk_buff_head *skbs, u8 event)
2779 {
2780 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2781 	       event);
2782 
2783 	switch (event) {
2784 	case L2CAP_EV_DATA_REQUEST:
2785 		if (chan->tx_send_head == NULL)
2786 			chan->tx_send_head = skb_peek(skbs);
2787 		/* Queue data, but don't send. */
2788 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2789 		break;
2790 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2791 		BT_DBG("Enter LOCAL_BUSY");
2792 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2793 
2794 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2795 			/* The SREJ_SENT state must be aborted if we are to
2796 			 * enter the LOCAL_BUSY state.
2797 			 */
2798 			l2cap_abort_rx_srej_sent(chan);
2799 		}
2800 
2801 		l2cap_send_ack(chan);
2802 
2803 		break;
2804 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2805 		BT_DBG("Exit LOCAL_BUSY");
2806 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2807 
2808 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2809 			struct l2cap_ctrl local_control;
2810 			memset(&local_control, 0, sizeof(local_control));
2811 			local_control.sframe = 1;
2812 			local_control.super = L2CAP_SUPER_RR;
2813 			local_control.poll = 1;
2814 			local_control.reqseq = chan->buffer_seq;
2815 			l2cap_send_sframe(chan, &local_control);
2816 
2817 			chan->retry_count = 1;
2818 			__set_monitor_timer(chan);
2819 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2820 		}
2821 		break;
2822 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2823 		l2cap_process_reqseq(chan, control->reqseq);
2824 
2825 		/* Fall through */
2826 
2827 	case L2CAP_EV_RECV_FBIT:
2828 		if (control && control->final) {
2829 			__clear_monitor_timer(chan);
2830 			if (chan->unacked_frames > 0)
2831 				__set_retrans_timer(chan);
2832 			chan->retry_count = 0;
2833 			chan->tx_state = L2CAP_TX_STATE_XMIT;
2834 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2835 		}
2836 		break;
2837 	case L2CAP_EV_EXPLICIT_POLL:
2838 		/* Ignore */
2839 		break;
2840 	case L2CAP_EV_MONITOR_TO:
2841 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2842 			l2cap_send_rr_or_rnr(chan, 1);
2843 			__set_monitor_timer(chan);
2844 			chan->retry_count++;
2845 		} else {
2846 			l2cap_send_disconn_req(chan, ECONNABORTED);
2847 		}
2848 		break;
2849 	default:
2850 		break;
2851 	}
2852 }
2853 
2854 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2855 		     struct sk_buff_head *skbs, u8 event)
2856 {
2857 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2858 	       chan, control, skbs, event, chan->tx_state);
2859 
2860 	switch (chan->tx_state) {
2861 	case L2CAP_TX_STATE_XMIT:
2862 		l2cap_tx_state_xmit(chan, control, skbs, event);
2863 		break;
2864 	case L2CAP_TX_STATE_WAIT_F:
2865 		l2cap_tx_state_wait_f(chan, control, skbs, event);
2866 		break;
2867 	default:
2868 		/* Ignore event */
2869 		break;
2870 	}
2871 }
2872 
2873 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2874 			     struct l2cap_ctrl *control)
2875 {
2876 	BT_DBG("chan %p, control %p", chan, control);
2877 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
2878 }
2879 
2880 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2881 				  struct l2cap_ctrl *control)
2882 {
2883 	BT_DBG("chan %p, control %p", chan, control);
2884 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
2885 }
2886 
2887 /* Copy frame to all raw sockets on that connection */
2888 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2889 {
2890 	struct sk_buff *nskb;
2891 	struct l2cap_chan *chan;
2892 
2893 	BT_DBG("conn %p", conn);
2894 
2895 	mutex_lock(&conn->chan_lock);
2896 
2897 	list_for_each_entry(chan, &conn->chan_l, list) {
2898 		if (chan->chan_type != L2CAP_CHAN_RAW)
2899 			continue;
2900 
2901 		/* Don't send frame to the channel it came from */
2902 		if (bt_cb(skb)->l2cap.chan == chan)
2903 			continue;
2904 
2905 		nskb = skb_clone(skb, GFP_KERNEL);
2906 		if (!nskb)
2907 			continue;
2908 		if (chan->ops->recv(chan, nskb))
2909 			kfree_skb(nskb);
2910 	}
2911 
2912 	mutex_unlock(&conn->chan_lock);
2913 }
2914 
2915 /* ---- L2CAP signalling commands ---- */
2916 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2917 				       u8 ident, u16 dlen, void *data)
2918 {
2919 	struct sk_buff *skb, **frag;
2920 	struct l2cap_cmd_hdr *cmd;
2921 	struct l2cap_hdr *lh;
2922 	int len, count;
2923 
2924 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2925 	       conn, code, ident, dlen);
2926 
2927 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2928 		return NULL;
2929 
2930 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2931 	count = min_t(unsigned int, conn->mtu, len);
2932 
2933 	skb = bt_skb_alloc(count, GFP_KERNEL);
2934 	if (!skb)
2935 		return NULL;
2936 
2937 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2938 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2939 
2940 	if (conn->hcon->type == LE_LINK)
2941 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
2942 	else
2943 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2944 
2945 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
2946 	cmd->code  = code;
2947 	cmd->ident = ident;
2948 	cmd->len   = cpu_to_le16(dlen);
2949 
2950 	if (dlen) {
2951 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2952 		skb_put_data(skb, data, count);
2953 		data += count;
2954 	}
2955 
2956 	len -= skb->len;
2957 
2958 	/* Continuation fragments (no L2CAP header) */
2959 	frag = &skb_shinfo(skb)->frag_list;
2960 	while (len) {
2961 		count = min_t(unsigned int, conn->mtu, len);
2962 
2963 		*frag = bt_skb_alloc(count, GFP_KERNEL);
2964 		if (!*frag)
2965 			goto fail;
2966 
2967 		skb_put_data(*frag, data, count);
2968 
2969 		len  -= count;
2970 		data += count;
2971 
2972 		frag = &(*frag)->next;
2973 	}
2974 
2975 	return skb;
2976 
2977 fail:
2978 	kfree_skb(skb);
2979 	return NULL;
2980 }
2981 
2982 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2983 				     unsigned long *val)
2984 {
2985 	struct l2cap_conf_opt *opt = *ptr;
2986 	int len;
2987 
2988 	len = L2CAP_CONF_OPT_SIZE + opt->len;
2989 	*ptr += len;
2990 
2991 	*type = opt->type;
2992 	*olen = opt->len;
2993 
2994 	switch (opt->len) {
2995 	case 1:
2996 		*val = *((u8 *) opt->val);
2997 		break;
2998 
2999 	case 2:
3000 		*val = get_unaligned_le16(opt->val);
3001 		break;
3002 
3003 	case 4:
3004 		*val = get_unaligned_le32(opt->val);
3005 		break;
3006 
3007 	default:
3008 		*val = (unsigned long) opt->val;
3009 		break;
3010 	}
3011 
3012 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3013 	return len;
3014 }
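/* Illustration of the option encoding handled above and below: each
 * configuration option is a 2 byte type/length header followed by the
 * value, e.g. an MTU option of 512 is the byte sequence 01 02 00 02
 * (type 0x01, length 2, 0x0200 little endian).
 */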
3015 
3016 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3017 {
3018 	struct l2cap_conf_opt *opt = *ptr;
3019 
3020 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3021 
3022 	if (size < L2CAP_CONF_OPT_SIZE + len)
3023 		return;
3024 
3025 	opt->type = type;
3026 	opt->len  = len;
3027 
3028 	switch (len) {
3029 	case 1:
3030 		*((u8 *) opt->val)  = val;
3031 		break;
3032 
3033 	case 2:
3034 		put_unaligned_le16(val, opt->val);
3035 		break;
3036 
3037 	case 4:
3038 		put_unaligned_le32(val, opt->val);
3039 		break;
3040 
3041 	default:
3042 		memcpy(opt->val, (void *) val, len);
3043 		break;
3044 	}
3045 
3046 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3047 }
3048 
3049 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3050 {
3051 	struct l2cap_conf_efs efs;
3052 
3053 	switch (chan->mode) {
3054 	case L2CAP_MODE_ERTM:
3055 		efs.id		= chan->local_id;
3056 		efs.stype	= chan->local_stype;
3057 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3058 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3059 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3060 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3061 		break;
3062 
3063 	case L2CAP_MODE_STREAMING:
3064 		efs.id		= 1;
3065 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3066 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3067 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3068 		efs.acc_lat	= 0;
3069 		efs.flush_to	= 0;
3070 		break;
3071 
3072 	default:
3073 		return;
3074 	}
3075 
3076 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3077 			   (unsigned long) &efs, size);
3078 }
3079 
3080 static void l2cap_ack_timeout(struct work_struct *work)
3081 {
3082 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3083 					       ack_timer.work);
3084 	u16 frames_to_ack;
3085 
3086 	BT_DBG("chan %p", chan);
3087 
3088 	l2cap_chan_lock(chan);
3089 
3090 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3091 				     chan->last_acked_seq);
3092 
3093 	if (frames_to_ack)
3094 		l2cap_send_rr_or_rnr(chan, 0);
3095 
3096 	l2cap_chan_unlock(chan);
3097 	l2cap_chan_put(chan);
3098 }
3099 
3100 int l2cap_ertm_init(struct l2cap_chan *chan)
3101 {
3102 	int err;
3103 
3104 	chan->next_tx_seq = 0;
3105 	chan->expected_tx_seq = 0;
3106 	chan->expected_ack_seq = 0;
3107 	chan->unacked_frames = 0;
3108 	chan->buffer_seq = 0;
3109 	chan->frames_sent = 0;
3110 	chan->last_acked_seq = 0;
3111 	chan->sdu = NULL;
3112 	chan->sdu_last_frag = NULL;
3113 	chan->sdu_len = 0;
3114 
3115 	skb_queue_head_init(&chan->tx_q);
3116 
3117 	chan->local_amp_id = AMP_ID_BREDR;
3118 	chan->move_id = AMP_ID_BREDR;
3119 	chan->move_state = L2CAP_MOVE_STABLE;
3120 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3121 
3122 	if (chan->mode != L2CAP_MODE_ERTM)
3123 		return 0;
3124 
3125 	chan->rx_state = L2CAP_RX_STATE_RECV;
3126 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3127 
3128 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3129 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3130 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
3131 
3132 	skb_queue_head_init(&chan->srej_q);
3133 
3134 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3135 	if (err < 0)
3136 		return err;
3137 
3138 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3139 	if (err < 0)
3140 		l2cap_seq_list_free(&chan->srej_list);
3141 
3142 	return err;
3143 }
3144 
3145 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3146 {
3147 	switch (mode) {
3148 	case L2CAP_MODE_STREAMING:
3149 	case L2CAP_MODE_ERTM:
3150 		if (l2cap_mode_supported(mode, remote_feat_mask))
3151 			return mode;
3152 		/* fall through */
3153 	default:
3154 		return L2CAP_MODE_BASIC;
3155 	}
3156 }
3157 
3158 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3159 {
3160 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3161 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3162 }
3163 
3164 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3165 {
3166 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3167 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3168 }
3169 
3170 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3171 				      struct l2cap_conf_rfc *rfc)
3172 {
3173 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3174 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3175 
3176 		/* Class 1 devices must have ERTM timeouts
3177 		 * exceeding the Link Supervision Timeout.  The
3178 		 * default Link Supervision Timeout for AMP
3179 		 * controllers is 10 seconds.
3180 		 *
3181 		 * Class 1 devices use 0xffffffff for their
3182 		 * best-effort flush timeout, so the clamping logic
3183 		 * will result in a timeout that meets the above
3184 		 * requirement.  ERTM timeouts are 16-bit values, so
3185 		 * the maximum timeout is 65.535 seconds.
3186 		 */
3187 
3188 		/* Convert timeout to milliseconds and round */
3189 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3190 
3191 		/* This is the recommended formula for class 2 devices
3192 		 * that start ERTM timers when packets are sent to the
3193 		 * controller.
3194 		 */
3195 		ertm_to = 3 * ertm_to + 500;
3196 
3197 		if (ertm_to > 0xffff)
3198 			ertm_to = 0xffff;
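		/* Worked example (assuming amp_be_flush_to is expressed in
		 * microseconds): 0xffffffff us rounds up to 4294968 ms, and
		 * 3 * 4294968 + 500 is far above 0xffff, so the timeout is
		 * clamped to 65535 ms, i.e. the 65.535 s maximum noted above.
		 */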
3199 
3200 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3201 		rfc->monitor_timeout = rfc->retrans_timeout;
3202 	} else {
3203 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3204 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3205 	}
3206 }
3207 
3208 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3209 {
3210 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3211 	    __l2cap_ews_supported(chan->conn)) {
3212 		/* use extended control field */
3213 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3214 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3215 	} else {
3216 		chan->tx_win = min_t(u16, chan->tx_win,
3217 				     L2CAP_DEFAULT_TX_WINDOW);
3218 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3219 	}
3220 	chan->ack_win = chan->tx_win;
3221 }
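/* Rough numbers (assumed values from l2cap.h): the enhanced control
 * field caps the window at L2CAP_DEFAULT_TX_WINDOW (63) frames, while
 * the extended window size option selected above allows up to
 * L2CAP_DEFAULT_EXT_WINDOW (0x3fff) frames when both sides support EWS.
 */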
3222 
3223 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3224 {
3225 	struct l2cap_conf_req *req = data;
3226 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3227 	void *ptr = req->data;
3228 	void *endptr = data + data_size;
3229 	u16 size;
3230 
3231 	BT_DBG("chan %p", chan);
3232 
3233 	if (chan->num_conf_req || chan->num_conf_rsp)
3234 		goto done;
3235 
3236 	switch (chan->mode) {
3237 	case L2CAP_MODE_STREAMING:
3238 	case L2CAP_MODE_ERTM:
3239 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3240 			break;
3241 
3242 		if (__l2cap_efs_supported(chan->conn))
3243 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3244 
3245 		/* fall through */
3246 	default:
3247 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3248 		break;
3249 	}
3250 
3251 done:
3252 	if (chan->imtu != L2CAP_DEFAULT_MTU)
3253 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
3254 
3255 	switch (chan->mode) {
3256 	case L2CAP_MODE_BASIC:
3257 		if (disable_ertm)
3258 			break;
3259 
3260 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3261 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3262 			break;
3263 
3264 		rfc.mode            = L2CAP_MODE_BASIC;
3265 		rfc.txwin_size      = 0;
3266 		rfc.max_transmit    = 0;
3267 		rfc.retrans_timeout = 0;
3268 		rfc.monitor_timeout = 0;
3269 		rfc.max_pdu_size    = 0;
3270 
3271 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3272 				   (unsigned long) &rfc, endptr - ptr);
3273 		break;
3274 
3275 	case L2CAP_MODE_ERTM:
3276 		rfc.mode            = L2CAP_MODE_ERTM;
3277 		rfc.max_transmit    = chan->max_tx;
3278 
3279 		__l2cap_set_ertm_timeouts(chan, &rfc);
3280 
3281 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3282 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3283 			     L2CAP_FCS_SIZE);
3284 		rfc.max_pdu_size = cpu_to_le16(size);
3285 
3286 		l2cap_txwin_setup(chan);
3287 
3288 		rfc.txwin_size = min_t(u16, chan->tx_win,
3289 				       L2CAP_DEFAULT_TX_WINDOW);
3290 
3291 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3292 				   (unsigned long) &rfc, endptr - ptr);
3293 
3294 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3295 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3296 
3297 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3298 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3299 					   chan->tx_win, endptr - ptr);
3300 
3301 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3302 			if (chan->fcs == L2CAP_FCS_NONE ||
3303 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3304 				chan->fcs = L2CAP_FCS_NONE;
3305 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3306 						   chan->fcs, endptr - ptr);
3307 			}
3308 		break;
3309 
3310 	case L2CAP_MODE_STREAMING:
3311 		l2cap_txwin_setup(chan);
3312 		rfc.mode            = L2CAP_MODE_STREAMING;
3313 		rfc.txwin_size      = 0;
3314 		rfc.max_transmit    = 0;
3315 		rfc.retrans_timeout = 0;
3316 		rfc.monitor_timeout = 0;
3317 
3318 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3319 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3320 			     L2CAP_FCS_SIZE);
3321 		rfc.max_pdu_size = cpu_to_le16(size);
3322 
3323 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3324 				   (unsigned long) &rfc, endptr - ptr);
3325 
3326 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3327 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3328 
3329 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3330 			if (chan->fcs == L2CAP_FCS_NONE ||
3331 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3332 				chan->fcs = L2CAP_FCS_NONE;
3333 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3334 						   chan->fcs, endptr - ptr);
3335 			}
3336 		break;
3337 	}
3338 
3339 	req->dcid  = cpu_to_le16(chan->dcid);
3340 	req->flags = cpu_to_le16(0);
3341 
3342 	return ptr - data;
3343 }
3344 
3345 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3346 {
3347 	struct l2cap_conf_rsp *rsp = data;
3348 	void *ptr = rsp->data;
3349 	void *endptr = data + data_size;
3350 	void *req = chan->conf_req;
3351 	int len = chan->conf_len;
3352 	int type, hint, olen;
3353 	unsigned long val;
3354 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3355 	struct l2cap_conf_efs efs;
3356 	u8 remote_efs = 0;
3357 	u16 mtu = L2CAP_DEFAULT_MTU;
3358 	u16 result = L2CAP_CONF_SUCCESS;
3359 	u16 size;
3360 
3361 	BT_DBG("chan %p", chan);
3362 
3363 	while (len >= L2CAP_CONF_OPT_SIZE) {
3364 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3365 		if (len < 0)
3366 			break;
3367 
3368 		hint  = type & L2CAP_CONF_HINT;
3369 		type &= L2CAP_CONF_MASK;
3370 
3371 		switch (type) {
3372 		case L2CAP_CONF_MTU:
3373 			if (olen != 2)
3374 				break;
3375 			mtu = val;
3376 			break;
3377 
3378 		case L2CAP_CONF_FLUSH_TO:
3379 			if (olen != 2)
3380 				break;
3381 			chan->flush_to = val;
3382 			break;
3383 
3384 		case L2CAP_CONF_QOS:
3385 			break;
3386 
3387 		case L2CAP_CONF_RFC:
3388 			if (olen != sizeof(rfc))
3389 				break;
3390 			memcpy(&rfc, (void *) val, olen);
3391 			break;
3392 
3393 		case L2CAP_CONF_FCS:
3394 			if (olen != 1)
3395 				break;
3396 			if (val == L2CAP_FCS_NONE)
3397 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3398 			break;
3399 
3400 		case L2CAP_CONF_EFS:
3401 			if (olen != sizeof(efs))
3402 				break;
3403 			remote_efs = 1;
3404 			memcpy(&efs, (void *) val, olen);
3405 			break;
3406 
3407 		case L2CAP_CONF_EWS:
3408 			if (olen != 2)
3409 				break;
3410 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3411 				return -ECONNREFUSED;
3412 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3413 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3414 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3415 			chan->remote_tx_win = val;
3416 			break;
3417 
3418 		default:
3419 			if (hint)
3420 				break;
3421 			result = L2CAP_CONF_UNKNOWN;
3422 			*((u8 *) ptr++) = type;
3423 			break;
3424 		}
3425 	}
3426 
3427 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3428 		goto done;
3429 
3430 	switch (chan->mode) {
3431 	case L2CAP_MODE_STREAMING:
3432 	case L2CAP_MODE_ERTM:
3433 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3434 			chan->mode = l2cap_select_mode(rfc.mode,
3435 						       chan->conn->feat_mask);
3436 			break;
3437 		}
3438 
3439 		if (remote_efs) {
3440 			if (__l2cap_efs_supported(chan->conn))
3441 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3442 			else
3443 				return -ECONNREFUSED;
3444 		}
3445 
3446 		if (chan->mode != rfc.mode)
3447 			return -ECONNREFUSED;
3448 
3449 		break;
3450 	}
3451 
3452 done:
3453 	if (chan->mode != rfc.mode) {
3454 		result = L2CAP_CONF_UNACCEPT;
3455 		rfc.mode = chan->mode;
3456 
3457 		if (chan->num_conf_rsp == 1)
3458 			return -ECONNREFUSED;
3459 
3460 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3461 				   (unsigned long) &rfc, endptr - ptr);
3462 	}
3463 
3464 	if (result == L2CAP_CONF_SUCCESS) {
3465 		/* Configure output options and let the other side know
3466 		 * which ones we don't like. */
3467 
3468 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3469 			result = L2CAP_CONF_UNACCEPT;
3470 		else {
3471 			chan->omtu = mtu;
3472 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3473 		}
3474 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3475 
3476 		if (remote_efs) {
3477 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3478 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3479 			    efs.stype != chan->local_stype) {
3480 
3481 				result = L2CAP_CONF_UNACCEPT;
3482 
3483 				if (chan->num_conf_req >= 1)
3484 					return -ECONNREFUSED;
3485 
3486 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3487 						   sizeof(efs),
3488 						   (unsigned long) &efs, endptr - ptr);
3489 			} else {
3490 				/* Send PENDING Conf Rsp */
3491 				result = L2CAP_CONF_PENDING;
3492 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3493 			}
3494 		}
3495 
3496 		switch (rfc.mode) {
3497 		case L2CAP_MODE_BASIC:
3498 			chan->fcs = L2CAP_FCS_NONE;
3499 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3500 			break;
3501 
3502 		case L2CAP_MODE_ERTM:
3503 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3504 				chan->remote_tx_win = rfc.txwin_size;
3505 			else
3506 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3507 
3508 			chan->remote_max_tx = rfc.max_transmit;
3509 
3510 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3511 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3512 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3513 			rfc.max_pdu_size = cpu_to_le16(size);
3514 			chan->remote_mps = size;
3515 
3516 			__l2cap_set_ertm_timeouts(chan, &rfc);
3517 
3518 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3519 
3520 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3521 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3522 
3523 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3524 				chan->remote_id = efs.id;
3525 				chan->remote_stype = efs.stype;
3526 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3527 				chan->remote_flush_to =
3528 					le32_to_cpu(efs.flush_to);
3529 				chan->remote_acc_lat =
3530 					le32_to_cpu(efs.acc_lat);
3531 				chan->remote_sdu_itime =
3532 					le32_to_cpu(efs.sdu_itime);
3533 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3534 						   sizeof(efs),
3535 						   (unsigned long) &efs, endptr - ptr);
3536 			}
3537 			break;
3538 
3539 		case L2CAP_MODE_STREAMING:
3540 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3541 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3542 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3543 			rfc.max_pdu_size = cpu_to_le16(size);
3544 			chan->remote_mps = size;
3545 
3546 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3547 
3548 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3549 					   (unsigned long) &rfc, endptr - ptr);
3550 
3551 			break;
3552 
3553 		default:
3554 			result = L2CAP_CONF_UNACCEPT;
3555 
3556 			memset(&rfc, 0, sizeof(rfc));
3557 			rfc.mode = chan->mode;
3558 		}
3559 
3560 		if (result == L2CAP_CONF_SUCCESS)
3561 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3562 	}
3563 	rsp->scid   = cpu_to_le16(chan->dcid);
3564 	rsp->result = cpu_to_le16(result);
3565 	rsp->flags  = cpu_to_le16(0);
3566 
3567 	return ptr - data;
3568 }
3569 
3570 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3571 				void *data, size_t size, u16 *result)
3572 {
3573 	struct l2cap_conf_req *req = data;
3574 	void *ptr = req->data;
3575 	void *endptr = data + size;
3576 	int type, olen;
3577 	unsigned long val;
3578 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3579 	struct l2cap_conf_efs efs;
3580 
3581 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3582 
3583 	while (len >= L2CAP_CONF_OPT_SIZE) {
3584 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3585 		if (len < 0)
3586 			break;
3587 
3588 		switch (type) {
3589 		case L2CAP_CONF_MTU:
3590 			if (olen != 2)
3591 				break;
3592 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3593 				*result = L2CAP_CONF_UNACCEPT;
3594 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3595 			} else
3596 				chan->imtu = val;
3597 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3598 					   endptr - ptr);
3599 			break;
3600 
3601 		case L2CAP_CONF_FLUSH_TO:
3602 			if (olen != 2)
3603 				break;
3604 			chan->flush_to = val;
3605 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3606 					   chan->flush_to, endptr - ptr);
3607 			break;
3608 
3609 		case L2CAP_CONF_RFC:
3610 			if (olen != sizeof(rfc))
3611 				break;
3612 			memcpy(&rfc, (void *)val, olen);
3613 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3614 			    rfc.mode != chan->mode)
3615 				return -ECONNREFUSED;
3616 			chan->fcs = 0;
3617 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3618 					   (unsigned long) &rfc, endptr - ptr);
3619 			break;
3620 
3621 		case L2CAP_CONF_EWS:
3622 			if (olen != 2)
3623 				break;
3624 			chan->ack_win = min_t(u16, val, chan->ack_win);
3625 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3626 					   chan->tx_win, endptr - ptr);
3627 			break;
3628 
3629 		case L2CAP_CONF_EFS:
3630 			if (olen != sizeof(efs))
3631 				break;
3632 			memcpy(&efs, (void *)val, olen);
3633 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3634 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3635 			    efs.stype != chan->local_stype)
3636 				return -ECONNREFUSED;
3637 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3638 					   (unsigned long) &efs, endptr - ptr);
3639 			break;
3640 
3641 		case L2CAP_CONF_FCS:
3642 			if (olen != 1)
3643 				break;
3644 			if (*result == L2CAP_CONF_PENDING)
3645 				if (val == L2CAP_FCS_NONE)
3646 					set_bit(CONF_RECV_NO_FCS,
3647 						&chan->conf_state);
3648 			break;
3649 		}
3650 	}
3651 
3652 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3653 		return -ECONNREFUSED;
3654 
3655 	chan->mode = rfc.mode;
3656 
3657 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3658 		switch (rfc.mode) {
3659 		case L2CAP_MODE_ERTM:
3660 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3661 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3662 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3663 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3664 				chan->ack_win = min_t(u16, chan->ack_win,
3665 						      rfc.txwin_size);
3666 
3667 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3668 				chan->local_msdu = le16_to_cpu(efs.msdu);
3669 				chan->local_sdu_itime =
3670 					le32_to_cpu(efs.sdu_itime);
3671 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3672 				chan->local_flush_to =
3673 					le32_to_cpu(efs.flush_to);
3674 			}
3675 			break;
3676 
3677 		case L2CAP_MODE_STREAMING:
3678 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3679 		}
3680 	}
3681 
3682 	req->dcid   = cpu_to_le16(chan->dcid);
3683 	req->flags  = cpu_to_le16(0);
3684 
3685 	return ptr - data;
3686 }
3687 
3688 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3689 				u16 result, u16 flags)
3690 {
3691 	struct l2cap_conf_rsp *rsp = data;
3692 	void *ptr = rsp->data;
3693 
3694 	BT_DBG("chan %p", chan);
3695 
3696 	rsp->scid   = cpu_to_le16(chan->dcid);
3697 	rsp->result = cpu_to_le16(result);
3698 	rsp->flags  = cpu_to_le16(flags);
3699 
3700 	return ptr - data;
3701 }
3702 
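/* Send the LE Credit Based Connection Response that was deferred while
 * the incoming connection waited for acceptance by the upper layer.
 */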
3703 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3704 {
3705 	struct l2cap_le_conn_rsp rsp;
3706 	struct l2cap_conn *conn = chan->conn;
3707 
3708 	BT_DBG("chan %p", chan);
3709 
3710 	rsp.dcid    = cpu_to_le16(chan->scid);
3711 	rsp.mtu     = cpu_to_le16(chan->imtu);
3712 	rsp.mps     = cpu_to_le16(chan->mps);
3713 	rsp.credits = cpu_to_le16(chan->rx_credits);
3714 	rsp.result  = cpu_to_le16(L2CAP_CR_SUCCESS);
3715 
3716 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3717 		       &rsp);
3718 }
3719 
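/* Send the deferred Connection Response (or Create Channel Response for
 * an AMP channel) and, unless one is already outstanding, start
 * configuration by sending the first Configure Request.
 */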
3720 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
3721 {
3722 	struct l2cap_conn_rsp rsp;
3723 	struct l2cap_conn *conn = chan->conn;
3724 	u8 buf[128];
3725 	u8 rsp_code;
3726 
3727 	rsp.scid   = cpu_to_le16(chan->dcid);
3728 	rsp.dcid   = cpu_to_le16(chan->scid);
3729 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3730 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3731 
3732 	if (chan->hs_hcon)
3733 		rsp_code = L2CAP_CREATE_CHAN_RSP;
3734 	else
3735 		rsp_code = L2CAP_CONN_RSP;
3736 
3737 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3738 
3739 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
3740 
3741 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
3742 		return;
3743 
3744 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3745 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3746 	chan->num_conf_req++;
3747 }
3748 
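/* Extract the RFC and extended window size options from a successful
 * Configure Response and apply the negotiated timeouts, MPS and
 * transmit window to the channel.
 */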
3749 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
3750 {
3751 	int type, olen;
3752 	unsigned long val;
3753 	/* Use sane default values in case a misbehaving remote device
3754 	 * did not send an RFC or extended window size option.
3755 	 */
3756 	u16 txwin_ext = chan->ack_win;
3757 	struct l2cap_conf_rfc rfc = {
3758 		.mode = chan->mode,
3759 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3760 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
3761 		.max_pdu_size = cpu_to_le16(chan->imtu),
3762 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3763 	};
3764 
3765 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
3766 
3767 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
3768 		return;
3769 
3770 	while (len >= L2CAP_CONF_OPT_SIZE) {
3771 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3772 		if (len < 0)
3773 			break;
3774 
3775 		switch (type) {
3776 		case L2CAP_CONF_RFC:
3777 			if (olen != sizeof(rfc))
3778 				break;
3779 			memcpy(&rfc, (void *)val, olen);
3780 			break;
3781 		case L2CAP_CONF_EWS:
3782 			if (olen != 2)
3783 				break;
3784 			txwin_ext = val;
3785 			break;
3786 		}
3787 	}
3788 
3789 	switch (rfc.mode) {
3790 	case L2CAP_MODE_ERTM:
3791 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3792 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3793 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
3794 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3795 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3796 		else
3797 			chan->ack_win = min_t(u16, chan->ack_win,
3798 					      rfc.txwin_size);
3799 		break;
3800 	case L2CAP_MODE_STREAMING:
3801 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3802 	}
3803 }
3804 
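/* Handle a Command Reject. If it refers to our outstanding Information
 * Request, stop waiting for feature information and start any channels
 * that were held back by it.
 */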
3805 static inline int l2cap_command_rej(struct l2cap_conn *conn,
3806 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3807 				    u8 *data)
3808 {
3809 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
3810 
3811 	if (cmd_len < sizeof(*rej))
3812 		return -EPROTO;
3813 
3814 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
3815 		return 0;
3816 
3817 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3818 	    cmd->ident == conn->info_ident) {
3819 		cancel_delayed_work(&conn->info_timer);
3820 
3821 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3822 		conn->info_ident = 0;
3823 
3824 		l2cap_conn_start(conn);
3825 	}
3826 
3827 	return 0;
3828 }
3829 
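/* Common handler for Connection Request and Create Channel Request:
 * look up a listening channel for the PSM, create the new channel and
 * reply with a success, pending or error result.
 */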
3830 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3831 					struct l2cap_cmd_hdr *cmd,
3832 					u8 *data, u8 rsp_code, u8 amp_id)
3833 {
3834 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3835 	struct l2cap_conn_rsp rsp;
3836 	struct l2cap_chan *chan = NULL, *pchan;
3837 	int result, status = L2CAP_CS_NO_INFO;
3838 
3839 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
3840 	__le16 psm = req->psm;
3841 
3842 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
3843 
3844 	/* Check if we have a socket listening on this PSM */
3845 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
3846 					 &conn->hcon->dst, ACL_LINK);
3847 	if (!pchan) {
3848 		result = L2CAP_CR_BAD_PSM;
3849 		goto sendresp;
3850 	}
3851 
3852 	mutex_lock(&conn->chan_lock);
3853 	l2cap_chan_lock(pchan);
3854 
3855 	/* Check if the ACL is secure enough (if not SDP) */
3856 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
3857 	    !hci_conn_check_link_mode(conn->hcon)) {
3858 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
3859 		result = L2CAP_CR_SEC_BLOCK;
3860 		goto response;
3861 	}
3862 
3863 	result = L2CAP_CR_NO_MEM;
3864 
3865 	/* Check if we already have a channel with that dcid */
3866 	if (__l2cap_get_chan_by_dcid(conn, scid))
3867 		goto response;
3868 
3869 	chan = pchan->ops->new_connection(pchan);
3870 	if (!chan)
3871 		goto response;
3872 
3873 	/* For certain devices (e.g. an HID mouse), support for authentication,
3874 	 * pairing and bonding is optional. For such devices, in order to avoid
3875 	 * keeping the ACL alive for too long after L2CAP disconnection, reset
3876 	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3877 	 */
3878 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3879 
3880 	bacpy(&chan->src, &conn->hcon->src);
3881 	bacpy(&chan->dst, &conn->hcon->dst);
3882 	chan->src_type = bdaddr_src_type(conn->hcon);
3883 	chan->dst_type = bdaddr_dst_type(conn->hcon);
3884 	chan->psm  = psm;
3885 	chan->dcid = scid;
3886 	chan->local_amp_id = amp_id;
3887 
3888 	__l2cap_chan_add(conn, chan);
3889 
3890 	dcid = chan->scid;
3891 
3892 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
3893 
3894 	chan->ident = cmd->ident;
3895 
3896 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
3897 		if (l2cap_chan_check_security(chan, false)) {
3898 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
3899 				l2cap_state_change(chan, BT_CONNECT2);
3900 				result = L2CAP_CR_PEND;
3901 				status = L2CAP_CS_AUTHOR_PEND;
3902 				chan->ops->defer(chan);
3903 			} else {
3904 				/* Force pending result for AMP controllers.
3905 				 * The connection will succeed after the
3906 				 * physical link is up.
3907 				 */
3908 				if (amp_id == AMP_ID_BREDR) {
3909 					l2cap_state_change(chan, BT_CONFIG);
3910 					result = L2CAP_CR_SUCCESS;
3911 				} else {
3912 					l2cap_state_change(chan, BT_CONNECT2);
3913 					result = L2CAP_CR_PEND;
3914 				}
3915 				status = L2CAP_CS_NO_INFO;
3916 			}
3917 		} else {
3918 			l2cap_state_change(chan, BT_CONNECT2);
3919 			result = L2CAP_CR_PEND;
3920 			status = L2CAP_CS_AUTHEN_PEND;
3921 		}
3922 	} else {
3923 		l2cap_state_change(chan, BT_CONNECT2);
3924 		result = L2CAP_CR_PEND;
3925 		status = L2CAP_CS_NO_INFO;
3926 	}
3927 
3928 response:
3929 	l2cap_chan_unlock(pchan);
3930 	mutex_unlock(&conn->chan_lock);
3931 	l2cap_chan_put(pchan);
3932 
3933 sendresp:
3934 	rsp.scid   = cpu_to_le16(scid);
3935 	rsp.dcid   = cpu_to_le16(dcid);
3936 	rsp.result = cpu_to_le16(result);
3937 	rsp.status = cpu_to_le16(status);
3938 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
3939 
3940 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3941 		struct l2cap_info_req info;
3942 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3943 
3944 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3945 		conn->info_ident = l2cap_get_ident(conn);
3946 
3947 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
3948 
3949 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3950 			       sizeof(info), &info);
3951 	}
3952 
3953 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
3954 	    result == L2CAP_CR_SUCCESS) {
3955 		u8 buf[128];
3956 		set_bit(CONF_REQ_SENT, &chan->conf_state);
3957 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3958 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
3959 		chan->num_conf_req++;
3960 	}
3961 
3962 	return chan;
3963 }
3964 
3965 static int l2cap_connect_req(struct l2cap_conn *conn,
3966 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3967 {
3968 	struct hci_dev *hdev = conn->hcon->hdev;
3969 	struct hci_conn *hcon = conn->hcon;
3970 
3971 	if (cmd_len < sizeof(struct l2cap_conn_req))
3972 		return -EPROTO;
3973 
3974 	hci_dev_lock(hdev);
3975 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
3976 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3977 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
3978 	hci_dev_unlock(hdev);
3979 
3980 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
3981 	return 0;
3982 }
3983 
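/* Handle a Connection Response or Create Channel Response: on success
 * move to the configuration phase, on a pending result keep waiting,
 * otherwise delete the channel.
 */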
3984 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
3985 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3986 				    u8 *data)
3987 {
3988 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3989 	u16 scid, dcid, result, status;
3990 	struct l2cap_chan *chan;
3991 	u8 req[128];
3992 	int err;
3993 
3994 	if (cmd_len < sizeof(*rsp))
3995 		return -EPROTO;
3996 
3997 	scid   = __le16_to_cpu(rsp->scid);
3998 	dcid   = __le16_to_cpu(rsp->dcid);
3999 	result = __le16_to_cpu(rsp->result);
4000 	status = __le16_to_cpu(rsp->status);
4001 
4002 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4003 	       dcid, scid, result, status);
4004 
4005 	mutex_lock(&conn->chan_lock);
4006 
4007 	if (scid) {
4008 		chan = __l2cap_get_chan_by_scid(conn, scid);
4009 		if (!chan) {
4010 			err = -EBADSLT;
4011 			goto unlock;
4012 		}
4013 	} else {
4014 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4015 		if (!chan) {
4016 			err = -EBADSLT;
4017 			goto unlock;
4018 		}
4019 	}
4020 
4021 	err = 0;
4022 
4023 	l2cap_chan_lock(chan);
4024 
4025 	switch (result) {
4026 	case L2CAP_CR_SUCCESS:
4027 		l2cap_state_change(chan, BT_CONFIG);
4028 		chan->ident = 0;
4029 		chan->dcid = dcid;
4030 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4031 
4032 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4033 			break;
4034 
4035 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4036 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4037 		chan->num_conf_req++;
4038 		break;
4039 
4040 	case L2CAP_CR_PEND:
4041 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4042 		break;
4043 
4044 	default:
4045 		l2cap_chan_del(chan, ECONNREFUSED);
4046 		break;
4047 	}
4048 
4049 	l2cap_chan_unlock(chan);
4050 
4051 unlock:
4052 	mutex_unlock(&conn->chan_lock);
4053 
4054 	return err;
4055 }
4056 
4057 static inline void set_default_fcs(struct l2cap_chan *chan)
4058 {
4059 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4060 	 * sides request it.
4061 	 */
4062 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4063 		chan->fcs = L2CAP_FCS_NONE;
4064 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4065 		chan->fcs = L2CAP_FCS_CRC16;
4066 }
4067 
4068 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4069 				    u8 ident, u16 flags)
4070 {
4071 	struct l2cap_conn *conn = chan->conn;
4072 
4073 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4074 	       flags);
4075 
4076 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4077 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4078 
4079 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4080 		       l2cap_build_conf_rsp(chan, data,
4081 					    L2CAP_CONF_SUCCESS, flags), data);
4082 }
4083 
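/* Send a Command Reject with reason "invalid CID in request", echoing
 * the offending source and destination CIDs.
 */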
4084 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4085 				   u16 scid, u16 dcid)
4086 {
4087 	struct l2cap_cmd_rej_cid rej;
4088 
4089 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4090 	rej.scid = __cpu_to_le16(scid);
4091 	rej.dcid = __cpu_to_le16(dcid);
4092 
4093 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4094 }
4095 
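/* Handle a Configure Request: accumulate (possibly continued) options,
 * parse them once complete, send the Configure Response and finish
 * channel setup when both directions are configured.
 */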
4096 static inline int l2cap_config_req(struct l2cap_conn *conn,
4097 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4098 				   u8 *data)
4099 {
4100 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4101 	u16 dcid, flags;
4102 	u8 rsp[64];
4103 	struct l2cap_chan *chan;
4104 	int len, err = 0;
4105 
4106 	if (cmd_len < sizeof(*req))
4107 		return -EPROTO;
4108 
4109 	dcid  = __le16_to_cpu(req->dcid);
4110 	flags = __le16_to_cpu(req->flags);
4111 
4112 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4113 
4114 	chan = l2cap_get_chan_by_scid(conn, dcid);
4115 	if (!chan) {
4116 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4117 		return 0;
4118 	}
4119 
4120 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4121 	    chan->state != BT_CONNECTED) {
4122 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4123 				       chan->dcid);
4124 		goto unlock;
4125 	}
4126 
4127 	/* Reject if config buffer is too small. */
4128 	len = cmd_len - sizeof(*req);
4129 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4130 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4131 			       l2cap_build_conf_rsp(chan, rsp,
4132 			       L2CAP_CONF_REJECT, flags), rsp);
4133 		goto unlock;
4134 	}
4135 
4136 	/* Store config. */
4137 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4138 	chan->conf_len += len;
4139 
4140 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4141 		/* Incomplete config. Send empty response. */
4142 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4143 			       l2cap_build_conf_rsp(chan, rsp,
4144 			       L2CAP_CONF_SUCCESS, flags), rsp);
4145 		goto unlock;
4146 	}
4147 
4148 	/* Complete config. */
4149 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4150 	if (len < 0) {
4151 		l2cap_send_disconn_req(chan, ECONNRESET);
4152 		goto unlock;
4153 	}
4154 
4155 	chan->ident = cmd->ident;
4156 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4157 	chan->num_conf_rsp++;
4158 
4159 	/* Reset config buffer. */
4160 	chan->conf_len = 0;
4161 
4162 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4163 		goto unlock;
4164 
4165 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4166 		set_default_fcs(chan);
4167 
4168 		if (chan->mode == L2CAP_MODE_ERTM ||
4169 		    chan->mode == L2CAP_MODE_STREAMING)
4170 			err = l2cap_ertm_init(chan);
4171 
4172 		if (err < 0)
4173 			l2cap_send_disconn_req(chan, -err);
4174 		else
4175 			l2cap_chan_ready(chan);
4176 
4177 		goto unlock;
4178 	}
4179 
4180 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4181 		u8 buf[64];
4182 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4183 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4184 		chan->num_conf_req++;
4185 	}
4186 
4187 	/* We got a Conf Rsp PENDING from the remote side and assume we
4188 	 * sent our own Conf Rsp PENDING in the code above */
4189 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4190 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4191 
4192 		/* check compatibility */
4193 
4194 		/* Send rsp for BR/EDR channel */
4195 		if (!chan->hs_hcon)
4196 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4197 		else
4198 			chan->ident = cmd->ident;
4199 	}
4200 
4201 unlock:
4202 	l2cap_chan_unlock(chan);
4203 	return err;
4204 }
4205 
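/* Handle a Configure Response: on success apply the negotiated options,
 * on an unacceptable-parameters result retry with adjusted values,
 * otherwise disconnect the channel.
 */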
4206 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4207 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4208 				   u8 *data)
4209 {
4210 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4211 	u16 scid, flags, result;
4212 	struct l2cap_chan *chan;
4213 	int len = cmd_len - sizeof(*rsp);
4214 	int err = 0;
4215 
4216 	if (cmd_len < sizeof(*rsp))
4217 		return -EPROTO;
4218 
4219 	scid   = __le16_to_cpu(rsp->scid);
4220 	flags  = __le16_to_cpu(rsp->flags);
4221 	result = __le16_to_cpu(rsp->result);
4222 
4223 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4224 	       result, len);
4225 
4226 	chan = l2cap_get_chan_by_scid(conn, scid);
4227 	if (!chan)
4228 		return 0;
4229 
4230 	switch (result) {
4231 	case L2CAP_CONF_SUCCESS:
4232 		l2cap_conf_rfc_get(chan, rsp->data, len);
4233 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4234 		break;
4235 
4236 	case L2CAP_CONF_PENDING:
4237 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4238 
4239 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4240 			char buf[64];
4241 
4242 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4243 						   buf, sizeof(buf), &result);
4244 			if (len < 0) {
4245 				l2cap_send_disconn_req(chan, ECONNRESET);
4246 				goto done;
4247 			}
4248 
4249 			if (!chan->hs_hcon) {
4250 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4251 							0);
4252 			} else {
4253 				if (l2cap_check_efs(chan)) {
4254 					amp_create_logical_link(chan);
4255 					chan->ident = cmd->ident;
4256 				}
4257 			}
4258 		}
4259 		goto done;
4260 
4261 	case L2CAP_CONF_UNACCEPT:
4262 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4263 			char req[64];
4264 
4265 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4266 				l2cap_send_disconn_req(chan, ECONNRESET);
4267 				goto done;
4268 			}
4269 
4270 			/* throw out any old stored conf requests */
4271 			result = L2CAP_CONF_SUCCESS;
4272 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4273 						   req, sizeof(req), &result);
4274 			if (len < 0) {
4275 				l2cap_send_disconn_req(chan, ECONNRESET);
4276 				goto done;
4277 			}
4278 
4279 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4280 				       L2CAP_CONF_REQ, len, req);
4281 			chan->num_conf_req++;
4282 			if (result != L2CAP_CONF_SUCCESS)
4283 				goto done;
4284 			break;
4285 		}
4286 
4287 	default:
4288 		l2cap_chan_set_err(chan, ECONNRESET);
4289 
4290 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4291 		l2cap_send_disconn_req(chan, ECONNRESET);
4292 		goto done;
4293 	}
4294 
4295 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4296 		goto done;
4297 
4298 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4299 
4300 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4301 		set_default_fcs(chan);
4302 
4303 		if (chan->mode == L2CAP_MODE_ERTM ||
4304 		    chan->mode == L2CAP_MODE_STREAMING)
4305 			err = l2cap_ertm_init(chan);
4306 
4307 		if (err < 0)
4308 			l2cap_send_disconn_req(chan, -err);
4309 		else
4310 			l2cap_chan_ready(chan);
4311 	}
4312 
4313 done:
4314 	l2cap_chan_unlock(chan);
4315 	return err;
4316 }
4317 
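/* Handle a Disconnection Request: acknowledge it and tear down the
 * matching channel.
 */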
4318 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4319 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4320 				       u8 *data)
4321 {
4322 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4323 	struct l2cap_disconn_rsp rsp;
4324 	u16 dcid, scid;
4325 	struct l2cap_chan *chan;
4326 
4327 	if (cmd_len != sizeof(*req))
4328 		return -EPROTO;
4329 
4330 	scid = __le16_to_cpu(req->scid);
4331 	dcid = __le16_to_cpu(req->dcid);
4332 
4333 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4334 
4335 	mutex_lock(&conn->chan_lock);
4336 
4337 	chan = __l2cap_get_chan_by_scid(conn, dcid);
4338 	if (!chan) {
4339 		mutex_unlock(&conn->chan_lock);
4340 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4341 		return 0;
4342 	}
4343 
4344 	l2cap_chan_hold(chan);
4345 	l2cap_chan_lock(chan);
4346 
4347 	rsp.dcid = cpu_to_le16(chan->scid);
4348 	rsp.scid = cpu_to_le16(chan->dcid);
4349 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4350 
4351 	chan->ops->set_shutdown(chan);
4352 
4353 	l2cap_chan_del(chan, ECONNRESET);
4354 
4355 	chan->ops->close(chan);
4356 
4357 	l2cap_chan_unlock(chan);
4358 	l2cap_chan_put(chan);
4359 
4360 	mutex_unlock(&conn->chan_lock);
4361 
4362 	return 0;
4363 }
4364 
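/* Handle a Disconnection Response: delete the channel if it is still
 * waiting in BT_DISCONN state.
 */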
4365 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4366 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4367 				       u8 *data)
4368 {
4369 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4370 	u16 dcid, scid;
4371 	struct l2cap_chan *chan;
4372 
4373 	if (cmd_len != sizeof(*rsp))
4374 		return -EPROTO;
4375 
4376 	scid = __le16_to_cpu(rsp->scid);
4377 	dcid = __le16_to_cpu(rsp->dcid);
4378 
4379 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4380 
4381 	mutex_lock(&conn->chan_lock);
4382 
4383 	chan = __l2cap_get_chan_by_scid(conn, scid);
4384 	if (!chan) {
4385 		mutex_unlock(&conn->chan_lock);
4386 		return 0;
4387 	}
4388 
4389 	l2cap_chan_hold(chan);
4390 	l2cap_chan_lock(chan);
4391 
4392 	if (chan->state != BT_DISCONN) {
4393 		l2cap_chan_unlock(chan);
4394 		l2cap_chan_put(chan);
4395 		mutex_unlock(&conn->chan_lock);
4396 		return 0;
4397 	}
4398 
4399 	l2cap_chan_del(chan, 0);
4400 
4401 	chan->ops->close(chan);
4402 
4403 	l2cap_chan_unlock(chan);
4404 	l2cap_chan_put(chan);
4405 
4406 	mutex_unlock(&conn->chan_lock);
4407 
4408 	return 0;
4409 }
4410 
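/* Handle an Information Request by reporting our extended feature mask
 * or fixed channel bitmap; unknown types get a "not supported"
 * response.
 */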
4411 static inline int l2cap_information_req(struct l2cap_conn *conn,
4412 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4413 					u8 *data)
4414 {
4415 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4416 	u16 type;
4417 
4418 	if (cmd_len != sizeof(*req))
4419 		return -EPROTO;
4420 
4421 	type = __le16_to_cpu(req->type);
4422 
4423 	BT_DBG("type 0x%4.4x", type);
4424 
4425 	if (type == L2CAP_IT_FEAT_MASK) {
4426 		u8 buf[8];
4427 		u32 feat_mask = l2cap_feat_mask;
4428 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4429 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4430 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4431 		if (!disable_ertm)
4432 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4433 				| L2CAP_FEAT_FCS;
4434 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4435 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4436 				| L2CAP_FEAT_EXT_WINDOW;
4437 
4438 		put_unaligned_le32(feat_mask, rsp->data);
4439 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4440 			       buf);
4441 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4442 		u8 buf[12];
4443 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4444 
4445 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4446 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4447 		rsp->data[0] = conn->local_fixed_chan;
4448 		memset(rsp->data + 1, 0, 7);
4449 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4450 			       buf);
4451 	} else {
4452 		struct l2cap_info_rsp rsp;
4453 		rsp.type   = cpu_to_le16(type);
4454 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4455 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4456 			       &rsp);
4457 	}
4458 
4459 	return 0;
4460 }
4461 
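/* Handle an Information Response to our own request: record the remote
 * feature mask and fixed channels and, once both are known, start any
 * channels that were waiting for this information.
 */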
4462 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4463 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4464 					u8 *data)
4465 {
4466 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4467 	u16 type, result;
4468 
4469 	if (cmd_len < sizeof(*rsp))
4470 		return -EPROTO;
4471 
4472 	type   = __le16_to_cpu(rsp->type);
4473 	result = __le16_to_cpu(rsp->result);
4474 
4475 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4476 
4477 	/* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
4478 	if (cmd->ident != conn->info_ident ||
4479 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4480 		return 0;
4481 
4482 	cancel_delayed_work(&conn->info_timer);
4483 
4484 	if (result != L2CAP_IR_SUCCESS) {
4485 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4486 		conn->info_ident = 0;
4487 
4488 		l2cap_conn_start(conn);
4489 
4490 		return 0;
4491 	}
4492 
4493 	switch (type) {
4494 	case L2CAP_IT_FEAT_MASK:
4495 		conn->feat_mask = get_unaligned_le32(rsp->data);
4496 
4497 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4498 			struct l2cap_info_req req;
4499 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4500 
4501 			conn->info_ident = l2cap_get_ident(conn);
4502 
4503 			l2cap_send_cmd(conn, conn->info_ident,
4504 				       L2CAP_INFO_REQ, sizeof(req), &req);
4505 		} else {
4506 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4507 			conn->info_ident = 0;
4508 
4509 			l2cap_conn_start(conn);
4510 		}
4511 		break;
4512 
4513 	case L2CAP_IT_FIXED_CHAN:
4514 		conn->remote_fixed_chan = rsp->data[0];
4515 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4516 		conn->info_ident = 0;
4517 
4518 		l2cap_conn_start(conn);
4519 		break;
4520 	}
4521 
4522 	return 0;
4523 }
4524 
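/* Handle a Create Channel Request: for controller id 0 fall back to a
 * plain BR/EDR connect, otherwise validate the requested AMP controller
 * and bind the new channel to its high-speed link.
 */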
4525 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4526 				    struct l2cap_cmd_hdr *cmd,
4527 				    u16 cmd_len, void *data)
4528 {
4529 	struct l2cap_create_chan_req *req = data;
4530 	struct l2cap_create_chan_rsp rsp;
4531 	struct l2cap_chan *chan;
4532 	struct hci_dev *hdev;
4533 	u16 psm, scid;
4534 
4535 	if (cmd_len != sizeof(*req))
4536 		return -EPROTO;
4537 
4538 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4539 		return -EINVAL;
4540 
4541 	psm = le16_to_cpu(req->psm);
4542 	scid = le16_to_cpu(req->scid);
4543 
4544 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4545 
4546 	/* For controller id 0, make a BR/EDR connection */
4547 	if (req->amp_id == AMP_ID_BREDR) {
4548 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4549 			      req->amp_id);
4550 		return 0;
4551 	}
4552 
4553 	/* Validate AMP controller id */
4554 	hdev = hci_dev_get(req->amp_id);
4555 	if (!hdev)
4556 		goto error;
4557 
4558 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4559 		hci_dev_put(hdev);
4560 		goto error;
4561 	}
4562 
4563 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4564 			     req->amp_id);
4565 	if (chan) {
4566 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4567 		struct hci_conn *hs_hcon;
4568 
4569 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4570 						  &conn->hcon->dst);
4571 		if (!hs_hcon) {
4572 			hci_dev_put(hdev);
4573 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4574 					       chan->dcid);
4575 			return 0;
4576 		}
4577 
4578 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4579 
4580 		mgr->bredr_chan = chan;
4581 		chan->hs_hcon = hs_hcon;
4582 		chan->fcs = L2CAP_FCS_NONE;
4583 		conn->mtu = hdev->block_mtu;
4584 	}
4585 
4586 	hci_dev_put(hdev);
4587 
4588 	return 0;
4589 
4590 error:
4591 	rsp.dcid = 0;
4592 	rsp.scid = cpu_to_le16(scid);
4593 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4594 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4595 
4596 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4597 		       sizeof(rsp), &rsp);
4598 
4599 	return 0;
4600 }
4601 
4602 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4603 {
4604 	struct l2cap_move_chan_req req;
4605 	u8 ident;
4606 
4607 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4608 
4609 	ident = l2cap_get_ident(chan->conn);
4610 	chan->ident = ident;
4611 
4612 	req.icid = cpu_to_le16(chan->scid);
4613 	req.dest_amp_id = dest_amp_id;
4614 
4615 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4616 		       &req);
4617 
4618 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4619 }
4620 
4621 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4622 {
4623 	struct l2cap_move_chan_rsp rsp;
4624 
4625 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4626 
4627 	rsp.icid = cpu_to_le16(chan->dcid);
4628 	rsp.result = cpu_to_le16(result);
4629 
4630 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4631 		       sizeof(rsp), &rsp);
4632 }
4633 
4634 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4635 {
4636 	struct l2cap_move_chan_cfm cfm;
4637 
4638 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4639 
4640 	chan->ident = l2cap_get_ident(chan->conn);
4641 
4642 	cfm.icid = cpu_to_le16(chan->scid);
4643 	cfm.result = cpu_to_le16(result);
4644 
4645 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4646 		       sizeof(cfm), &cfm);
4647 
4648 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4649 }
4650 
4651 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4652 {
4653 	struct l2cap_move_chan_cfm cfm;
4654 
4655 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4656 
4657 	cfm.icid = cpu_to_le16(icid);
4658 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4659 
4660 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4661 		       sizeof(cfm), &cfm);
4662 }
4663 
4664 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4665 					 u16 icid)
4666 {
4667 	struct l2cap_move_chan_cfm_rsp rsp;
4668 
4669 	BT_DBG("icid 0x%4.4x", icid);
4670 
4671 	rsp.icid = cpu_to_le16(icid);
4672 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4673 }
4674 
4675 static void __release_logical_link(struct l2cap_chan *chan)
4676 {
4677 	chan->hs_hchan = NULL;
4678 	chan->hs_hcon = NULL;
4679 
4680 	/* Placeholder - release the logical link */
4681 }
4682 
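/* Clean up after a failed logical link setup: disconnect a channel that
 * was still being created, or unwind an in-progress channel move.
 */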
4683 static void l2cap_logical_fail(struct l2cap_chan *chan)
4684 {
4685 	/* Logical link setup failed */
4686 	if (chan->state != BT_CONNECTED) {
4687 		/* Create channel failure, disconnect */
4688 		l2cap_send_disconn_req(chan, ECONNRESET);
4689 		return;
4690 	}
4691 
4692 	switch (chan->move_role) {
4693 	case L2CAP_MOVE_ROLE_RESPONDER:
4694 		l2cap_move_done(chan);
4695 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4696 		break;
4697 	case L2CAP_MOVE_ROLE_INITIATOR:
4698 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4699 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4700 			/* Remote has only sent pending or
4701 			 * success responses, clean up
4702 			 */
4703 			l2cap_move_done(chan);
4704 		}
4705 
4706 		/* Other AMP move states imply that the move
4707 		 * has already been aborted
4708 		 */
4709 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4710 		break;
4711 	}
4712 }
4713 
4714 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4715 					struct hci_chan *hchan)
4716 {
4717 	struct l2cap_conf_rsp rsp;
4718 
4719 	chan->hs_hchan = hchan;
4720 	chan->hs_hcon->l2cap_data = chan->conn;
4721 
4722 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
4723 
4724 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4725 		int err;
4726 
4727 		set_default_fcs(chan);
4728 
4729 		err = l2cap_ertm_init(chan);
4730 		if (err < 0)
4731 			l2cap_send_disconn_req(chan, -err);
4732 		else
4733 			l2cap_chan_ready(chan);
4734 	}
4735 }
4736 
4737 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4738 				      struct hci_chan *hchan)
4739 {
4740 	chan->hs_hcon = hchan->conn;
4741 	chan->hs_hcon->l2cap_data = chan->conn;
4742 
4743 	BT_DBG("move_state %d", chan->move_state);
4744 
4745 	switch (chan->move_state) {
4746 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4747 		/* Move confirm will be sent after a success
4748 		 * response is received
4749 		 */
4750 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4751 		break;
4752 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4753 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4754 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4755 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4756 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4757 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4758 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4759 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4760 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4761 		}
4762 		break;
4763 	default:
4764 		/* Move was not in expected state, free the channel */
4765 		__release_logical_link(chan);
4766 
4767 		chan->move_state = L2CAP_MOVE_STABLE;
4768 	}
4769 }
4770 
4771 /* Call with chan locked */
4772 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4773 		       u8 status)
4774 {
4775 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4776 
4777 	if (status) {
4778 		l2cap_logical_fail(chan);
4779 		__release_logical_link(chan);
4780 		return;
4781 	}
4782 
4783 	if (chan->state != BT_CONNECTED) {
4784 		/* Ignore logical link if channel is on BR/EDR */
4785 		if (chan->local_amp_id != AMP_ID_BREDR)
4786 			l2cap_logical_finish_create(chan, hchan);
4787 	} else {
4788 		l2cap_logical_finish_move(chan, hchan);
4789 	}
4790 }
4791 
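/* Start moving a channel between controllers: kick off physical link
 * setup when the channel currently lives on BR/EDR, otherwise request a
 * move back to the BR/EDR controller.
 */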
4792 void l2cap_move_start(struct l2cap_chan *chan)
4793 {
4794 	BT_DBG("chan %p", chan);
4795 
4796 	if (chan->local_amp_id == AMP_ID_BREDR) {
4797 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4798 			return;
4799 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4800 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4801 		/* Placeholder - start physical link setup */
4802 	} else {
4803 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4804 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4805 		chan->move_id = 0;
4806 		l2cap_move_setup(chan);
4807 		l2cap_send_move_chan_req(chan, 0);
4808 	}
4809 }
4810 
4811 static void l2cap_do_create(struct l2cap_chan *chan, int result,
4812 			    u8 local_amp_id, u8 remote_amp_id)
4813 {
4814 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4815 	       local_amp_id, remote_amp_id);
4816 
4817 	chan->fcs = L2CAP_FCS_NONE;
4818 
4819 	/* Outgoing channel on AMP */
4820 	if (chan->state == BT_CONNECT) {
4821 		if (result == L2CAP_CR_SUCCESS) {
4822 			chan->local_amp_id = local_amp_id;
4823 			l2cap_send_create_chan_req(chan, remote_amp_id);
4824 		} else {
4825 			/* Revert to BR/EDR connect */
4826 			l2cap_send_conn_req(chan);
4827 		}
4828 
4829 		return;
4830 	}
4831 
4832 	/* Incoming channel on AMP */
4833 	if (__l2cap_no_conn_pending(chan)) {
4834 		struct l2cap_conn_rsp rsp;
4835 		char buf[128];
4836 		rsp.scid = cpu_to_le16(chan->dcid);
4837 		rsp.dcid = cpu_to_le16(chan->scid);
4838 
4839 		if (result == L2CAP_CR_SUCCESS) {
4840 			/* Send successful response */
4841 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4842 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4843 		} else {
4844 			/* Send negative response */
4845 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4846 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4847 		}
4848 
4849 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4850 			       sizeof(rsp), &rsp);
4851 
4852 		if (result == L2CAP_CR_SUCCESS) {
4853 			l2cap_state_change(chan, BT_CONFIG);
4854 			set_bit(CONF_REQ_SENT, &chan->conf_state);
4855 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4856 				       L2CAP_CONF_REQ,
4857 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4858 			chan->num_conf_req++;
4859 		}
4860 	}
4861 }
4862 
4863 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4864 				   u8 remote_amp_id)
4865 {
4866 	l2cap_move_setup(chan);
4867 	chan->move_id = local_amp_id;
4868 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
4869 
4870 	l2cap_send_move_chan_req(chan, remote_amp_id);
4871 }
4872 
4873 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4874 {
4875 	struct hci_chan *hchan = NULL;
4876 
4877 	/* Placeholder - get hci_chan for logical link */
4878 
4879 	if (hchan) {
4880 		if (hchan->state == BT_CONNECTED) {
4881 			/* Logical link is ready to go */
4882 			chan->hs_hcon = hchan->conn;
4883 			chan->hs_hcon->l2cap_data = chan->conn;
4884 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4885 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4886 
4887 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4888 		} else {
4889 			/* Wait for logical link to be ready */
4890 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4891 		}
4892 	} else {
4893 		/* Logical link not available */
4894 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4895 	}
4896 }
4897 
4898 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4899 {
4900 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4901 		u8 rsp_result;
4902 		if (result == -EINVAL)
4903 			rsp_result = L2CAP_MR_BAD_ID;
4904 		else
4905 			rsp_result = L2CAP_MR_NOT_ALLOWED;
4906 
4907 		l2cap_send_move_chan_rsp(chan, rsp_result);
4908 	}
4909 
4910 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
4911 	chan->move_state = L2CAP_MOVE_STABLE;
4912 
4913 	/* Restart data transmission */
4914 	l2cap_ertm_send(chan);
4915 }
4916 
4917 /* Invoke with locked chan */
4918 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
4919 {
4920 	u8 local_amp_id = chan->local_amp_id;
4921 	u8 remote_amp_id = chan->remote_amp_id;
4922 
4923 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4924 	       chan, result, local_amp_id, remote_amp_id);
4925 
4926 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
4927 		return;
4928 
4929 	if (chan->state != BT_CONNECTED) {
4930 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4931 	} else if (result != L2CAP_MR_SUCCESS) {
4932 		l2cap_do_move_cancel(chan, result);
4933 	} else {
4934 		switch (chan->move_role) {
4935 		case L2CAP_MOVE_ROLE_INITIATOR:
4936 			l2cap_do_move_initiate(chan, local_amp_id,
4937 					       remote_amp_id);
4938 			break;
4939 		case L2CAP_MOVE_ROLE_RESPONDER:
4940 			l2cap_do_move_respond(chan, result);
4941 			break;
4942 		default:
4943 			l2cap_do_move_cancel(chan, result);
4944 			break;
4945 		}
4946 	}
4947 }
4948 
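/* Handle a Move Channel Request from the remote side: validate the
 * requested controller, detect move collisions, and reply with a
 * success, pending or error result.
 */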
4949 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
4950 					 struct l2cap_cmd_hdr *cmd,
4951 					 u16 cmd_len, void *data)
4952 {
4953 	struct l2cap_move_chan_req *req = data;
4954 	struct l2cap_move_chan_rsp rsp;
4955 	struct l2cap_chan *chan;
4956 	u16 icid = 0;
4957 	u16 result = L2CAP_MR_NOT_ALLOWED;
4958 
4959 	if (cmd_len != sizeof(*req))
4960 		return -EPROTO;
4961 
4962 	icid = le16_to_cpu(req->icid);
4963 
4964 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
4965 
4966 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4967 		return -EINVAL;
4968 
4969 	chan = l2cap_get_chan_by_dcid(conn, icid);
4970 	if (!chan) {
4971 		rsp.icid = cpu_to_le16(icid);
4972 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
4973 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4974 			       sizeof(rsp), &rsp);
4975 		return 0;
4976 	}
4977 
4978 	chan->ident = cmd->ident;
4979 
4980 	if (chan->scid < L2CAP_CID_DYN_START ||
4981 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4982 	    (chan->mode != L2CAP_MODE_ERTM &&
4983 	     chan->mode != L2CAP_MODE_STREAMING)) {
4984 		result = L2CAP_MR_NOT_ALLOWED;
4985 		goto send_move_response;
4986 	}
4987 
4988 	if (chan->local_amp_id == req->dest_amp_id) {
4989 		result = L2CAP_MR_SAME_ID;
4990 		goto send_move_response;
4991 	}
4992 
4993 	if (req->dest_amp_id != AMP_ID_BREDR) {
4994 		struct hci_dev *hdev;
4995 		hdev = hci_dev_get(req->dest_amp_id);
4996 		if (!hdev || hdev->dev_type != HCI_AMP ||
4997 		    !test_bit(HCI_UP, &hdev->flags)) {
4998 			if (hdev)
4999 				hci_dev_put(hdev);
5000 
5001 			result = L2CAP_MR_BAD_ID;
5002 			goto send_move_response;
5003 		}
5004 		hci_dev_put(hdev);
5005 	}
5006 
5007 	/* Detect a move collision.  Only send a collision response
5008 	 * if this side has "lost", otherwise proceed with the move.
5009 	 * The winner has the larger bd_addr.
5010 	 */
5011 	if ((__chan_is_moving(chan) ||
5012 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5013 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5014 		result = L2CAP_MR_COLLISION;
5015 		goto send_move_response;
5016 	}
5017 
5018 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5019 	l2cap_move_setup(chan);
5020 	chan->move_id = req->dest_amp_id;
5021 	icid = chan->dcid;
5022 
5023 	if (req->dest_amp_id == AMP_ID_BREDR) {
5024 		/* Moving to BR/EDR */
5025 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5026 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5027 			result = L2CAP_MR_PEND;
5028 		} else {
5029 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5030 			result = L2CAP_MR_SUCCESS;
5031 		}
5032 	} else {
5033 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5034 		/* Placeholder - uncomment when amp functions are available */
5035 		/*amp_accept_physical(chan, req->dest_amp_id);*/
5036 		result = L2CAP_MR_PEND;
5037 	}
5038 
5039 send_move_response:
5040 	l2cap_send_move_chan_rsp(chan, result);
5041 
5042 	l2cap_chan_unlock(chan);
5043 
5044 	return 0;
5045 }
5046 
5047 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5048 {
5049 	struct l2cap_chan *chan;
5050 	struct hci_chan *hchan = NULL;
5051 
5052 	chan = l2cap_get_chan_by_scid(conn, icid);
5053 	if (!chan) {
5054 		l2cap_send_move_chan_cfm_icid(conn, icid);
5055 		return;
5056 	}
5057 
5058 	__clear_chan_timer(chan);
5059 	if (result == L2CAP_MR_PEND)
5060 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5061 
5062 	switch (chan->move_state) {
5063 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5064 		/* Move confirm will be sent when logical link
5065 		 * is complete.
5066 		 */
5067 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5068 		break;
5069 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5070 		if (result == L2CAP_MR_PEND) {
5071 			break;
5072 		} else if (test_bit(CONN_LOCAL_BUSY,
5073 				    &chan->conn_state)) {
5074 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5075 		} else {
5076 			/* Logical link is up or moving to BR/EDR,
5077 			 * proceed with move
5078 			 */
5079 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5080 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5081 		}
5082 		break;
5083 	case L2CAP_MOVE_WAIT_RSP:
5084 		/* Moving to AMP */
5085 		if (result == L2CAP_MR_SUCCESS) {
5086 			/* Remote is ready, send confirm immediately
5087 			 * after logical link is ready
5088 			 */
5089 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5090 		} else {
5091 			/* Both logical link and move success
5092 			 * are required to confirm
5093 			 */
5094 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5095 		}
5096 
5097 		/* Placeholder - get hci_chan for logical link */
5098 		if (!hchan) {
5099 			/* Logical link not available */
5100 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5101 			break;
5102 		}
5103 
5104 		/* If the logical link is not yet connected, do not
5105 		 * send confirmation.
5106 		 */
5107 		if (hchan->state != BT_CONNECTED)
5108 			break;
5109 
5110 		/* Logical link is already ready to go */
5111 
5112 		chan->hs_hcon = hchan->conn;
5113 		chan->hs_hcon->l2cap_data = chan->conn;
5114 
5115 		if (result == L2CAP_MR_SUCCESS) {
5116 			/* Can confirm now */
5117 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5118 		} else {
5119 			/* Now only need move success
5120 			 * to confirm
5121 			 */
5122 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5123 		}
5124 
5125 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5126 		break;
5127 	default:
5128 		/* Any other amp move state means the move failed. */
5129 		chan->move_id = chan->local_amp_id;
5130 		l2cap_move_done(chan);
5131 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5132 	}
5133 
5134 	l2cap_chan_unlock(chan);
5135 }
5136 
5137 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5138 			    u16 result)
5139 {
5140 	struct l2cap_chan *chan;
5141 
5142 	chan = l2cap_get_chan_by_ident(conn, ident);
5143 	if (!chan) {
5144 		/* Could not locate channel, icid is best guess */
5145 		l2cap_send_move_chan_cfm_icid(conn, icid);
5146 		return;
5147 	}
5148 
5149 	__clear_chan_timer(chan);
5150 
5151 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5152 		if (result == L2CAP_MR_COLLISION) {
5153 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5154 		} else {
5155 			/* Cleanup - cancel move */
5156 			chan->move_id = chan->local_amp_id;
5157 			l2cap_move_done(chan);
5158 		}
5159 	}
5160 
5161 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5162 
5163 	l2cap_chan_unlock(chan);
5164 }
5165 
5166 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5167 				  struct l2cap_cmd_hdr *cmd,
5168 				  u16 cmd_len, void *data)
5169 {
5170 	struct l2cap_move_chan_rsp *rsp = data;
5171 	u16 icid, result;
5172 
5173 	if (cmd_len != sizeof(*rsp))
5174 		return -EPROTO;
5175 
5176 	icid = le16_to_cpu(rsp->icid);
5177 	result = le16_to_cpu(rsp->result);
5178 
5179 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5180 
5181 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5182 		l2cap_move_continue(conn, icid, result);
5183 	else
5184 		l2cap_move_fail(conn, cmd->ident, icid, result);
5185 
5186 	return 0;
5187 }
5188 
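/* Handle a Move Channel Confirmation: commit or roll back the
 * controller switch and always acknowledge with a confirmation
 * response.
 */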
5189 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5190 				      struct l2cap_cmd_hdr *cmd,
5191 				      u16 cmd_len, void *data)
5192 {
5193 	struct l2cap_move_chan_cfm *cfm = data;
5194 	struct l2cap_chan *chan;
5195 	u16 icid, result;
5196 
5197 	if (cmd_len != sizeof(*cfm))
5198 		return -EPROTO;
5199 
5200 	icid = le16_to_cpu(cfm->icid);
5201 	result = le16_to_cpu(cfm->result);
5202 
5203 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5204 
5205 	chan = l2cap_get_chan_by_dcid(conn, icid);
5206 	if (!chan) {
5207 		/* Spec requires a response even if the icid was not found */
5208 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5209 		return 0;
5210 	}
5211 
5212 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5213 		if (result == L2CAP_MC_CONFIRMED) {
5214 			chan->local_amp_id = chan->move_id;
5215 			if (chan->local_amp_id == AMP_ID_BREDR)
5216 				__release_logical_link(chan);
5217 		} else {
5218 			chan->move_id = chan->local_amp_id;
5219 		}
5220 
5221 		l2cap_move_done(chan);
5222 	}
5223 
5224 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5225 
5226 	l2cap_chan_unlock(chan);
5227 
5228 	return 0;
5229 }
5230 
5231 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5232 						 struct l2cap_cmd_hdr *cmd,
5233 						 u16 cmd_len, void *data)
5234 {
5235 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5236 	struct l2cap_chan *chan;
5237 	u16 icid;
5238 
5239 	if (cmd_len != sizeof(*rsp))
5240 		return -EPROTO;
5241 
5242 	icid = le16_to_cpu(rsp->icid);
5243 
5244 	BT_DBG("icid 0x%4.4x", icid);
5245 
5246 	chan = l2cap_get_chan_by_scid(conn, icid);
5247 	if (!chan)
5248 		return 0;
5249 
5250 	__clear_chan_timer(chan);
5251 
5252 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5253 		chan->local_amp_id = chan->move_id;
5254 
5255 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5256 			__release_logical_link(chan);
5257 
5258 		l2cap_move_done(chan);
5259 	}
5260 
5261 	l2cap_chan_unlock(chan);
5262 
5263 	return 0;
5264 }
5265 
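/* Handle an LE Connection Parameter Update Request (only valid when we
 * are master): validate the parameters, send an accept/reject response
 * and, if accepted, apply them to the connection.
 */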
5266 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5267 					      struct l2cap_cmd_hdr *cmd,
5268 					      u16 cmd_len, u8 *data)
5269 {
5270 	struct hci_conn *hcon = conn->hcon;
5271 	struct l2cap_conn_param_update_req *req;
5272 	struct l2cap_conn_param_update_rsp rsp;
5273 	u16 min, max, latency, to_multiplier;
5274 	int err;
5275 
5276 	if (hcon->role != HCI_ROLE_MASTER)
5277 		return -EINVAL;
5278 
5279 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5280 		return -EPROTO;
5281 
5282 	req = (struct l2cap_conn_param_update_req *) data;
5283 	min		= __le16_to_cpu(req->min);
5284 	max		= __le16_to_cpu(req->max);
5285 	latency		= __le16_to_cpu(req->latency);
5286 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5287 
5288 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5289 	       min, max, latency, to_multiplier);
5290 
5291 	memset(&rsp, 0, sizeof(rsp));
5292 
5293 	err = hci_check_conn_params(min, max, latency, to_multiplier);
5294 	if (err)
5295 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5296 	else
5297 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5298 
5299 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5300 		       sizeof(rsp), &rsp);
5301 
5302 	if (!err) {
5303 		u8 store_hint;
5304 
5305 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5306 						to_multiplier);
5307 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5308 				    store_hint, min, max, latency,
5309 				    to_multiplier);
5310 
5311 	}
5312 
5313 	return 0;
5314 }
5315 
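/* Handle an LE Credit Based Connection Response: on success record the
 * remote CID, MTU, MPS and initial credits; if the peer requires
 * stronger security, raise our level and retry after pairing.
 */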
5316 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5317 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5318 				u8 *data)
5319 {
5320 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5321 	struct hci_conn *hcon = conn->hcon;
5322 	u16 dcid, mtu, mps, credits, result;
5323 	struct l2cap_chan *chan;
5324 	int err, sec_level;
5325 
5326 	if (cmd_len < sizeof(*rsp))
5327 		return -EPROTO;
5328 
5329 	dcid    = __le16_to_cpu(rsp->dcid);
5330 	mtu     = __le16_to_cpu(rsp->mtu);
5331 	mps     = __le16_to_cpu(rsp->mps);
5332 	credits = __le16_to_cpu(rsp->credits);
5333 	result  = __le16_to_cpu(rsp->result);
5334 
5335 	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
5336 					   dcid < L2CAP_CID_DYN_START ||
5337 					   dcid > L2CAP_CID_LE_DYN_END))
5338 		return -EPROTO;
5339 
5340 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5341 	       dcid, mtu, mps, credits, result);
5342 
5343 	mutex_lock(&conn->chan_lock);
5344 
5345 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5346 	if (!chan) {
5347 		err = -EBADSLT;
5348 		goto unlock;
5349 	}
5350 
5351 	err = 0;
5352 
5353 	l2cap_chan_lock(chan);
5354 
5355 	switch (result) {
5356 	case L2CAP_CR_SUCCESS:
5357 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5358 			err = -EBADSLT;
5359 			break;
5360 		}
5361 
5362 		chan->ident = 0;
5363 		chan->dcid = dcid;
5364 		chan->omtu = mtu;
5365 		chan->remote_mps = mps;
5366 		chan->tx_credits = credits;
5367 		l2cap_chan_ready(chan);
5368 		break;
5369 
5370 	case L2CAP_CR_AUTHENTICATION:
5371 	case L2CAP_CR_ENCRYPTION:
5372 		/* If we already have MITM protection we can't do
5373 		 * anything.
5374 		 */
5375 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5376 			l2cap_chan_del(chan, ECONNREFUSED);
5377 			break;
5378 		}
5379 
5380 		sec_level = hcon->sec_level + 1;
5381 		if (chan->sec_level < sec_level)
5382 			chan->sec_level = sec_level;
5383 
5384 		/* We'll need to send a new Connect Request */
5385 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5386 
5387 		smp_conn_security(hcon, chan->sec_level);
5388 		break;
5389 
5390 	default:
5391 		l2cap_chan_del(chan, ECONNREFUSED);
5392 		break;
5393 	}
5394 
5395 	l2cap_chan_unlock(chan);
5396 
5397 unlock:
5398 	mutex_unlock(&conn->chan_lock);
5399 
5400 	return err;
5401 }
5402 
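/* Dispatch a single BR/EDR signaling command to its handler. */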
5403 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5404 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5405 				      u8 *data)
5406 {
5407 	int err = 0;
5408 
5409 	switch (cmd->code) {
5410 	case L2CAP_COMMAND_REJ:
5411 		l2cap_command_rej(conn, cmd, cmd_len, data);
5412 		break;
5413 
5414 	case L2CAP_CONN_REQ:
5415 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5416 		break;
5417 
5418 	case L2CAP_CONN_RSP:
5419 	case L2CAP_CREATE_CHAN_RSP:
5420 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5421 		break;
5422 
5423 	case L2CAP_CONF_REQ:
5424 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5425 		break;
5426 
5427 	case L2CAP_CONF_RSP:
5428 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5429 		break;
5430 
5431 	case L2CAP_DISCONN_REQ:
5432 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5433 		break;
5434 
5435 	case L2CAP_DISCONN_RSP:
5436 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5437 		break;
5438 
5439 	case L2CAP_ECHO_REQ:
5440 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5441 		break;
5442 
5443 	case L2CAP_ECHO_RSP:
5444 		break;
5445 
5446 	case L2CAP_INFO_REQ:
5447 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5448 		break;
5449 
5450 	case L2CAP_INFO_RSP:
5451 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5452 		break;
5453 
5454 	case L2CAP_CREATE_CHAN_REQ:
5455 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5456 		break;
5457 
5458 	case L2CAP_MOVE_CHAN_REQ:
5459 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5460 		break;
5461 
5462 	case L2CAP_MOVE_CHAN_RSP:
5463 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5464 		break;
5465 
5466 	case L2CAP_MOVE_CHAN_CFM:
5467 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5468 		break;
5469 
5470 	case L2CAP_MOVE_CHAN_CFM_RSP:
5471 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5472 		break;
5473 
5474 	default:
5475 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5476 		err = -EINVAL;
5477 		break;
5478 	}
5479 
5480 	return err;
5481 }
5482 
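/* Handle an LE Credit Based Connection Request: validate the parameters
 * and dynamic CID, create the new channel and respond with its MTU, MPS
 * and initial credit count.
 */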
5483 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5484 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5485 				u8 *data)
5486 {
5487 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5488 	struct l2cap_le_conn_rsp rsp;
5489 	struct l2cap_chan *chan, *pchan;
5490 	u16 dcid, scid, credits, mtu, mps;
5491 	__le16 psm;
5492 	u8 result;
5493 
5494 	if (cmd_len != sizeof(*req))
5495 		return -EPROTO;
5496 
5497 	scid = __le16_to_cpu(req->scid);
5498 	mtu  = __le16_to_cpu(req->mtu);
5499 	mps  = __le16_to_cpu(req->mps);
5500 	psm  = req->psm;
5501 	dcid = 0;
5502 	credits = 0;
5503 
5504 	if (mtu < 23 || mps < 23)
5505 		return -EPROTO;
5506 
5507 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5508 	       scid, mtu, mps);
5509 
5510 	/* Check if we have a socket listening on this PSM */
5511 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5512 					 &conn->hcon->dst, LE_LINK);
5513 	if (!pchan) {
5514 		result = L2CAP_CR_BAD_PSM;
5515 		chan = NULL;
5516 		goto response;
5517 	}
5518 
5519 	mutex_lock(&conn->chan_lock);
5520 	l2cap_chan_lock(pchan);
5521 
5522 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5523 				     SMP_ALLOW_STK)) {
5524 		result = L2CAP_CR_AUTHENTICATION;
5525 		chan = NULL;
5526 		goto response_unlock;
5527 	}
5528 
5529 	/* Check for valid dynamic CID range */
5530 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5531 		result = L2CAP_CR_INVALID_SCID;
5532 		chan = NULL;
5533 		goto response_unlock;
5534 	}
5535 
5536 	/* Check if we already have a channel with that dcid */
5537 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5538 		result = L2CAP_CR_SCID_IN_USE;
5539 		chan = NULL;
5540 		goto response_unlock;
5541 	}
5542 
5543 	chan = pchan->ops->new_connection(pchan);
5544 	if (!chan) {
5545 		result = L2CAP_CR_NO_MEM;
5546 		goto response_unlock;
5547 	}
5548 
5549 	l2cap_le_flowctl_init(chan);
5550 
5551 	bacpy(&chan->src, &conn->hcon->src);
5552 	bacpy(&chan->dst, &conn->hcon->dst);
5553 	chan->src_type = bdaddr_src_type(conn->hcon);
5554 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5555 	chan->psm  = psm;
5556 	chan->dcid = scid;
5557 	chan->omtu = mtu;
5558 	chan->remote_mps = mps;
5559 	chan->tx_credits = __le16_to_cpu(req->credits);
5560 
5561 	__l2cap_chan_add(conn, chan);
5562 	dcid = chan->scid;
5563 	credits = chan->rx_credits;
5564 
5565 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5566 
5567 	chan->ident = cmd->ident;
5568 
5569 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5570 		l2cap_state_change(chan, BT_CONNECT2);
5571 		/* The following result value is actually not defined
5572 		 * for LE CoC but we use it to let the function know
5573 		 * that it should bail out after doing its cleanup
5574 		 * instead of sending a response.
5575 		 */
5576 		result = L2CAP_CR_PEND;
5577 		chan->ops->defer(chan);
5578 	} else {
5579 		l2cap_chan_ready(chan);
5580 		result = L2CAP_CR_SUCCESS;
5581 	}
5582 
5583 response_unlock:
5584 	l2cap_chan_unlock(pchan);
5585 	mutex_unlock(&conn->chan_lock);
5586 	l2cap_chan_put(pchan);
5587 
5588 	if (result == L2CAP_CR_PEND)
5589 		return 0;
5590 
5591 response:
5592 	if (chan) {
5593 		rsp.mtu = cpu_to_le16(chan->imtu);
5594 		rsp.mps = cpu_to_le16(chan->mps);
5595 	} else {
5596 		rsp.mtu = 0;
5597 		rsp.mps = 0;
5598 	}
5599 
5600 	rsp.dcid    = cpu_to_le16(dcid);
5601 	rsp.credits = cpu_to_le16(credits);
5602 	rsp.result  = cpu_to_le16(result);
5603 
5604 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5605 
5606 	return 0;
5607 }
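
/* Illustrative walk-through of the LE credit-based connect handshake
 * handled above (the numbers are an example only, not taken from this
 * file): the peer sends an LE Connection Request with, say, scid 0x0040,
 * mtu 512, mps 230 and 10 initial credits.  If a listener exists for the
 * PSM and security is sufficient, new_connection() allocates a channel,
 * the request's scid/mtu/mps become our dcid/omtu/remote_mps,
 * tx_credits is seeded from req->credits, and the response carries our
 * own imtu, mps and rx_credits back.  MTU or MPS below 23 is rejected,
 * matching the LE L2CAP minimum.
 */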
5608 
5609 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5610 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5611 				   u8 *data)
5612 {
5613 	struct l2cap_le_credits *pkt;
5614 	struct l2cap_chan *chan;
5615 	u16 cid, credits, max_credits;
5616 
5617 	if (cmd_len != sizeof(*pkt))
5618 		return -EPROTO;
5619 
5620 	pkt = (struct l2cap_le_credits *) data;
5621 	cid	= __le16_to_cpu(pkt->cid);
5622 	credits	= __le16_to_cpu(pkt->credits);
5623 
5624 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5625 
5626 	chan = l2cap_get_chan_by_dcid(conn, cid);
5627 	if (!chan)
5628 		return -EBADSLT;
5629 
5630 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5631 	if (credits > max_credits) {
5632 		BT_ERR("LE credits overflow");
5633 		l2cap_send_disconn_req(chan, ECONNRESET);
5634 		l2cap_chan_unlock(chan);
5635 
5636 		/* Return 0 so that we don't trigger an unnecessary
5637 		 * command reject packet.
5638 		 */
5639 		return 0;
5640 	}
5641 
5642 	chan->tx_credits += credits;
5643 
5644 	/* Resume sending */
5645 	l2cap_le_flowctl_send(chan);
5646 
5647 	if (chan->tx_credits)
5648 		chan->ops->resume(chan);
5649 
5650 	l2cap_chan_unlock(chan);
5651 
5652 	return 0;
5653 }
5654 
5655 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5656 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5657 				       u8 *data)
5658 {
5659 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5660 	struct l2cap_chan *chan;
5661 
5662 	if (cmd_len < sizeof(*rej))
5663 		return -EPROTO;
5664 
5665 	mutex_lock(&conn->chan_lock);
5666 
5667 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5668 	if (!chan)
5669 		goto done;
5670 
5671 	l2cap_chan_lock(chan);
5672 	l2cap_chan_del(chan, ECONNREFUSED);
5673 	l2cap_chan_unlock(chan);
5674 
5675 done:
5676 	mutex_unlock(&conn->chan_lock);
5677 	return 0;
5678 }
5679 
5680 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5681 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5682 				   u8 *data)
5683 {
5684 	int err = 0;
5685 
5686 	switch (cmd->code) {
5687 	case L2CAP_COMMAND_REJ:
5688 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
5689 		break;
5690 
5691 	case L2CAP_CONN_PARAM_UPDATE_REQ:
5692 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5693 		break;
5694 
5695 	case L2CAP_CONN_PARAM_UPDATE_RSP:
5696 		break;
5697 
5698 	case L2CAP_LE_CONN_RSP:
5699 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
5700 		break;
5701 
5702 	case L2CAP_LE_CONN_REQ:
5703 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5704 		break;
5705 
5706 	case L2CAP_LE_CREDITS:
5707 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
5708 		break;
5709 
5710 	case L2CAP_DISCONN_REQ:
5711 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5712 		break;
5713 
5714 	case L2CAP_DISCONN_RSP:
5715 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5716 		break;
5717 
5718 	default:
5719 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5720 		err = -EINVAL;
5721 		break;
5722 	}
5723 
5724 	return err;
5725 }
5726 
5727 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5728 					struct sk_buff *skb)
5729 {
5730 	struct hci_conn *hcon = conn->hcon;
5731 	struct l2cap_cmd_hdr *cmd;
5732 	u16 len;
5733 	int err;
5734 
5735 	if (hcon->type != LE_LINK)
5736 		goto drop;
5737 
5738 	if (skb->len < L2CAP_CMD_HDR_SIZE)
5739 		goto drop;
5740 
5741 	cmd = (void *) skb->data;
5742 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
5743 
5744 	len = le16_to_cpu(cmd->len);
5745 
5746 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
5747 
5748 	if (len != skb->len || !cmd->ident) {
5749 		BT_DBG("corrupted command");
5750 		goto drop;
5751 	}
5752 
5753 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
5754 	if (err) {
5755 		struct l2cap_cmd_rej_unk rej;
5756 
5757 		BT_ERR("Wrong link type (%d)", err);
5758 
5759 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5760 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5761 			       sizeof(rej), &rej);
5762 	}
5763 
5764 drop:
5765 	kfree_skb(skb);
5766 }
5767 
5768 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5769 				     struct sk_buff *skb)
5770 {
5771 	struct hci_conn *hcon = conn->hcon;
5772 	u8 *data = skb->data;
5773 	int len = skb->len;
5774 	struct l2cap_cmd_hdr cmd;
5775 	int err;
5776 
5777 	l2cap_raw_recv(conn, skb);
5778 
5779 	if (hcon->type != ACL_LINK)
5780 		goto drop;
5781 
5782 	while (len >= L2CAP_CMD_HDR_SIZE) {
5783 		u16 cmd_len;
5784 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5785 		data += L2CAP_CMD_HDR_SIZE;
5786 		len  -= L2CAP_CMD_HDR_SIZE;
5787 
5788 		cmd_len = le16_to_cpu(cmd.len);
5789 
5790 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5791 		       cmd.ident);
5792 
5793 		if (cmd_len > len || !cmd.ident) {
5794 			BT_DBG("corrupted command");
5795 			break;
5796 		}
5797 
5798 		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
5799 		if (err) {
5800 			struct l2cap_cmd_rej_unk rej;
5801 
5802 			BT_ERR("Wrong link type (%d)", err);
5803 
5804 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
5805 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5806 				       sizeof(rej), &rej);
5807 		}
5808 
5809 		data += cmd_len;
5810 		len  -= cmd_len;
5811 	}
5812 
5813 drop:
5814 	kfree_skb(skb);
5815 }
5816 
5817 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
5818 {
5819 	u16 our_fcs, rcv_fcs;
5820 	int hdr_size;
5821 
5822 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5823 		hdr_size = L2CAP_EXT_HDR_SIZE;
5824 	else
5825 		hdr_size = L2CAP_ENH_HDR_SIZE;
5826 
5827 	if (chan->fcs == L2CAP_FCS_CRC16) {
5828 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
5829 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5830 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5831 
5832 		if (our_fcs != rcv_fcs)
5833 			return -EBADMSG;
5834 	}
5835 	return 0;
5836 }
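
/* Example FCS layout assumed by the check above (illustrative only): for
 * an ERTM frame with the enhanced (non-extended) control field the CRC-16
 * covers the basic L2CAP header plus control field plus payload, and the
 * 2-byte FCS sits little-endian at the very end of the PDU; skb_trim()
 * drops it so that only the payload is passed on to reassembly.
 */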
5837 
5838 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
5839 {
5840 	struct l2cap_ctrl control;
5841 
5842 	BT_DBG("chan %p", chan);
5843 
5844 	memset(&control, 0, sizeof(control));
5845 	control.sframe = 1;
5846 	control.final = 1;
5847 	control.reqseq = chan->buffer_seq;
5848 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
5849 
5850 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5851 		control.super = L2CAP_SUPER_RNR;
5852 		l2cap_send_sframe(chan, &control);
5853 	}
5854 
5855 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5856 	    chan->unacked_frames > 0)
5857 		__set_retrans_timer(chan);
5858 
5859 	/* Send pending iframes */
5860 	l2cap_ertm_send(chan);
5861 
5862 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
5863 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5864 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
5865 		 * send it now.
5866 		 */
5867 		control.super = L2CAP_SUPER_RR;
5868 		l2cap_send_sframe(chan, &control);
5869 	}
5870 }
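
/* Rough behaviour of the helper above: when answering a poll it arms the
 * F-bit, reports local busy with an RNR if needed, flushes any pending
 * I-frames (which may carry the F-bit themselves), and only falls back
 * to an explicit RR if no outgoing frame has carried the F-bit by the
 * end.
 */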
5871 
5872 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5873 			    struct sk_buff **last_frag)
5874 {
5875 	/* skb->len reflects data in skb as well as all fragments
5876 	 * skb->data_len reflects only data in fragments
5877 	 */
5878 	if (!skb_has_frag_list(skb))
5879 		skb_shinfo(skb)->frag_list = new_frag;
5880 
5881 	new_frag->next = NULL;
5882 
5883 	(*last_frag)->next = new_frag;
5884 	*last_frag = new_frag;
5885 
5886 	skb->len += new_frag->len;
5887 	skb->data_len += new_frag->len;
5888 	skb->truesize += new_frag->truesize;
5889 }
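
/* Worked example of the bookkeeping above (numbers are illustrative): a
 * head skb of 100 bytes with no frag list has len == 100 and
 * data_len == 0.  Appending a 60-byte fragment makes len == 160 while
 * data_len == 60, since data_len counts only bytes living in fragments,
 * and truesize grows by the fragment's own truesize.
 */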
5890 
5891 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5892 				struct l2cap_ctrl *control)
5893 {
5894 	int err = -EINVAL;
5895 
5896 	switch (control->sar) {
5897 	case L2CAP_SAR_UNSEGMENTED:
5898 		if (chan->sdu)
5899 			break;
5900 
5901 		err = chan->ops->recv(chan, skb);
5902 		break;
5903 
5904 	case L2CAP_SAR_START:
5905 		if (chan->sdu)
5906 			break;
5907 
5908 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5909 			break;
5910 
5911 		chan->sdu_len = get_unaligned_le16(skb->data);
5912 		skb_pull(skb, L2CAP_SDULEN_SIZE);
5913 
5914 		if (chan->sdu_len > chan->imtu) {
5915 			err = -EMSGSIZE;
5916 			break;
5917 		}
5918 
5919 		if (skb->len >= chan->sdu_len)
5920 			break;
5921 
5922 		chan->sdu = skb;
5923 		chan->sdu_last_frag = skb;
5924 
5925 		skb = NULL;
5926 		err = 0;
5927 		break;
5928 
5929 	case L2CAP_SAR_CONTINUE:
5930 		if (!chan->sdu)
5931 			break;
5932 
5933 		append_skb_frag(chan->sdu, skb,
5934 				&chan->sdu_last_frag);
5935 		skb = NULL;
5936 
5937 		if (chan->sdu->len >= chan->sdu_len)
5938 			break;
5939 
5940 		err = 0;
5941 		break;
5942 
5943 	case L2CAP_SAR_END:
5944 		if (!chan->sdu)
5945 			break;
5946 
5947 		append_skb_frag(chan->sdu, skb,
5948 				&chan->sdu_last_frag);
5949 		skb = NULL;
5950 
5951 		if (chan->sdu->len != chan->sdu_len)
5952 			break;
5953 
5954 		err = chan->ops->recv(chan, chan->sdu);
5955 
5956 		if (!err) {
5957 			/* Reassembly complete */
5958 			chan->sdu = NULL;
5959 			chan->sdu_last_frag = NULL;
5960 			chan->sdu_len = 0;
5961 		}
5962 		break;
5963 	}
5964 
5965 	if (err) {
5966 		kfree_skb(skb);
5967 		kfree_skb(chan->sdu);
5968 		chan->sdu = NULL;
5969 		chan->sdu_last_frag = NULL;
5970 		chan->sdu_len = 0;
5971 	}
5972 
5973 	return err;
5974 }
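
/* Typical reassembly sequence handled above (illustrative): an SDU of,
 * say, 1000 bytes arriving as three I-frames uses SAR_START (carrying the
 * 2-byte SDU length plus the first chunk), one or more SAR_CONTINUE
 * frames, and a final SAR_END frame after which chan->sdu->len must equal
 * chan->sdu_len exactly before the SDU is handed to ops->recv(); an
 * oversized SDU or a length mismatch tears the partial reassembly down
 * with an error.
 */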
5975 
5976 static int l2cap_resegment(struct l2cap_chan *chan)
5977 {
5978 	/* Placeholder */
5979 	return 0;
5980 }
5981 
5982 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
5983 {
5984 	u8 event;
5985 
5986 	if (chan->mode != L2CAP_MODE_ERTM)
5987 		return;
5988 
5989 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
5990 	l2cap_tx(chan, NULL, NULL, event);
5991 }
5992 
5993 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5994 {
5995 	int err = 0;
5996 	/* Pass sequential frames to l2cap_reassemble_sdu()
5997 	 * until a gap is encountered.
5998 	 */
5999 
6000 	BT_DBG("chan %p", chan);
6001 
6002 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6003 		struct sk_buff *skb;
6004 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
6005 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
6006 
6007 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6008 
6009 		if (!skb)
6010 			break;
6011 
6012 		skb_unlink(skb, &chan->srej_q);
6013 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6014 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6015 		if (err)
6016 			break;
6017 	}
6018 
6019 	if (skb_queue_empty(&chan->srej_q)) {
6020 		chan->rx_state = L2CAP_RX_STATE_RECV;
6021 		l2cap_send_ack(chan);
6022 	}
6023 
6024 	return err;
6025 }
6026 
6027 static void l2cap_handle_srej(struct l2cap_chan *chan,
6028 			      struct l2cap_ctrl *control)
6029 {
6030 	struct sk_buff *skb;
6031 
6032 	BT_DBG("chan %p, control %p", chan, control);
6033 
6034 	if (control->reqseq == chan->next_tx_seq) {
6035 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6036 		l2cap_send_disconn_req(chan, ECONNRESET);
6037 		return;
6038 	}
6039 
6040 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6041 
6042 	if (skb == NULL) {
6043 		BT_DBG("Seq %d not available for retransmission",
6044 		       control->reqseq);
6045 		return;
6046 	}
6047 
6048 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6049 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6050 		l2cap_send_disconn_req(chan, ECONNRESET);
6051 		return;
6052 	}
6053 
6054 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6055 
6056 	if (control->poll) {
6057 		l2cap_pass_to_tx(chan, control);
6058 
6059 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
6060 		l2cap_retransmit(chan, control);
6061 		l2cap_ertm_send(chan);
6062 
6063 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6064 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6065 			chan->srej_save_reqseq = control->reqseq;
6066 		}
6067 	} else {
6068 		l2cap_pass_to_tx_fbit(chan, control);
6069 
6070 		if (control->final) {
6071 			if (chan->srej_save_reqseq != control->reqseq ||
6072 			    !test_and_clear_bit(CONN_SREJ_ACT,
6073 						&chan->conn_state))
6074 				l2cap_retransmit(chan, control);
6075 		} else {
6076 			l2cap_retransmit(chan, control);
6077 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6078 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6079 				chan->srej_save_reqseq = control->reqseq;
6080 			}
6081 		}
6082 	}
6083 }
6084 
6085 static void l2cap_handle_rej(struct l2cap_chan *chan,
6086 			     struct l2cap_ctrl *control)
6087 {
6088 	struct sk_buff *skb;
6089 
6090 	BT_DBG("chan %p, control %p", chan, control);
6091 
6092 	if (control->reqseq == chan->next_tx_seq) {
6093 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6094 		l2cap_send_disconn_req(chan, ECONNRESET);
6095 		return;
6096 	}
6097 
6098 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6099 
6100 	if (chan->max_tx && skb &&
6101 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6102 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6103 		l2cap_send_disconn_req(chan, ECONNRESET);
6104 		return;
6105 	}
6106 
6107 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6108 
6109 	l2cap_pass_to_tx(chan, control);
6110 
6111 	if (control->final) {
6112 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6113 			l2cap_retransmit_all(chan, control);
6114 	} else {
6115 		l2cap_retransmit_all(chan, control);
6116 		l2cap_ertm_send(chan);
6117 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6118 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6119 	}
6120 }
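
/* Summary of the two helpers above (paraphrased, not spec text): a REJ
 * asks for a go-back-N style retransmission of everything from reqseq
 * onward, hence l2cap_retransmit_all(), while an SREJ selectively
 * requests only the single frame at reqseq, hence l2cap_retransmit().
 * Both treat a reqseq equal to next_tx_seq as a protocol error and
 * disconnect, and both enforce the max_tx retry limit.
 */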
6121 
6122 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6123 {
6124 	BT_DBG("chan %p, txseq %d", chan, txseq);
6125 
6126 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6127 	       chan->expected_tx_seq);
6128 
6129 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6130 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6131 		    chan->tx_win) {
6132 			/* See notes below regarding "double poll" and
6133 			 * invalid packets.
6134 			 */
6135 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6136 				BT_DBG("Invalid/Ignore - after SREJ");
6137 				return L2CAP_TXSEQ_INVALID_IGNORE;
6138 			} else {
6139 				BT_DBG("Invalid - in window after SREJ sent");
6140 				return L2CAP_TXSEQ_INVALID;
6141 			}
6142 		}
6143 
6144 		if (chan->srej_list.head == txseq) {
6145 			BT_DBG("Expected SREJ");
6146 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6147 		}
6148 
6149 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6150 			BT_DBG("Duplicate SREJ - txseq already stored");
6151 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6152 		}
6153 
6154 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6155 			BT_DBG("Unexpected SREJ - not requested");
6156 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6157 		}
6158 	}
6159 
6160 	if (chan->expected_tx_seq == txseq) {
6161 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6162 		    chan->tx_win) {
6163 			BT_DBG("Invalid - txseq outside tx window");
6164 			return L2CAP_TXSEQ_INVALID;
6165 		} else {
6166 			BT_DBG("Expected");
6167 			return L2CAP_TXSEQ_EXPECTED;
6168 		}
6169 	}
6170 
6171 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6172 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6173 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6174 		return L2CAP_TXSEQ_DUPLICATE;
6175 	}
6176 
6177 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6178 		/* A source of invalid packets is a "double poll" condition,
6179 		 * where delays cause us to send multiple poll packets.  If
6180 		 * the remote stack receives and processes both polls,
6181 		 * sequence numbers can wrap around in such a way that a
6182 		 * resent frame has a sequence number that looks like new data
6183 		 * with a sequence gap.  This would trigger an erroneous SREJ
6184 		 * request.
6185 		 *
6186 		 * Fortunately, this is impossible with a tx window that's
6187 		 * less than half of the maximum sequence number, which allows
6188 		 * invalid frames to be safely ignored.
6189 		 *
6190 		 * With tx window sizes greater than half of the tx window
6191 		 * maximum, the frame is invalid and cannot be ignored.  This
6192 		 * causes a disconnect.
6193 		 */
6194 
6195 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6196 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6197 			return L2CAP_TXSEQ_INVALID_IGNORE;
6198 		} else {
6199 			BT_DBG("Invalid - txseq outside tx window");
6200 			return L2CAP_TXSEQ_INVALID;
6201 		}
6202 	} else {
6203 		BT_DBG("Unexpected - txseq indicates missing frames");
6204 		return L2CAP_TXSEQ_UNEXPECTED;
6205 	}
6206 }
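
/* Worked example for the window check above, assuming the standard 6-bit
 * ERTM sequence space (tx_win_max == 63) and ignoring the SREJ/duplicate
 * special cases: with last_acked_seq 10 and tx_win 20, txseq values
 * 10..29 fall inside the window.  A frame with txseq 45 (offset 35) lands
 * outside it; because 20 <= (63 + 1) / 2 it can only be stale double-poll
 * fallout and is classified INVALID_IGNORE.  With tx_win 40 instead, a
 * frame at txseq 55 (offset 45) is outside the window and, since 40 is
 * more than half the sequence space, is classified INVALID and forces a
 * disconnect.
 */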
6207 
6208 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6209 			       struct l2cap_ctrl *control,
6210 			       struct sk_buff *skb, u8 event)
6211 {
6212 	int err = 0;
6213 	bool skb_in_use = false;
6214 
6215 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6216 	       event);
6217 
6218 	switch (event) {
6219 	case L2CAP_EV_RECV_IFRAME:
6220 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6221 		case L2CAP_TXSEQ_EXPECTED:
6222 			l2cap_pass_to_tx(chan, control);
6223 
6224 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6225 				BT_DBG("Busy, discarding expected seq %d",
6226 				       control->txseq);
6227 				break;
6228 			}
6229 
6230 			chan->expected_tx_seq = __next_seq(chan,
6231 							   control->txseq);
6232 
6233 			chan->buffer_seq = chan->expected_tx_seq;
6234 			skb_in_use = true;
6235 
6236 			err = l2cap_reassemble_sdu(chan, skb, control);
6237 			if (err)
6238 				break;
6239 
6240 			if (control->final) {
6241 				if (!test_and_clear_bit(CONN_REJ_ACT,
6242 							&chan->conn_state)) {
6243 					control->final = 0;
6244 					l2cap_retransmit_all(chan, control);
6245 					l2cap_ertm_send(chan);
6246 				}
6247 			}
6248 
6249 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6250 				l2cap_send_ack(chan);
6251 			break;
6252 		case L2CAP_TXSEQ_UNEXPECTED:
6253 			l2cap_pass_to_tx(chan, control);
6254 
6255 			/* Can't issue SREJ frames in the local busy state.
6256 			 * Drop this frame; it will be seen as missing
6257 			 * once local busy is exited.
6258 			 */
6259 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6260 				BT_DBG("Busy, discarding unexpected seq %d",
6261 				       control->txseq);
6262 				break;
6263 			}
6264 
6265 			/* There was a gap in the sequence, so an SREJ
6266 			 * must be sent for each missing frame.  The
6267 			 * current frame is stored for later use.
6268 			 */
6269 			skb_queue_tail(&chan->srej_q, skb);
6270 			skb_in_use = true;
6271 			BT_DBG("Queued %p (queue len %d)", skb,
6272 			       skb_queue_len(&chan->srej_q));
6273 
6274 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6275 			l2cap_seq_list_clear(&chan->srej_list);
6276 			l2cap_send_srej(chan, control->txseq);
6277 
6278 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6279 			break;
6280 		case L2CAP_TXSEQ_DUPLICATE:
6281 			l2cap_pass_to_tx(chan, control);
6282 			break;
6283 		case L2CAP_TXSEQ_INVALID_IGNORE:
6284 			break;
6285 		case L2CAP_TXSEQ_INVALID:
6286 		default:
6287 			l2cap_send_disconn_req(chan, ECONNRESET);
6288 			break;
6289 		}
6290 		break;
6291 	case L2CAP_EV_RECV_RR:
6292 		l2cap_pass_to_tx(chan, control);
6293 		if (control->final) {
6294 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6295 
6296 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6297 			    !__chan_is_moving(chan)) {
6298 				control->final = 0;
6299 				l2cap_retransmit_all(chan, control);
6300 			}
6301 
6302 			l2cap_ertm_send(chan);
6303 		} else if (control->poll) {
6304 			l2cap_send_i_or_rr_or_rnr(chan);
6305 		} else {
6306 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6307 					       &chan->conn_state) &&
6308 			    chan->unacked_frames)
6309 				__set_retrans_timer(chan);
6310 
6311 			l2cap_ertm_send(chan);
6312 		}
6313 		break;
6314 	case L2CAP_EV_RECV_RNR:
6315 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6316 		l2cap_pass_to_tx(chan, control);
6317 		if (control && control->poll) {
6318 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6319 			l2cap_send_rr_or_rnr(chan, 0);
6320 		}
6321 		__clear_retrans_timer(chan);
6322 		l2cap_seq_list_clear(&chan->retrans_list);
6323 		break;
6324 	case L2CAP_EV_RECV_REJ:
6325 		l2cap_handle_rej(chan, control);
6326 		break;
6327 	case L2CAP_EV_RECV_SREJ:
6328 		l2cap_handle_srej(chan, control);
6329 		break;
6330 	default:
6331 		break;
6332 	}
6333 
6334 	if (skb && !skb_in_use) {
6335 		BT_DBG("Freeing %p", skb);
6336 		kfree_skb(skb);
6337 	}
6338 
6339 	return err;
6340 }
6341 
6342 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6343 				    struct l2cap_ctrl *control,
6344 				    struct sk_buff *skb, u8 event)
6345 {
6346 	int err = 0;
6347 	u16 txseq = control->txseq;
6348 	bool skb_in_use = false;
6349 
6350 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6351 	       event);
6352 
6353 	switch (event) {
6354 	case L2CAP_EV_RECV_IFRAME:
6355 		switch (l2cap_classify_txseq(chan, txseq)) {
6356 		case L2CAP_TXSEQ_EXPECTED:
6357 			/* Keep frame for reassembly later */
6358 			l2cap_pass_to_tx(chan, control);
6359 			skb_queue_tail(&chan->srej_q, skb);
6360 			skb_in_use = true;
6361 			BT_DBG("Queued %p (queue len %d)", skb,
6362 			       skb_queue_len(&chan->srej_q));
6363 
6364 			chan->expected_tx_seq = __next_seq(chan, txseq);
6365 			break;
6366 		case L2CAP_TXSEQ_EXPECTED_SREJ:
6367 			l2cap_seq_list_pop(&chan->srej_list);
6368 
6369 			l2cap_pass_to_tx(chan, control);
6370 			skb_queue_tail(&chan->srej_q, skb);
6371 			skb_in_use = true;
6372 			BT_DBG("Queued %p (queue len %d)", skb,
6373 			       skb_queue_len(&chan->srej_q));
6374 
6375 			err = l2cap_rx_queued_iframes(chan);
6376 			if (err)
6377 				break;
6378 
6379 			break;
6380 		case L2CAP_TXSEQ_UNEXPECTED:
6381 			/* Got a frame that can't be reassembled yet.
6382 			 * Save it for later, and send SREJs to cover
6383 			 * the missing frames.
6384 			 */
6385 			skb_queue_tail(&chan->srej_q, skb);
6386 			skb_in_use = true;
6387 			BT_DBG("Queued %p (queue len %d)", skb,
6388 			       skb_queue_len(&chan->srej_q));
6389 
6390 			l2cap_pass_to_tx(chan, control);
6391 			l2cap_send_srej(chan, control->txseq);
6392 			break;
6393 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6394 			/* This frame was requested with an SREJ, but
6395 			 * some expected retransmitted frames are
6396 			 * missing.  Request retransmission of missing
6397 			 * SREJ'd frames.
6398 			 */
6399 			skb_queue_tail(&chan->srej_q, skb);
6400 			skb_in_use = true;
6401 			BT_DBG("Queued %p (queue len %d)", skb,
6402 			       skb_queue_len(&chan->srej_q));
6403 
6404 			l2cap_pass_to_tx(chan, control);
6405 			l2cap_send_srej_list(chan, control->txseq);
6406 			break;
6407 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
6408 			/* We've already queued this frame.  Drop this copy. */
6409 			l2cap_pass_to_tx(chan, control);
6410 			break;
6411 		case L2CAP_TXSEQ_DUPLICATE:
6412 			/* Expecting a later sequence number, so this frame
6413 			 * was already received.  Ignore it completely.
6414 			 */
6415 			break;
6416 		case L2CAP_TXSEQ_INVALID_IGNORE:
6417 			break;
6418 		case L2CAP_TXSEQ_INVALID:
6419 		default:
6420 			l2cap_send_disconn_req(chan, ECONNRESET);
6421 			break;
6422 		}
6423 		break;
6424 	case L2CAP_EV_RECV_RR:
6425 		l2cap_pass_to_tx(chan, control);
6426 		if (control->final) {
6427 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6428 
6429 			if (!test_and_clear_bit(CONN_REJ_ACT,
6430 						&chan->conn_state)) {
6431 				control->final = 0;
6432 				l2cap_retransmit_all(chan, control);
6433 			}
6434 
6435 			l2cap_ertm_send(chan);
6436 		} else if (control->poll) {
6437 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6438 					       &chan->conn_state) &&
6439 			    chan->unacked_frames) {
6440 				__set_retrans_timer(chan);
6441 			}
6442 
6443 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
6444 			l2cap_send_srej_tail(chan);
6445 		} else {
6446 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
6447 					       &chan->conn_state) &&
6448 			    chan->unacked_frames)
6449 				__set_retrans_timer(chan);
6450 
6451 			l2cap_send_ack(chan);
6452 		}
6453 		break;
6454 	case L2CAP_EV_RECV_RNR:
6455 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6456 		l2cap_pass_to_tx(chan, control);
6457 		if (control->poll) {
6458 			l2cap_send_srej_tail(chan);
6459 		} else {
6460 			struct l2cap_ctrl rr_control;
6461 			memset(&rr_control, 0, sizeof(rr_control));
6462 			rr_control.sframe = 1;
6463 			rr_control.super = L2CAP_SUPER_RR;
6464 			rr_control.reqseq = chan->buffer_seq;
6465 			l2cap_send_sframe(chan, &rr_control);
6466 		}
6467 
6468 		break;
6469 	case L2CAP_EV_RECV_REJ:
6470 		l2cap_handle_rej(chan, control);
6471 		break;
6472 	case L2CAP_EV_RECV_SREJ:
6473 		l2cap_handle_srej(chan, control);
6474 		break;
6475 	}
6476 
6477 	if (skb && !skb_in_use) {
6478 		BT_DBG("Freeing %p", skb);
6479 		kfree_skb(skb);
6480 	}
6481 
6482 	return err;
6483 }
6484 
6485 static int l2cap_finish_move(struct l2cap_chan *chan)
6486 {
6487 	BT_DBG("chan %p", chan);
6488 
6489 	chan->rx_state = L2CAP_RX_STATE_RECV;
6490 
6491 	if (chan->hs_hcon)
6492 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6493 	else
6494 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6495 
6496 	return l2cap_resegment(chan);
6497 }
6498 
6499 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6500 				 struct l2cap_ctrl *control,
6501 				 struct sk_buff *skb, u8 event)
6502 {
6503 	int err;
6504 
6505 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6506 	       event);
6507 
6508 	if (!control->poll)
6509 		return -EPROTO;
6510 
6511 	l2cap_process_reqseq(chan, control->reqseq);
6512 
6513 	if (!skb_queue_empty(&chan->tx_q))
6514 		chan->tx_send_head = skb_peek(&chan->tx_q);
6515 	else
6516 		chan->tx_send_head = NULL;
6517 
6518 	/* Rewind next_tx_seq to the point expected
6519 	 * by the receiver.
6520 	 */
6521 	chan->next_tx_seq = control->reqseq;
6522 	chan->unacked_frames = 0;
6523 
6524 	err = l2cap_finish_move(chan);
6525 	if (err)
6526 		return err;
6527 
6528 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6529 	l2cap_send_i_or_rr_or_rnr(chan);
6530 
6531 	if (event == L2CAP_EV_RECV_IFRAME)
6532 		return -EPROTO;
6533 
6534 	return l2cap_rx_state_recv(chan, control, NULL, event);
6535 }
6536 
6537 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6538 				 struct l2cap_ctrl *control,
6539 				 struct sk_buff *skb, u8 event)
6540 {
6541 	int err;
6542 
6543 	if (!control->final)
6544 		return -EPROTO;
6545 
6546 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6547 
6548 	chan->rx_state = L2CAP_RX_STATE_RECV;
6549 	l2cap_process_reqseq(chan, control->reqseq);
6550 
6551 	if (!skb_queue_empty(&chan->tx_q))
6552 		chan->tx_send_head = skb_peek(&chan->tx_q);
6553 	else
6554 		chan->tx_send_head = NULL;
6555 
6556 	/* Rewind next_tx_seq to the point expected
6557 	 * by the receiver.
6558 	 */
6559 	chan->next_tx_seq = control->reqseq;
6560 	chan->unacked_frames = 0;
6561 
6562 	if (chan->hs_hcon)
6563 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6564 	else
6565 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6566 
6567 	err = l2cap_resegment(chan);
6568 
6569 	if (!err)
6570 		err = l2cap_rx_state_recv(chan, control, skb, event);
6571 
6572 	return err;
6573 }
6574 
6575 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6576 {
6577 	/* Make sure reqseq is for a packet that has been sent but not acked */
6578 	u16 unacked;
6579 
6580 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6581 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6582 }
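
/* Worked example (assuming a 6-bit sequence space, modulus 64): with
 * expected_ack_seq 60 and next_tx_seq 2, six frames are unacked.
 * reqseq 62 gives __seq_offset(next_tx_seq, reqseq) == 4 <= 6, so it is a
 * valid acknowledgement; reqseq 10 gives an offset of 56 > 6 and is
 * rejected, which makes l2cap_rx() disconnect the channel.
 */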
6583 
6584 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6585 		    struct sk_buff *skb, u8 event)
6586 {
6587 	int err = 0;
6588 
6589 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6590 	       control, skb, event, chan->rx_state);
6591 
6592 	if (__valid_reqseq(chan, control->reqseq)) {
6593 		switch (chan->rx_state) {
6594 		case L2CAP_RX_STATE_RECV:
6595 			err = l2cap_rx_state_recv(chan, control, skb, event);
6596 			break;
6597 		case L2CAP_RX_STATE_SREJ_SENT:
6598 			err = l2cap_rx_state_srej_sent(chan, control, skb,
6599 						       event);
6600 			break;
6601 		case L2CAP_RX_STATE_WAIT_P:
6602 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
6603 			break;
6604 		case L2CAP_RX_STATE_WAIT_F:
6605 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
6606 			break;
6607 		default:
6608 			/* shut it down */
6609 			break;
6610 		}
6611 	} else {
6612 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6613 		       control->reqseq, chan->next_tx_seq,
6614 		       chan->expected_ack_seq);
6615 		l2cap_send_disconn_req(chan, ECONNRESET);
6616 	}
6617 
6618 	return err;
6619 }
6620 
6621 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6622 			   struct sk_buff *skb)
6623 {
6624 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6625 	       chan->rx_state);
6626 
6627 	if (l2cap_classify_txseq(chan, control->txseq) ==
6628 	    L2CAP_TXSEQ_EXPECTED) {
6629 		l2cap_pass_to_tx(chan, control);
6630 
6631 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6632 		       __next_seq(chan, chan->buffer_seq));
6633 
6634 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6635 
6636 		l2cap_reassemble_sdu(chan, skb, control);
6637 	} else {
6638 		if (chan->sdu) {
6639 			kfree_skb(chan->sdu);
6640 			chan->sdu = NULL;
6641 		}
6642 		chan->sdu_last_frag = NULL;
6643 		chan->sdu_len = 0;
6644 
6645 		if (skb) {
6646 			BT_DBG("Freeing %p", skb);
6647 			kfree_skb(skb);
6648 		}
6649 	}
6650 
6651 	chan->last_acked_seq = control->txseq;
6652 	chan->expected_tx_seq = __next_seq(chan, control->txseq);
6653 
6654 	return 0;
6655 }
6656 
6657 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6658 {
6659 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
6660 	u16 len;
6661 	u8 event;
6662 
6663 	__unpack_control(chan, skb);
6664 
6665 	len = skb->len;
6666 
6667 	/*
6668 	 * We can just drop the corrupted I-frame here.
6669 	 * Receiver will miss it and start proper recovery
6670 	 * procedures and ask for retransmission.
6671 	 */
6672 	if (l2cap_check_fcs(chan, skb))
6673 		goto drop;
6674 
6675 	if (!control->sframe && control->sar == L2CAP_SAR_START)
6676 		len -= L2CAP_SDULEN_SIZE;
6677 
6678 	if (chan->fcs == L2CAP_FCS_CRC16)
6679 		len -= L2CAP_FCS_SIZE;
6680 
6681 	if (len > chan->mps) {
6682 		l2cap_send_disconn_req(chan, ECONNRESET);
6683 		goto drop;
6684 	}
6685 
6686 	if (chan->ops->filter) {
6687 		if (chan->ops->filter(chan, skb))
6688 			goto drop;
6689 	}
6690 
6691 	if (!control->sframe) {
6692 		int err;
6693 
6694 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6695 		       control->sar, control->reqseq, control->final,
6696 		       control->txseq);
6697 
6698 		/* Validate F-bit - F=0 always valid, F=1 only
6699 		 * valid in TX WAIT_F
6700 		 */
6701 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
6702 			goto drop;
6703 
6704 		if (chan->mode != L2CAP_MODE_STREAMING) {
6705 			event = L2CAP_EV_RECV_IFRAME;
6706 			err = l2cap_rx(chan, control, skb, event);
6707 		} else {
6708 			err = l2cap_stream_rx(chan, control, skb);
6709 		}
6710 
6711 		if (err)
6712 			l2cap_send_disconn_req(chan, ECONNRESET);
6713 	} else {
6714 		const u8 rx_func_to_event[4] = {
6715 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6716 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6717 		};
6718 
6719 		/* Only I-frames are expected in streaming mode */
6720 		if (chan->mode == L2CAP_MODE_STREAMING)
6721 			goto drop;
6722 
6723 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6724 		       control->reqseq, control->final, control->poll,
6725 		       control->super);
6726 
6727 		if (len != 0) {
6728 			BT_ERR("Trailing bytes: %d in sframe", len);
6729 			l2cap_send_disconn_req(chan, ECONNRESET);
6730 			goto drop;
6731 		}
6732 
6733 		/* Validate F and P bits */
6734 		if (control->final && (control->poll ||
6735 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6736 			goto drop;
6737 
6738 		event = rx_func_to_event[control->super];
6739 		if (l2cap_rx(chan, control, skb, event))
6740 			l2cap_send_disconn_req(chan, ECONNRESET);
6741 	}
6742 
6743 	return 0;
6744 
6745 drop:
6746 	kfree_skb(skb);
6747 	return 0;
6748 }
6749 
6750 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6751 {
6752 	struct l2cap_conn *conn = chan->conn;
6753 	struct l2cap_le_credits pkt;
6754 	u16 return_credits;
6755 
6756 	/* We return more credits to the sender only after the amount of
6757 	 * credits falls below half of the initial amount.
6758 	 */
6759 	if (chan->rx_credits >= (le_max_credits + 1) / 2)
6760 		return;
6761 
6762 	return_credits = le_max_credits - chan->rx_credits;
6763 
6764 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6765 
6766 	chan->rx_credits += return_credits;
6767 
6768 	pkt.cid     = cpu_to_le16(chan->scid);
6769 	pkt.credits = cpu_to_le16(return_credits);
6770 
6771 	chan->ident = l2cap_get_ident(conn);
6772 
6773 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6774 }
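
/* Illustrative credit cycle for the function above (the actual allowance
 * comes from le_max_credits): if the initial allowance is 10, nothing is
 * sent while rx_credits stays at 5 or above.  Once it drops to 4, a
 * single L2CAP_LE_CREDITS packet returning 6 credits is sent, topping the
 * peer back up to the full allowance in one round trip instead of
 * acknowledging every PDU individually.
 */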
6775 
6776 static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6777 {
6778 	int err;
6779 
6780 	if (!chan->rx_credits) {
6781 		BT_ERR("No credits to receive LE L2CAP data");
6782 		l2cap_send_disconn_req(chan, ECONNRESET);
6783 		return -ENOBUFS;
6784 	}
6785 
6786 	if (chan->imtu < skb->len) {
6787 		BT_ERR("Too big LE L2CAP PDU");
6788 		return -ENOBUFS;
6789 	}
6790 
6791 	chan->rx_credits--;
6792 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6793 
6794 	l2cap_chan_le_send_credits(chan);
6795 
6796 	err = 0;
6797 
6798 	if (!chan->sdu) {
6799 		u16 sdu_len;
6800 
6801 		sdu_len = get_unaligned_le16(skb->data);
6802 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6803 
6804 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6805 		       sdu_len, skb->len, chan->imtu);
6806 
6807 		if (sdu_len > chan->imtu) {
6808 			BT_ERR("Too big LE L2CAP SDU length received");
6809 			err = -EMSGSIZE;
6810 			goto failed;
6811 		}
6812 
6813 		if (skb->len > sdu_len) {
6814 			BT_ERR("Too much LE L2CAP data received");
6815 			err = -EINVAL;
6816 			goto failed;
6817 		}
6818 
6819 		if (skb->len == sdu_len)
6820 			return chan->ops->recv(chan, skb);
6821 
6822 		chan->sdu = skb;
6823 		chan->sdu_len = sdu_len;
6824 		chan->sdu_last_frag = skb;
6825 
6826 		/* Detect if remote is not able to use the selected MPS */
6827 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6828 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6829 
6830 			/* Adjust the number of credits */
6831 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6832 			chan->mps = mps_len;
6833 			l2cap_chan_le_send_credits(chan);
6834 		}
6835 
6836 		return 0;
6837 	}
6838 
6839 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6840 	       chan->sdu->len, skb->len, chan->sdu_len);
6841 
6842 	if (chan->sdu->len + skb->len > chan->sdu_len) {
6843 		BT_ERR("Too much LE L2CAP data received");
6844 		err = -EINVAL;
6845 		goto failed;
6846 	}
6847 
6848 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6849 	skb = NULL;
6850 
6851 	if (chan->sdu->len == chan->sdu_len) {
6852 		err = chan->ops->recv(chan, chan->sdu);
6853 		if (!err) {
6854 			chan->sdu = NULL;
6855 			chan->sdu_last_frag = NULL;
6856 			chan->sdu_len = 0;
6857 		}
6858 	}
6859 
6860 failed:
6861 	if (err) {
6862 		kfree_skb(skb);
6863 		kfree_skb(chan->sdu);
6864 		chan->sdu = NULL;
6865 		chan->sdu_last_frag = NULL;
6866 		chan->sdu_len = 0;
6867 	}
6868 
6869 	/* We can't return an error here since we took care of the skb
6870 	 * freeing internally. An error return would cause the caller to
6871 	 * do a double-free of the skb.
6872 	 */
6873 	return 0;
6874 }
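
/* Example of the MPS adjustment above (numbers are illustrative): if we
 * offered an MPS of 230 but the first fragment of a segmented SDU carries
 * only 98 payload bytes, the remote is evidently limited to 100-byte PDUs
 * (98 bytes plus the 2-byte SDU length header), so chan->mps is lowered
 * to 100 and the credit top-up logic is re-run so that flow-control
 * accounting matches the sender's actual fragment size.
 */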
6875 
6876 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6877 			       struct sk_buff *skb)
6878 {
6879 	struct l2cap_chan *chan;
6880 
6881 	chan = l2cap_get_chan_by_scid(conn, cid);
6882 	if (!chan) {
6883 		if (cid == L2CAP_CID_A2MP) {
6884 			chan = a2mp_channel_create(conn, skb);
6885 			if (!chan) {
6886 				kfree_skb(skb);
6887 				return;
6888 			}
6889 
6890 			l2cap_chan_lock(chan);
6891 		} else {
6892 			BT_DBG("unknown cid 0x%4.4x", cid);
6893 			/* Drop packet and return */
6894 			kfree_skb(skb);
6895 			return;
6896 		}
6897 	}
6898 
6899 	BT_DBG("chan %p, len %d", chan, skb->len);
6900 
6901 	/* If we receive data on a fixed channel before the info req/rsp
6902 	 * procedure is done, simply assume that the channel is supported
6903 	 * and mark it as ready.
6904 	 */
6905 	if (chan->chan_type == L2CAP_CHAN_FIXED)
6906 		l2cap_chan_ready(chan);
6907 
6908 	if (chan->state != BT_CONNECTED)
6909 		goto drop;
6910 
6911 	switch (chan->mode) {
6912 	case L2CAP_MODE_LE_FLOWCTL:
6913 		if (l2cap_le_data_rcv(chan, skb) < 0)
6914 			goto drop;
6915 
6916 		goto done;
6917 
6918 	case L2CAP_MODE_BASIC:
6919 		/* If the socket receive buffer overflows we drop data here,
6920 		 * which is *bad* because L2CAP has to be reliable.
6921 		 * But we don't have any other choice: basic mode L2CAP
6922 		 * doesn't provide a flow control mechanism. */
6923 
6924 		if (chan->imtu < skb->len) {
6925 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
6926 			goto drop;
6927 		}
6928 
6929 		if (!chan->ops->recv(chan, skb))
6930 			goto done;
6931 		break;
6932 
6933 	case L2CAP_MODE_ERTM:
6934 	case L2CAP_MODE_STREAMING:
6935 		l2cap_data_rcv(chan, skb);
6936 		goto done;
6937 
6938 	default:
6939 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
6940 		break;
6941 	}
6942 
6943 drop:
6944 	kfree_skb(skb);
6945 
6946 done:
6947 	l2cap_chan_unlock(chan);
6948 }
6949 
6950 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6951 				  struct sk_buff *skb)
6952 {
6953 	struct hci_conn *hcon = conn->hcon;
6954 	struct l2cap_chan *chan;
6955 
6956 	if (hcon->type != ACL_LINK)
6957 		goto free_skb;
6958 
6959 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6960 					ACL_LINK);
6961 	if (!chan)
6962 		goto free_skb;
6963 
6964 	BT_DBG("chan %p, len %d", chan, skb->len);
6965 
6966 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
6967 		goto drop;
6968 
6969 	if (chan->imtu < skb->len)
6970 		goto drop;
6971 
6972 	/* Store remote BD_ADDR and PSM for msg_name */
6973 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6974 	bt_cb(skb)->l2cap.psm = psm;
6975 
6976 	if (!chan->ops->recv(chan, skb)) {
6977 		l2cap_chan_put(chan);
6978 		return;
6979 	}
6980 
6981 drop:
6982 	l2cap_chan_put(chan);
6983 free_skb:
6984 	kfree_skb(skb);
6985 }
6986 
6987 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6988 {
6989 	struct l2cap_hdr *lh = (void *) skb->data;
6990 	struct hci_conn *hcon = conn->hcon;
6991 	u16 cid, len;
6992 	__le16 psm;
6993 
6994 	if (hcon->state != BT_CONNECTED) {
6995 		BT_DBG("queueing pending rx skb");
6996 		skb_queue_tail(&conn->pending_rx, skb);
6997 		return;
6998 	}
6999 
7000 	skb_pull(skb, L2CAP_HDR_SIZE);
7001 	cid = __le16_to_cpu(lh->cid);
7002 	len = __le16_to_cpu(lh->len);
7003 
7004 	if (len != skb->len) {
7005 		kfree_skb(skb);
7006 		return;
7007 	}
7008 
7009 	/* Since we can't actively block incoming LE connections we must
7010 	 * at least ensure that we ignore incoming data from them.
7011 	 */
7012 	if (hcon->type == LE_LINK &&
7013 	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7014 				   bdaddr_dst_type(hcon))) {
7015 		kfree_skb(skb);
7016 		return;
7017 	}
7018 
7019 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7020 
7021 	switch (cid) {
7022 	case L2CAP_CID_SIGNALING:
7023 		l2cap_sig_channel(conn, skb);
7024 		break;
7025 
7026 	case L2CAP_CID_CONN_LESS:
7027 		psm = get_unaligned((__le16 *) skb->data);
7028 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7029 		l2cap_conless_channel(conn, psm, skb);
7030 		break;
7031 
7032 	case L2CAP_CID_LE_SIGNALING:
7033 		l2cap_le_sig_channel(conn, skb);
7034 		break;
7035 
7036 	default:
7037 		l2cap_data_channel(conn, cid, skb);
7038 		break;
7039 	}
7040 }
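
/* Dispatch summary for the switch above, using the L2CAP fixed CIDs:
 * 0x0001 is the BR/EDR signaling channel, 0x0002 the connectionless
 * channel whose payload starts with a 2-byte PSM, 0x0005 the LE
 * signaling channel, and anything else is looked up as a
 * connection-oriented data channel by source CID.  Frames arriving
 * before the HCI link reaches BT_CONNECTED are parked on pending_rx.
 */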
7041 
7042 static void process_pending_rx(struct work_struct *work)
7043 {
7044 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7045 					       pending_rx_work);
7046 	struct sk_buff *skb;
7047 
7048 	BT_DBG("");
7049 
7050 	while ((skb = skb_dequeue(&conn->pending_rx)))
7051 		l2cap_recv_frame(conn, skb);
7052 }
7053 
7054 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7055 {
7056 	struct l2cap_conn *conn = hcon->l2cap_data;
7057 	struct hci_chan *hchan;
7058 
7059 	if (conn)
7060 		return conn;
7061 
7062 	hchan = hci_chan_create(hcon);
7063 	if (!hchan)
7064 		return NULL;
7065 
7066 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7067 	if (!conn) {
7068 		hci_chan_del(hchan);
7069 		return NULL;
7070 	}
7071 
7072 	kref_init(&conn->ref);
7073 	hcon->l2cap_data = conn;
7074 	conn->hcon = hci_conn_get(hcon);
7075 	conn->hchan = hchan;
7076 
7077 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7078 
7079 	switch (hcon->type) {
7080 	case LE_LINK:
7081 		if (hcon->hdev->le_mtu) {
7082 			conn->mtu = hcon->hdev->le_mtu;
7083 			break;
7084 		}
7085 		/* fall through */
7086 	default:
7087 		conn->mtu = hcon->hdev->acl_mtu;
7088 		break;
7089 	}
7090 
7091 	conn->feat_mask = 0;
7092 
7093 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7094 
7095 	if (hcon->type == ACL_LINK &&
7096 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7097 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7098 
7099 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7100 	    (bredr_sc_enabled(hcon->hdev) ||
7101 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7102 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7103 
7104 	mutex_init(&conn->ident_lock);
7105 	mutex_init(&conn->chan_lock);
7106 
7107 	INIT_LIST_HEAD(&conn->chan_l);
7108 	INIT_LIST_HEAD(&conn->users);
7109 
7110 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7111 
7112 	skb_queue_head_init(&conn->pending_rx);
7113 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7114 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7115 
7116 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7117 
7118 	return conn;
7119 }
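
/* MTU selection above: LE links use the controller's le_mtu when the
 * controller reports a dedicated LE buffer size; a le_mtu of 0 means LE
 * traffic shares the ACL buffers, so everything else falls back to
 * acl_mtu.
 */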
7120 
7121 static bool is_valid_psm(u16 psm, u8 dst_type) {
7122 	if (!psm)
7123 		return false;
7124 
7125 	if (bdaddr_type_is_le(dst_type))
7126 		return (psm <= 0x00ff);
7127 
7128 	/* PSM must be odd and lsb of upper byte must be 0 */
7129 	return ((psm & 0x0101) == 0x0001);
7130 }
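
/* Examples for the PSM rules above: on LE links any non-zero PSM up to
 * 0x00ff is accepted.  On BR/EDR, 0x0001 (SDP) and 0x0003 (RFCOMM) pass
 * the odd / upper-byte-lsb-clear test, whereas 0x0002 (even) and 0x0101
 * (lsb of the upper byte set) are rejected.
 */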
7131 
7132 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7133 		       bdaddr_t *dst, u8 dst_type)
7134 {
7135 	struct l2cap_conn *conn;
7136 	struct hci_conn *hcon;
7137 	struct hci_dev *hdev;
7138 	int err;
7139 
7140 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7141 	       dst_type, __le16_to_cpu(psm));
7142 
7143 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7144 	if (!hdev)
7145 		return -EHOSTUNREACH;
7146 
7147 	hci_dev_lock(hdev);
7148 
7149 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7150 	    chan->chan_type != L2CAP_CHAN_RAW) {
7151 		err = -EINVAL;
7152 		goto done;
7153 	}
7154 
7155 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7156 		err = -EINVAL;
7157 		goto done;
7158 	}
7159 
7160 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7161 		err = -EINVAL;
7162 		goto done;
7163 	}
7164 
7165 	switch (chan->mode) {
7166 	case L2CAP_MODE_BASIC:
7167 		break;
7168 	case L2CAP_MODE_LE_FLOWCTL:
7169 		l2cap_le_flowctl_init(chan);
7170 		break;
7171 	case L2CAP_MODE_ERTM:
7172 	case L2CAP_MODE_STREAMING:
7173 		if (!disable_ertm)
7174 			break;
7175 		/* fall through */
7176 	default:
7177 		err = -EOPNOTSUPP;
7178 		goto done;
7179 	}
7180 
7181 	switch (chan->state) {
7182 	case BT_CONNECT:
7183 	case BT_CONNECT2:
7184 	case BT_CONFIG:
7185 		/* Already connecting */
7186 		err = 0;
7187 		goto done;
7188 
7189 	case BT_CONNECTED:
7190 		/* Already connected */
7191 		err = -EISCONN;
7192 		goto done;
7193 
7194 	case BT_OPEN:
7195 	case BT_BOUND:
7196 		/* Can connect */
7197 		break;
7198 
7199 	default:
7200 		err = -EBADFD;
7201 		goto done;
7202 	}
7203 
7204 	/* Set destination address and psm */
7205 	bacpy(&chan->dst, dst);
7206 	chan->dst_type = dst_type;
7207 
7208 	chan->psm = psm;
7209 	chan->dcid = cid;
7210 
7211 	if (bdaddr_type_is_le(dst_type)) {
7212 		/* Convert from L2CAP channel address type to HCI address type
7213 		 */
7214 		if (dst_type == BDADDR_LE_PUBLIC)
7215 			dst_type = ADDR_LE_DEV_PUBLIC;
7216 		else
7217 			dst_type = ADDR_LE_DEV_RANDOM;
7218 
7219 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
7220 			hcon = hci_connect_le(hdev, dst, dst_type,
7221 					      chan->sec_level,
7222 					      HCI_LE_CONN_TIMEOUT,
7223 					      HCI_ROLE_SLAVE, NULL);
7224 		else
7225 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
7226 						   chan->sec_level,
7227 						   HCI_LE_CONN_TIMEOUT);
7228 
7229 	} else {
7230 		u8 auth_type = l2cap_get_auth_type(chan);
7231 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
7232 	}
7233 
7234 	if (IS_ERR(hcon)) {
7235 		err = PTR_ERR(hcon);
7236 		goto done;
7237 	}
7238 
7239 	conn = l2cap_conn_add(hcon);
7240 	if (!conn) {
7241 		hci_conn_drop(hcon);
7242 		err = -ENOMEM;
7243 		goto done;
7244 	}
7245 
7246 	mutex_lock(&conn->chan_lock);
7247 	l2cap_chan_lock(chan);
7248 
7249 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7250 		hci_conn_drop(hcon);
7251 		err = -EBUSY;
7252 		goto chan_unlock;
7253 	}
7254 
7255 	/* Update source addr of the socket */
7256 	bacpy(&chan->src, &hcon->src);
7257 	chan->src_type = bdaddr_src_type(hcon);
7258 
7259 	__l2cap_chan_add(conn, chan);
7260 
7261 	/* l2cap_chan_add takes its own ref so we can drop this one */
7262 	hci_conn_drop(hcon);
7263 
7264 	l2cap_state_change(chan, BT_CONNECT);
7265 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7266 
7267 	/* Release chan->sport so that it can be reused by other
7268 	 * sockets (as it's only used for listening sockets).
7269 	 */
7270 	write_lock(&chan_list_lock);
7271 	chan->sport = 0;
7272 	write_unlock(&chan_list_lock);
7273 
7274 	if (hcon->state == BT_CONNECTED) {
7275 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7276 			__clear_chan_timer(chan);
7277 			if (l2cap_chan_check_security(chan, true))
7278 				l2cap_state_change(chan, BT_CONNECTED);
7279 		} else
7280 			l2cap_do_start(chan);
7281 	}
7282 
7283 	err = 0;
7284 
7285 chan_unlock:
7286 	l2cap_chan_unlock(chan);
7287 	mutex_unlock(&conn->chan_lock);
7288 done:
7289 	hci_dev_unlock(hdev);
7290 	hci_dev_put(hdev);
7291 	return err;
7292 }
7293 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
7294 
7295 /* ---- L2CAP interface with lower layer (HCI) ---- */
7296 
7297 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
7298 {
7299 	int exact = 0, lm1 = 0, lm2 = 0;
7300 	struct l2cap_chan *c;
7301 
7302 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
7303 
7304 	/* Find listening sockets and check their link_mode */
7305 	read_lock(&chan_list_lock);
7306 	list_for_each_entry(c, &chan_list, global_l) {
7307 		if (c->state != BT_LISTEN)
7308 			continue;
7309 
7310 		if (!bacmp(&c->src, &hdev->bdaddr)) {
7311 			lm1 |= HCI_LM_ACCEPT;
7312 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7313 				lm1 |= HCI_LM_MASTER;
7314 			exact++;
7315 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
7316 			lm2 |= HCI_LM_ACCEPT;
7317 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
7318 				lm2 |= HCI_LM_MASTER;
7319 		}
7320 	}
7321 	read_unlock(&chan_list_lock);
7322 
7323 	return exact ? lm1 : lm2;
7324 }
7325 
7326 /* Find the next fixed channel in BT_LISTEN state, continue iteration
7327  * from an existing channel in the list or from the beginning of the
7328  * global list (by passing NULL as first parameter).
7329  */
7330 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
7331 						  struct hci_conn *hcon)
7332 {
7333 	u8 src_type = bdaddr_src_type(hcon);
7334 
7335 	read_lock(&chan_list_lock);
7336 
7337 	if (c)
7338 		c = list_next_entry(c, global_l);
7339 	else
7340 		c = list_entry(chan_list.next, typeof(*c), global_l);
7341 
7342 	list_for_each_entry_from(c, &chan_list, global_l) {
7343 		if (c->chan_type != L2CAP_CHAN_FIXED)
7344 			continue;
7345 		if (c->state != BT_LISTEN)
7346 			continue;
7347 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
7348 			continue;
7349 		if (src_type != c->src_type)
7350 			continue;
7351 
7352 		l2cap_chan_hold(c);
7353 		read_unlock(&chan_list_lock);
7354 		return c;
7355 	}
7356 
7357 	read_unlock(&chan_list_lock);
7358 
7359 	return NULL;
7360 }
7361 
7362 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7363 {
7364 	struct hci_dev *hdev = hcon->hdev;
7365 	struct l2cap_conn *conn;
7366 	struct l2cap_chan *pchan;
7367 	u8 dst_type;
7368 
7369 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7370 		return;
7371 
7372 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
7373 
7374 	if (status) {
7375 		l2cap_conn_del(hcon, bt_to_errno(status));
7376 		return;
7377 	}
7378 
7379 	conn = l2cap_conn_add(hcon);
7380 	if (!conn)
7381 		return;
7382 
7383 	dst_type = bdaddr_dst_type(hcon);
7384 
7385 	/* If device is blocked, do not create channels for it */
7386 	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7387 		return;
7388 
7389 	/* Find fixed channels and notify them of the new connection. We
7390 	 * use multiple individual lookups, continuing each time where
7391 	 * we left off, because the list lock would prevent calling the
7392 	 * potentially sleeping l2cap_chan_lock() function.
7393 	 */
7394 	pchan = l2cap_global_fixed_chan(NULL, hcon);
7395 	while (pchan) {
7396 		struct l2cap_chan *chan, *next;
7397 
7398 		/* Client fixed channels should override server ones */
7399 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7400 			goto next;
7401 
7402 		l2cap_chan_lock(pchan);
7403 		chan = pchan->ops->new_connection(pchan);
7404 		if (chan) {
7405 			bacpy(&chan->src, &hcon->src);
7406 			bacpy(&chan->dst, &hcon->dst);
7407 			chan->src_type = bdaddr_src_type(hcon);
7408 			chan->dst_type = dst_type;
7409 
7410 			__l2cap_chan_add(conn, chan);
7411 		}
7412 
7413 		l2cap_chan_unlock(pchan);
7414 next:
7415 		next = l2cap_global_fixed_chan(pchan, hcon);
7416 		l2cap_chan_put(pchan);
7417 		pchan = next;
7418 	}
7419 
7420 	l2cap_conn_ready(conn);
7421 }
7422 
7423 int l2cap_disconn_ind(struct hci_conn *hcon)
7424 {
7425 	struct l2cap_conn *conn = hcon->l2cap_data;
7426 
7427 	BT_DBG("hcon %p", hcon);
7428 
7429 	if (!conn)
7430 		return HCI_ERROR_REMOTE_USER_TERM;
7431 	return conn->disc_reason;
7432 }
7433 
7434 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
7435 {
7436 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7437 		return;
7438 
7439 	BT_DBG("hcon %p reason %d", hcon, reason);
7440 
7441 	l2cap_conn_del(hcon, bt_to_errno(reason));
7442 }
7443 
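/* The helper below only acts on connection-oriented channels.  When
 * encryption is reported off, BT_SECURITY_MEDIUM channels are given
 * L2CAP_ENC_TIMEOUT to become encrypted again before being torn down,
 * while BT_SECURITY_HIGH and BT_SECURITY_FIPS channels are closed
 * immediately with ECONNREFUSED.  When encryption is reported on, any
 * pending timer on a BT_SECURITY_MEDIUM channel is cleared.
 */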
7444 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
7445 {
7446 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
7447 		return;
7448 
7449 	if (encrypt == 0x00) {
7450 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
7451 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
7452 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
7453 			   chan->sec_level == BT_SECURITY_FIPS)
7454 			l2cap_chan_close(chan, ECONNREFUSED);
7455 	} else {
7456 		if (chan->sec_level == BT_SECURITY_MEDIUM)
7457 			__clear_chan_timer(chan);
7458 	}
7459 }
7460 
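/* Summary of the handler below (non-exhaustive): for every channel on the
 * connection except the A2MP fixed channel, a successful change that leaves
 * the link encrypted updates chan->sec_level; channels already in
 * BT_CONNECTED or BT_CONFIG are resumed and re-checked through
 * l2cap_check_encryption(); channels in BT_CONNECT either start their
 * connection (if the encryption key size check passes) or arm the
 * disconnect timer; and channels in BT_CONNECT2 answer the peer's pending
 * connection request with success, authorization-pending or security-block
 * depending on the outcome.
 */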
7461 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
7462 {
7463 	struct l2cap_conn *conn = hcon->l2cap_data;
7464 	struct l2cap_chan *chan;
7465 
7466 	if (!conn)
7467 		return;
7468 
7469 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
7470 
7471 	mutex_lock(&conn->chan_lock);
7472 
7473 	list_for_each_entry(chan, &conn->chan_l, list) {
7474 		l2cap_chan_lock(chan);
7475 
7476 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7477 		       state_to_string(chan->state));
7478 
7479 		if (chan->scid == L2CAP_CID_A2MP) {
7480 			l2cap_chan_unlock(chan);
7481 			continue;
7482 		}
7483 
7484 		if (!status && encrypt)
7485 			chan->sec_level = hcon->sec_level;
7486 
7487 		if (!__l2cap_no_conn_pending(chan)) {
7488 			l2cap_chan_unlock(chan);
7489 			continue;
7490 		}
7491 
7492 		if (!status && (chan->state == BT_CONNECTED ||
7493 				chan->state == BT_CONFIG)) {
7494 			chan->ops->resume(chan);
7495 			l2cap_check_encryption(chan, encrypt);
7496 			l2cap_chan_unlock(chan);
7497 			continue;
7498 		}
7499 
7500 		if (chan->state == BT_CONNECT) {
7501 			if (!status && l2cap_check_enc_key_size(hcon))
7502 				l2cap_start_connection(chan);
7503 			else
7504 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7505 		} else if (chan->state == BT_CONNECT2 &&
7506 			   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
7507 			struct l2cap_conn_rsp rsp;
7508 			__u16 res, stat;
7509 
7510 			if (!status && l2cap_check_enc_key_size(hcon)) {
7511 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
7512 					res = L2CAP_CR_PEND;
7513 					stat = L2CAP_CS_AUTHOR_PEND;
7514 					chan->ops->defer(chan);
7515 				} else {
7516 					l2cap_state_change(chan, BT_CONFIG);
7517 					res = L2CAP_CR_SUCCESS;
7518 					stat = L2CAP_CS_NO_INFO;
7519 				}
7520 			} else {
7521 				l2cap_state_change(chan, BT_DISCONN);
7522 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
7523 				res = L2CAP_CR_SEC_BLOCK;
7524 				stat = L2CAP_CS_NO_INFO;
7525 			}
7526 
7527 			rsp.scid   = cpu_to_le16(chan->dcid);
7528 			rsp.dcid   = cpu_to_le16(chan->scid);
7529 			rsp.result = cpu_to_le16(res);
7530 			rsp.status = cpu_to_le16(stat);
7531 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
7532 				       sizeof(rsp), &rsp);
7533 
7534 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7535 			    res == L2CAP_CR_SUCCESS) {
7536 				char buf[128];
7537 				set_bit(CONF_REQ_SENT, &chan->conf_state);
7538 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
7539 					       L2CAP_CONF_REQ,
7540 					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
7541 					       buf);
7542 				chan->num_conf_req++;
7543 			}
7544 		}
7545 
7546 		l2cap_chan_unlock(chan);
7547 	}
7548 
7549 	mutex_unlock(&conn->chan_lock);
7550 }
7551 
7552 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
7553 {
7554 	struct l2cap_conn *conn = hcon->l2cap_data;
7555 	struct l2cap_hdr *hdr;
7556 	int len;
7557 
7558 	/* For an AMP controller, do not create an l2cap conn */
7559 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
7560 		goto drop;
7561 
7562 	if (!conn)
7563 		conn = l2cap_conn_add(hcon);
7564 
7565 	if (!conn)
7566 		goto drop;
7567 
7568 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7569 
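	/* Reassembly overview: a start fragment (ACL_START,
	 * ACL_START_NO_FLUSH or ACL_COMPLETE) carries the Basic L2CAP
	 * header, whose len field plus L2CAP_HDR_SIZE gives the total PDU
	 * length.  If the fragment already holds the whole PDU it is
	 * delivered directly; otherwise conn->rx_skb is allocated for the
	 * full length and conn->rx_len tracks the bytes still missing.
	 * Each ACL_CONT fragment is then appended until rx_len reaches zero
	 * and the reassembled PDU is passed to l2cap_recv_frame().  As an
	 * illustration with made-up numbers: hdr->len == 672 gives a total
	 * of 676 bytes, so controller fragments of 300, 300 and 76 bytes
	 * leave rx_len at 376, 76 and 0 respectively.
	 */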
7570 	switch (flags) {
7571 	case ACL_START:
7572 	case ACL_START_NO_FLUSH:
7573 	case ACL_COMPLETE:
7574 		if (conn->rx_len) {
7575 			BT_ERR("Unexpected start frame (len %d)", skb->len);
7576 			kfree_skb(conn->rx_skb);
7577 			conn->rx_skb = NULL;
7578 			conn->rx_len = 0;
7579 			l2cap_conn_unreliable(conn, ECOMM);
7580 		}
7581 
7582 		/* A start fragment always begins with the Basic L2CAP header */
7583 		if (skb->len < L2CAP_HDR_SIZE) {
7584 			BT_ERR("Frame is too short (len %d)", skb->len);
7585 			l2cap_conn_unreliable(conn, ECOMM);
7586 			goto drop;
7587 		}
7588 
7589 		hdr = (struct l2cap_hdr *) skb->data;
7590 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7591 
7592 		if (len == skb->len) {
7593 			/* Complete frame received */
7594 			l2cap_recv_frame(conn, skb);
7595 			return;
7596 		}
7597 
7598 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7599 
7600 		if (skb->len > len) {
7601 			BT_ERR("Frame is too long (len %d, expected len %d)",
7602 			       skb->len, len);
7603 			l2cap_conn_unreliable(conn, ECOMM);
7604 			goto drop;
7605 		}
7606 
7607 		/* Allocate skb for the complete frame (with header) */
7608 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
7609 		if (!conn->rx_skb)
7610 			goto drop;
7611 
7612 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7613 					  skb->len);
7614 		conn->rx_len = len - skb->len;
7615 		break;
7616 
7617 	case ACL_CONT:
7618 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7619 
7620 		if (!conn->rx_len) {
7621 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7622 			l2cap_conn_unreliable(conn, ECOMM);
7623 			goto drop;
7624 		}
7625 
7626 		if (skb->len > conn->rx_len) {
7627 			BT_ERR("Fragment is too long (len %d, expected %d)",
7628 			       skb->len, conn->rx_len);
7629 			kfree_skb(conn->rx_skb);
7630 			conn->rx_skb = NULL;
7631 			conn->rx_len = 0;
7632 			l2cap_conn_unreliable(conn, ECOMM);
7633 			goto drop;
7634 		}
7635 
7636 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
7637 					  skb->len);
7638 		conn->rx_len -= skb->len;
7639 
7640 		if (!conn->rx_len) {
7641 			/* Complete frame received. l2cap_recv_frame()
7642 			 * takes ownership of the skb, so clear the
7643 			 * connection's rx_skb pointer first.
7644 			 */
7645 			struct sk_buff *rx_skb = conn->rx_skb;
7646 			conn->rx_skb = NULL;
7647 			l2cap_recv_frame(conn, rx_skb);
7648 		}
7649 		break;
7650 	}
7651 
7652 drop:
7653 	kfree_skb(skb);
7654 }
7655 
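/* These callbacks are registered with the HCI core through
 * hci_register_cb() in l2cap_init() below; the core calls them back on
 * connection completion, disconnection and security (encryption) changes
 * respectively.
 */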
7656 static struct hci_cb l2cap_cb = {
7657 	.name		= "L2CAP",
7658 	.connect_cfm	= l2cap_connect_cfm,
7659 	.disconn_cfm	= l2cap_disconn_cfm,
7660 	.security_cfm	= l2cap_security_cfm,
7661 };
7662 
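/* Each line of the "l2cap" debugfs file produced below shows, in order,
 * the source address and type, destination address and type, channel
 * state, PSM, SCID, DCID, incoming MTU, outgoing MTU, security level and
 * channel mode of one entry on the global channel list.
 */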
7663 static int l2cap_debugfs_show(struct seq_file *f, void *p)
7664 {
7665 	struct l2cap_chan *c;
7666 
7667 	read_lock(&chan_list_lock);
7668 
7669 	list_for_each_entry(c, &chan_list, global_l) {
7670 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7671 			   &c->src, c->src_type, &c->dst, c->dst_type,
7672 			   c->state, __le16_to_cpu(c->psm),
7673 			   c->scid, c->dcid, c->imtu, c->omtu,
7674 			   c->sec_level, c->mode);
7675 	}
7676 
7677 	read_unlock(&chan_list_lock);
7678 
7679 	return 0;
7680 }
7681 
7682 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7683 {
7684 	return single_open(file, l2cap_debugfs_show, inode->i_private);
7685 }
7686 
7687 static const struct file_operations l2cap_debugfs_fops = {
7688 	.open		= l2cap_debugfs_open,
7689 	.read		= seq_read,
7690 	.llseek		= seq_lseek,
7691 	.release	= single_release,
7692 };
7693 
7694 static struct dentry *l2cap_debugfs;
7695 
7696 int __init l2cap_init(void)
7697 {
7698 	int err;
7699 
7700 	err = l2cap_init_sockets();
7701 	if (err < 0)
7702 		return err;
7703 
7704 	hci_register_cb(&l2cap_cb);
7705 
7706 	if (IS_ERR_OR_NULL(bt_debugfs))
7707 		return 0;
7708 
7709 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7710 					    NULL, &l2cap_debugfs_fops);
7711 
7712 	debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
7713 			   &le_max_credits);
7714 	debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
7715 			   &le_default_mps);
7716 
7717 	return 0;
7718 }
7719 
7720 void l2cap_exit(void)
7721 {
7722 	debugfs_remove(l2cap_debugfs);
7723 	hci_unregister_cb(&l2cap_cb);
7724 	l2cap_cleanup_sockets();
7725 }
7726 
7727 module_param(disable_ertm, bool, 0644);
7728 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
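/* Assuming the usual build in which this code is part of the bluetooth
 * module, disable_ertm can be set at load time (e.g.
 * "modprobe bluetooth disable_ertm=1") or changed at runtime through
 * /sys/module/bluetooth/parameters/disable_ertm, since it is registered
 * above with mode 0644.
 */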
7729