1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 static void l2cap_retrans_timeout(struct work_struct *work);
65 static void l2cap_monitor_timeout(struct work_struct *work);
66 static void l2cap_ack_timeout(struct work_struct *work);
67 
68 static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
69 {
70 	if (link_type == LE_LINK) {
71 		if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
72 			return BDADDR_LE_PUBLIC;
73 		else
74 			return BDADDR_LE_RANDOM;
75 	}
76 
77 	return BDADDR_BREDR;
78 }
79 
80 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
81 {
82 	return bdaddr_type(hcon->type, hcon->src_type);
83 }
84 
85 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
86 {
87 	return bdaddr_type(hcon->type, hcon->dst_type);
88 }
89 
90 /* ---- L2CAP channels ---- */
91 
92 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
93 						   u16 cid)
94 {
95 	struct l2cap_chan *c;
96 
97 	list_for_each_entry(c, &conn->chan_l, list) {
98 		if (c->dcid == cid)
99 			return c;
100 	}
101 	return NULL;
102 }
103 
104 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
105 						   u16 cid)
106 {
107 	struct l2cap_chan *c;
108 
109 	list_for_each_entry(c, &conn->chan_l, list) {
110 		if (c->scid == cid)
111 			return c;
112 	}
113 	return NULL;
114 }
115 
116 /* Find channel with given SCID.
117  * Returns a reference locked channel.
118  */
119 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
120 						 u16 cid)
121 {
122 	struct l2cap_chan *c;
123 
124 	mutex_lock(&conn->chan_lock);
125 	c = __l2cap_get_chan_by_scid(conn, cid);
126 	if (c) {
127 		/* Only lock if chan reference is not 0 */
128 		c = l2cap_chan_hold_unless_zero(c);
129 		if (c)
130 			l2cap_chan_lock(c);
131 	}
132 	mutex_unlock(&conn->chan_lock);
133 
134 	return c;
135 }
136 
137 /* Find channel with given DCID.
138  * Returns a reference locked channel.
139  */
140 static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
141 						 u16 cid)
142 {
143 	struct l2cap_chan *c;
144 
145 	mutex_lock(&conn->chan_lock);
146 	c = __l2cap_get_chan_by_dcid(conn, cid);
147 	if (c) {
148 		/* Only lock if chan reference is not 0 */
149 		c = l2cap_chan_hold_unless_zero(c);
150 		if (c)
151 			l2cap_chan_lock(c);
152 	}
153 	mutex_unlock(&conn->chan_lock);
154 
155 	return c;
156 }
157 
158 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
159 						    u8 ident)
160 {
161 	struct l2cap_chan *c;
162 
163 	list_for_each_entry(c, &conn->chan_l, list) {
164 		if (c->ident == ident)
165 			return c;
166 	}
167 	return NULL;
168 }
169 
170 static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
171 						  u8 ident)
172 {
173 	struct l2cap_chan *c;
174 
175 	mutex_lock(&conn->chan_lock);
176 	c = __l2cap_get_chan_by_ident(conn, ident);
177 	if (c) {
178 		/* Only lock if chan reference is not 0 */
179 		c = l2cap_chan_hold_unless_zero(c);
180 		if (c)
181 			l2cap_chan_lock(c);
182 	}
183 	mutex_unlock(&conn->chan_lock);
184 
185 	return c;
186 }
187 
188 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
189 						      u8 src_type)
190 {
191 	struct l2cap_chan *c;
192 
193 	list_for_each_entry(c, &chan_list, global_l) {
194 		if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR)
195 			continue;
196 
197 		if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR)
198 			continue;
199 
200 		if (c->sport == psm && !bacmp(&c->src, src))
201 			return c;
202 	}
203 	return NULL;
204 }
205 
206 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
207 {
208 	int err;
209 
210 	write_lock(&chan_list_lock);
211 
212 	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
213 		err = -EADDRINUSE;
214 		goto done;
215 	}
216 
217 	if (psm) {
218 		chan->psm = psm;
219 		chan->sport = psm;
220 		err = 0;
221 	} else {
222 		u16 p, start, end, incr;
223 
224 		if (chan->src_type == BDADDR_BREDR) {
225 			start = L2CAP_PSM_DYN_START;
226 			end = L2CAP_PSM_AUTO_END;
227 			incr = 2;
228 		} else {
229 			start = L2CAP_PSM_LE_DYN_START;
230 			end = L2CAP_PSM_LE_DYN_END;
231 			incr = 1;
232 		}
233 
234 		err = -EINVAL;
235 		for (p = start; p <= end; p += incr)
236 			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
237 							 chan->src_type)) {
238 				chan->psm   = cpu_to_le16(p);
239 				chan->sport = cpu_to_le16(p);
240 				err = 0;
241 				break;
242 			}
243 	}
244 
245 done:
246 	write_unlock(&chan_list_lock);
247 	return err;
248 }
249 EXPORT_SYMBOL_GPL(l2cap_add_psm);
250 
251 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
252 {
253 	write_lock(&chan_list_lock);
254 
255 	/* Override the defaults (which are for conn-oriented) */
256 	chan->omtu = L2CAP_DEFAULT_MTU;
257 	chan->chan_type = L2CAP_CHAN_FIXED;
258 
259 	chan->scid = scid;
260 
261 	write_unlock(&chan_list_lock);
262 
263 	return 0;
264 }
265 
266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
267 {
268 	u16 cid, dyn_end;
269 
270 	if (conn->hcon->type == LE_LINK)
271 		dyn_end = L2CAP_CID_LE_DYN_END;
272 	else
273 		dyn_end = L2CAP_CID_DYN_END;
274 
275 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 		if (!__l2cap_get_chan_by_scid(conn, cid))
277 			return cid;
278 	}
279 
280 	return 0;
281 }
282 
283 static void l2cap_state_change(struct l2cap_chan *chan, int state)
284 {
285 	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
286 	       state_to_string(state));
287 
288 	chan->state = state;
289 	chan->ops->state_change(chan, state, 0);
290 }
291 
292 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
293 						int state, int err)
294 {
295 	chan->state = state;
296 	chan->ops->state_change(chan, chan->state, err);
297 }
298 
299 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
300 {
301 	chan->ops->state_change(chan, chan->state, err);
302 }
303 
304 static void __set_retrans_timer(struct l2cap_chan *chan)
305 {
306 	if (!delayed_work_pending(&chan->monitor_timer) &&
307 	    chan->retrans_timeout) {
308 		l2cap_set_timer(chan, &chan->retrans_timer,
309 				msecs_to_jiffies(chan->retrans_timeout));
310 	}
311 }
312 
313 static void __set_monitor_timer(struct l2cap_chan *chan)
314 {
315 	__clear_retrans_timer(chan);
316 	if (chan->monitor_timeout) {
317 		l2cap_set_timer(chan, &chan->monitor_timer,
318 				msecs_to_jiffies(chan->monitor_timeout));
319 	}
320 }
321 
322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
323 					       u16 seq)
324 {
325 	struct sk_buff *skb;
326 
327 	skb_queue_walk(head, skb) {
328 		if (bt_cb(skb)->l2cap.txseq == seq)
329 			return skb;
330 	}
331 
332 	return NULL;
333 }
334 
335 /* ---- L2CAP sequence number lists ---- */
336 
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338  * SREJ requests that are received and for frames that are to be
339  * retransmitted. These seq_list functions implement a singly-linked
340  * list in an array, where membership in the list can also be checked
341  * in constant time. Items can also be added to the tail of the list
342  * and removed from the head in constant time, without further memory
343  * allocs or frees.
344  */
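/* Illustrative walk-through (annotation, not part of the original source):
 * starting from an empty list, appending seq 5 and then seq 9 yields
 * head = 5, tail = 9, list[5 & mask] = 9 and list[9 & mask] =
 * L2CAP_SEQ_LIST_TAIL.  A subsequent pop returns 5, advances head to 9
 * and clears list[5 & mask], so both operations are O(1).
 */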
345 
346 static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
347 {
348 	size_t alloc_size, i;
349 
350 	/* Allocated size is a power of 2 to map sequence numbers
351 	 * (which may be up to 14 bits) into a smaller array that is
352 	 * sized for the negotiated ERTM transmit windows.
353 	 */
354 	alloc_size = roundup_pow_of_two(size);
355 
356 	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
357 	if (!seq_list->list)
358 		return -ENOMEM;
359 
360 	seq_list->mask = alloc_size - 1;
361 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
362 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
363 	for (i = 0; i < alloc_size; i++)
364 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
365 
366 	return 0;
367 }
368 
369 static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
370 {
371 	kfree(seq_list->list);
372 }
373 
374 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
375 					   u16 seq)
376 {
377 	/* Constant-time check for list membership */
378 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
379 }
380 
381 static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
382 {
383 	u16 seq = seq_list->head;
384 	u16 mask = seq_list->mask;
385 
386 	seq_list->head = seq_list->list[seq & mask];
387 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
388 
389 	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
390 		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
391 		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
392 	}
393 
394 	return seq;
395 }
396 
397 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
398 {
399 	u16 i;
400 
401 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
402 		return;
403 
404 	for (i = 0; i <= seq_list->mask; i++)
405 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
406 
407 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
408 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
409 }
410 
411 static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
412 {
413 	u16 mask = seq_list->mask;
414 
415 	/* All appends happen in constant time */
416 
417 	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
418 		return;
419 
420 	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
421 		seq_list->head = seq;
422 	else
423 		seq_list->list[seq_list->tail & mask] = seq;
424 
425 	seq_list->tail = seq;
426 	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
427 }
428 
429 static void l2cap_chan_timeout(struct work_struct *work)
430 {
431 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
432 					       chan_timer.work);
433 	struct l2cap_conn *conn = chan->conn;
434 	int reason;
435 
436 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
437 
438 	mutex_lock(&conn->chan_lock);
439 	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
440 	 * this work. No need to call l2cap_chan_hold(chan) here again.
441 	 */
442 	l2cap_chan_lock(chan);
443 
444 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
445 		reason = ECONNREFUSED;
446 	else if (chan->state == BT_CONNECT &&
447 		 chan->sec_level != BT_SECURITY_SDP)
448 		reason = ECONNREFUSED;
449 	else
450 		reason = ETIMEDOUT;
451 
452 	l2cap_chan_close(chan, reason);
453 
454 	chan->ops->close(chan);
455 
456 	l2cap_chan_unlock(chan);
457 	l2cap_chan_put(chan);
458 
459 	mutex_unlock(&conn->chan_lock);
460 }
461 
462 struct l2cap_chan *l2cap_chan_create(void)
463 {
464 	struct l2cap_chan *chan;
465 
466 	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
467 	if (!chan)
468 		return NULL;
469 
470 	skb_queue_head_init(&chan->tx_q);
471 	skb_queue_head_init(&chan->srej_q);
472 	mutex_init(&chan->lock);
473 
474 	/* Set default lock nesting level */
475 	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
476 
477 	write_lock(&chan_list_lock);
478 	list_add(&chan->global_l, &chan_list);
479 	write_unlock(&chan_list_lock);
480 
481 	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
482 	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
483 	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
484 	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
485 
486 	chan->state = BT_OPEN;
487 
488 	kref_init(&chan->kref);
489 
490 	/* This flag is cleared in l2cap_chan_ready() */
491 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
492 
493 	BT_DBG("chan %p", chan);
494 
495 	return chan;
496 }
497 EXPORT_SYMBOL_GPL(l2cap_chan_create);
498 
499 static void l2cap_chan_destroy(struct kref *kref)
500 {
501 	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
502 
503 	BT_DBG("chan %p", chan);
504 
505 	write_lock(&chan_list_lock);
506 	list_del(&chan->global_l);
507 	write_unlock(&chan_list_lock);
508 
509 	kfree(chan);
510 }
511 
512 void l2cap_chan_hold(struct l2cap_chan *c)
513 {
514 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
515 
516 	kref_get(&c->kref);
517 }
518 
519 struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
520 {
521 	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));
522 
523 	if (!kref_get_unless_zero(&c->kref))
524 		return NULL;
525 
526 	return c;
527 }
528 
529 void l2cap_chan_put(struct l2cap_chan *c)
530 {
531 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
532 
533 	kref_put(&c->kref, l2cap_chan_destroy);
534 }
535 EXPORT_SYMBOL_GPL(l2cap_chan_put);
536 
537 void l2cap_chan_set_defaults(struct l2cap_chan *chan)
538 {
539 	chan->fcs  = L2CAP_FCS_CRC16;
540 	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
541 	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
542 	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
543 	chan->remote_max_tx = chan->max_tx;
544 	chan->remote_tx_win = chan->tx_win;
545 	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
546 	chan->sec_level = BT_SECURITY_LOW;
547 	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
548 	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
549 	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
550 
551 	chan->conf_state = 0;
552 	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
553 
554 	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
555 }
556 EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
557 
558 static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
559 {
560 	chan->sdu = NULL;
561 	chan->sdu_last_frag = NULL;
562 	chan->sdu_len = 0;
563 	chan->tx_credits = tx_credits;
564 	/* Derive MPS from connection MTU to stop HCI fragmentation */
565 	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
566 	/* Give enough credits for a full packet */
567 	chan->rx_credits = (chan->imtu / chan->mps) + 1;
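	/* Worked example (hypothetical values, annotation only): with
	 * imtu = 512 and mps = 247 this gives rx_credits = 512 / 247 + 1 = 3.
	 */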
568 
569 	skb_queue_head_init(&chan->tx_q);
570 }
571 
572 static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
573 {
574 	l2cap_le_flowctl_init(chan, tx_credits);
575 
576 	/* L2CAP implementations shall support a minimum MPS of 64 octets */
577 	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
578 		chan->mps = L2CAP_ECRED_MIN_MPS;
579 		chan->rx_credits = (chan->imtu / chan->mps) + 1;
580 	}
581 }
582 
583 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
584 {
585 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
586 	       __le16_to_cpu(chan->psm), chan->dcid);
587 
588 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
589 
590 	chan->conn = conn;
591 
592 	switch (chan->chan_type) {
593 	case L2CAP_CHAN_CONN_ORIENTED:
594 		/* Alloc CID for connection-oriented socket */
595 		chan->scid = l2cap_alloc_cid(conn);
596 		if (conn->hcon->type == ACL_LINK)
597 			chan->omtu = L2CAP_DEFAULT_MTU;
598 		break;
599 
600 	case L2CAP_CHAN_CONN_LESS:
601 		/* Connectionless socket */
602 		chan->scid = L2CAP_CID_CONN_LESS;
603 		chan->dcid = L2CAP_CID_CONN_LESS;
604 		chan->omtu = L2CAP_DEFAULT_MTU;
605 		break;
606 
607 	case L2CAP_CHAN_FIXED:
608 		/* Caller will set CID and CID specific MTU values */
609 		break;
610 
611 	default:
612 		/* Raw socket can send/recv signalling messages only */
613 		chan->scid = L2CAP_CID_SIGNALING;
614 		chan->dcid = L2CAP_CID_SIGNALING;
615 		chan->omtu = L2CAP_DEFAULT_MTU;
616 	}
617 
618 	chan->local_id		= L2CAP_BESTEFFORT_ID;
619 	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
620 	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
621 	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
622 	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
623 	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;
624 
625 	l2cap_chan_hold(chan);
626 
627 	/* Only keep a reference for fixed channels if they requested it */
628 	if (chan->chan_type != L2CAP_CHAN_FIXED ||
629 	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
630 		hci_conn_hold(conn->hcon);
631 
632 	list_add(&chan->list, &conn->chan_l);
633 }
634 
635 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
636 {
637 	mutex_lock(&conn->chan_lock);
638 	__l2cap_chan_add(conn, chan);
639 	mutex_unlock(&conn->chan_lock);
640 }
641 
642 void l2cap_chan_del(struct l2cap_chan *chan, int err)
643 {
644 	struct l2cap_conn *conn = chan->conn;
645 
646 	__clear_chan_timer(chan);
647 
648 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
649 	       state_to_string(chan->state));
650 
651 	chan->ops->teardown(chan, err);
652 
653 	if (conn) {
654 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
655 		/* Delete from channel list */
656 		list_del(&chan->list);
657 
658 		l2cap_chan_put(chan);
659 
660 		chan->conn = NULL;
661 
662 		/* Reference was only held for non-fixed channels or
663 		 * fixed channels that explicitly requested it using the
664 		 * FLAG_HOLD_HCI_CONN flag.
665 		 */
666 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
667 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
668 			hci_conn_drop(conn->hcon);
669 
670 		if (mgr && mgr->bredr_chan == chan)
671 			mgr->bredr_chan = NULL;
672 	}
673 
674 	if (chan->hs_hchan) {
675 		struct hci_chan *hs_hchan = chan->hs_hchan;
676 
677 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
678 		amp_disconnect_logical_link(hs_hchan);
679 	}
680 
681 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
682 		return;
683 
684 	switch (chan->mode) {
685 	case L2CAP_MODE_BASIC:
686 		break;
687 
688 	case L2CAP_MODE_LE_FLOWCTL:
689 	case L2CAP_MODE_EXT_FLOWCTL:
690 		skb_queue_purge(&chan->tx_q);
691 		break;
692 
693 	case L2CAP_MODE_ERTM:
694 		__clear_retrans_timer(chan);
695 		__clear_monitor_timer(chan);
696 		__clear_ack_timer(chan);
697 
698 		skb_queue_purge(&chan->srej_q);
699 
700 		l2cap_seq_list_free(&chan->srej_list);
701 		l2cap_seq_list_free(&chan->retrans_list);
702 		fallthrough;
703 
704 	case L2CAP_MODE_STREAMING:
705 		skb_queue_purge(&chan->tx_q);
706 		break;
707 	}
708 
709 	return;
710 }
711 EXPORT_SYMBOL_GPL(l2cap_chan_del);
712 
713 static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
714 				 l2cap_chan_func_t func, void *data)
715 {
716 	struct l2cap_chan *chan, *l;
717 
718 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
719 		if (chan->ident == id)
720 			func(chan, data);
721 	}
722 }
723 
724 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
725 			      void *data)
726 {
727 	struct l2cap_chan *chan;
728 
729 	list_for_each_entry(chan, &conn->chan_l, list) {
730 		func(chan, data);
731 	}
732 }
733 
734 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
735 		     void *data)
736 {
737 	if (!conn)
738 		return;
739 
740 	mutex_lock(&conn->chan_lock);
741 	__l2cap_chan_list(conn, func, data);
742 	mutex_unlock(&conn->chan_lock);
743 }
744 
745 EXPORT_SYMBOL_GPL(l2cap_chan_list);
746 
747 static void l2cap_conn_update_id_addr(struct work_struct *work)
748 {
749 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
750 					       id_addr_update_work);
751 	struct hci_conn *hcon = conn->hcon;
752 	struct l2cap_chan *chan;
753 
754 	mutex_lock(&conn->chan_lock);
755 
756 	list_for_each_entry(chan, &conn->chan_l, list) {
757 		l2cap_chan_lock(chan);
758 		bacpy(&chan->dst, &hcon->dst);
759 		chan->dst_type = bdaddr_dst_type(hcon);
760 		l2cap_chan_unlock(chan);
761 	}
762 
763 	mutex_unlock(&conn->chan_lock);
764 }
765 
766 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
767 {
768 	struct l2cap_conn *conn = chan->conn;
769 	struct l2cap_le_conn_rsp rsp;
770 	u16 result;
771 
772 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
773 		result = L2CAP_CR_LE_AUTHORIZATION;
774 	else
775 		result = L2CAP_CR_LE_BAD_PSM;
776 
777 	l2cap_state_change(chan, BT_DISCONN);
778 
779 	rsp.dcid    = cpu_to_le16(chan->scid);
780 	rsp.mtu     = cpu_to_le16(chan->imtu);
781 	rsp.mps     = cpu_to_le16(chan->mps);
782 	rsp.credits = cpu_to_le16(chan->rx_credits);
783 	rsp.result  = cpu_to_le16(result);
784 
785 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
786 		       &rsp);
787 }
788 
789 static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
790 {
791 	l2cap_state_change(chan, BT_DISCONN);
792 
793 	__l2cap_ecred_conn_rsp_defer(chan);
794 }
795 
796 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
797 {
798 	struct l2cap_conn *conn = chan->conn;
799 	struct l2cap_conn_rsp rsp;
800 	u16 result;
801 
802 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
803 		result = L2CAP_CR_SEC_BLOCK;
804 	else
805 		result = L2CAP_CR_BAD_PSM;
806 
807 	l2cap_state_change(chan, BT_DISCONN);
808 
809 	rsp.scid   = cpu_to_le16(chan->dcid);
810 	rsp.dcid   = cpu_to_le16(chan->scid);
811 	rsp.result = cpu_to_le16(result);
812 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
813 
814 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
815 }
816 
817 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
818 {
819 	struct l2cap_conn *conn = chan->conn;
820 
821 	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
822 
823 	switch (chan->state) {
824 	case BT_LISTEN:
825 		chan->ops->teardown(chan, 0);
826 		break;
827 
828 	case BT_CONNECTED:
829 	case BT_CONFIG:
830 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
831 			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
832 			l2cap_send_disconn_req(chan, reason);
833 		} else
834 			l2cap_chan_del(chan, reason);
835 		break;
836 
837 	case BT_CONNECT2:
838 		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
839 			if (conn->hcon->type == ACL_LINK)
840 				l2cap_chan_connect_reject(chan);
841 			else if (conn->hcon->type == LE_LINK) {
842 				switch (chan->mode) {
843 				case L2CAP_MODE_LE_FLOWCTL:
844 					l2cap_chan_le_connect_reject(chan);
845 					break;
846 				case L2CAP_MODE_EXT_FLOWCTL:
847 					l2cap_chan_ecred_connect_reject(chan);
848 					return;
849 				}
850 			}
851 		}
852 
853 		l2cap_chan_del(chan, reason);
854 		break;
855 
856 	case BT_CONNECT:
857 	case BT_DISCONN:
858 		l2cap_chan_del(chan, reason);
859 		break;
860 
861 	default:
862 		chan->ops->teardown(chan, 0);
863 		break;
864 	}
865 }
866 EXPORT_SYMBOL(l2cap_chan_close);
867 
868 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
869 {
870 	switch (chan->chan_type) {
871 	case L2CAP_CHAN_RAW:
872 		switch (chan->sec_level) {
873 		case BT_SECURITY_HIGH:
874 		case BT_SECURITY_FIPS:
875 			return HCI_AT_DEDICATED_BONDING_MITM;
876 		case BT_SECURITY_MEDIUM:
877 			return HCI_AT_DEDICATED_BONDING;
878 		default:
879 			return HCI_AT_NO_BONDING;
880 		}
881 		break;
882 	case L2CAP_CHAN_CONN_LESS:
883 		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
884 			if (chan->sec_level == BT_SECURITY_LOW)
885 				chan->sec_level = BT_SECURITY_SDP;
886 		}
887 		if (chan->sec_level == BT_SECURITY_HIGH ||
888 		    chan->sec_level == BT_SECURITY_FIPS)
889 			return HCI_AT_NO_BONDING_MITM;
890 		else
891 			return HCI_AT_NO_BONDING;
892 		break;
893 	case L2CAP_CHAN_CONN_ORIENTED:
894 		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
895 			if (chan->sec_level == BT_SECURITY_LOW)
896 				chan->sec_level = BT_SECURITY_SDP;
897 
898 			if (chan->sec_level == BT_SECURITY_HIGH ||
899 			    chan->sec_level == BT_SECURITY_FIPS)
900 				return HCI_AT_NO_BONDING_MITM;
901 			else
902 				return HCI_AT_NO_BONDING;
903 		}
904 		fallthrough;
905 
906 	default:
907 		switch (chan->sec_level) {
908 		case BT_SECURITY_HIGH:
909 		case BT_SECURITY_FIPS:
910 			return HCI_AT_GENERAL_BONDING_MITM;
911 		case BT_SECURITY_MEDIUM:
912 			return HCI_AT_GENERAL_BONDING;
913 		default:
914 			return HCI_AT_NO_BONDING;
915 		}
916 		break;
917 	}
918 }
919 
920 /* Service level security */
921 int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
922 {
923 	struct l2cap_conn *conn = chan->conn;
924 	__u8 auth_type;
925 
926 	if (conn->hcon->type == LE_LINK)
927 		return smp_conn_security(conn->hcon, chan->sec_level);
928 
929 	auth_type = l2cap_get_auth_type(chan);
930 
931 	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
932 				 initiator);
933 }
934 
935 static u8 l2cap_get_ident(struct l2cap_conn *conn)
936 {
937 	u8 id;
938 
939 	/* Get next available identifier.
940 	 *    1 - 128 are used by kernel.
941 	 *  129 - 199 are reserved.
942 	 *  200 - 254 are used by utilities like l2ping, etc.
943 	 */
944 
945 	mutex_lock(&conn->ident_lock);
946 
947 	if (++conn->tx_ident > 128)
948 		conn->tx_ident = 1;
949 
950 	id = conn->tx_ident;
951 
952 	mutex_unlock(&conn->ident_lock);
953 
954 	return id;
955 }
956 
957 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
958 			   void *data)
959 {
960 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
961 	u8 flags;
962 
963 	BT_DBG("code 0x%2.2x", code);
964 
965 	if (!skb)
966 		return;
967 
968 	/* Use NO_FLUSH if supported or we have an LE link (which does
969 	 * not support auto-flushing packets) */
970 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
971 	    conn->hcon->type == LE_LINK)
972 		flags = ACL_START_NO_FLUSH;
973 	else
974 		flags = ACL_START;
975 
976 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
977 	skb->priority = HCI_PRIO_MAX;
978 
979 	hci_send_acl(conn->hchan, skb, flags);
980 }
981 
982 static bool __chan_is_moving(struct l2cap_chan *chan)
983 {
984 	return chan->move_state != L2CAP_MOVE_STABLE &&
985 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
986 }
987 
988 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
989 {
990 	struct hci_conn *hcon = chan->conn->hcon;
991 	u16 flags;
992 
993 	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
994 	       skb->priority);
995 
996 	if (chan->hs_hcon && !__chan_is_moving(chan)) {
997 		if (chan->hs_hchan)
998 			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
999 		else
1000 			kfree_skb(skb);
1001 
1002 		return;
1003 	}
1004 
1005 	/* Use NO_FLUSH for LE links (where this is the only option) or
1006 	 * if the BR/EDR link supports it and flushing has not been
1007 	 * explicitly requested (through FLAG_FLUSHABLE).
1008 	 */
1009 	if (hcon->type == LE_LINK ||
1010 	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1011 	     lmp_no_flush_capable(hcon->hdev)))
1012 		flags = ACL_START_NO_FLUSH;
1013 	else
1014 		flags = ACL_START;
1015 
1016 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1017 	hci_send_acl(chan->conn->hchan, skb, flags);
1018 }
1019 
1020 static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
1021 {
1022 	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
1023 	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
1024 
1025 	if (enh & L2CAP_CTRL_FRAME_TYPE) {
1026 		/* S-Frame */
1027 		control->sframe = 1;
1028 		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
1029 		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
1030 
1031 		control->sar = 0;
1032 		control->txseq = 0;
1033 	} else {
1034 		/* I-Frame */
1035 		control->sframe = 0;
1036 		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
1037 		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
1038 
1039 		control->poll = 0;
1040 		control->super = 0;
1041 	}
1042 }
1043 
1044 static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
1045 {
1046 	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1047 	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
1048 
1049 	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
1050 		/* S-Frame */
1051 		control->sframe = 1;
1052 		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
1053 		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
1054 
1055 		control->sar = 0;
1056 		control->txseq = 0;
1057 	} else {
1058 		/* I-Frame */
1059 		control->sframe = 0;
1060 		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
1061 		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1062 
1063 		control->poll = 0;
1064 		control->super = 0;
1065 	}
1066 }
1067 
1068 static inline void __unpack_control(struct l2cap_chan *chan,
1069 				    struct sk_buff *skb)
1070 {
1071 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1072 		__unpack_extended_control(get_unaligned_le32(skb->data),
1073 					  &bt_cb(skb)->l2cap);
1074 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1075 	} else {
1076 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1077 					  &bt_cb(skb)->l2cap);
1078 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1079 	}
1080 }
1081 
1082 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1083 {
1084 	u32 packed;
1085 
1086 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1087 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1088 
1089 	if (control->sframe) {
1090 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1091 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1092 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1093 	} else {
1094 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1095 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1096 	}
1097 
1098 	return packed;
1099 }
1100 
1101 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1102 {
1103 	u16 packed;
1104 
1105 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1106 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1107 
1108 	if (control->sframe) {
1109 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1110 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1111 		packed |= L2CAP_CTRL_FRAME_TYPE;
1112 	} else {
1113 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1114 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1115 	}
1116 
1117 	return packed;
1118 }
1119 
1120 static inline void __pack_control(struct l2cap_chan *chan,
1121 				  struct l2cap_ctrl *control,
1122 				  struct sk_buff *skb)
1123 {
1124 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1125 		put_unaligned_le32(__pack_extended_control(control),
1126 				   skb->data + L2CAP_HDR_SIZE);
1127 	} else {
1128 		put_unaligned_le16(__pack_enhanced_control(control),
1129 				   skb->data + L2CAP_HDR_SIZE);
1130 	}
1131 }
1132 
1133 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1134 {
1135 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1136 		return L2CAP_EXT_HDR_SIZE;
1137 	else
1138 		return L2CAP_ENH_HDR_SIZE;
1139 }
1140 
1141 static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1142 					       u32 control)
1143 {
1144 	struct sk_buff *skb;
1145 	struct l2cap_hdr *lh;
1146 	int hlen = __ertm_hdr_size(chan);
1147 
1148 	if (chan->fcs == L2CAP_FCS_CRC16)
1149 		hlen += L2CAP_FCS_SIZE;
1150 
1151 	skb = bt_skb_alloc(hlen, GFP_KERNEL);
1152 
1153 	if (!skb)
1154 		return ERR_PTR(-ENOMEM);
1155 
1156 	lh = skb_put(skb, L2CAP_HDR_SIZE);
1157 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
1158 	lh->cid = cpu_to_le16(chan->dcid);
1159 
1160 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1161 		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1162 	else
1163 		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
1164 
1165 	if (chan->fcs == L2CAP_FCS_CRC16) {
1166 		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
1167 		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
1168 	}
1169 
1170 	skb->priority = HCI_PRIO_MAX;
1171 	return skb;
1172 }
1173 
1174 static void l2cap_send_sframe(struct l2cap_chan *chan,
1175 			      struct l2cap_ctrl *control)
1176 {
1177 	struct sk_buff *skb;
1178 	u32 control_field;
1179 
1180 	BT_DBG("chan %p, control %p", chan, control);
1181 
1182 	if (!control->sframe)
1183 		return;
1184 
1185 	if (__chan_is_moving(chan))
1186 		return;
1187 
1188 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1189 	    !control->poll)
1190 		control->final = 1;
1191 
1192 	if (control->super == L2CAP_SUPER_RR)
1193 		clear_bit(CONN_RNR_SENT, &chan->conn_state);
1194 	else if (control->super == L2CAP_SUPER_RNR)
1195 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1196 
1197 	if (control->super != L2CAP_SUPER_SREJ) {
1198 		chan->last_acked_seq = control->reqseq;
1199 		__clear_ack_timer(chan);
1200 	}
1201 
1202 	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1203 	       control->final, control->poll, control->super);
1204 
1205 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1206 		control_field = __pack_extended_control(control);
1207 	else
1208 		control_field = __pack_enhanced_control(control);
1209 
1210 	skb = l2cap_create_sframe_pdu(chan, control_field);
1211 	if (!IS_ERR(skb))
1212 		l2cap_do_send(chan, skb);
1213 }
1214 
1215 static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
1216 {
1217 	struct l2cap_ctrl control;
1218 
1219 	BT_DBG("chan %p, poll %d", chan, poll);
1220 
1221 	memset(&control, 0, sizeof(control));
1222 	control.sframe = 1;
1223 	control.poll = poll;
1224 
1225 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1226 		control.super = L2CAP_SUPER_RNR;
1227 	else
1228 		control.super = L2CAP_SUPER_RR;
1229 
1230 	control.reqseq = chan->buffer_seq;
1231 	l2cap_send_sframe(chan, &control);
1232 }
1233 
1234 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1235 {
1236 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1237 		return true;
1238 
1239 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1240 }
1241 
1242 static bool __amp_capable(struct l2cap_chan *chan)
1243 {
1244 	struct l2cap_conn *conn = chan->conn;
1245 	struct hci_dev *hdev;
1246 	bool amp_available = false;
1247 
1248 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
1249 		return false;
1250 
1251 	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
1252 		return false;
1253 
1254 	read_lock(&hci_dev_list_lock);
1255 	list_for_each_entry(hdev, &hci_dev_list, list) {
1256 		if (hdev->amp_type != AMP_TYPE_BREDR &&
1257 		    test_bit(HCI_UP, &hdev->flags)) {
1258 			amp_available = true;
1259 			break;
1260 		}
1261 	}
1262 	read_unlock(&hci_dev_list_lock);
1263 
1264 	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1265 		return amp_available;
1266 
1267 	return false;
1268 }
1269 
1270 static bool l2cap_check_efs(struct l2cap_chan *chan)
1271 {
1272 	/* Check EFS parameters */
1273 	return true;
1274 }
1275 
1276 void l2cap_send_conn_req(struct l2cap_chan *chan)
1277 {
1278 	struct l2cap_conn *conn = chan->conn;
1279 	struct l2cap_conn_req req;
1280 
1281 	req.scid = cpu_to_le16(chan->scid);
1282 	req.psm  = chan->psm;
1283 
1284 	chan->ident = l2cap_get_ident(conn);
1285 
1286 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1287 
1288 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1289 }
1290 
1291 static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1292 {
1293 	struct l2cap_create_chan_req req;
1294 	req.scid = cpu_to_le16(chan->scid);
1295 	req.psm  = chan->psm;
1296 	req.amp_id = amp_id;
1297 
1298 	chan->ident = l2cap_get_ident(chan->conn);
1299 
1300 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1301 		       sizeof(req), &req);
1302 }
1303 
1304 static void l2cap_move_setup(struct l2cap_chan *chan)
1305 {
1306 	struct sk_buff *skb;
1307 
1308 	BT_DBG("chan %p", chan);
1309 
1310 	if (chan->mode != L2CAP_MODE_ERTM)
1311 		return;
1312 
1313 	__clear_retrans_timer(chan);
1314 	__clear_monitor_timer(chan);
1315 	__clear_ack_timer(chan);
1316 
1317 	chan->retry_count = 0;
1318 	skb_queue_walk(&chan->tx_q, skb) {
1319 		if (bt_cb(skb)->l2cap.retries)
1320 			bt_cb(skb)->l2cap.retries = 1;
1321 		else
1322 			break;
1323 	}
1324 
1325 	chan->expected_tx_seq = chan->buffer_seq;
1326 
1327 	clear_bit(CONN_REJ_ACT, &chan->conn_state);
1328 	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1329 	l2cap_seq_list_clear(&chan->retrans_list);
1330 	l2cap_seq_list_clear(&chan->srej_list);
1331 	skb_queue_purge(&chan->srej_q);
1332 
1333 	chan->tx_state = L2CAP_TX_STATE_XMIT;
1334 	chan->rx_state = L2CAP_RX_STATE_MOVE;
1335 
1336 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1337 }
1338 
1339 static void l2cap_move_done(struct l2cap_chan *chan)
1340 {
1341 	u8 move_role = chan->move_role;
1342 	BT_DBG("chan %p", chan);
1343 
1344 	chan->move_state = L2CAP_MOVE_STABLE;
1345 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1346 
1347 	if (chan->mode != L2CAP_MODE_ERTM)
1348 		return;
1349 
1350 	switch (move_role) {
1351 	case L2CAP_MOVE_ROLE_INITIATOR:
1352 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1353 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1354 		break;
1355 	case L2CAP_MOVE_ROLE_RESPONDER:
1356 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1357 		break;
1358 	}
1359 }
1360 
1361 static void l2cap_chan_ready(struct l2cap_chan *chan)
1362 {
1363 	/* The channel may have already been flagged as connected in
1364 	 * case of receiving data before the L2CAP info req/rsp
1365 	 * procedure is complete.
1366 	 */
1367 	if (chan->state == BT_CONNECTED)
1368 		return;
1369 
1370 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1371 	chan->conf_state = 0;
1372 	__clear_chan_timer(chan);
1373 
1374 	switch (chan->mode) {
1375 	case L2CAP_MODE_LE_FLOWCTL:
1376 	case L2CAP_MODE_EXT_FLOWCTL:
1377 		if (!chan->tx_credits)
1378 			chan->ops->suspend(chan);
1379 		break;
1380 	}
1381 
1382 	chan->state = BT_CONNECTED;
1383 
1384 	chan->ops->ready(chan);
1385 }
1386 
1387 static void l2cap_le_connect(struct l2cap_chan *chan)
1388 {
1389 	struct l2cap_conn *conn = chan->conn;
1390 	struct l2cap_le_conn_req req;
1391 
1392 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1393 		return;
1394 
1395 	if (!chan->imtu)
1396 		chan->imtu = chan->conn->mtu;
1397 
1398 	l2cap_le_flowctl_init(chan, 0);
1399 
1400 	req.psm     = chan->psm;
1401 	req.scid    = cpu_to_le16(chan->scid);
1402 	req.mtu     = cpu_to_le16(chan->imtu);
1403 	req.mps     = cpu_to_le16(chan->mps);
1404 	req.credits = cpu_to_le16(chan->rx_credits);
1405 
1406 	chan->ident = l2cap_get_ident(conn);
1407 
1408 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1409 		       sizeof(req), &req);
1410 }
1411 
1412 struct l2cap_ecred_conn_data {
1413 	struct {
1414 		struct l2cap_ecred_conn_req req;
1415 		__le16 scid[5];
1416 	} __packed pdu;
1417 	struct l2cap_chan *chan;
1418 	struct pid *pid;
1419 	int count;
1420 };
1421 
1422 static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
1423 {
1424 	struct l2cap_ecred_conn_data *conn = data;
1425 	struct pid *pid;
1426 
1427 	if (chan == conn->chan)
1428 		return;
1429 
1430 	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
1431 		return;
1432 
1433 	pid = chan->ops->get_peer_pid(chan);
1434 
1435 	/* Only add deferred channels with the same PID/PSM */
1436 	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
1437 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
1438 		return;
1439 
1440 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1441 		return;
1442 
1443 	l2cap_ecred_init(chan, 0);
1444 
1445 	/* Set the same ident so we can match on the rsp */
1446 	chan->ident = conn->chan->ident;
1447 
1448 	/* Include all channels deferred */
1449 	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);
1450 
1451 	conn->count++;
1452 }
1453 
1454 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1455 {
1456 	struct l2cap_conn *conn = chan->conn;
1457 	struct l2cap_ecred_conn_data data;
1458 
1459 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1460 		return;
1461 
1462 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1463 		return;
1464 
1465 	l2cap_ecred_init(chan, 0);
1466 
1467 	memset(&data, 0, sizeof(data));
1468 	data.pdu.req.psm     = chan->psm;
1469 	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
1470 	data.pdu.req.mps     = cpu_to_le16(chan->mps);
1471 	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1472 	data.pdu.scid[0]     = cpu_to_le16(chan->scid);
1473 
1474 	chan->ident = l2cap_get_ident(conn);
1475 	data.pid = chan->ops->get_peer_pid(chan);
1476 
1477 	data.count = 1;
1478 	data.chan = chan;
1479 	data.pid = chan->ops->get_peer_pid(chan);
1480 
1481 	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1482 
1483 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1484 		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
1485 		       &data.pdu);
1486 }
1487 
1488 static void l2cap_le_start(struct l2cap_chan *chan)
1489 {
1490 	struct l2cap_conn *conn = chan->conn;
1491 
1492 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1493 		return;
1494 
1495 	if (!chan->psm) {
1496 		l2cap_chan_ready(chan);
1497 		return;
1498 	}
1499 
1500 	if (chan->state == BT_CONNECT) {
1501 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1502 			l2cap_ecred_connect(chan);
1503 		else
1504 			l2cap_le_connect(chan);
1505 	}
1506 }
1507 
1508 static void l2cap_start_connection(struct l2cap_chan *chan)
1509 {
1510 	if (__amp_capable(chan)) {
1511 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1512 		a2mp_discover_amp(chan);
1513 	} else if (chan->conn->hcon->type == LE_LINK) {
1514 		l2cap_le_start(chan);
1515 	} else {
1516 		l2cap_send_conn_req(chan);
1517 	}
1518 }
1519 
1520 static void l2cap_request_info(struct l2cap_conn *conn)
1521 {
1522 	struct l2cap_info_req req;
1523 
1524 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1525 		return;
1526 
1527 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1528 
1529 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1530 	conn->info_ident = l2cap_get_ident(conn);
1531 
1532 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1533 
1534 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1535 		       sizeof(req), &req);
1536 }
1537 
1538 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1539 {
1540 	/* The minimum encryption key size needs to be enforced by the
1541 	 * host stack before establishing any L2CAP connections. The
1542 	 * specification in theory allows a minimum of 1, but to align
1543 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1544 	 *
1545 	 * This check might also be called for unencrypted connections
1546 	 * that have no key size requirements. Ensure that the link is
1547 	 * actually encrypted before enforcing a key size.
1548 	 */
1549 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1550 		hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
1551 }
1552 
1553 static void l2cap_do_start(struct l2cap_chan *chan)
1554 {
1555 	struct l2cap_conn *conn = chan->conn;
1556 
1557 	if (conn->hcon->type == LE_LINK) {
1558 		l2cap_le_start(chan);
1559 		return;
1560 	}
1561 
1562 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1563 		l2cap_request_info(conn);
1564 		return;
1565 	}
1566 
1567 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1568 		return;
1569 
1570 	if (!l2cap_chan_check_security(chan, true) ||
1571 	    !__l2cap_no_conn_pending(chan))
1572 		return;
1573 
1574 	if (l2cap_check_enc_key_size(conn->hcon))
1575 		l2cap_start_connection(chan);
1576 	else
1577 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1578 }
1579 
1580 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1581 {
1582 	u32 local_feat_mask = l2cap_feat_mask;
1583 	if (!disable_ertm)
1584 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1585 
1586 	switch (mode) {
1587 	case L2CAP_MODE_ERTM:
1588 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1589 	case L2CAP_MODE_STREAMING:
1590 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1591 	default:
1592 		return 0x00;
1593 	}
1594 }
1595 
1596 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
1597 {
1598 	struct l2cap_conn *conn = chan->conn;
1599 	struct l2cap_disconn_req req;
1600 
1601 	if (!conn)
1602 		return;
1603 
1604 	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
1605 		__clear_retrans_timer(chan);
1606 		__clear_monitor_timer(chan);
1607 		__clear_ack_timer(chan);
1608 	}
1609 
1610 	if (chan->scid == L2CAP_CID_A2MP) {
1611 		l2cap_state_change(chan, BT_DISCONN);
1612 		return;
1613 	}
1614 
1615 	req.dcid = cpu_to_le16(chan->dcid);
1616 	req.scid = cpu_to_le16(chan->scid);
1617 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1618 		       sizeof(req), &req);
1619 
1620 	l2cap_state_change_and_error(chan, BT_DISCONN, err);
1621 }
1622 
1623 /* ---- L2CAP connections ---- */
1624 static void l2cap_conn_start(struct l2cap_conn *conn)
1625 {
1626 	struct l2cap_chan *chan, *tmp;
1627 
1628 	BT_DBG("conn %p", conn);
1629 
1630 	mutex_lock(&conn->chan_lock);
1631 
1632 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
1633 		l2cap_chan_lock(chan);
1634 
1635 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1636 			l2cap_chan_ready(chan);
1637 			l2cap_chan_unlock(chan);
1638 			continue;
1639 		}
1640 
1641 		if (chan->state == BT_CONNECT) {
1642 			if (!l2cap_chan_check_security(chan, true) ||
1643 			    !__l2cap_no_conn_pending(chan)) {
1644 				l2cap_chan_unlock(chan);
1645 				continue;
1646 			}
1647 
1648 			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
1649 			    && test_bit(CONF_STATE2_DEVICE,
1650 					&chan->conf_state)) {
1651 				l2cap_chan_close(chan, ECONNRESET);
1652 				l2cap_chan_unlock(chan);
1653 				continue;
1654 			}
1655 
1656 			if (l2cap_check_enc_key_size(conn->hcon))
1657 				l2cap_start_connection(chan);
1658 			else
1659 				l2cap_chan_close(chan, ECONNREFUSED);
1660 
1661 		} else if (chan->state == BT_CONNECT2) {
1662 			struct l2cap_conn_rsp rsp;
1663 			char buf[128];
1664 			rsp.scid = cpu_to_le16(chan->dcid);
1665 			rsp.dcid = cpu_to_le16(chan->scid);
1666 
1667 			if (l2cap_chan_check_security(chan, false)) {
1668 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
1669 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1670 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
1671 					chan->ops->defer(chan);
1672 
1673 				} else {
1674 					l2cap_state_change(chan, BT_CONFIG);
1675 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1676 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1677 				}
1678 			} else {
1679 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1680 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
1681 			}
1682 
1683 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
1684 				       sizeof(rsp), &rsp);
1685 
1686 			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
1687 			    rsp.result != L2CAP_CR_SUCCESS) {
1688 				l2cap_chan_unlock(chan);
1689 				continue;
1690 			}
1691 
1692 			set_bit(CONF_REQ_SENT, &chan->conf_state);
1693 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1694 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
1695 			chan->num_conf_req++;
1696 		}
1697 
1698 		l2cap_chan_unlock(chan);
1699 	}
1700 
1701 	mutex_unlock(&conn->chan_lock);
1702 }
1703 
1704 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1705 {
1706 	struct hci_conn *hcon = conn->hcon;
1707 	struct hci_dev *hdev = hcon->hdev;
1708 
1709 	BT_DBG("%s conn %p", hdev->name, conn);
1710 
1711 	/* For outgoing pairing which doesn't necessarily have an
1712 	 * associated socket (e.g. mgmt_pair_device).
1713 	 */
1714 	if (hcon->out)
1715 		smp_conn_security(hcon, hcon->pending_sec_level);
1716 
1717 	/* For LE peripheral connections, make sure the connection interval
1718 	 * is in the range of the minimum and maximum interval that has
1719 	 * been configured for this connection. If not, then trigger
1720 	 * the connection update procedure.
1721 	 */
1722 	if (hcon->role == HCI_ROLE_SLAVE &&
1723 	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1724 	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1725 		struct l2cap_conn_param_update_req req;
1726 
1727 		req.min = cpu_to_le16(hcon->le_conn_min_interval);
1728 		req.max = cpu_to_le16(hcon->le_conn_max_interval);
1729 		req.latency = cpu_to_le16(hcon->le_conn_latency);
1730 		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1731 
1732 		l2cap_send_cmd(conn, l2cap_get_ident(conn),
1733 			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1734 	}
1735 }
1736 
1737 static void l2cap_conn_ready(struct l2cap_conn *conn)
1738 {
1739 	struct l2cap_chan *chan;
1740 	struct hci_conn *hcon = conn->hcon;
1741 
1742 	BT_DBG("conn %p", conn);
1743 
1744 	if (hcon->type == ACL_LINK)
1745 		l2cap_request_info(conn);
1746 
1747 	mutex_lock(&conn->chan_lock);
1748 
1749 	list_for_each_entry(chan, &conn->chan_l, list) {
1750 
1751 		l2cap_chan_lock(chan);
1752 
1753 		if (chan->scid == L2CAP_CID_A2MP) {
1754 			l2cap_chan_unlock(chan);
1755 			continue;
1756 		}
1757 
1758 		if (hcon->type == LE_LINK) {
1759 			l2cap_le_start(chan);
1760 		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1761 			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1762 				l2cap_chan_ready(chan);
1763 		} else if (chan->state == BT_CONNECT) {
1764 			l2cap_do_start(chan);
1765 		}
1766 
1767 		l2cap_chan_unlock(chan);
1768 	}
1769 
1770 	mutex_unlock(&conn->chan_lock);
1771 
1772 	if (hcon->type == LE_LINK)
1773 		l2cap_le_conn_ready(conn);
1774 
1775 	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
1776 }
1777 
1778 /* Notify sockets that we cannot guarantee reliability anymore */
1779 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1780 {
1781 	struct l2cap_chan *chan;
1782 
1783 	BT_DBG("conn %p", conn);
1784 
1785 	mutex_lock(&conn->chan_lock);
1786 
1787 	list_for_each_entry(chan, &conn->chan_l, list) {
1788 		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
1789 			l2cap_chan_set_err(chan, err);
1790 	}
1791 
1792 	mutex_unlock(&conn->chan_lock);
1793 }
1794 
1795 static void l2cap_info_timeout(struct work_struct *work)
1796 {
1797 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1798 					       info_timer.work);
1799 
1800 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1801 	conn->info_ident = 0;
1802 
1803 	l2cap_conn_start(conn);
1804 }
1805 
1806 /*
1807  * l2cap_user
1808  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1809  * callback is called during registration. The ->remove callback is called
1810  * during unregistration.
1811  * An l2cap_user object is unregistered either explicitly or when the
1812  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1813  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1814  * External modules must own a reference to the l2cap_conn object if they intend
1815  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1816  * any time if they don't.
1817  */
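
/* Illustrative sketch (not part of the original sources): a hypothetical
 * external module tying its lifetime to an l2cap_conn could look roughly
 * like this, assuming it already holds a reference obtained via
 * l2cap_conn_get() and initialises user->list before registering:
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// conn->hcon and conn->hchan are valid here
 *		return 0;	// a non-zero return aborts the registration
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// last point at which conn->hcon / conn->hchan may be used
 *	}
 *
 *	struct l2cap_user my_user = {
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	INIT_LIST_HEAD(&my_user.list);
 *	err = l2cap_register_user(conn, &my_user);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 */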
1818 
1819 int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1820 {
1821 	struct hci_dev *hdev = conn->hcon->hdev;
1822 	int ret;
1823 
1824 	/* We need to check whether l2cap_conn is registered. If it is not, we
1825 	 * must not register the l2cap_user. l2cap_conn_del() unregisters
1826 	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1827 	 * relies on the parent hci_conn object to be locked. This itself relies
1828 	 * on the hci_dev object to be locked. So we must lock the hci device
1829 	 * here, too. */
1830 
1831 	hci_dev_lock(hdev);
1832 
1833 	if (!list_empty(&user->list)) {
1834 		ret = -EINVAL;
1835 		goto out_unlock;
1836 	}
1837 
1838 	/* conn->hchan is NULL after l2cap_conn_del() was called */
1839 	if (!conn->hchan) {
1840 		ret = -ENODEV;
1841 		goto out_unlock;
1842 	}
1843 
1844 	ret = user->probe(conn, user);
1845 	if (ret)
1846 		goto out_unlock;
1847 
1848 	list_add(&user->list, &conn->users);
1849 	ret = 0;
1850 
1851 out_unlock:
1852 	hci_dev_unlock(hdev);
1853 	return ret;
1854 }
1855 EXPORT_SYMBOL(l2cap_register_user);
1856 
1857 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1858 {
1859 	struct hci_dev *hdev = conn->hcon->hdev;
1860 
1861 	hci_dev_lock(hdev);
1862 
1863 	if (list_empty(&user->list))
1864 		goto out_unlock;
1865 
1866 	list_del_init(&user->list);
1867 	user->remove(conn, user);
1868 
1869 out_unlock:
1870 	hci_dev_unlock(hdev);
1871 }
1872 EXPORT_SYMBOL(l2cap_unregister_user);
1873 
1874 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1875 {
1876 	struct l2cap_user *user;
1877 
1878 	while (!list_empty(&conn->users)) {
1879 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1880 		list_del_init(&user->list);
1881 		user->remove(conn, user);
1882 	}
1883 }
1884 
1885 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1886 {
1887 	struct l2cap_conn *conn = hcon->l2cap_data;
1888 	struct l2cap_chan *chan, *l;
1889 
1890 	if (!conn)
1891 		return;
1892 
1893 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1894 
1895 	kfree_skb(conn->rx_skb);
1896 
1897 	skb_queue_purge(&conn->pending_rx);
1898 
1899 	/* We cannot call flush_work(&conn->pending_rx_work) here since we
1900 	 * might block if we are running on a worker from the same workqueue
1901 	 * pending_rx_work is waiting on.
1902 	 */
1903 	if (work_pending(&conn->pending_rx_work))
1904 		cancel_work_sync(&conn->pending_rx_work);
1905 
1906 	if (work_pending(&conn->id_addr_update_work))
1907 		cancel_work_sync(&conn->id_addr_update_work);
1908 
1909 	l2cap_unregister_all_users(conn);
1910 
1911 	/* Force the connection to be immediately dropped */
1912 	hcon->disc_timeout = 0;
1913 
1914 	mutex_lock(&conn->chan_lock);
1915 
1916 	/* Kill channels */
1917 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1918 		l2cap_chan_hold(chan);
1919 		l2cap_chan_lock(chan);
1920 
1921 		l2cap_chan_del(chan, err);
1922 
1923 		chan->ops->close(chan);
1924 
1925 		l2cap_chan_unlock(chan);
1926 		l2cap_chan_put(chan);
1927 	}
1928 
1929 	mutex_unlock(&conn->chan_lock);
1930 
1931 	hci_chan_del(conn->hchan);
1932 
1933 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1934 		cancel_delayed_work_sync(&conn->info_timer);
1935 
1936 	hcon->l2cap_data = NULL;
1937 	conn->hchan = NULL;
1938 	l2cap_conn_put(conn);
1939 }
1940 
1941 static void l2cap_conn_free(struct kref *ref)
1942 {
1943 	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1944 
1945 	hci_conn_put(conn->hcon);
1946 	kfree(conn);
1947 }
1948 
1949 struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
1950 {
1951 	kref_get(&conn->ref);
1952 	return conn;
1953 }
1954 EXPORT_SYMBOL(l2cap_conn_get);
1955 
1956 void l2cap_conn_put(struct l2cap_conn *conn)
1957 {
1958 	kref_put(&conn->ref, l2cap_conn_free);
1959 }
1960 EXPORT_SYMBOL(l2cap_conn_put);
1961 
1962 /* ---- Socket interface ---- */
1963 
1964 /* Find socket with psm and source / destination bdaddr.
1965  * Returns closest match.
1966  */
1967 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1968 						   bdaddr_t *src,
1969 						   bdaddr_t *dst,
1970 						   u8 link_type)
1971 {
1972 	struct l2cap_chan *c, *tmp, *c1 = NULL;
1973 
1974 	read_lock(&chan_list_lock);
1975 
1976 	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
1977 		if (state && c->state != state)
1978 			continue;
1979 
1980 		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1981 			continue;
1982 
1983 		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1984 			continue;
1985 
1986 		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
1987 			int src_match, dst_match;
1988 			int src_any, dst_any;
1989 
1990 			/* Exact match. */
1991 			src_match = !bacmp(&c->src, src);
1992 			dst_match = !bacmp(&c->dst, dst);
1993 			if (src_match && dst_match) {
1994 				if (!l2cap_chan_hold_unless_zero(c))
1995 					continue;
1996 
1997 				read_unlock(&chan_list_lock);
1998 				return c;
1999 			}
2000 
2001 			/* Closest match */
2002 			src_any = !bacmp(&c->src, BDADDR_ANY);
2003 			dst_any = !bacmp(&c->dst, BDADDR_ANY);
2004 			if ((src_match && dst_any) || (src_any && dst_match) ||
2005 			    (src_any && dst_any))
2006 				c1 = c;
2007 		}
2008 	}
2009 
2010 	if (c1)
2011 		c1 = l2cap_chan_hold_unless_zero(c1);
2012 
2013 	read_unlock(&chan_list_lock);
2014 
2015 	return c1;
2016 }
2017 
2018 static void l2cap_monitor_timeout(struct work_struct *work)
2019 {
2020 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2021 					       monitor_timer.work);
2022 
2023 	BT_DBG("chan %p", chan);
2024 
2025 	l2cap_chan_lock(chan);
2026 
2027 	if (!chan->conn) {
2028 		l2cap_chan_unlock(chan);
2029 		l2cap_chan_put(chan);
2030 		return;
2031 	}
2032 
2033 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2034 
2035 	l2cap_chan_unlock(chan);
2036 	l2cap_chan_put(chan);
2037 }
2038 
2039 static void l2cap_retrans_timeout(struct work_struct *work)
2040 {
2041 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2042 					       retrans_timer.work);
2043 
2044 	BT_DBG("chan %p", chan);
2045 
2046 	l2cap_chan_lock(chan);
2047 
2048 	if (!chan->conn) {
2049 		l2cap_chan_unlock(chan);
2050 		l2cap_chan_put(chan);
2051 		return;
2052 	}
2053 
2054 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2055 	l2cap_chan_unlock(chan);
2056 	l2cap_chan_put(chan);
2057 }
2058 
2059 static void l2cap_streaming_send(struct l2cap_chan *chan,
2060 				 struct sk_buff_head *skbs)
2061 {
2062 	struct sk_buff *skb;
2063 	struct l2cap_ctrl *control;
2064 
2065 	BT_DBG("chan %p, skbs %p", chan, skbs);
2066 
2067 	if (__chan_is_moving(chan))
2068 		return;
2069 
2070 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
2071 
2072 	while (!skb_queue_empty(&chan->tx_q)) {
2073 
2074 		skb = skb_dequeue(&chan->tx_q);
2075 
2076 		bt_cb(skb)->l2cap.retries = 1;
2077 		control = &bt_cb(skb)->l2cap;
2078 
2079 		control->reqseq = 0;
2080 		control->txseq = chan->next_tx_seq;
2081 
2082 		__pack_control(chan, control, skb);
2083 
2084 		if (chan->fcs == L2CAP_FCS_CRC16) {
2085 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2086 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2087 		}
2088 
2089 		l2cap_do_send(chan, skb);
2090 
2091 		BT_DBG("Sent txseq %u", control->txseq);
2092 
2093 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2094 		chan->frames_sent++;
2095 	}
2096 }
2097 
2098 static int l2cap_ertm_send(struct l2cap_chan *chan)
2099 {
2100 	struct sk_buff *skb, *tx_skb;
2101 	struct l2cap_ctrl *control;
2102 	int sent = 0;
2103 
2104 	BT_DBG("chan %p", chan);
2105 
2106 	if (chan->state != BT_CONNECTED)
2107 		return -ENOTCONN;
2108 
2109 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2110 		return 0;
2111 
2112 	if (__chan_is_moving(chan))
2113 		return 0;
2114 
2115 	while (chan->tx_send_head &&
2116 	       chan->unacked_frames < chan->remote_tx_win &&
2117 	       chan->tx_state == L2CAP_TX_STATE_XMIT) {
2118 
2119 		skb = chan->tx_send_head;
2120 
2121 		bt_cb(skb)->l2cap.retries = 1;
2122 		control = &bt_cb(skb)->l2cap;
2123 
2124 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2125 			control->final = 1;
2126 
2127 		control->reqseq = chan->buffer_seq;
2128 		chan->last_acked_seq = chan->buffer_seq;
2129 		control->txseq = chan->next_tx_seq;
2130 
2131 		__pack_control(chan, control, skb);
2132 
2133 		if (chan->fcs == L2CAP_FCS_CRC16) {
2134 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2135 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2136 		}
2137 
2138 		/* Clone after data has been modified. Data is assumed to be
2139 		 * read-only (for locking purposes) on cloned sk_buffs.
2140 		 */
2141 		tx_skb = skb_clone(skb, GFP_KERNEL);
2142 
2143 		if (!tx_skb)
2144 			break;
2145 
2146 		__set_retrans_timer(chan);
2147 
2148 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2149 		chan->unacked_frames++;
2150 		chan->frames_sent++;
2151 		sent++;
2152 
2153 		if (skb_queue_is_last(&chan->tx_q, skb))
2154 			chan->tx_send_head = NULL;
2155 		else
2156 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
2157 
2158 		l2cap_do_send(chan, tx_skb);
2159 		BT_DBG("Sent txseq %u", control->txseq);
2160 	}
2161 
2162 	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
2163 	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
2164 
2165 	return sent;
2166 }
2167 
2168 static void l2cap_ertm_resend(struct l2cap_chan *chan)
2169 {
2170 	struct l2cap_ctrl control;
2171 	struct sk_buff *skb;
2172 	struct sk_buff *tx_skb;
2173 	u16 seq;
2174 
2175 	BT_DBG("chan %p", chan);
2176 
2177 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2178 		return;
2179 
2180 	if (__chan_is_moving(chan))
2181 		return;
2182 
2183 	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
2184 		seq = l2cap_seq_list_pop(&chan->retrans_list);
2185 
2186 		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
2187 		if (!skb) {
2188 			BT_DBG("Error: Can't retransmit seq %d, frame missing",
2189 			       seq);
2190 			continue;
2191 		}
2192 
2193 		bt_cb(skb)->l2cap.retries++;
2194 		control = bt_cb(skb)->l2cap;
2195 
2196 		if (chan->max_tx != 0 &&
2197 		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
2198 			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
2199 			l2cap_send_disconn_req(chan, ECONNRESET);
2200 			l2cap_seq_list_clear(&chan->retrans_list);
2201 			break;
2202 		}
2203 
2204 		control.reqseq = chan->buffer_seq;
2205 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2206 			control.final = 1;
2207 		else
2208 			control.final = 0;
2209 
2210 		if (skb_cloned(skb)) {
2211 			/* Cloned sk_buffs are read-only, so we need a
2212 			 * writeable copy
2213 			 */
2214 			tx_skb = skb_copy(skb, GFP_KERNEL);
2215 		} else {
2216 			tx_skb = skb_clone(skb, GFP_KERNEL);
2217 		}
2218 
2219 		if (!tx_skb) {
2220 			l2cap_seq_list_clear(&chan->retrans_list);
2221 			break;
2222 		}
2223 
2224 		/* Update skb contents */
2225 		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2226 			put_unaligned_le32(__pack_extended_control(&control),
2227 					   tx_skb->data + L2CAP_HDR_SIZE);
2228 		} else {
2229 			put_unaligned_le16(__pack_enhanced_control(&control),
2230 					   tx_skb->data + L2CAP_HDR_SIZE);
2231 		}
2232 
2233 		/* Update FCS */
2234 		if (chan->fcs == L2CAP_FCS_CRC16) {
2235 			u16 fcs = crc16(0, (u8 *) tx_skb->data,
2236 					tx_skb->len - L2CAP_FCS_SIZE);
2237 			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2238 						L2CAP_FCS_SIZE);
2239 		}
2240 
2241 		l2cap_do_send(chan, tx_skb);
2242 
2243 		BT_DBG("Resent txseq %d", control.txseq);
2244 
2245 		chan->last_acked_seq = chan->buffer_seq;
2246 	}
2247 }
2248 
2249 static void l2cap_retransmit(struct l2cap_chan *chan,
2250 			     struct l2cap_ctrl *control)
2251 {
2252 	BT_DBG("chan %p, control %p", chan, control);
2253 
2254 	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2255 	l2cap_ertm_resend(chan);
2256 }
2257 
2258 static void l2cap_retransmit_all(struct l2cap_chan *chan,
2259 				 struct l2cap_ctrl *control)
2260 {
2261 	struct sk_buff *skb;
2262 
2263 	BT_DBG("chan %p, control %p", chan, control);
2264 
2265 	if (control->poll)
2266 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
2267 
2268 	l2cap_seq_list_clear(&chan->retrans_list);
2269 
2270 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2271 		return;
2272 
2273 	if (chan->unacked_frames) {
2274 		skb_queue_walk(&chan->tx_q, skb) {
2275 			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
2276 			    skb == chan->tx_send_head)
2277 				break;
2278 		}
2279 
2280 		skb_queue_walk_from(&chan->tx_q, skb) {
2281 			if (skb == chan->tx_send_head)
2282 				break;
2283 
2284 			l2cap_seq_list_append(&chan->retrans_list,
2285 					      bt_cb(skb)->l2cap.txseq);
2286 		}
2287 
2288 		l2cap_ertm_resend(chan);
2289 	}
2290 }
2291 
2292 static void l2cap_send_ack(struct l2cap_chan *chan)
2293 {
2294 	struct l2cap_ctrl control;
2295 	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2296 					 chan->last_acked_seq);
2297 	int threshold;
2298 
2299 	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2300 	       chan, chan->last_acked_seq, chan->buffer_seq);
2301 
2302 	memset(&control, 0, sizeof(control));
2303 	control.sframe = 1;
2304 
2305 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2306 	    chan->rx_state == L2CAP_RX_STATE_RECV) {
2307 		__clear_ack_timer(chan);
2308 		control.super = L2CAP_SUPER_RNR;
2309 		control.reqseq = chan->buffer_seq;
2310 		l2cap_send_sframe(chan, &control);
2311 	} else {
2312 		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2313 			l2cap_ertm_send(chan);
2314 			/* If any i-frames were sent, they included an ack */
2315 			if (chan->buffer_seq == chan->last_acked_seq)
2316 				frames_to_ack = 0;
2317 		}
2318 
2319 		/* Ack now if the window is 3/4ths full.
2320 		 * Calculate without mul or div
2321 		 */
2322 		threshold = chan->ack_win;
2323 		threshold += threshold << 1;
2324 		threshold >>= 2;
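		/* Example (illustrative): with ack_win = 10 the two steps above
		 * compute 10 + 20 = 30 and 30 >> 2 = 7, i.e. roughly 3/4 of
		 * the window, with no multiply or divide.
		 */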
2325 
2326 		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
2327 		       threshold);
2328 
2329 		if (frames_to_ack >= threshold) {
2330 			__clear_ack_timer(chan);
2331 			control.super = L2CAP_SUPER_RR;
2332 			control.reqseq = chan->buffer_seq;
2333 			l2cap_send_sframe(chan, &control);
2334 			frames_to_ack = 0;
2335 		}
2336 
2337 		if (frames_to_ack)
2338 			__set_ack_timer(chan);
2339 	}
2340 }
2341 
2342 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2343 					 struct msghdr *msg, int len,
2344 					 int count, struct sk_buff *skb)
2345 {
2346 	struct l2cap_conn *conn = chan->conn;
2347 	struct sk_buff **frag;
2348 	int sent = 0;
2349 
2350 	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
2351 		return -EFAULT;
2352 
2353 	sent += count;
2354 	len  -= count;
2355 
2356 	/* Continuation fragments (no L2CAP header) */
2357 	frag = &skb_shinfo(skb)->frag_list;
2358 	while (len) {
2359 		struct sk_buff *tmp;
2360 
2361 		count = min_t(unsigned int, conn->mtu, len);
2362 
2363 		tmp = chan->ops->alloc_skb(chan, 0, count,
2364 					   msg->msg_flags & MSG_DONTWAIT);
2365 		if (IS_ERR(tmp))
2366 			return PTR_ERR(tmp);
2367 
2368 		*frag = tmp;
2369 
2370 		if (!copy_from_iter_full(skb_put(*frag, count), count,
2371 				   &msg->msg_iter))
2372 			return -EFAULT;
2373 
2374 		sent += count;
2375 		len  -= count;
2376 
2377 		skb->len += (*frag)->len;
2378 		skb->data_len += (*frag)->len;
2379 
2380 		frag = &(*frag)->next;
2381 	}
2382 
2383 	return sent;
2384 }
2385 
2386 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
2387 						 struct msghdr *msg, size_t len)
2388 {
2389 	struct l2cap_conn *conn = chan->conn;
2390 	struct sk_buff *skb;
2391 	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
2392 	struct l2cap_hdr *lh;
2393 
2394 	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2395 	       __le16_to_cpu(chan->psm), len);
2396 
2397 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2398 
2399 	skb = chan->ops->alloc_skb(chan, hlen, count,
2400 				   msg->msg_flags & MSG_DONTWAIT);
2401 	if (IS_ERR(skb))
2402 		return skb;
2403 
2404 	/* Create L2CAP header */
2405 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2406 	lh->cid = cpu_to_le16(chan->dcid);
2407 	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
2408 	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
2409 
2410 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2411 	if (unlikely(err < 0)) {
2412 		kfree_skb(skb);
2413 		return ERR_PTR(err);
2414 	}
2415 	return skb;
2416 }
2417 
2418 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
2419 					      struct msghdr *msg, size_t len)
2420 {
2421 	struct l2cap_conn *conn = chan->conn;
2422 	struct sk_buff *skb;
2423 	int err, count;
2424 	struct l2cap_hdr *lh;
2425 
2426 	BT_DBG("chan %p len %zu", chan, len);
2427 
2428 	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
2429 
2430 	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
2431 				   msg->msg_flags & MSG_DONTWAIT);
2432 	if (IS_ERR(skb))
2433 		return skb;
2434 
2435 	/* Create L2CAP header */
2436 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2437 	lh->cid = cpu_to_le16(chan->dcid);
2438 	lh->len = cpu_to_le16(len);
2439 
2440 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2441 	if (unlikely(err < 0)) {
2442 		kfree_skb(skb);
2443 		return ERR_PTR(err);
2444 	}
2445 	return skb;
2446 }
2447 
2448 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
2449 					       struct msghdr *msg, size_t len,
2450 					       u16 sdulen)
2451 {
2452 	struct l2cap_conn *conn = chan->conn;
2453 	struct sk_buff *skb;
2454 	int err, count, hlen;
2455 	struct l2cap_hdr *lh;
2456 
2457 	BT_DBG("chan %p len %zu", chan, len);
2458 
2459 	if (!conn)
2460 		return ERR_PTR(-ENOTCONN);
2461 
2462 	hlen = __ertm_hdr_size(chan);
2463 
2464 	if (sdulen)
2465 		hlen += L2CAP_SDULEN_SIZE;
2466 
2467 	if (chan->fcs == L2CAP_FCS_CRC16)
2468 		hlen += L2CAP_FCS_SIZE;
2469 
2470 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2471 
2472 	skb = chan->ops->alloc_skb(chan, hlen, count,
2473 				   msg->msg_flags & MSG_DONTWAIT);
2474 	if (IS_ERR(skb))
2475 		return skb;
2476 
2477 	/* Create L2CAP header */
2478 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2479 	lh->cid = cpu_to_le16(chan->dcid);
2480 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2481 
2482 	/* Control header is populated later */
2483 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2484 		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2485 	else
2486 		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
2487 
2488 	if (sdulen)
2489 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2490 
2491 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2492 	if (unlikely(err < 0)) {
2493 		kfree_skb(skb);
2494 		return ERR_PTR(err);
2495 	}
2496 
2497 	bt_cb(skb)->l2cap.fcs = chan->fcs;
2498 	bt_cb(skb)->l2cap.retries = 0;
2499 	return skb;
2500 }
2501 
2502 static int l2cap_segment_sdu(struct l2cap_chan *chan,
2503 			     struct sk_buff_head *seg_queue,
2504 			     struct msghdr *msg, size_t len)
2505 {
2506 	struct sk_buff *skb;
2507 	u16 sdu_len;
2508 	size_t pdu_len;
2509 	u8 sar;
2510 
2511 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2512 
2513 	/* It is critical that ERTM PDUs fit in a single HCI fragment,
2514 	 * so fragmented skbs are not used.  The HCI layer's handling
2515 	 * of fragmented skbs is not compatible with ERTM's queueing.
2516 	 */
2517 
2518 	/* PDU size is derived from the HCI MTU */
2519 	pdu_len = chan->conn->mtu;
2520 
2521 	/* Constrain PDU size for BR/EDR connections */
2522 	if (!chan->hs_hcon)
2523 		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
2524 
2525 	/* Adjust for largest possible L2CAP overhead. */
2526 	if (chan->fcs)
2527 		pdu_len -= L2CAP_FCS_SIZE;
2528 
2529 	pdu_len -= __ertm_hdr_size(chan);
2530 
2531 	/* Remote device may have requested smaller PDUs */
2532 	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
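	/* Worked example (illustrative figures, assuming a BR/EDR link with
	 * CRC16 FCS and the 6-byte enhanced-mode header): an HCI MTU of 1021
	 * is first capped at L2CAP_BREDR_MAX_PAYLOAD (1019), the FCS takes it
	 * to 1017, the header leaves 1011, and a remote MPS of 672 would
	 * finally limit pdu_len to 672.
	 */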
2533 
2534 	if (len <= pdu_len) {
2535 		sar = L2CAP_SAR_UNSEGMENTED;
2536 		sdu_len = 0;
2537 		pdu_len = len;
2538 	} else {
2539 		sar = L2CAP_SAR_START;
2540 		sdu_len = len;
2541 	}
2542 
2543 	while (len > 0) {
2544 		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
2545 
2546 		if (IS_ERR(skb)) {
2547 			__skb_queue_purge(seg_queue);
2548 			return PTR_ERR(skb);
2549 		}
2550 
2551 		bt_cb(skb)->l2cap.sar = sar;
2552 		__skb_queue_tail(seg_queue, skb);
2553 
2554 		len -= pdu_len;
2555 		if (sdu_len)
2556 			sdu_len = 0;
2557 
2558 		if (len <= pdu_len) {
2559 			sar = L2CAP_SAR_END;
2560 			pdu_len = len;
2561 		} else {
2562 			sar = L2CAP_SAR_CONTINUE;
2563 		}
2564 	}
2565 
2566 	return 0;
2567 }
2568 
2569 static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2570 						   struct msghdr *msg,
2571 						   size_t len, u16 sdulen)
2572 {
2573 	struct l2cap_conn *conn = chan->conn;
2574 	struct sk_buff *skb;
2575 	int err, count, hlen;
2576 	struct l2cap_hdr *lh;
2577 
2578 	BT_DBG("chan %p len %zu", chan, len);
2579 
2580 	if (!conn)
2581 		return ERR_PTR(-ENOTCONN);
2582 
2583 	hlen = L2CAP_HDR_SIZE;
2584 
2585 	if (sdulen)
2586 		hlen += L2CAP_SDULEN_SIZE;
2587 
2588 	count = min_t(unsigned int, (conn->mtu - hlen), len);
2589 
2590 	skb = chan->ops->alloc_skb(chan, hlen, count,
2591 				   msg->msg_flags & MSG_DONTWAIT);
2592 	if (IS_ERR(skb))
2593 		return skb;
2594 
2595 	/* Create L2CAP header */
2596 	lh = skb_put(skb, L2CAP_HDR_SIZE);
2597 	lh->cid = cpu_to_le16(chan->dcid);
2598 	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2599 
2600 	if (sdulen)
2601 		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2602 
2603 	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2604 	if (unlikely(err < 0)) {
2605 		kfree_skb(skb);
2606 		return ERR_PTR(err);
2607 	}
2608 
2609 	return skb;
2610 }
2611 
2612 static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2613 				struct sk_buff_head *seg_queue,
2614 				struct msghdr *msg, size_t len)
2615 {
2616 	struct sk_buff *skb;
2617 	size_t pdu_len;
2618 	u16 sdu_len;
2619 
2620 	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2621 
2622 	sdu_len = len;
2623 	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
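	/* Worked example (illustrative): a 1000-byte SDU with remote_mps = 230
	 * is sent as a first PDU carrying the 2-byte SDU length plus 228 data
	 * bytes, then full 230-byte PDUs, then a final 82-byte PDU
	 * (228 + 230 + 230 + 230 + 82 = 1000).
	 */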
2624 
2625 	while (len > 0) {
2626 		if (len <= pdu_len)
2627 			pdu_len = len;
2628 
2629 		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2630 		if (IS_ERR(skb)) {
2631 			__skb_queue_purge(seg_queue);
2632 			return PTR_ERR(skb);
2633 		}
2634 
2635 		__skb_queue_tail(seg_queue, skb);
2636 
2637 		len -= pdu_len;
2638 
2639 		if (sdu_len) {
2640 			sdu_len = 0;
2641 			pdu_len += L2CAP_SDULEN_SIZE;
2642 		}
2643 	}
2644 
2645 	return 0;
2646 }
2647 
2648 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2649 {
2650 	int sent = 0;
2651 
2652 	BT_DBG("chan %p", chan);
2653 
2654 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2655 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2656 		chan->tx_credits--;
2657 		sent++;
2658 	}
2659 
2660 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2661 	       skb_queue_len(&chan->tx_q));
2662 }
2663 
2664 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
2665 {
2666 	struct sk_buff *skb;
2667 	int err;
2668 	struct sk_buff_head seg_queue;
2669 
2670 	if (!chan->conn)
2671 		return -ENOTCONN;
2672 
2673 	/* Connectionless channel */
2674 	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
2675 		skb = l2cap_create_connless_pdu(chan, msg, len);
2676 		if (IS_ERR(skb))
2677 			return PTR_ERR(skb);
2678 
2679 		l2cap_do_send(chan, skb);
2680 		return len;
2681 	}
2682 
2683 	switch (chan->mode) {
2684 	case L2CAP_MODE_LE_FLOWCTL:
2685 	case L2CAP_MODE_EXT_FLOWCTL:
2686 		/* Check outgoing MTU */
2687 		if (len > chan->omtu)
2688 			return -EMSGSIZE;
2689 
2690 		__skb_queue_head_init(&seg_queue);
2691 
2692 		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2693 
2694 		if (chan->state != BT_CONNECTED) {
2695 			__skb_queue_purge(&seg_queue);
2696 			err = -ENOTCONN;
2697 		}
2698 
2699 		if (err)
2700 			return err;
2701 
2702 		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2703 
2704 		l2cap_le_flowctl_send(chan);
2705 
2706 		if (!chan->tx_credits)
2707 			chan->ops->suspend(chan);
2708 
2709 		err = len;
2710 
2711 		break;
2712 
2713 	case L2CAP_MODE_BASIC:
2714 		/* Check outgoing MTU */
2715 		if (len > chan->omtu)
2716 			return -EMSGSIZE;
2717 
2718 		/* Create a basic PDU */
2719 		skb = l2cap_create_basic_pdu(chan, msg, len);
2720 		if (IS_ERR(skb))
2721 			return PTR_ERR(skb);
2722 
2723 		l2cap_do_send(chan, skb);
2724 		err = len;
2725 		break;
2726 
2727 	case L2CAP_MODE_ERTM:
2728 	case L2CAP_MODE_STREAMING:
2729 		/* Check outgoing MTU */
2730 		if (len > chan->omtu) {
2731 			err = -EMSGSIZE;
2732 			break;
2733 		}
2734 
2735 		__skb_queue_head_init(&seg_queue);
2736 
2737 		/* Do segmentation before calling in to the state machine,
2738 		 * since it's possible to block while waiting for memory
2739 		 * allocation.
2740 		 */
2741 		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2742 
2743 		if (err)
2744 			break;
2745 
2746 		if (chan->mode == L2CAP_MODE_ERTM)
2747 			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
2748 		else
2749 			l2cap_streaming_send(chan, &seg_queue);
2750 
2751 		err = len;
2752 
2753 		/* If the skbs were not queued for sending, they'll still be in
2754 		 * seg_queue and need to be purged.
2755 		 */
2756 		__skb_queue_purge(&seg_queue);
2757 		break;
2758 
2759 	default:
2760 		BT_DBG("bad mode 0x%1.1x", chan->mode);
2761 		err = -EBADFD;
2762 	}
2763 
2764 	return err;
2765 }
2766 EXPORT_SYMBOL_GPL(l2cap_chan_send);
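
/* Illustrative usage of l2cap_chan_send() above (hypothetical caller, not
 * from this file): a kernel user with a connected channel might wrap a
 * buffer in a msghdr like so (the iov_iter direction constant differs
 * between kernel versions, e.g. WRITE vs. ITER_SOURCE):
 *
 *	struct kvec iv = { .iov_base = buf, .iov_len = len };
 *	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
 *
 *	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, len);
 *	err = l2cap_chan_send(chan, &msg, len);
 */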
2767 
2768 static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2769 {
2770 	struct l2cap_ctrl control;
2771 	u16 seq;
2772 
2773 	BT_DBG("chan %p, txseq %u", chan, txseq);
2774 
2775 	memset(&control, 0, sizeof(control));
2776 	control.sframe = 1;
2777 	control.super = L2CAP_SUPER_SREJ;
2778 
2779 	for (seq = chan->expected_tx_seq; seq != txseq;
2780 	     seq = __next_seq(chan, seq)) {
2781 		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2782 			control.reqseq = seq;
2783 			l2cap_send_sframe(chan, &control);
2784 			l2cap_seq_list_append(&chan->srej_list, seq);
2785 		}
2786 	}
2787 
2788 	chan->expected_tx_seq = __next_seq(chan, txseq);
2789 }
2790 
2791 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2792 {
2793 	struct l2cap_ctrl control;
2794 
2795 	BT_DBG("chan %p", chan);
2796 
2797 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2798 		return;
2799 
2800 	memset(&control, 0, sizeof(control));
2801 	control.sframe = 1;
2802 	control.super = L2CAP_SUPER_SREJ;
2803 	control.reqseq = chan->srej_list.tail;
2804 	l2cap_send_sframe(chan, &control);
2805 }
2806 
2807 static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2808 {
2809 	struct l2cap_ctrl control;
2810 	u16 initial_head;
2811 	u16 seq;
2812 
2813 	BT_DBG("chan %p, txseq %u", chan, txseq);
2814 
2815 	memset(&control, 0, sizeof(control));
2816 	control.sframe = 1;
2817 	control.super = L2CAP_SUPER_SREJ;
2818 
2819 	/* Capture initial list head to allow only one pass through the list. */
2820 	initial_head = chan->srej_list.head;
2821 
2822 	do {
2823 		seq = l2cap_seq_list_pop(&chan->srej_list);
2824 		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2825 			break;
2826 
2827 		control.reqseq = seq;
2828 		l2cap_send_sframe(chan, &control);
2829 		l2cap_seq_list_append(&chan->srej_list, seq);
2830 	} while (chan->srej_list.head != initial_head);
2831 }
2832 
2833 static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2834 {
2835 	struct sk_buff *acked_skb;
2836 	u16 ackseq;
2837 
2838 	BT_DBG("chan %p, reqseq %u", chan, reqseq);
2839 
2840 	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2841 		return;
2842 
2843 	BT_DBG("expected_ack_seq %u, unacked_frames %u",
2844 	       chan->expected_ack_seq, chan->unacked_frames);
2845 
2846 	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2847 	     ackseq = __next_seq(chan, ackseq)) {
2848 
2849 		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2850 		if (acked_skb) {
2851 			skb_unlink(acked_skb, &chan->tx_q);
2852 			kfree_skb(acked_skb);
2853 			chan->unacked_frames--;
2854 		}
2855 	}
2856 
2857 	chan->expected_ack_seq = reqseq;
2858 
2859 	if (chan->unacked_frames == 0)
2860 		__clear_retrans_timer(chan);
2861 
2862 	BT_DBG("unacked_frames %u", chan->unacked_frames);
2863 }
2864 
2865 static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2866 {
2867 	BT_DBG("chan %p", chan);
2868 
2869 	chan->expected_tx_seq = chan->buffer_seq;
2870 	l2cap_seq_list_clear(&chan->srej_list);
2871 	skb_queue_purge(&chan->srej_q);
2872 	chan->rx_state = L2CAP_RX_STATE_RECV;
2873 }
2874 
2875 static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2876 				struct l2cap_ctrl *control,
2877 				struct sk_buff_head *skbs, u8 event)
2878 {
2879 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2880 	       event);
2881 
2882 	switch (event) {
2883 	case L2CAP_EV_DATA_REQUEST:
2884 		if (chan->tx_send_head == NULL)
2885 			chan->tx_send_head = skb_peek(skbs);
2886 
2887 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2888 		l2cap_ertm_send(chan);
2889 		break;
2890 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2891 		BT_DBG("Enter LOCAL_BUSY");
2892 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2893 
2894 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2895 			/* The SREJ_SENT state must be aborted if we are to
2896 			 * enter the LOCAL_BUSY state.
2897 			 */
2898 			l2cap_abort_rx_srej_sent(chan);
2899 		}
2900 
2901 		l2cap_send_ack(chan);
2902 
2903 		break;
2904 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2905 		BT_DBG("Exit LOCAL_BUSY");
2906 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2907 
2908 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2909 			struct l2cap_ctrl local_control;
2910 
2911 			memset(&local_control, 0, sizeof(local_control));
2912 			local_control.sframe = 1;
2913 			local_control.super = L2CAP_SUPER_RR;
2914 			local_control.poll = 1;
2915 			local_control.reqseq = chan->buffer_seq;
2916 			l2cap_send_sframe(chan, &local_control);
2917 
2918 			chan->retry_count = 1;
2919 			__set_monitor_timer(chan);
2920 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2921 		}
2922 		break;
2923 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2924 		l2cap_process_reqseq(chan, control->reqseq);
2925 		break;
2926 	case L2CAP_EV_EXPLICIT_POLL:
2927 		l2cap_send_rr_or_rnr(chan, 1);
2928 		chan->retry_count = 1;
2929 		__set_monitor_timer(chan);
2930 		__clear_ack_timer(chan);
2931 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2932 		break;
2933 	case L2CAP_EV_RETRANS_TO:
2934 		l2cap_send_rr_or_rnr(chan, 1);
2935 		chan->retry_count = 1;
2936 		__set_monitor_timer(chan);
2937 		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2938 		break;
2939 	case L2CAP_EV_RECV_FBIT:
2940 		/* Nothing to process */
2941 		break;
2942 	default:
2943 		break;
2944 	}
2945 }
2946 
2947 static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2948 				  struct l2cap_ctrl *control,
2949 				  struct sk_buff_head *skbs, u8 event)
2950 {
2951 	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2952 	       event);
2953 
2954 	switch (event) {
2955 	case L2CAP_EV_DATA_REQUEST:
2956 		if (chan->tx_send_head == NULL)
2957 			chan->tx_send_head = skb_peek(skbs);
2958 		/* Queue data, but don't send. */
2959 		skb_queue_splice_tail_init(skbs, &chan->tx_q);
2960 		break;
2961 	case L2CAP_EV_LOCAL_BUSY_DETECTED:
2962 		BT_DBG("Enter LOCAL_BUSY");
2963 		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2964 
2965 		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2966 			/* The SREJ_SENT state must be aborted if we are to
2967 			 * enter the LOCAL_BUSY state.
2968 			 */
2969 			l2cap_abort_rx_srej_sent(chan);
2970 		}
2971 
2972 		l2cap_send_ack(chan);
2973 
2974 		break;
2975 	case L2CAP_EV_LOCAL_BUSY_CLEAR:
2976 		BT_DBG("Exit LOCAL_BUSY");
2977 		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2978 
2979 		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2980 			struct l2cap_ctrl local_control;
2981 			memset(&local_control, 0, sizeof(local_control));
2982 			local_control.sframe = 1;
2983 			local_control.super = L2CAP_SUPER_RR;
2984 			local_control.poll = 1;
2985 			local_control.reqseq = chan->buffer_seq;
2986 			l2cap_send_sframe(chan, &local_control);
2987 
2988 			chan->retry_count = 1;
2989 			__set_monitor_timer(chan);
2990 			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2991 		}
2992 		break;
2993 	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2994 		l2cap_process_reqseq(chan, control->reqseq);
2995 		fallthrough;
2996 
2997 	case L2CAP_EV_RECV_FBIT:
2998 		if (control && control->final) {
2999 			__clear_monitor_timer(chan);
3000 			if (chan->unacked_frames > 0)
3001 				__set_retrans_timer(chan);
3002 			chan->retry_count = 0;
3003 			chan->tx_state = L2CAP_TX_STATE_XMIT;
3004 			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
3005 		}
3006 		break;
3007 	case L2CAP_EV_EXPLICIT_POLL:
3008 		/* Ignore */
3009 		break;
3010 	case L2CAP_EV_MONITOR_TO:
3011 		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
3012 			l2cap_send_rr_or_rnr(chan, 1);
3013 			__set_monitor_timer(chan);
3014 			chan->retry_count++;
3015 		} else {
3016 			l2cap_send_disconn_req(chan, ECONNABORTED);
3017 		}
3018 		break;
3019 	default:
3020 		break;
3021 	}
3022 }
3023 
3024 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
3025 		     struct sk_buff_head *skbs, u8 event)
3026 {
3027 	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
3028 	       chan, control, skbs, event, chan->tx_state);
3029 
3030 	switch (chan->tx_state) {
3031 	case L2CAP_TX_STATE_XMIT:
3032 		l2cap_tx_state_xmit(chan, control, skbs, event);
3033 		break;
3034 	case L2CAP_TX_STATE_WAIT_F:
3035 		l2cap_tx_state_wait_f(chan, control, skbs, event);
3036 		break;
3037 	default:
3038 		/* Ignore event */
3039 		break;
3040 	}
3041 }
3042 
3043 static void l2cap_pass_to_tx(struct l2cap_chan *chan,
3044 			     struct l2cap_ctrl *control)
3045 {
3046 	BT_DBG("chan %p, control %p", chan, control);
3047 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
3048 }
3049 
3050 static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
3051 				  struct l2cap_ctrl *control)
3052 {
3053 	BT_DBG("chan %p, control %p", chan, control);
3054 	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
3055 }
3056 
3057 /* Copy frame to all raw sockets on that connection */
3058 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3059 {
3060 	struct sk_buff *nskb;
3061 	struct l2cap_chan *chan;
3062 
3063 	BT_DBG("conn %p", conn);
3064 
3065 	mutex_lock(&conn->chan_lock);
3066 
3067 	list_for_each_entry(chan, &conn->chan_l, list) {
3068 		if (chan->chan_type != L2CAP_CHAN_RAW)
3069 			continue;
3070 
3071 		/* Don't send frame to the channel it came from */
3072 		if (bt_cb(skb)->l2cap.chan == chan)
3073 			continue;
3074 
3075 		nskb = skb_clone(skb, GFP_KERNEL);
3076 		if (!nskb)
3077 			continue;
3078 		if (chan->ops->recv(chan, nskb))
3079 			kfree_skb(nskb);
3080 	}
3081 
3082 	mutex_unlock(&conn->chan_lock);
3083 }
3084 
3085 /* ---- L2CAP signalling commands ---- */
3086 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
3087 				       u8 ident, u16 dlen, void *data)
3088 {
3089 	struct sk_buff *skb, **frag;
3090 	struct l2cap_cmd_hdr *cmd;
3091 	struct l2cap_hdr *lh;
3092 	int len, count;
3093 
3094 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
3095 	       conn, code, ident, dlen);
3096 
3097 	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
3098 		return NULL;
3099 
3100 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
3101 	count = min_t(unsigned int, conn->mtu, len);
3102 
3103 	skb = bt_skb_alloc(count, GFP_KERNEL);
3104 	if (!skb)
3105 		return NULL;
3106 
3107 	lh = skb_put(skb, L2CAP_HDR_SIZE);
3108 	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
3109 
3110 	if (conn->hcon->type == LE_LINK)
3111 		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
3112 	else
3113 		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
3114 
3115 	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
3116 	cmd->code  = code;
3117 	cmd->ident = ident;
3118 	cmd->len   = cpu_to_le16(dlen);
3119 
3120 	if (dlen) {
3121 		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
3122 		skb_put_data(skb, data, count);
3123 		data += count;
3124 	}
3125 
3126 	len -= skb->len;
3127 
3128 	/* Continuation fragments (no L2CAP header) */
3129 	frag = &skb_shinfo(skb)->frag_list;
3130 	while (len) {
3131 		count = min_t(unsigned int, conn->mtu, len);
3132 
3133 		*frag = bt_skb_alloc(count, GFP_KERNEL);
3134 		if (!*frag)
3135 			goto fail;
3136 
3137 		skb_put_data(*frag, data, count);
3138 
3139 		len  -= count;
3140 		data += count;
3141 
3142 		frag = &(*frag)->next;
3143 	}
3144 
3145 	return skb;
3146 
3147 fail:
3148 	kfree_skb(skb);
3149 	return NULL;
3150 }
3151 
3152 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
3153 				     unsigned long *val)
3154 {
3155 	struct l2cap_conf_opt *opt = *ptr;
3156 	int len;
3157 
3158 	len = L2CAP_CONF_OPT_SIZE + opt->len;
3159 	*ptr += len;
3160 
3161 	*type = opt->type;
3162 	*olen = opt->len;
3163 
3164 	switch (opt->len) {
3165 	case 1:
3166 		*val = *((u8 *) opt->val);
3167 		break;
3168 
3169 	case 2:
3170 		*val = get_unaligned_le16(opt->val);
3171 		break;
3172 
3173 	case 4:
3174 		*val = get_unaligned_le32(opt->val);
3175 		break;
3176 
3177 	default:
3178 		*val = (unsigned long) opt->val;
3179 		break;
3180 	}
3181 
3182 	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
3183 	return len;
3184 }
3185 
3186 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
3187 {
3188 	struct l2cap_conf_opt *opt = *ptr;
3189 
3190 	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
3191 
3192 	if (size < L2CAP_CONF_OPT_SIZE + len)
3193 		return;
3194 
3195 	opt->type = type;
3196 	opt->len  = len;
3197 
3198 	switch (len) {
3199 	case 1:
3200 		*((u8 *) opt->val)  = val;
3201 		break;
3202 
3203 	case 2:
3204 		put_unaligned_le16(val, opt->val);
3205 		break;
3206 
3207 	case 4:
3208 		put_unaligned_le32(val, opt->val);
3209 		break;
3210 
3211 	default:
3212 		memcpy(opt->val, (void *) val, len);
3213 		break;
3214 	}
3215 
3216 	*ptr += L2CAP_CONF_OPT_SIZE + len;
3217 }
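
/* Example (illustrative) for l2cap_add_conf_opt() above: a call such as
 * l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, 672, size) emits the option
 * bytes 0x01 0x02 0xa0 0x02 - type, length, then the 16-bit value 672
 * (0x02a0) in little-endian order - and advances ptr by
 * L2CAP_CONF_OPT_SIZE + 2 bytes.
 */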
3218 
3219 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
3220 {
3221 	struct l2cap_conf_efs efs;
3222 
3223 	switch (chan->mode) {
3224 	case L2CAP_MODE_ERTM:
3225 		efs.id		= chan->local_id;
3226 		efs.stype	= chan->local_stype;
3227 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3228 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3229 		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3230 		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
3231 		break;
3232 
3233 	case L2CAP_MODE_STREAMING:
3234 		efs.id		= 1;
3235 		efs.stype	= L2CAP_SERV_BESTEFFORT;
3236 		efs.msdu	= cpu_to_le16(chan->local_msdu);
3237 		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
3238 		efs.acc_lat	= 0;
3239 		efs.flush_to	= 0;
3240 		break;
3241 
3242 	default:
3243 		return;
3244 	}
3245 
3246 	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
3247 			   (unsigned long) &efs, size);
3248 }
3249 
3250 static void l2cap_ack_timeout(struct work_struct *work)
3251 {
3252 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
3253 					       ack_timer.work);
3254 	u16 frames_to_ack;
3255 
3256 	BT_DBG("chan %p", chan);
3257 
3258 	l2cap_chan_lock(chan);
3259 
3260 	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3261 				     chan->last_acked_seq);
3262 
3263 	if (frames_to_ack)
3264 		l2cap_send_rr_or_rnr(chan, 0);
3265 
3266 	l2cap_chan_unlock(chan);
3267 	l2cap_chan_put(chan);
3268 }
3269 
3270 int l2cap_ertm_init(struct l2cap_chan *chan)
3271 {
3272 	int err;
3273 
3274 	chan->next_tx_seq = 0;
3275 	chan->expected_tx_seq = 0;
3276 	chan->expected_ack_seq = 0;
3277 	chan->unacked_frames = 0;
3278 	chan->buffer_seq = 0;
3279 	chan->frames_sent = 0;
3280 	chan->last_acked_seq = 0;
3281 	chan->sdu = NULL;
3282 	chan->sdu_last_frag = NULL;
3283 	chan->sdu_len = 0;
3284 
3285 	skb_queue_head_init(&chan->tx_q);
3286 
3287 	chan->local_amp_id = AMP_ID_BREDR;
3288 	chan->move_id = AMP_ID_BREDR;
3289 	chan->move_state = L2CAP_MOVE_STABLE;
3290 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3291 
3292 	if (chan->mode != L2CAP_MODE_ERTM)
3293 		return 0;
3294 
3295 	chan->rx_state = L2CAP_RX_STATE_RECV;
3296 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3297 
3298 	skb_queue_head_init(&chan->srej_q);
3299 
3300 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3301 	if (err < 0)
3302 		return err;
3303 
3304 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3305 	if (err < 0)
3306 		l2cap_seq_list_free(&chan->srej_list);
3307 
3308 	return err;
3309 }
3310 
3311 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3312 {
3313 	switch (mode) {
3314 	case L2CAP_MODE_STREAMING:
3315 	case L2CAP_MODE_ERTM:
3316 		if (l2cap_mode_supported(mode, remote_feat_mask))
3317 			return mode;
3318 		fallthrough;
3319 	default:
3320 		return L2CAP_MODE_BASIC;
3321 	}
3322 }
3323 
3324 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3325 {
3326 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3327 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3328 }
3329 
3330 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3331 {
3332 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3333 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3334 }
3335 
3336 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3337 				      struct l2cap_conf_rfc *rfc)
3338 {
3339 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3340 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3341 
3342 		/* Class 1 devices must have ERTM timeouts
3343 		 * exceeding the Link Supervision Timeout.  The
3344 		 * default Link Supervision Timeout for AMP
3345 		 * controllers is 10 seconds.
3346 		 *
3347 		 * Class 1 devices use 0xffffffff for their
3348 		 * best-effort flush timeout, so the clamping logic
3349 		 * will result in a timeout that meets the above
3350 		 * requirement.  ERTM timeouts are 16-bit values, so
3351 		 * the maximum timeout is 65.535 seconds.
3352 		 */
3353 
3354 		/* Convert timeout to milliseconds and round */
3355 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3356 
3357 		/* This is the recommended formula for class 2 devices
3358 		 * that start ERTM timers when packets are sent to the
3359 		 * controller.
3360 		 */
3361 		ertm_to = 3 * ertm_to + 500;
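		/* Worked example (illustrative): a 100000 us best-effort flush
		 * timeout rounds to 100 ms and yields 3 * 100 + 500 = 800 ms,
		 * while the Class 1 value of 0xffffffff overflows 16 bits and
		 * is clamped to 0xffff (65.535 s) below.
		 */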
3362 
3363 		if (ertm_to > 0xffff)
3364 			ertm_to = 0xffff;
3365 
3366 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3367 		rfc->monitor_timeout = rfc->retrans_timeout;
3368 	} else {
3369 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3370 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3371 	}
3372 }
3373 
3374 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3375 {
3376 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3377 	    __l2cap_ews_supported(chan->conn)) {
3378 		/* use extended control field */
3379 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3380 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3381 	} else {
3382 		chan->tx_win = min_t(u16, chan->tx_win,
3383 				     L2CAP_DEFAULT_TX_WINDOW);
3384 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3385 	}
3386 	chan->ack_win = chan->tx_win;
3387 }
3388 
3389 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3390 {
3391 	struct hci_conn *conn = chan->conn->hcon;
3392 
3393 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3394 
3395 	/* The 2-DH1 packet has between 2 and 56 information bytes
3396 	 * (including the 2-byte payload header)
3397 	 */
3398 	if (!(conn->pkt_type & HCI_2DH1))
3399 		chan->imtu = 54;
3400 
3401 	/* The 3-DH1 packet has between 2 and 85 information bytes
3402 	 * (including the 2-byte payload header)
3403 	 */
3404 	if (!(conn->pkt_type & HCI_3DH1))
3405 		chan->imtu = 83;
3406 
3407 	/* The 2-DH3 packet has between 2 and 369 information bytes
3408 	 * (including the 2-byte payload header)
3409 	 */
3410 	if (!(conn->pkt_type & HCI_2DH3))
3411 		chan->imtu = 367;
3412 
3413 	/* The 3-DH3 packet has between 2 and 554 information bytes
3414 	 * (including the 2-byte payload header)
3415 	 */
3416 	if (!(conn->pkt_type & HCI_3DH3))
3417 		chan->imtu = 552;
3418 
3419 	/* The 2-DH5 packet has between 2 and 681 information bytes
3420 	 * (including the 2-byte payload header)
3421 	 */
3422 	if (!(conn->pkt_type & HCI_2DH5))
3423 		chan->imtu = 679;
3424 
3425 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3426 	 * (including the 2-byte payload header)
3427 	 */
3428 	if (!(conn->pkt_type & HCI_3DH5))
3429 		chan->imtu = 1021;
3430 }
3431 
3432 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3433 {
3434 	struct l2cap_conf_req *req = data;
3435 	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
3436 	void *ptr = req->data;
3437 	void *endptr = data + data_size;
3438 	u16 size;
3439 
3440 	BT_DBG("chan %p", chan);
3441 
3442 	if (chan->num_conf_req || chan->num_conf_rsp)
3443 		goto done;
3444 
3445 	switch (chan->mode) {
3446 	case L2CAP_MODE_STREAMING:
3447 	case L2CAP_MODE_ERTM:
3448 		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
3449 			break;
3450 
3451 		if (__l2cap_efs_supported(chan->conn))
3452 			set_bit(FLAG_EFS_ENABLE, &chan->flags);
3453 
3454 		fallthrough;
3455 	default:
3456 		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
3457 		break;
3458 	}
3459 
3460 done:
3461 	if (chan->imtu != L2CAP_DEFAULT_MTU) {
3462 		if (!chan->imtu)
3463 			l2cap_mtu_auto(chan);
3464 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3465 				   endptr - ptr);
3466 	}
3467 
3468 	switch (chan->mode) {
3469 	case L2CAP_MODE_BASIC:
3470 		if (disable_ertm)
3471 			break;
3472 
3473 		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
3474 		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
3475 			break;
3476 
3477 		rfc.mode            = L2CAP_MODE_BASIC;
3478 		rfc.txwin_size      = 0;
3479 		rfc.max_transmit    = 0;
3480 		rfc.retrans_timeout = 0;
3481 		rfc.monitor_timeout = 0;
3482 		rfc.max_pdu_size    = 0;
3483 
3484 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3485 				   (unsigned long) &rfc, endptr - ptr);
3486 		break;
3487 
3488 	case L2CAP_MODE_ERTM:
3489 		rfc.mode            = L2CAP_MODE_ERTM;
3490 		rfc.max_transmit    = chan->max_tx;
3491 
3492 		__l2cap_set_ertm_timeouts(chan, &rfc);
3493 
3494 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3495 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3496 			     L2CAP_FCS_SIZE);
3497 		rfc.max_pdu_size = cpu_to_le16(size);
3498 
3499 		l2cap_txwin_setup(chan);
3500 
3501 		rfc.txwin_size = min_t(u16, chan->tx_win,
3502 				       L2CAP_DEFAULT_TX_WINDOW);
3503 
3504 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3505 				   (unsigned long) &rfc, endptr - ptr);
3506 
3507 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3508 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3509 
3510 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3511 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3512 					   chan->tx_win, endptr - ptr);
3513 
3514 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3515 			if (chan->fcs == L2CAP_FCS_NONE ||
3516 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3517 				chan->fcs = L2CAP_FCS_NONE;
3518 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3519 						   chan->fcs, endptr - ptr);
3520 			}
3521 		break;
3522 
3523 	case L2CAP_MODE_STREAMING:
3524 		l2cap_txwin_setup(chan);
3525 		rfc.mode            = L2CAP_MODE_STREAMING;
3526 		rfc.txwin_size      = 0;
3527 		rfc.max_transmit    = 0;
3528 		rfc.retrans_timeout = 0;
3529 		rfc.monitor_timeout = 0;
3530 
3531 		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
3532 			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3533 			     L2CAP_FCS_SIZE);
3534 		rfc.max_pdu_size = cpu_to_le16(size);
3535 
3536 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3537 				   (unsigned long) &rfc, endptr - ptr);
3538 
3539 		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3540 			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
3541 
3542 		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3543 			if (chan->fcs == L2CAP_FCS_NONE ||
3544 			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
3545 				chan->fcs = L2CAP_FCS_NONE;
3546 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3547 						   chan->fcs, endptr - ptr);
3548 			}
3549 		break;
3550 	}
3551 
3552 	req->dcid  = cpu_to_le16(chan->dcid);
3553 	req->flags = cpu_to_le16(0);
3554 
3555 	return ptr - data;
3556 }
3557 
3558 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
3559 {
3560 	struct l2cap_conf_rsp *rsp = data;
3561 	void *ptr = rsp->data;
3562 	void *endptr = data + data_size;
3563 	void *req = chan->conf_req;
3564 	int len = chan->conf_len;
3565 	int type, hint, olen;
3566 	unsigned long val;
3567 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3568 	struct l2cap_conf_efs efs;
3569 	u8 remote_efs = 0;
3570 	u16 mtu = L2CAP_DEFAULT_MTU;
3571 	u16 result = L2CAP_CONF_SUCCESS;
3572 	u16 size;
3573 
3574 	BT_DBG("chan %p", chan);
3575 
3576 	while (len >= L2CAP_CONF_OPT_SIZE) {
3577 		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3578 		if (len < 0)
3579 			break;
3580 
3581 		hint  = type & L2CAP_CONF_HINT;
3582 		type &= L2CAP_CONF_MASK;
3583 
3584 		switch (type) {
3585 		case L2CAP_CONF_MTU:
3586 			if (olen != 2)
3587 				break;
3588 			mtu = val;
3589 			break;
3590 
3591 		case L2CAP_CONF_FLUSH_TO:
3592 			if (olen != 2)
3593 				break;
3594 			chan->flush_to = val;
3595 			break;
3596 
3597 		case L2CAP_CONF_QOS:
3598 			break;
3599 
3600 		case L2CAP_CONF_RFC:
3601 			if (olen != sizeof(rfc))
3602 				break;
3603 			memcpy(&rfc, (void *) val, olen);
3604 			break;
3605 
3606 		case L2CAP_CONF_FCS:
3607 			if (olen != 1)
3608 				break;
3609 			if (val == L2CAP_FCS_NONE)
3610 				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
3611 			break;
3612 
3613 		case L2CAP_CONF_EFS:
3614 			if (olen != sizeof(efs))
3615 				break;
3616 			remote_efs = 1;
3617 			memcpy(&efs, (void *) val, olen);
3618 			break;
3619 
3620 		case L2CAP_CONF_EWS:
3621 			if (olen != 2)
3622 				break;
3623 			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
3624 				return -ECONNREFUSED;
3625 			set_bit(FLAG_EXT_CTRL, &chan->flags);
3626 			set_bit(CONF_EWS_RECV, &chan->conf_state);
3627 			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3628 			chan->remote_tx_win = val;
3629 			break;
3630 
3631 		default:
3632 			if (hint)
3633 				break;
3634 			result = L2CAP_CONF_UNKNOWN;
3635 			*((u8 *) ptr++) = type;
3636 			break;
3637 		}
3638 	}
3639 
3640 	if (chan->num_conf_rsp || chan->num_conf_req > 1)
3641 		goto done;
3642 
3643 	switch (chan->mode) {
3644 	case L2CAP_MODE_STREAMING:
3645 	case L2CAP_MODE_ERTM:
3646 		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
3647 			chan->mode = l2cap_select_mode(rfc.mode,
3648 						       chan->conn->feat_mask);
3649 			break;
3650 		}
3651 
3652 		if (remote_efs) {
3653 			if (__l2cap_efs_supported(chan->conn))
3654 				set_bit(FLAG_EFS_ENABLE, &chan->flags);
3655 			else
3656 				return -ECONNREFUSED;
3657 		}
3658 
3659 		if (chan->mode != rfc.mode)
3660 			return -ECONNREFUSED;
3661 
3662 		break;
3663 	}
3664 
3665 done:
3666 	if (chan->mode != rfc.mode) {
3667 		result = L2CAP_CONF_UNACCEPT;
3668 		rfc.mode = chan->mode;
3669 
3670 		if (chan->num_conf_rsp == 1)
3671 			return -ECONNREFUSED;
3672 
3673 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3674 				   (unsigned long) &rfc, endptr - ptr);
3675 	}
3676 
3677 	if (result == L2CAP_CONF_SUCCESS) {
3678 		/* Configure output options and let the other side know
3679 		 * which ones we don't like. */
3680 
3681 		if (mtu < L2CAP_DEFAULT_MIN_MTU)
3682 			result = L2CAP_CONF_UNACCEPT;
3683 		else {
3684 			chan->omtu = mtu;
3685 			set_bit(CONF_MTU_DONE, &chan->conf_state);
3686 		}
3687 		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
3688 
3689 		if (remote_efs) {
3690 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3691 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3692 			    efs.stype != chan->local_stype) {
3693 
3694 				result = L2CAP_CONF_UNACCEPT;
3695 
3696 				if (chan->num_conf_req >= 1)
3697 					return -ECONNREFUSED;
3698 
3699 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3700 						   sizeof(efs),
3701 						   (unsigned long) &efs, endptr - ptr);
3702 			} else {
3703 				/* Send PENDING Conf Rsp */
3704 				result = L2CAP_CONF_PENDING;
3705 				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3706 			}
3707 		}
3708 
3709 		switch (rfc.mode) {
3710 		case L2CAP_MODE_BASIC:
3711 			chan->fcs = L2CAP_FCS_NONE;
3712 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3713 			break;
3714 
3715 		case L2CAP_MODE_ERTM:
3716 			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3717 				chan->remote_tx_win = rfc.txwin_size;
3718 			else
3719 				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3720 
3721 			chan->remote_max_tx = rfc.max_transmit;
3722 
3723 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3724 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3725 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3726 			rfc.max_pdu_size = cpu_to_le16(size);
3727 			chan->remote_mps = size;
3728 
3729 			__l2cap_set_ertm_timeouts(chan, &rfc);
3730 
3731 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3732 
3733 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3734 					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
3735 
3736 			if (remote_efs &&
3737 			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3738 				chan->remote_id = efs.id;
3739 				chan->remote_stype = efs.stype;
3740 				chan->remote_msdu = le16_to_cpu(efs.msdu);
3741 				chan->remote_flush_to =
3742 					le32_to_cpu(efs.flush_to);
3743 				chan->remote_acc_lat =
3744 					le32_to_cpu(efs.acc_lat);
3745 				chan->remote_sdu_itime =
3746 					le32_to_cpu(efs.sdu_itime);
3747 				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
3748 						   sizeof(efs),
3749 						   (unsigned long) &efs, endptr - ptr);
3750 			}
3751 			break;
3752 
3753 		case L2CAP_MODE_STREAMING:
3754 			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
3755 				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3756 				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
3757 			rfc.max_pdu_size = cpu_to_le16(size);
3758 			chan->remote_mps = size;
3759 
3760 			set_bit(CONF_MODE_DONE, &chan->conf_state);
3761 
3762 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3763 					   (unsigned long) &rfc, endptr - ptr);
3764 
3765 			break;
3766 
3767 		default:
3768 			result = L2CAP_CONF_UNACCEPT;
3769 
3770 			memset(&rfc, 0, sizeof(rfc));
3771 			rfc.mode = chan->mode;
3772 		}
3773 
3774 		if (result == L2CAP_CONF_SUCCESS)
3775 			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3776 	}
3777 	rsp->scid   = cpu_to_le16(chan->dcid);
3778 	rsp->result = cpu_to_le16(result);
3779 	rsp->flags  = cpu_to_le16(0);
3780 
3781 	return ptr - data;
3782 }
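/* Illustrative note, not part of the original source: the option walk in
 * l2cap_parse_conf_req() above assumes the standard L2CAP configuration
 * option encoding, a TLV of [type: 1 octet][length: 1 octet][value: length
 * octets], where the most significant bit of the type octet
 * (L2CAP_CONF_HINT) marks the option as a hint that may be ignored when it
 * is not understood.  A minimal sketch of that walk (buffer names taken from
 * the function above):
 *
 *	while (len >= L2CAP_CONF_OPT_SIZE) {
 *		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
 *		if (len < 0)
 *			break;
 *		hint  = type & L2CAP_CONF_HINT;
 *		type &= L2CAP_CONF_MASK;
 *		// known options are handled here; an unknown non-hint option
 *		// makes the response carry L2CAP_CONF_UNKNOWN
 *	}
 */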
3783 
3784 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3785 				void *data, size_t size, u16 *result)
3786 {
3787 	struct l2cap_conf_req *req = data;
3788 	void *ptr = req->data;
3789 	void *endptr = data + size;
3790 	int type, olen;
3791 	unsigned long val;
3792 	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3793 	struct l2cap_conf_efs efs;
3794 
3795 	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
3796 
3797 	while (len >= L2CAP_CONF_OPT_SIZE) {
3798 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3799 		if (len < 0)
3800 			break;
3801 
3802 		switch (type) {
3803 		case L2CAP_CONF_MTU:
3804 			if (olen != 2)
3805 				break;
3806 			if (val < L2CAP_DEFAULT_MIN_MTU) {
3807 				*result = L2CAP_CONF_UNACCEPT;
3808 				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3809 			} else
3810 				chan->imtu = val;
3811 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3812 					   endptr - ptr);
3813 			break;
3814 
3815 		case L2CAP_CONF_FLUSH_TO:
3816 			if (olen != 2)
3817 				break;
3818 			chan->flush_to = val;
3819 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3820 					   chan->flush_to, endptr - ptr);
3821 			break;
3822 
3823 		case L2CAP_CONF_RFC:
3824 			if (olen != sizeof(rfc))
3825 				break;
3826 			memcpy(&rfc, (void *)val, olen);
3827 			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
3828 			    rfc.mode != chan->mode)
3829 				return -ECONNREFUSED;
3830 			chan->fcs = 0;
3831 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3832 					   (unsigned long) &rfc, endptr - ptr);
3833 			break;
3834 
3835 		case L2CAP_CONF_EWS:
3836 			if (olen != 2)
3837 				break;
3838 			chan->ack_win = min_t(u16, val, chan->ack_win);
3839 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
3840 					   chan->tx_win, endptr - ptr);
3841 			break;
3842 
3843 		case L2CAP_CONF_EFS:
3844 			if (olen != sizeof(efs))
3845 				break;
3846 			memcpy(&efs, (void *)val, olen);
3847 			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3848 			    efs.stype != L2CAP_SERV_NOTRAFIC &&
3849 			    efs.stype != chan->local_stype)
3850 				return -ECONNREFUSED;
3851 			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3852 					   (unsigned long) &efs, endptr - ptr);
3853 			break;
3854 
3855 		case L2CAP_CONF_FCS:
3856 			if (olen != 1)
3857 				break;
3858 			if (*result == L2CAP_CONF_PENDING)
3859 				if (val == L2CAP_FCS_NONE)
3860 					set_bit(CONF_RECV_NO_FCS,
3861 						&chan->conf_state);
3862 			break;
3863 		}
3864 	}
3865 
3866 	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
3867 		return -ECONNREFUSED;
3868 
3869 	chan->mode = rfc.mode;
3870 
3871 	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
3872 		switch (rfc.mode) {
3873 		case L2CAP_MODE_ERTM:
3874 			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3875 			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3876 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3877 			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3878 				chan->ack_win = min_t(u16, chan->ack_win,
3879 						      rfc.txwin_size);
3880 
3881 			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3882 				chan->local_msdu = le16_to_cpu(efs.msdu);
3883 				chan->local_sdu_itime =
3884 					le32_to_cpu(efs.sdu_itime);
3885 				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3886 				chan->local_flush_to =
3887 					le32_to_cpu(efs.flush_to);
3888 			}
3889 			break;
3890 
3891 		case L2CAP_MODE_STREAMING:
3892 			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
3893 		}
3894 	}
3895 
3896 	req->dcid   = cpu_to_le16(chan->dcid);
3897 	req->flags  = cpu_to_le16(0);
3898 
3899 	return ptr - data;
3900 }
3901 
3902 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3903 				u16 result, u16 flags)
3904 {
3905 	struct l2cap_conf_rsp *rsp = data;
3906 	void *ptr = rsp->data;
3907 
3908 	BT_DBG("chan %p", chan);
3909 
3910 	rsp->scid   = cpu_to_le16(chan->dcid);
3911 	rsp->result = cpu_to_le16(result);
3912 	rsp->flags  = cpu_to_le16(flags);
3913 
3914 	return ptr - data;
3915 }
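/* Illustrative sketch, not part of the original source: one way a handler
 * could combine l2cap_build_conf_rsp() with l2cap_send_cmd() to acknowledge
 * a configuration request with no options, mirroring what l2cap_config_req()
 * does further below.  The function name and the fixed 64-byte buffer are
 * assumptions made for this example only.
 */
static void __maybe_unused l2cap_example_send_conf_success(struct l2cap_conn *conn,
							    struct l2cap_chan *chan,
							    u8 ident)
{
	u8 rsp[64];
	int len;

	/* Build an option-less response carrying result SUCCESS and flags 0 */
	len = l2cap_build_conf_rsp(chan, rsp, L2CAP_CONF_SUCCESS, 0);

	/* Echo the ident of the incoming request on the signalling channel */
	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP, len, rsp);
}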
3916 
3917 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3918 {
3919 	struct l2cap_le_conn_rsp rsp;
3920 	struct l2cap_conn *conn = chan->conn;
3921 
3922 	BT_DBG("chan %p", chan);
3923 
3924 	rsp.dcid    = cpu_to_le16(chan->scid);
3925 	rsp.mtu     = cpu_to_le16(chan->imtu);
3926 	rsp.mps     = cpu_to_le16(chan->mps);
3927 	rsp.credits = cpu_to_le16(chan->rx_credits);
3928 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3929 
3930 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3931 		       &rsp);
3932 }
3933 
3934 static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
3935 {
3936 	int *result = data;
3937 
3938 	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3939 		return;
3940 
3941 	switch (chan->state) {
3942 	case BT_CONNECT2:
3943 		/* If channel still pending accept add to result */
3944 		(*result)++;
3945 		return;
3946 	case BT_CONNECTED:
3947 		return;
3948 	default:
3949 		/* If not connected or pending accept it has been refused */
3950 		*result = -ECONNREFUSED;
3951 		return;
3952 	}
3953 }
3954 
3955 struct l2cap_ecred_rsp_data {
3956 	struct {
3957 		struct l2cap_ecred_conn_rsp rsp;
3958 		__le16 scid[L2CAP_ECRED_MAX_CID];
3959 	} __packed pdu;
3960 	int count;
3961 };
3962 
3963 static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
3964 {
3965 	struct l2cap_ecred_rsp_data *rsp = data;
3966 
3967 	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3968 		return;
3969 
3970 	/* Reset ident so only one response is sent */
3971 	chan->ident = 0;
3972 
3973 	/* Include all channels pending with the same ident */
3974 	if (!rsp->pdu.rsp.result)
3975 		rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
3976 	else
3977 		l2cap_chan_del(chan, ECONNRESET);
3978 }
3979 
3980 void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
3981 {
3982 	struct l2cap_conn *conn = chan->conn;
3983 	struct l2cap_ecred_rsp_data data;
3984 	u16 id = chan->ident;
3985 	int result = 0;
3986 
3987 	if (!id)
3988 		return;
3989 
3990 	BT_DBG("chan %p id %d", chan, id);
3991 
3992 	memset(&data, 0, sizeof(data));
3993 
3994 	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
3995 	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
3996 	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
3997 	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3998 
3999 	/* Verify that all channels are ready */
4000 	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);
4001 
4002 	if (result > 0)
4003 		return;
4004 
4005 	if (result < 0)
4006 		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);
4007 
4008 	/* Build response */
4009 	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);
4010 
4011 	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
4012 		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
4013 		       &data.pdu);
4014 }
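/* Illustrative note, not part of the original source: with the layout of
 * struct l2cap_ecred_rsp_data above, a deferred response that accepts two
 * channels becomes a single L2CAP_ECRED_CONN_RSP PDU carrying mtu, mps,
 * credits and result followed by two destination CIDs, which is why the
 * length passed to l2cap_send_cmd() is
 * sizeof(data.pdu.rsp) + data.count * sizeof(__le16) with data.count == 2.
 */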
4015 
4016 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
4017 {
4018 	struct l2cap_conn_rsp rsp;
4019 	struct l2cap_conn *conn = chan->conn;
4020 	u8 buf[128];
4021 	u8 rsp_code;
4022 
4023 	rsp.scid   = cpu_to_le16(chan->dcid);
4024 	rsp.dcid   = cpu_to_le16(chan->scid);
4025 	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4026 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4027 
4028 	if (chan->hs_hcon)
4029 		rsp_code = L2CAP_CREATE_CHAN_RSP;
4030 	else
4031 		rsp_code = L2CAP_CONN_RSP;
4032 
4033 	BT_DBG("chan %p rsp_code %u", chan, rsp_code);
4034 
4035 	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
4036 
4037 	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4038 		return;
4039 
4040 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4041 		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4042 	chan->num_conf_req++;
4043 }
4044 
4045 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
4046 {
4047 	int type, olen;
4048 	unsigned long val;
4049 	/* Use sane default values in case a misbehaving remote device
4050 	 * did not send an RFC or extended window size option.
4051 	 */
4052 	u16 txwin_ext = chan->ack_win;
4053 	struct l2cap_conf_rfc rfc = {
4054 		.mode = chan->mode,
4055 		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
4056 		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
4057 		.max_pdu_size = cpu_to_le16(chan->imtu),
4058 		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
4059 	};
4060 
4061 	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
4062 
4063 	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
4064 		return;
4065 
4066 	while (len >= L2CAP_CONF_OPT_SIZE) {
4067 		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
4068 		if (len < 0)
4069 			break;
4070 
4071 		switch (type) {
4072 		case L2CAP_CONF_RFC:
4073 			if (olen != sizeof(rfc))
4074 				break;
4075 			memcpy(&rfc, (void *)val, olen);
4076 			break;
4077 		case L2CAP_CONF_EWS:
4078 			if (olen != 2)
4079 				break;
4080 			txwin_ext = val;
4081 			break;
4082 		}
4083 	}
4084 
4085 	switch (rfc.mode) {
4086 	case L2CAP_MODE_ERTM:
4087 		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
4088 		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
4089 		chan->mps = le16_to_cpu(rfc.max_pdu_size);
4090 		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
4091 			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
4092 		else
4093 			chan->ack_win = min_t(u16, chan->ack_win,
4094 					      rfc.txwin_size);
4095 		break;
4096 	case L2CAP_MODE_STREAMING:
4097 		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
4098 	}
4099 }
4100 
4101 static inline int l2cap_command_rej(struct l2cap_conn *conn,
4102 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4103 				    u8 *data)
4104 {
4105 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
4106 
4107 	if (cmd_len < sizeof(*rej))
4108 		return -EPROTO;
4109 
4110 	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
4111 		return 0;
4112 
4113 	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4114 	    cmd->ident == conn->info_ident) {
4115 		cancel_delayed_work(&conn->info_timer);
4116 
4117 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4118 		conn->info_ident = 0;
4119 
4120 		l2cap_conn_start(conn);
4121 	}
4122 
4123 	return 0;
4124 }
4125 
4126 static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
4127 					struct l2cap_cmd_hdr *cmd,
4128 					u8 *data, u8 rsp_code, u8 amp_id)
4129 {
4130 	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4131 	struct l2cap_conn_rsp rsp;
4132 	struct l2cap_chan *chan = NULL, *pchan;
4133 	int result, status = L2CAP_CS_NO_INFO;
4134 
4135 	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
4136 	__le16 psm = req->psm;
4137 
4138 	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
4139 
4140 	/* Check if we have socket listening on psm */
4141 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
4142 					 &conn->hcon->dst, ACL_LINK);
4143 	if (!pchan) {
4144 		result = L2CAP_CR_BAD_PSM;
4145 		goto sendresp;
4146 	}
4147 
4148 	mutex_lock(&conn->chan_lock);
4149 	l2cap_chan_lock(pchan);
4150 
4151 	/* Check if the ACL is secure enough (if not SDP) */
4152 	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
4153 	    !hci_conn_check_link_mode(conn->hcon)) {
4154 		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
4155 		result = L2CAP_CR_SEC_BLOCK;
4156 		goto response;
4157 	}
4158 
4159 	result = L2CAP_CR_NO_MEM;
4160 
4161 	/* Check for valid dynamic CID range (as per Erratum 3253) */
4162 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
4163 		result = L2CAP_CR_INVALID_SCID;
4164 		goto response;
4165 	}
4166 
4167 	/* Check if we already have channel with that dcid */
4168 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
4169 		result = L2CAP_CR_SCID_IN_USE;
4170 		goto response;
4171 	}
4172 
4173 	chan = pchan->ops->new_connection(pchan);
4174 	if (!chan)
4175 		goto response;
4176 
4177 	/* For certain devices (ex: HID mouse), support for authentication,
4178 	 * pairing and bonding is optional. For such devices, in order to avoid
4179 	 * keeping the ACL alive for too long after L2CAP disconnection, reset the ACL
4180 	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
4181 	 */
4182 	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4183 
4184 	bacpy(&chan->src, &conn->hcon->src);
4185 	bacpy(&chan->dst, &conn->hcon->dst);
4186 	chan->src_type = bdaddr_src_type(conn->hcon);
4187 	chan->dst_type = bdaddr_dst_type(conn->hcon);
4188 	chan->psm  = psm;
4189 	chan->dcid = scid;
4190 	chan->local_amp_id = amp_id;
4191 
4192 	__l2cap_chan_add(conn, chan);
4193 
4194 	dcid = chan->scid;
4195 
4196 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
4197 
4198 	chan->ident = cmd->ident;
4199 
4200 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
4201 		if (l2cap_chan_check_security(chan, false)) {
4202 			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
4203 				l2cap_state_change(chan, BT_CONNECT2);
4204 				result = L2CAP_CR_PEND;
4205 				status = L2CAP_CS_AUTHOR_PEND;
4206 				chan->ops->defer(chan);
4207 			} else {
4208 				/* Force pending result for AMP controllers.
4209 				 * The connection will succeed after the
4210 				 * physical link is up.
4211 				 */
4212 				if (amp_id == AMP_ID_BREDR) {
4213 					l2cap_state_change(chan, BT_CONFIG);
4214 					result = L2CAP_CR_SUCCESS;
4215 				} else {
4216 					l2cap_state_change(chan, BT_CONNECT2);
4217 					result = L2CAP_CR_PEND;
4218 				}
4219 				status = L2CAP_CS_NO_INFO;
4220 			}
4221 		} else {
4222 			l2cap_state_change(chan, BT_CONNECT2);
4223 			result = L2CAP_CR_PEND;
4224 			status = L2CAP_CS_AUTHEN_PEND;
4225 		}
4226 	} else {
4227 		l2cap_state_change(chan, BT_CONNECT2);
4228 		result = L2CAP_CR_PEND;
4229 		status = L2CAP_CS_NO_INFO;
4230 	}
4231 
4232 response:
4233 	l2cap_chan_unlock(pchan);
4234 	mutex_unlock(&conn->chan_lock);
4235 	l2cap_chan_put(pchan);
4236 
4237 sendresp:
4238 	rsp.scid   = cpu_to_le16(scid);
4239 	rsp.dcid   = cpu_to_le16(dcid);
4240 	rsp.result = cpu_to_le16(result);
4241 	rsp.status = cpu_to_le16(status);
4242 	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
4243 
4244 	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
4245 		struct l2cap_info_req info;
4246 		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4247 
4248 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4249 		conn->info_ident = l2cap_get_ident(conn);
4250 
4251 		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
4252 
4253 		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
4254 			       sizeof(info), &info);
4255 	}
4256 
4257 	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
4258 	    result == L2CAP_CR_SUCCESS) {
4259 		u8 buf[128];
4260 		set_bit(CONF_REQ_SENT, &chan->conf_state);
4261 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4262 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4263 		chan->num_conf_req++;
4264 	}
4265 
4266 	return chan;
4267 }
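/* Illustrative summary, not part of the original source, of the result/status
 * pairs l2cap_connect() above can place in the connection response:
 *
 *   L2CAP_CR_BAD_PSM                     - no listener on the requested PSM
 *   L2CAP_CR_SEC_BLOCK                   - ACL not secure enough for the PSM
 *   L2CAP_CR_INVALID_SCID / SCID_IN_USE  - bad or already-used source CID
 *   L2CAP_CR_NO_MEM                      - channel allocation failed
 *   L2CAP_CR_PEND + L2CAP_CS_AUTHOR_PEND - waiting for deferred accept
 *   L2CAP_CR_PEND + L2CAP_CS_AUTHEN_PEND - waiting for authentication
 *   L2CAP_CR_PEND + L2CAP_CS_NO_INFO     - waiting for the feature-mask exchange
 *   L2CAP_CR_SUCCESS + L2CAP_CS_NO_INFO  - accepted (AMP_ID_BREDR path)
 */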
4268 
4269 static int l2cap_connect_req(struct l2cap_conn *conn,
4270 			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
4271 {
4272 	struct hci_dev *hdev = conn->hcon->hdev;
4273 	struct hci_conn *hcon = conn->hcon;
4274 
4275 	if (cmd_len < sizeof(struct l2cap_conn_req))
4276 		return -EPROTO;
4277 
4278 	hci_dev_lock(hdev);
4279 	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
4280 	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
4281 		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
4282 	hci_dev_unlock(hdev);
4283 
4284 	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
4285 	return 0;
4286 }
4287 
4288 static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
4289 				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4290 				    u8 *data)
4291 {
4292 	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
4293 	u16 scid, dcid, result, status;
4294 	struct l2cap_chan *chan;
4295 	u8 req[128];
4296 	int err;
4297 
4298 	if (cmd_len < sizeof(*rsp))
4299 		return -EPROTO;
4300 
4301 	scid   = __le16_to_cpu(rsp->scid);
4302 	dcid   = __le16_to_cpu(rsp->dcid);
4303 	result = __le16_to_cpu(rsp->result);
4304 	status = __le16_to_cpu(rsp->status);
4305 
4306 	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
4307 					   dcid > L2CAP_CID_DYN_END))
4308 		return -EPROTO;
4309 
4310 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
4311 	       dcid, scid, result, status);
4312 
4313 	mutex_lock(&conn->chan_lock);
4314 
4315 	if (scid) {
4316 		chan = __l2cap_get_chan_by_scid(conn, scid);
4317 		if (!chan) {
4318 			err = -EBADSLT;
4319 			goto unlock;
4320 		}
4321 	} else {
4322 		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4323 		if (!chan) {
4324 			err = -EBADSLT;
4325 			goto unlock;
4326 		}
4327 	}
4328 
4329 	chan = l2cap_chan_hold_unless_zero(chan);
4330 	if (!chan) {
4331 		err = -EBADSLT;
4332 		goto unlock;
4333 	}
4334 
4335 	err = 0;
4336 
4337 	l2cap_chan_lock(chan);
4338 
4339 	switch (result) {
4340 	case L2CAP_CR_SUCCESS:
4341 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
4342 			err = -EBADSLT;
4343 			break;
4344 		}
4345 
4346 		l2cap_state_change(chan, BT_CONFIG);
4347 		chan->ident = 0;
4348 		chan->dcid = dcid;
4349 		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
4350 
4351 		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
4352 			break;
4353 
4354 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4355 			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
4356 		chan->num_conf_req++;
4357 		break;
4358 
4359 	case L2CAP_CR_PEND:
4360 		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4361 		break;
4362 
4363 	default:
4364 		l2cap_chan_del(chan, ECONNREFUSED);
4365 		break;
4366 	}
4367 
4368 	l2cap_chan_unlock(chan);
4369 	l2cap_chan_put(chan);
4370 
4371 unlock:
4372 	mutex_unlock(&conn->chan_lock);
4373 
4374 	return err;
4375 }
4376 
4377 static inline void set_default_fcs(struct l2cap_chan *chan)
4378 {
4379 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4380 	 * sides request it.
4381 	 */
4382 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4383 		chan->fcs = L2CAP_FCS_NONE;
4384 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4385 		chan->fcs = L2CAP_FCS_CRC16;
4386 }
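/* Illustrative summary, not part of the original source, of what
 * set_default_fcs() above leaves in chan->fcs:
 *
 *   - any mode other than ERTM or streaming: L2CAP_FCS_NONE
 *   - ERTM or streaming, remote did not signal L2CAP_FCS_NONE
 *     (CONF_RECV_NO_FCS clear): L2CAP_FCS_CRC16
 *   - ERTM or streaming, remote signalled L2CAP_FCS_NONE: the value already
 *     negotiated during configuration is left untouched
 */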
4387 
4388 static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4389 				    u8 ident, u16 flags)
4390 {
4391 	struct l2cap_conn *conn = chan->conn;
4392 
4393 	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4394 	       flags);
4395 
4396 	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4397 	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4398 
4399 	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4400 		       l2cap_build_conf_rsp(chan, data,
4401 					    L2CAP_CONF_SUCCESS, flags), data);
4402 }
4403 
4404 static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4405 				   u16 scid, u16 dcid)
4406 {
4407 	struct l2cap_cmd_rej_cid rej;
4408 
4409 	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
4410 	rej.scid = __cpu_to_le16(scid);
4411 	rej.dcid = __cpu_to_le16(dcid);
4412 
4413 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4414 }
4415 
4416 static inline int l2cap_config_req(struct l2cap_conn *conn,
4417 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4418 				   u8 *data)
4419 {
4420 	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4421 	u16 dcid, flags;
4422 	u8 rsp[64];
4423 	struct l2cap_chan *chan;
4424 	int len, err = 0;
4425 
4426 	if (cmd_len < sizeof(*req))
4427 		return -EPROTO;
4428 
4429 	dcid  = __le16_to_cpu(req->dcid);
4430 	flags = __le16_to_cpu(req->flags);
4431 
4432 	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4433 
4434 	chan = l2cap_get_chan_by_scid(conn, dcid);
4435 	if (!chan) {
4436 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4437 		return 0;
4438 	}
4439 
4440 	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
4441 	    chan->state != BT_CONNECTED) {
4442 		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4443 				       chan->dcid);
4444 		goto unlock;
4445 	}
4446 
4447 	/* Reject if config buffer is too small. */
4448 	len = cmd_len - sizeof(*req);
4449 	if (chan->conf_len + len > sizeof(chan->conf_req)) {
4450 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4451 			       l2cap_build_conf_rsp(chan, rsp,
4452 			       L2CAP_CONF_REJECT, flags), rsp);
4453 		goto unlock;
4454 	}
4455 
4456 	/* Store config. */
4457 	memcpy(chan->conf_req + chan->conf_len, req->data, len);
4458 	chan->conf_len += len;
4459 
4460 	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
4461 		/* Incomplete config. Send empty response. */
4462 		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
4463 			       l2cap_build_conf_rsp(chan, rsp,
4464 			       L2CAP_CONF_SUCCESS, flags), rsp);
4465 		goto unlock;
4466 	}
4467 
4468 	/* Complete config. */
4469 	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
4470 	if (len < 0) {
4471 		l2cap_send_disconn_req(chan, ECONNRESET);
4472 		goto unlock;
4473 	}
4474 
4475 	chan->ident = cmd->ident;
4476 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
4477 	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
4478 		chan->num_conf_rsp++;
4479 
4480 	/* Reset config buffer. */
4481 	chan->conf_len = 0;
4482 
4483 	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
4484 		goto unlock;
4485 
4486 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
4487 		set_default_fcs(chan);
4488 
4489 		if (chan->mode == L2CAP_MODE_ERTM ||
4490 		    chan->mode == L2CAP_MODE_STREAMING)
4491 			err = l2cap_ertm_init(chan);
4492 
4493 		if (err < 0)
4494 			l2cap_send_disconn_req(chan, -err);
4495 		else
4496 			l2cap_chan_ready(chan);
4497 
4498 		goto unlock;
4499 	}
4500 
4501 	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
4502 		u8 buf[64];
4503 		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
4504 			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
4505 		chan->num_conf_req++;
4506 	}
4507 
4508 	/* Got Conf Rsp PENDING from the remote side; assume we sent
4509 	   Conf Rsp PENDING in the code above */
4510 	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
4511 	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4512 
4513 		/* check compatibility */
4514 
4515 		/* Send rsp for BR/EDR channel */
4516 		if (!chan->hs_hcon)
4517 			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4518 		else
4519 			chan->ident = cmd->ident;
4520 	}
4521 
4522 unlock:
4523 	l2cap_chan_unlock(chan);
4524 	l2cap_chan_put(chan);
4525 	return err;
4526 }
4527 
4528 static inline int l2cap_config_rsp(struct l2cap_conn *conn,
4529 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4530 				   u8 *data)
4531 {
4532 	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4533 	u16 scid, flags, result;
4534 	struct l2cap_chan *chan;
4535 	int len = cmd_len - sizeof(*rsp);
4536 	int err = 0;
4537 
4538 	if (cmd_len < sizeof(*rsp))
4539 		return -EPROTO;
4540 
4541 	scid   = __le16_to_cpu(rsp->scid);
4542 	flags  = __le16_to_cpu(rsp->flags);
4543 	result = __le16_to_cpu(rsp->result);
4544 
4545 	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4546 	       result, len);
4547 
4548 	chan = l2cap_get_chan_by_scid(conn, scid);
4549 	if (!chan)
4550 		return 0;
4551 
4552 	switch (result) {
4553 	case L2CAP_CONF_SUCCESS:
4554 		l2cap_conf_rfc_get(chan, rsp->data, len);
4555 		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4556 		break;
4557 
4558 	case L2CAP_CONF_PENDING:
4559 		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4560 
4561 		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4562 			char buf[64];
4563 
4564 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4565 						   buf, sizeof(buf), &result);
4566 			if (len < 0) {
4567 				l2cap_send_disconn_req(chan, ECONNRESET);
4568 				goto done;
4569 			}
4570 
4571 			if (!chan->hs_hcon) {
4572 				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4573 							0);
4574 			} else {
4575 				if (l2cap_check_efs(chan)) {
4576 					amp_create_logical_link(chan);
4577 					chan->ident = cmd->ident;
4578 				}
4579 			}
4580 		}
4581 		goto done;
4582 
4583 	case L2CAP_CONF_UNACCEPT:
4584 		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
4585 			char req[64];
4586 
4587 			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
4588 				l2cap_send_disconn_req(chan, ECONNRESET);
4589 				goto done;
4590 			}
4591 
4592 			/* throw out any old stored conf requests */
4593 			result = L2CAP_CONF_SUCCESS;
4594 			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
4595 						   req, sizeof(req), &result);
4596 			if (len < 0) {
4597 				l2cap_send_disconn_req(chan, ECONNRESET);
4598 				goto done;
4599 			}
4600 
4601 			l2cap_send_cmd(conn, l2cap_get_ident(conn),
4602 				       L2CAP_CONF_REQ, len, req);
4603 			chan->num_conf_req++;
4604 			if (result != L2CAP_CONF_SUCCESS)
4605 				goto done;
4606 			break;
4607 		}
4608 		fallthrough;
4609 
4610 	default:
4611 		l2cap_chan_set_err(chan, ECONNRESET);
4612 
4613 		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
4614 		l2cap_send_disconn_req(chan, ECONNRESET);
4615 		goto done;
4616 	}
4617 
4618 	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
4619 		goto done;
4620 
4621 	set_bit(CONF_INPUT_DONE, &chan->conf_state);
4622 
4623 	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
4624 		set_default_fcs(chan);
4625 
4626 		if (chan->mode == L2CAP_MODE_ERTM ||
4627 		    chan->mode == L2CAP_MODE_STREAMING)
4628 			err = l2cap_ertm_init(chan);
4629 
4630 		if (err < 0)
4631 			l2cap_send_disconn_req(chan, -err);
4632 		else
4633 			l2cap_chan_ready(chan);
4634 	}
4635 
4636 done:
4637 	l2cap_chan_unlock(chan);
4638 	l2cap_chan_put(chan);
4639 	return err;
4640 }
4641 
4642 static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
4643 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4644 				       u8 *data)
4645 {
4646 	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4647 	struct l2cap_disconn_rsp rsp;
4648 	u16 dcid, scid;
4649 	struct l2cap_chan *chan;
4650 
4651 	if (cmd_len != sizeof(*req))
4652 		return -EPROTO;
4653 
4654 	scid = __le16_to_cpu(req->scid);
4655 	dcid = __le16_to_cpu(req->dcid);
4656 
4657 	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4658 
4659 	chan = l2cap_get_chan_by_scid(conn, dcid);
4660 	if (!chan) {
4661 		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4662 		return 0;
4663 	}
4664 
4665 	rsp.dcid = cpu_to_le16(chan->scid);
4666 	rsp.scid = cpu_to_le16(chan->dcid);
4667 	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4668 
4669 	chan->ops->set_shutdown(chan);
4670 
4671 	l2cap_chan_unlock(chan);
4672 	mutex_lock(&conn->chan_lock);
4673 	l2cap_chan_lock(chan);
4674 	l2cap_chan_del(chan, ECONNRESET);
4675 	mutex_unlock(&conn->chan_lock);
4676 
4677 	chan->ops->close(chan);
4678 
4679 	l2cap_chan_unlock(chan);
4680 	l2cap_chan_put(chan);
4681 
4682 	return 0;
4683 }
4684 
4685 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
4686 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4687 				       u8 *data)
4688 {
4689 	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4690 	u16 dcid, scid;
4691 	struct l2cap_chan *chan;
4692 
4693 	if (cmd_len != sizeof(*rsp))
4694 		return -EPROTO;
4695 
4696 	scid = __le16_to_cpu(rsp->scid);
4697 	dcid = __le16_to_cpu(rsp->dcid);
4698 
4699 	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4700 
4701 	chan = l2cap_get_chan_by_scid(conn, scid);
4702 	if (!chan) {
4703 		return 0;
4704 	}
4705 
4706 	if (chan->state != BT_DISCONN) {
4707 		l2cap_chan_unlock(chan);
4708 		l2cap_chan_put(chan);
4709 		return 0;
4710 	}
4711 
4712 	l2cap_chan_unlock(chan);
4713 	mutex_lock(&conn->chan_lock);
4714 	l2cap_chan_lock(chan);
4715 	l2cap_chan_del(chan, 0);
4716 	mutex_unlock(&conn->chan_lock);
4717 
4718 	chan->ops->close(chan);
4719 
4720 	l2cap_chan_unlock(chan);
4721 	l2cap_chan_put(chan);
4722 
4723 	return 0;
4724 }
4725 
4726 static inline int l2cap_information_req(struct l2cap_conn *conn,
4727 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4728 					u8 *data)
4729 {
4730 	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
4731 	u16 type;
4732 
4733 	if (cmd_len != sizeof(*req))
4734 		return -EPROTO;
4735 
4736 	type = __le16_to_cpu(req->type);
4737 
4738 	BT_DBG("type 0x%4.4x", type);
4739 
4740 	if (type == L2CAP_IT_FEAT_MASK) {
4741 		u8 buf[8];
4742 		u32 feat_mask = l2cap_feat_mask;
4743 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4744 		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4745 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4746 		if (!disable_ertm)
4747 			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
4748 				| L2CAP_FEAT_FCS;
4749 		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
4750 			feat_mask |= L2CAP_FEAT_EXT_FLOW
4751 				| L2CAP_FEAT_EXT_WINDOW;
4752 
4753 		put_unaligned_le32(feat_mask, rsp->data);
4754 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4755 			       buf);
4756 	} else if (type == L2CAP_IT_FIXED_CHAN) {
4757 		u8 buf[12];
4758 		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4759 
4760 		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4761 		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4762 		rsp->data[0] = conn->local_fixed_chan;
4763 		memset(rsp->data + 1, 0, 7);
4764 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4765 			       buf);
4766 	} else {
4767 		struct l2cap_info_rsp rsp;
4768 		rsp.type   = cpu_to_le16(type);
4769 		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4770 		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4771 			       &rsp);
4772 	}
4773 
4774 	return 0;
4775 }
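/* Illustrative note, not part of the original source: for an
 * L2CAP_IT_FEAT_MASK request the 8-byte buffer above is exactly one
 * l2cap_info_rsp header (type and result, 2 octets each) followed by the
 * 32-bit feature mask written with put_unaligned_le32(); e.g. with ERTM
 * enabled the mask carries L2CAP_FEAT_ERTM, L2CAP_FEAT_STREAMING and
 * L2CAP_FEAT_FCS on top of the fixed-channel and UCD bits set at the top of
 * this file.
 */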
4776 
4777 static inline int l2cap_information_rsp(struct l2cap_conn *conn,
4778 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4779 					u8 *data)
4780 {
4781 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4782 	u16 type, result;
4783 
4784 	if (cmd_len < sizeof(*rsp))
4785 		return -EPROTO;
4786 
4787 	type   = __le16_to_cpu(rsp->type);
4788 	result = __le16_to_cpu(rsp->result);
4789 
4790 	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4791 
4792 	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
4793 	if (cmd->ident != conn->info_ident ||
4794 	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
4795 		return 0;
4796 
4797 	cancel_delayed_work(&conn->info_timer);
4798 
4799 	if (result != L2CAP_IR_SUCCESS) {
4800 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4801 		conn->info_ident = 0;
4802 
4803 		l2cap_conn_start(conn);
4804 
4805 		return 0;
4806 	}
4807 
4808 	switch (type) {
4809 	case L2CAP_IT_FEAT_MASK:
4810 		conn->feat_mask = get_unaligned_le32(rsp->data);
4811 
4812 		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
4813 			struct l2cap_info_req req;
4814 			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4815 
4816 			conn->info_ident = l2cap_get_ident(conn);
4817 
4818 			l2cap_send_cmd(conn, conn->info_ident,
4819 				       L2CAP_INFO_REQ, sizeof(req), &req);
4820 		} else {
4821 			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4822 			conn->info_ident = 0;
4823 
4824 			l2cap_conn_start(conn);
4825 		}
4826 		break;
4827 
4828 	case L2CAP_IT_FIXED_CHAN:
4829 		conn->remote_fixed_chan = rsp->data[0];
4830 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4831 		conn->info_ident = 0;
4832 
4833 		l2cap_conn_start(conn);
4834 		break;
4835 	}
4836 
4837 	return 0;
4838 }
4839 
4840 static int l2cap_create_channel_req(struct l2cap_conn *conn,
4841 				    struct l2cap_cmd_hdr *cmd,
4842 				    u16 cmd_len, void *data)
4843 {
4844 	struct l2cap_create_chan_req *req = data;
4845 	struct l2cap_create_chan_rsp rsp;
4846 	struct l2cap_chan *chan;
4847 	struct hci_dev *hdev;
4848 	u16 psm, scid;
4849 
4850 	if (cmd_len != sizeof(*req))
4851 		return -EPROTO;
4852 
4853 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
4854 		return -EINVAL;
4855 
4856 	psm = le16_to_cpu(req->psm);
4857 	scid = le16_to_cpu(req->scid);
4858 
4859 	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
4860 
4861 	/* For controller id 0 make BR/EDR connection */
4862 	if (req->amp_id == AMP_ID_BREDR) {
4863 		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4864 			      req->amp_id);
4865 		return 0;
4866 	}
4867 
4868 	/* Validate AMP controller id */
4869 	hdev = hci_dev_get(req->amp_id);
4870 	if (!hdev)
4871 		goto error;
4872 
4873 	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
4874 		hci_dev_put(hdev);
4875 		goto error;
4876 	}
4877 
4878 	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4879 			     req->amp_id);
4880 	if (chan) {
4881 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
4882 		struct hci_conn *hs_hcon;
4883 
4884 		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4885 						  &conn->hcon->dst);
4886 		if (!hs_hcon) {
4887 			hci_dev_put(hdev);
4888 			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4889 					       chan->dcid);
4890 			return 0;
4891 		}
4892 
4893 		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4894 
4895 		mgr->bredr_chan = chan;
4896 		chan->hs_hcon = hs_hcon;
4897 		chan->fcs = L2CAP_FCS_NONE;
4898 		conn->mtu = hdev->block_mtu;
4899 	}
4900 
4901 	hci_dev_put(hdev);
4902 
4903 	return 0;
4904 
4905 error:
4906 	rsp.dcid = 0;
4907 	rsp.scid = cpu_to_le16(scid);
4908 	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4909 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4910 
4911 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4912 		       sizeof(rsp), &rsp);
4913 
4914 	return 0;
4915 }
4916 
4917 static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4918 {
4919 	struct l2cap_move_chan_req req;
4920 	u8 ident;
4921 
4922 	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4923 
4924 	ident = l2cap_get_ident(chan->conn);
4925 	chan->ident = ident;
4926 
4927 	req.icid = cpu_to_le16(chan->scid);
4928 	req.dest_amp_id = dest_amp_id;
4929 
4930 	l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4931 		       &req);
4932 
4933 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4934 }
4935 
4936 static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
4937 {
4938 	struct l2cap_move_chan_rsp rsp;
4939 
4940 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4941 
4942 	rsp.icid = cpu_to_le16(chan->dcid);
4943 	rsp.result = cpu_to_le16(result);
4944 
4945 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4946 		       sizeof(rsp), &rsp);
4947 }
4948 
4949 static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
4950 {
4951 	struct l2cap_move_chan_cfm cfm;
4952 
4953 	BT_DBG("chan %p, result 0x%4.4x", chan, result);
4954 
4955 	chan->ident = l2cap_get_ident(chan->conn);
4956 
4957 	cfm.icid = cpu_to_le16(chan->scid);
4958 	cfm.result = cpu_to_le16(result);
4959 
4960 	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4961 		       sizeof(cfm), &cfm);
4962 
4963 	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4964 }
4965 
4966 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4967 {
4968 	struct l2cap_move_chan_cfm cfm;
4969 
4970 	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4971 
4972 	cfm.icid = cpu_to_le16(icid);
4973 	cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
4974 
4975 	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4976 		       sizeof(cfm), &cfm);
4977 }
4978 
4979 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4980 					 u16 icid)
4981 {
4982 	struct l2cap_move_chan_cfm_rsp rsp;
4983 
4984 	BT_DBG("icid 0x%4.4x", icid);
4985 
4986 	rsp.icid = cpu_to_le16(icid);
4987 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4988 }
4989 
4990 static void __release_logical_link(struct l2cap_chan *chan)
4991 {
4992 	chan->hs_hchan = NULL;
4993 	chan->hs_hcon = NULL;
4994 
4995 	/* Placeholder - release the logical link */
4996 }
4997 
4998 static void l2cap_logical_fail(struct l2cap_chan *chan)
4999 {
5000 	/* Logical link setup failed */
5001 	if (chan->state != BT_CONNECTED) {
5002 		/* Create channel failure, disconnect */
5003 		l2cap_send_disconn_req(chan, ECONNRESET);
5004 		return;
5005 	}
5006 
5007 	switch (chan->move_role) {
5008 	case L2CAP_MOVE_ROLE_RESPONDER:
5009 		l2cap_move_done(chan);
5010 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
5011 		break;
5012 	case L2CAP_MOVE_ROLE_INITIATOR:
5013 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
5014 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
5015 			/* Remote has only sent pending or
5016 			 * success responses, clean up
5017 			 */
5018 			l2cap_move_done(chan);
5019 		}
5020 
5021 		/* Other amp move states imply that the move
5022 		 * has already aborted
5023 		 */
5024 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5025 		break;
5026 	}
5027 }
5028 
5029 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
5030 					struct hci_chan *hchan)
5031 {
5032 	struct l2cap_conf_rsp rsp;
5033 
5034 	chan->hs_hchan = hchan;
5035 	chan->hs_hcon->l2cap_data = chan->conn;
5036 
5037 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
5038 
5039 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
5040 		int err;
5041 
5042 		set_default_fcs(chan);
5043 
5044 		err = l2cap_ertm_init(chan);
5045 		if (err < 0)
5046 			l2cap_send_disconn_req(chan, -err);
5047 		else
5048 			l2cap_chan_ready(chan);
5049 	}
5050 }
5051 
5052 static void l2cap_logical_finish_move(struct l2cap_chan *chan,
5053 				      struct hci_chan *hchan)
5054 {
5055 	chan->hs_hcon = hchan->conn;
5056 	chan->hs_hcon->l2cap_data = chan->conn;
5057 
5058 	BT_DBG("move_state %d", chan->move_state);
5059 
5060 	switch (chan->move_state) {
5061 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5062 		/* Move confirm will be sent after a success
5063 		 * response is received
5064 		 */
5065 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5066 		break;
5067 	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
5068 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5069 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5070 		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5071 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5072 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5073 		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5074 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5075 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5076 		}
5077 		break;
5078 	default:
5079 		/* Move was not in expected state, free the channel */
5080 		__release_logical_link(chan);
5081 
5082 		chan->move_state = L2CAP_MOVE_STABLE;
5083 	}
5084 }
5085 
5086 /* Call with chan locked */
5087 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5088 		       u8 status)
5089 {
5090 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5091 
5092 	if (status) {
5093 		l2cap_logical_fail(chan);
5094 		__release_logical_link(chan);
5095 		return;
5096 	}
5097 
5098 	if (chan->state != BT_CONNECTED) {
5099 		/* Ignore logical link if channel is on BR/EDR */
5100 		if (chan->local_amp_id != AMP_ID_BREDR)
5101 			l2cap_logical_finish_create(chan, hchan);
5102 	} else {
5103 		l2cap_logical_finish_move(chan, hchan);
5104 	}
5105 }
5106 
5107 void l2cap_move_start(struct l2cap_chan *chan)
5108 {
5109 	BT_DBG("chan %p", chan);
5110 
5111 	if (chan->local_amp_id == AMP_ID_BREDR) {
5112 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5113 			return;
5114 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5115 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5116 		/* Placeholder - start physical link setup */
5117 	} else {
5118 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5119 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5120 		chan->move_id = 0;
5121 		l2cap_move_setup(chan);
5122 		l2cap_send_move_chan_req(chan, 0);
5123 	}
5124 }
5125 
5126 static void l2cap_do_create(struct l2cap_chan *chan, int result,
5127 			    u8 local_amp_id, u8 remote_amp_id)
5128 {
5129 	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
5130 	       local_amp_id, remote_amp_id);
5131 
5132 	chan->fcs = L2CAP_FCS_NONE;
5133 
5134 	/* Outgoing channel on AMP */
5135 	if (chan->state == BT_CONNECT) {
5136 		if (result == L2CAP_CR_SUCCESS) {
5137 			chan->local_amp_id = local_amp_id;
5138 			l2cap_send_create_chan_req(chan, remote_amp_id);
5139 		} else {
5140 			/* Revert to BR/EDR connect */
5141 			l2cap_send_conn_req(chan);
5142 		}
5143 
5144 		return;
5145 	}
5146 
5147 	/* Incoming channel on AMP */
5148 	if (__l2cap_no_conn_pending(chan)) {
5149 		struct l2cap_conn_rsp rsp;
5150 		char buf[128];
5151 		rsp.scid = cpu_to_le16(chan->dcid);
5152 		rsp.dcid = cpu_to_le16(chan->scid);
5153 
5154 		if (result == L2CAP_CR_SUCCESS) {
5155 			/* Send successful response */
5156 			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
5157 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5158 		} else {
5159 			/* Send negative response */
5160 			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
5161 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
5162 		}
5163 
5164 		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
5165 			       sizeof(rsp), &rsp);
5166 
5167 		if (result == L2CAP_CR_SUCCESS) {
5168 			l2cap_state_change(chan, BT_CONFIG);
5169 			set_bit(CONF_REQ_SENT, &chan->conf_state);
5170 			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
5171 				       L2CAP_CONF_REQ,
5172 				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
5173 			chan->num_conf_req++;
5174 		}
5175 	}
5176 }
5177 
5178 static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
5179 				   u8 remote_amp_id)
5180 {
5181 	l2cap_move_setup(chan);
5182 	chan->move_id = local_amp_id;
5183 	chan->move_state = L2CAP_MOVE_WAIT_RSP;
5184 
5185 	l2cap_send_move_chan_req(chan, remote_amp_id);
5186 }
5187 
5188 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5189 {
5190 	struct hci_chan *hchan = NULL;
5191 
5192 	/* Placeholder - get hci_chan for logical link */
5193 
5194 	if (hchan) {
5195 		if (hchan->state == BT_CONNECTED) {
5196 			/* Logical link is ready to go */
5197 			chan->hs_hcon = hchan->conn;
5198 			chan->hs_hcon->l2cap_data = chan->conn;
5199 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5200 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5201 
5202 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5203 		} else {
5204 			/* Wait for logical link to be ready */
5205 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5206 		}
5207 	} else {
5208 		/* Logical link not available */
5209 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5210 	}
5211 }
5212 
5213 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5214 {
5215 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5216 		u8 rsp_result;
5217 		if (result == -EINVAL)
5218 			rsp_result = L2CAP_MR_BAD_ID;
5219 		else
5220 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5221 
5222 		l2cap_send_move_chan_rsp(chan, rsp_result);
5223 	}
5224 
5225 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5226 	chan->move_state = L2CAP_MOVE_STABLE;
5227 
5228 	/* Restart data transmission */
5229 	l2cap_ertm_send(chan);
5230 }
5231 
5232 /* Invoke with locked chan */
5233 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5234 {
5235 	u8 local_amp_id = chan->local_amp_id;
5236 	u8 remote_amp_id = chan->remote_amp_id;
5237 
5238 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5239 	       chan, result, local_amp_id, remote_amp_id);
5240 
5241 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5242 		return;
5243 
5244 	if (chan->state != BT_CONNECTED) {
5245 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5246 	} else if (result != L2CAP_MR_SUCCESS) {
5247 		l2cap_do_move_cancel(chan, result);
5248 	} else {
5249 		switch (chan->move_role) {
5250 		case L2CAP_MOVE_ROLE_INITIATOR:
5251 			l2cap_do_move_initiate(chan, local_amp_id,
5252 					       remote_amp_id);
5253 			break;
5254 		case L2CAP_MOVE_ROLE_RESPONDER:
5255 			l2cap_do_move_respond(chan, result);
5256 			break;
5257 		default:
5258 			l2cap_do_move_cancel(chan, result);
5259 			break;
5260 		}
5261 	}
5262 }
5263 
5264 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
5265 					 struct l2cap_cmd_hdr *cmd,
5266 					 u16 cmd_len, void *data)
5267 {
5268 	struct l2cap_move_chan_req *req = data;
5269 	struct l2cap_move_chan_rsp rsp;
5270 	struct l2cap_chan *chan;
5271 	u16 icid = 0;
5272 	u16 result = L2CAP_MR_NOT_ALLOWED;
5273 
5274 	if (cmd_len != sizeof(*req))
5275 		return -EPROTO;
5276 
5277 	icid = le16_to_cpu(req->icid);
5278 
5279 	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
5280 
5281 	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
5282 		return -EINVAL;
5283 
5284 	chan = l2cap_get_chan_by_dcid(conn, icid);
5285 	if (!chan) {
5286 		rsp.icid = cpu_to_le16(icid);
5287 		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
5288 		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
5289 			       sizeof(rsp), &rsp);
5290 		return 0;
5291 	}
5292 
5293 	chan->ident = cmd->ident;
5294 
5295 	if (chan->scid < L2CAP_CID_DYN_START ||
5296 	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
5297 	    (chan->mode != L2CAP_MODE_ERTM &&
5298 	     chan->mode != L2CAP_MODE_STREAMING)) {
5299 		result = L2CAP_MR_NOT_ALLOWED;
5300 		goto send_move_response;
5301 	}
5302 
5303 	if (chan->local_amp_id == req->dest_amp_id) {
5304 		result = L2CAP_MR_SAME_ID;
5305 		goto send_move_response;
5306 	}
5307 
5308 	if (req->dest_amp_id != AMP_ID_BREDR) {
5309 		struct hci_dev *hdev;
5310 		hdev = hci_dev_get(req->dest_amp_id);
5311 		if (!hdev || hdev->dev_type != HCI_AMP ||
5312 		    !test_bit(HCI_UP, &hdev->flags)) {
5313 			if (hdev)
5314 				hci_dev_put(hdev);
5315 
5316 			result = L2CAP_MR_BAD_ID;
5317 			goto send_move_response;
5318 		}
5319 		hci_dev_put(hdev);
5320 	}
5321 
5322 	/* Detect a move collision.  Only send a collision response
5323 	 * if this side has "lost", otherwise proceed with the move.
5324 	 * The winner has the larger bd_addr.
5325 	 */
5326 	if ((__chan_is_moving(chan) ||
5327 	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
5328 	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
5329 		result = L2CAP_MR_COLLISION;
5330 		goto send_move_response;
5331 	}
5332 
5333 	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5334 	l2cap_move_setup(chan);
5335 	chan->move_id = req->dest_amp_id;
5336 
5337 	if (req->dest_amp_id == AMP_ID_BREDR) {
5338 		/* Moving to BR/EDR */
5339 		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5340 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5341 			result = L2CAP_MR_PEND;
5342 		} else {
5343 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5344 			result = L2CAP_MR_SUCCESS;
5345 		}
5346 	} else {
5347 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5348 		/* Placeholder - uncomment when amp functions are available */
5349 		/*amp_accept_physical(chan, req->dest_amp_id);*/
5350 		result = L2CAP_MR_PEND;
5351 	}
5352 
5353 send_move_response:
5354 	l2cap_send_move_chan_rsp(chan, result);
5355 
5356 	l2cap_chan_unlock(chan);
5357 	l2cap_chan_put(chan);
5358 
5359 	return 0;
5360 }
5361 
5362 static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5363 {
5364 	struct l2cap_chan *chan;
5365 	struct hci_chan *hchan = NULL;
5366 
5367 	chan = l2cap_get_chan_by_scid(conn, icid);
5368 	if (!chan) {
5369 		l2cap_send_move_chan_cfm_icid(conn, icid);
5370 		return;
5371 	}
5372 
5373 	__clear_chan_timer(chan);
5374 	if (result == L2CAP_MR_PEND)
5375 		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5376 
5377 	switch (chan->move_state) {
5378 	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5379 		/* Move confirm will be sent when logical link
5380 		 * is complete.
5381 		 */
5382 		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5383 		break;
5384 	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5385 		if (result == L2CAP_MR_PEND) {
5386 			break;
5387 		} else if (test_bit(CONN_LOCAL_BUSY,
5388 				    &chan->conn_state)) {
5389 			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5390 		} else {
5391 			/* Logical link is up or moving to BR/EDR,
5392 			 * proceed with move
5393 			 */
5394 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5395 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5396 		}
5397 		break;
5398 	case L2CAP_MOVE_WAIT_RSP:
5399 		/* Moving to AMP */
5400 		if (result == L2CAP_MR_SUCCESS) {
5401 			/* Remote is ready, send confirm immediately
5402 			 * after logical link is ready
5403 			 */
5404 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5405 		} else {
5406 			/* Both logical link and move success
5407 			 * are required to confirm
5408 			 */
5409 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5410 		}
5411 
5412 		/* Placeholder - get hci_chan for logical link */
5413 		if (!hchan) {
5414 			/* Logical link not available */
5415 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5416 			break;
5417 		}
5418 
5419 		/* If the logical link is not yet connected, do not
5420 		 * send confirmation.
5421 		 */
5422 		if (hchan->state != BT_CONNECTED)
5423 			break;
5424 
5425 		/* Logical link is already ready to go */
5426 
5427 		chan->hs_hcon = hchan->conn;
5428 		chan->hs_hcon->l2cap_data = chan->conn;
5429 
5430 		if (result == L2CAP_MR_SUCCESS) {
5431 			/* Can confirm now */
5432 			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5433 		} else {
5434 			/* Now only need move success
5435 			 * to confirm
5436 			 */
5437 			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5438 		}
5439 
5440 		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5441 		break;
5442 	default:
5443 		/* Any other amp move state means the move failed. */
5444 		chan->move_id = chan->local_amp_id;
5445 		l2cap_move_done(chan);
5446 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5447 	}
5448 
5449 	l2cap_chan_unlock(chan);
5450 	l2cap_chan_put(chan);
5451 }
5452 
5453 static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5454 			    u16 result)
5455 {
5456 	struct l2cap_chan *chan;
5457 
5458 	chan = l2cap_get_chan_by_ident(conn, ident);
5459 	if (!chan) {
5460 		/* Could not locate channel, icid is best guess */
5461 		l2cap_send_move_chan_cfm_icid(conn, icid);
5462 		return;
5463 	}
5464 
5465 	__clear_chan_timer(chan);
5466 
5467 	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5468 		if (result == L2CAP_MR_COLLISION) {
5469 			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5470 		} else {
5471 			/* Cleanup - cancel move */
5472 			chan->move_id = chan->local_amp_id;
5473 			l2cap_move_done(chan);
5474 		}
5475 	}
5476 
5477 	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5478 
5479 	l2cap_chan_unlock(chan);
5480 	l2cap_chan_put(chan);
5481 }
5482 
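/* Dispatch a Move Channel Response: success or pending continues the
 * move state machine, any other result aborts the move.
 */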
5483 static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5484 				  struct l2cap_cmd_hdr *cmd,
5485 				  u16 cmd_len, void *data)
5486 {
5487 	struct l2cap_move_chan_rsp *rsp = data;
5488 	u16 icid, result;
5489 
5490 	if (cmd_len != sizeof(*rsp))
5491 		return -EPROTO;
5492 
5493 	icid = le16_to_cpu(rsp->icid);
5494 	result = le16_to_cpu(rsp->result);
5495 
5496 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5497 
5498 	if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5499 		l2cap_move_continue(conn, icid, result);
5500 	else
5501 		l2cap_move_fail(conn, cmd->ident, icid, result);
5502 
5503 	return 0;
5504 }
5505 
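/* Handle a Move Channel Confirmation: commit or roll back the
 * controller switch and always answer with a confirmation response,
 * which the spec requires even for an unknown ICID.
 */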
5506 static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5507 				      struct l2cap_cmd_hdr *cmd,
5508 				      u16 cmd_len, void *data)
5509 {
5510 	struct l2cap_move_chan_cfm *cfm = data;
5511 	struct l2cap_chan *chan;
5512 	u16 icid, result;
5513 
5514 	if (cmd_len != sizeof(*cfm))
5515 		return -EPROTO;
5516 
5517 	icid = le16_to_cpu(cfm->icid);
5518 	result = le16_to_cpu(cfm->result);
5519 
5520 	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
5521 
5522 	chan = l2cap_get_chan_by_dcid(conn, icid);
5523 	if (!chan) {
5524 		/* Spec requires a response even if the icid was not found */
5525 		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5526 		return 0;
5527 	}
5528 
5529 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5530 		if (result == L2CAP_MC_CONFIRMED) {
5531 			chan->local_amp_id = chan->move_id;
5532 			if (chan->local_amp_id == AMP_ID_BREDR)
5533 				__release_logical_link(chan);
5534 		} else {
5535 			chan->move_id = chan->local_amp_id;
5536 		}
5537 
5538 		l2cap_move_done(chan);
5539 	}
5540 
5541 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5542 
5543 	l2cap_chan_unlock(chan);
5544 	l2cap_chan_put(chan);
5545 
5546 	return 0;
5547 }
5548 
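/* Handle the Move Channel Confirmation Response: the move is complete,
 * so adopt the new controller and release the old logical link when the
 * channel ends up back on BR/EDR.
 */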
5549 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
5550 						 struct l2cap_cmd_hdr *cmd,
5551 						 u16 cmd_len, void *data)
5552 {
5553 	struct l2cap_move_chan_cfm_rsp *rsp = data;
5554 	struct l2cap_chan *chan;
5555 	u16 icid;
5556 
5557 	if (cmd_len != sizeof(*rsp))
5558 		return -EPROTO;
5559 
5560 	icid = le16_to_cpu(rsp->icid);
5561 
5562 	BT_DBG("icid 0x%4.4x", icid);
5563 
5564 	chan = l2cap_get_chan_by_scid(conn, icid);
5565 	if (!chan)
5566 		return 0;
5567 
5568 	__clear_chan_timer(chan);
5569 
5570 	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5571 		chan->local_amp_id = chan->move_id;
5572 
5573 		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
5574 			__release_logical_link(chan);
5575 
5576 		l2cap_move_done(chan);
5577 	}
5578 
5579 	l2cap_chan_unlock(chan);
5580 	l2cap_chan_put(chan);
5581 
5582 	return 0;
5583 }
5584 
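/* Handle an LE Connection Parameter Update Request; only valid when the
 * local device is the central.  The requested parameters are validated
 * and, if accepted, applied to the link and reported to userspace via
 * mgmt.
 */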
5585 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5586 					      struct l2cap_cmd_hdr *cmd,
5587 					      u16 cmd_len, u8 *data)
5588 {
5589 	struct hci_conn *hcon = conn->hcon;
5590 	struct l2cap_conn_param_update_req *req;
5591 	struct l2cap_conn_param_update_rsp rsp;
5592 	u16 min, max, latency, to_multiplier;
5593 	int err;
5594 
5595 	if (hcon->role != HCI_ROLE_MASTER)
5596 		return -EINVAL;
5597 
5598 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5599 		return -EPROTO;
5600 
5601 	req = (struct l2cap_conn_param_update_req *) data;
5602 	min		= __le16_to_cpu(req->min);
5603 	max		= __le16_to_cpu(req->max);
5604 	latency		= __le16_to_cpu(req->latency);
5605 	to_multiplier	= __le16_to_cpu(req->to_multiplier);
5606 
5607 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5608 	       min, max, latency, to_multiplier);
5609 
5610 	memset(&rsp, 0, sizeof(rsp));
5611 
5612 	if (max > hcon->le_conn_max_interval) {
5613 		BT_DBG("requested connection interval exceeds current bounds.");
5614 		err = -EINVAL;
5615 	} else {
5616 		err = hci_check_conn_params(min, max, latency, to_multiplier);
5617 	}
5618 
5619 	if (err)
5620 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5621 	else
5622 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5623 
5624 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5625 		       sizeof(rsp), &rsp);
5626 
5627 	if (!err) {
5628 		u8 store_hint;
5629 
5630 		store_hint = hci_le_conn_update(hcon, min, max, latency,
5631 						to_multiplier);
5632 		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5633 				    store_hint, min, max, latency,
5634 				    to_multiplier);
5635 
5636 	}
5637 
5638 	return 0;
5639 }
5640 
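/* Handle an LE Credit Based Connection Response: on success record the
 * peer's DCID, MTU, MPS and initial credits and mark the channel ready;
 * on an authentication/encryption failure raise the security level and
 * retry; any other result tears the channel down.
 */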
5641 static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5642 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5643 				u8 *data)
5644 {
5645 	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5646 	struct hci_conn *hcon = conn->hcon;
5647 	u16 dcid, mtu, mps, credits, result;
5648 	struct l2cap_chan *chan;
5649 	int err, sec_level;
5650 
5651 	if (cmd_len < sizeof(*rsp))
5652 		return -EPROTO;
5653 
5654 	dcid    = __le16_to_cpu(rsp->dcid);
5655 	mtu     = __le16_to_cpu(rsp->mtu);
5656 	mps     = __le16_to_cpu(rsp->mps);
5657 	credits = __le16_to_cpu(rsp->credits);
5658 	result  = __le16_to_cpu(rsp->result);
5659 
5660 	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
5661 					   dcid < L2CAP_CID_DYN_START ||
5662 					   dcid > L2CAP_CID_LE_DYN_END))
5663 		return -EPROTO;
5664 
5665 	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5666 	       dcid, mtu, mps, credits, result);
5667 
5668 	mutex_lock(&conn->chan_lock);
5669 
5670 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5671 	if (!chan) {
5672 		err = -EBADSLT;
5673 		goto unlock;
5674 	}
5675 
5676 	err = 0;
5677 
5678 	l2cap_chan_lock(chan);
5679 
5680 	switch (result) {
5681 	case L2CAP_CR_LE_SUCCESS:
5682 		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5683 			err = -EBADSLT;
5684 			break;
5685 		}
5686 
5687 		chan->ident = 0;
5688 		chan->dcid = dcid;
5689 		chan->omtu = mtu;
5690 		chan->remote_mps = mps;
5691 		chan->tx_credits = credits;
5692 		l2cap_chan_ready(chan);
5693 		break;
5694 
5695 	case L2CAP_CR_LE_AUTHENTICATION:
5696 	case L2CAP_CR_LE_ENCRYPTION:
5697 		/* If we already have MITM protection we can't do
5698 		 * anything.
5699 		 */
5700 		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5701 			l2cap_chan_del(chan, ECONNREFUSED);
5702 			break;
5703 		}
5704 
5705 		sec_level = hcon->sec_level + 1;
5706 		if (chan->sec_level < sec_level)
5707 			chan->sec_level = sec_level;
5708 
5709 		/* We'll need to send a new Connect Request */
5710 		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5711 
5712 		smp_conn_security(hcon, chan->sec_level);
5713 		break;
5714 
5715 	default:
5716 		l2cap_chan_del(chan, ECONNREFUSED);
5717 		break;
5718 	}
5719 
5720 	l2cap_chan_unlock(chan);
5721 
5722 unlock:
5723 	mutex_unlock(&conn->chan_lock);
5724 
5725 	return err;
5726 }
5727 
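/* Dispatch a single BR/EDR signaling command to its handler.  Unknown
 * opcodes are reported back so the caller can send a Command Reject.
 */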
5728 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
5729 				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5730 				      u8 *data)
5731 {
5732 	int err = 0;
5733 
5734 	switch (cmd->code) {
5735 	case L2CAP_COMMAND_REJ:
5736 		l2cap_command_rej(conn, cmd, cmd_len, data);
5737 		break;
5738 
5739 	case L2CAP_CONN_REQ:
5740 		err = l2cap_connect_req(conn, cmd, cmd_len, data);
5741 		break;
5742 
5743 	case L2CAP_CONN_RSP:
5744 	case L2CAP_CREATE_CHAN_RSP:
5745 		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
5746 		break;
5747 
5748 	case L2CAP_CONF_REQ:
5749 		err = l2cap_config_req(conn, cmd, cmd_len, data);
5750 		break;
5751 
5752 	case L2CAP_CONF_RSP:
5753 		l2cap_config_rsp(conn, cmd, cmd_len, data);
5754 		break;
5755 
5756 	case L2CAP_DISCONN_REQ:
5757 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5758 		break;
5759 
5760 	case L2CAP_DISCONN_RSP:
5761 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
5762 		break;
5763 
5764 	case L2CAP_ECHO_REQ:
5765 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5766 		break;
5767 
5768 	case L2CAP_ECHO_RSP:
5769 		break;
5770 
5771 	case L2CAP_INFO_REQ:
5772 		err = l2cap_information_req(conn, cmd, cmd_len, data);
5773 		break;
5774 
5775 	case L2CAP_INFO_RSP:
5776 		l2cap_information_rsp(conn, cmd, cmd_len, data);
5777 		break;
5778 
5779 	case L2CAP_CREATE_CHAN_REQ:
5780 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5781 		break;
5782 
5783 	case L2CAP_MOVE_CHAN_REQ:
5784 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5785 		break;
5786 
5787 	case L2CAP_MOVE_CHAN_RSP:
5788 		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
5789 		break;
5790 
5791 	case L2CAP_MOVE_CHAN_CFM:
5792 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5793 		break;
5794 
5795 	case L2CAP_MOVE_CHAN_CFM_RSP:
5796 		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
5797 		break;
5798 
5799 	default:
5800 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5801 		err = -EINVAL;
5802 		break;
5803 	}
5804 
5805 	return err;
5806 }
5807 
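/* Handle an LE Credit Based Connection Request: validate the SPSM and
 * the requested MTU/MPS/SCID, look up a listening channel, apply the
 * security requirements and either respond immediately or defer the
 * response when the listener uses deferred setup.
 */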
5808 static int l2cap_le_connect_req(struct l2cap_conn *conn,
5809 				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5810 				u8 *data)
5811 {
5812 	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5813 	struct l2cap_le_conn_rsp rsp;
5814 	struct l2cap_chan *chan, *pchan;
5815 	u16 dcid, scid, credits, mtu, mps;
5816 	__le16 psm;
5817 	u8 result;
5818 
5819 	if (cmd_len != sizeof(*req))
5820 		return -EPROTO;
5821 
5822 	scid = __le16_to_cpu(req->scid);
5823 	mtu  = __le16_to_cpu(req->mtu);
5824 	mps  = __le16_to_cpu(req->mps);
5825 	psm  = req->psm;
5826 	dcid = 0;
5827 	credits = 0;
5828 
5829 	if (mtu < 23 || mps < 23)
5830 		return -EPROTO;
5831 
5832 	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5833 	       scid, mtu, mps);
5834 
5835 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
5836 	 * page 1059:
5837 	 *
5838 	 * Valid range: 0x0001-0x00ff
5839 	 *
5840 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
5841 	 */
5842 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
5843 		result = L2CAP_CR_LE_BAD_PSM;
5844 		chan = NULL;
5845 		goto response;
5846 	}
5847 
5848 	/* Check if we have socket listening on psm */
5849 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5850 					 &conn->hcon->dst, LE_LINK);
5851 	if (!pchan) {
5852 		result = L2CAP_CR_LE_BAD_PSM;
5853 		chan = NULL;
5854 		goto response;
5855 	}
5856 
5857 	mutex_lock(&conn->chan_lock);
5858 	l2cap_chan_lock(pchan);
5859 
5860 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5861 				     SMP_ALLOW_STK)) {
5862 		result = L2CAP_CR_LE_AUTHENTICATION;
5863 		chan = NULL;
5864 		goto response_unlock;
5865 	}
5866 
5867 	/* Check for valid dynamic CID range */
5868 	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5869 		result = L2CAP_CR_LE_INVALID_SCID;
5870 		chan = NULL;
5871 		goto response_unlock;
5872 	}
5873 
5874 	/* Check if we already have channel with that dcid */
5875 	if (__l2cap_get_chan_by_dcid(conn, scid)) {
5876 		result = L2CAP_CR_LE_SCID_IN_USE;
5877 		chan = NULL;
5878 		goto response_unlock;
5879 	}
5880 
5881 	chan = pchan->ops->new_connection(pchan);
5882 	if (!chan) {
5883 		result = L2CAP_CR_LE_NO_MEM;
5884 		goto response_unlock;
5885 	}
5886 
5887 	bacpy(&chan->src, &conn->hcon->src);
5888 	bacpy(&chan->dst, &conn->hcon->dst);
5889 	chan->src_type = bdaddr_src_type(conn->hcon);
5890 	chan->dst_type = bdaddr_dst_type(conn->hcon);
5891 	chan->psm  = psm;
5892 	chan->dcid = scid;
5893 	chan->omtu = mtu;
5894 	chan->remote_mps = mps;
5895 
5896 	__l2cap_chan_add(conn, chan);
5897 
5898 	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));
5899 
5900 	dcid = chan->scid;
5901 	credits = chan->rx_credits;
5902 
5903 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5904 
5905 	chan->ident = cmd->ident;
5906 
5907 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5908 		l2cap_state_change(chan, BT_CONNECT2);
5909 		/* The following result value is actually not defined
5910 		 * for LE CoC but we use it to let the function know
5911 		 * that it should bail out after doing its cleanup
5912 		 * instead of sending a response.
5913 		 */
5914 		result = L2CAP_CR_PEND;
5915 		chan->ops->defer(chan);
5916 	} else {
5917 		l2cap_chan_ready(chan);
5918 		result = L2CAP_CR_LE_SUCCESS;
5919 	}
5920 
5921 response_unlock:
5922 	l2cap_chan_unlock(pchan);
5923 	mutex_unlock(&conn->chan_lock);
5924 	l2cap_chan_put(pchan);
5925 
5926 	if (result == L2CAP_CR_PEND)
5927 		return 0;
5928 
5929 response:
5930 	if (chan) {
5931 		rsp.mtu = cpu_to_le16(chan->imtu);
5932 		rsp.mps = cpu_to_le16(chan->mps);
5933 	} else {
5934 		rsp.mtu = 0;
5935 		rsp.mps = 0;
5936 	}
5937 
5938 	rsp.dcid    = cpu_to_le16(dcid);
5939 	rsp.credits = cpu_to_le16(credits);
5940 	rsp.result  = cpu_to_le16(result);
5941 
5942 	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5943 
5944 	return 0;
5945 }
5946 
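/* Handle an LE Flow Control Credit packet: add the peer's credits to
 * tx_credits, disconnecting on overflow past LE_FLOWCTL_MAX_CREDITS,
 * and resume transmission that was stalled waiting for credits.
 */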
5947 static inline int l2cap_le_credits(struct l2cap_conn *conn,
5948 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5949 				   u8 *data)
5950 {
5951 	struct l2cap_le_credits *pkt;
5952 	struct l2cap_chan *chan;
5953 	u16 cid, credits, max_credits;
5954 
5955 	if (cmd_len != sizeof(*pkt))
5956 		return -EPROTO;
5957 
5958 	pkt = (struct l2cap_le_credits *) data;
5959 	cid	= __le16_to_cpu(pkt->cid);
5960 	credits	= __le16_to_cpu(pkt->credits);
5961 
5962 	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5963 
5964 	chan = l2cap_get_chan_by_dcid(conn, cid);
5965 	if (!chan)
5966 		return -EBADSLT;
5967 
5968 	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5969 	if (credits > max_credits) {
5970 		BT_ERR("LE credits overflow");
5971 		l2cap_send_disconn_req(chan, ECONNRESET);
5972 
5973 		/* Return 0 so that we don't trigger an unnecessary
5974 		 * command reject packet.
5975 		 */
5976 		goto unlock;
5977 	}
5978 
5979 	chan->tx_credits += credits;
5980 
5981 	/* Resume sending */
5982 	l2cap_le_flowctl_send(chan);
5983 
5984 	if (chan->tx_credits)
5985 		chan->ops->resume(chan);
5986 
5987 unlock:
5988 	l2cap_chan_unlock(chan);
5989 	l2cap_chan_put(chan);
5990 
5991 	return 0;
5992 }
5993 
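/* Handle an Enhanced Credit Based Connection Request: validate MTU, MPS
 * and SPSM, then try to create a channel for each requested SCID (up to
 * L2CAP_ECRED_MAX_CID), collecting a DCID, or 0 on failure, for every
 * entry before sending one aggregate response.
 */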
5994 static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
5995 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5996 				       u8 *data)
5997 {
5998 	struct l2cap_ecred_conn_req *req = (void *) data;
5999 	struct {
6000 		struct l2cap_ecred_conn_rsp rsp;
6001 		__le16 dcid[L2CAP_ECRED_MAX_CID];
6002 	} __packed pdu;
6003 	struct l2cap_chan *chan, *pchan;
6004 	u16 mtu, mps;
6005 	__le16 psm;
6006 	u8 result, len = 0;
6007 	int i, num_scid;
6008 	bool defer = false;
6009 
6010 	if (!enable_ecred)
6011 		return -EINVAL;
6012 
6013 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6014 		result = L2CAP_CR_LE_INVALID_PARAMS;
6015 		goto response;
6016 	}
6017 
6018 	cmd_len -= sizeof(*req);
6019 	num_scid = cmd_len / sizeof(u16);
6020 
6021 	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
6022 		result = L2CAP_CR_LE_INVALID_PARAMS;
6023 		goto response;
6024 	}
6025 
6026 	mtu  = __le16_to_cpu(req->mtu);
6027 	mps  = __le16_to_cpu(req->mps);
6028 
6029 	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
6030 		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
6031 		goto response;
6032 	}
6033 
6034 	psm  = req->psm;
6035 
6036 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
6037 	 * page 1059:
6038 	 *
6039 	 * Valid range: 0x0001-0x00ff
6040 	 *
6041 	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
6042 	 */
6043 	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
6044 		result = L2CAP_CR_LE_BAD_PSM;
6045 		goto response;
6046 	}
6047 
6048 	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
6049 
6050 	memset(&pdu, 0, sizeof(pdu));
6051 
6052 	/* Check if we have socket listening on psm */
6053 	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
6054 					 &conn->hcon->dst, LE_LINK);
6055 	if (!pchan) {
6056 		result = L2CAP_CR_LE_BAD_PSM;
6057 		goto response;
6058 	}
6059 
6060 	mutex_lock(&conn->chan_lock);
6061 	l2cap_chan_lock(pchan);
6062 
6063 	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
6064 				     SMP_ALLOW_STK)) {
6065 		result = L2CAP_CR_LE_AUTHENTICATION;
6066 		goto unlock;
6067 	}
6068 
6069 	result = L2CAP_CR_LE_SUCCESS;
6070 
6071 	for (i = 0; i < num_scid; i++) {
6072 		u16 scid = __le16_to_cpu(req->scid[i]);
6073 
6074 		BT_DBG("scid[%d] 0x%4.4x", i, scid);
6075 
6076 		pdu.dcid[i] = 0x0000;
6077 		len += sizeof(*pdu.dcid);
6078 
6079 		/* Check for valid dynamic CID range */
6080 		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
6081 			result = L2CAP_CR_LE_INVALID_SCID;
6082 			continue;
6083 		}
6084 
6085 		/* Check if we already have channel with that dcid */
6086 		if (__l2cap_get_chan_by_dcid(conn, scid)) {
6087 			result = L2CAP_CR_LE_SCID_IN_USE;
6088 			continue;
6089 		}
6090 
6091 		chan = pchan->ops->new_connection(pchan);
6092 		if (!chan) {
6093 			result = L2CAP_CR_LE_NO_MEM;
6094 			continue;
6095 		}
6096 
6097 		bacpy(&chan->src, &conn->hcon->src);
6098 		bacpy(&chan->dst, &conn->hcon->dst);
6099 		chan->src_type = bdaddr_src_type(conn->hcon);
6100 		chan->dst_type = bdaddr_dst_type(conn->hcon);
6101 		chan->psm  = psm;
6102 		chan->dcid = scid;
6103 		chan->omtu = mtu;
6104 		chan->remote_mps = mps;
6105 
6106 		__l2cap_chan_add(conn, chan);
6107 
6108 		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
6109 
6110 		/* Init response */
6111 		if (!pdu.rsp.credits) {
6112 			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
6113 			pdu.rsp.mps = cpu_to_le16(chan->mps);
6114 			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
6115 		}
6116 
6117 		pdu.dcid[i] = cpu_to_le16(chan->scid);
6118 
6119 		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
6120 
6121 		chan->ident = cmd->ident;
6122 		chan->mode = L2CAP_MODE_EXT_FLOWCTL;
6123 
6124 		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
6125 			l2cap_state_change(chan, BT_CONNECT2);
6126 			defer = true;
6127 			chan->ops->defer(chan);
6128 		} else {
6129 			l2cap_chan_ready(chan);
6130 		}
6131 	}
6132 
6133 unlock:
6134 	l2cap_chan_unlock(pchan);
6135 	mutex_unlock(&conn->chan_lock);
6136 	l2cap_chan_put(pchan);
6137 
6138 response:
6139 	pdu.rsp.result = cpu_to_le16(result);
6140 
6141 	if (defer)
6142 		return 0;
6143 
6144 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
6145 		       sizeof(pdu.rsp) + len, &pdu);
6146 
6147 	return 0;
6148 }
6149 
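/* Handle an Enhanced Credit Based Connection Response: walk every local
 * channel still pending on this ident and either complete it with the
 * returned DCID, MTU, MPS and credits or tear it down according to the
 * result code.
 */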
6150 static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
6151 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6152 				       u8 *data)
6153 {
6154 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6155 	struct hci_conn *hcon = conn->hcon;
6156 	u16 mtu, mps, credits, result;
6157 	struct l2cap_chan *chan, *tmp;
6158 	int err = 0, sec_level;
6159 	int i = 0;
6160 
6161 	if (cmd_len < sizeof(*rsp))
6162 		return -EPROTO;
6163 
6164 	mtu     = __le16_to_cpu(rsp->mtu);
6165 	mps     = __le16_to_cpu(rsp->mps);
6166 	credits = __le16_to_cpu(rsp->credits);
6167 	result  = __le16_to_cpu(rsp->result);
6168 
6169 	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
6170 	       result);
6171 
6172 	mutex_lock(&conn->chan_lock);
6173 
6174 	cmd_len -= sizeof(*rsp);
6175 
6176 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6177 		u16 dcid;
6178 
6179 		if (chan->ident != cmd->ident ||
6180 		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
6181 		    chan->state == BT_CONNECTED)
6182 			continue;
6183 
6184 		l2cap_chan_lock(chan);
6185 
6186 		/* Check that there is a dcid for each pending channel */
6187 		if (cmd_len < sizeof(dcid)) {
6188 			l2cap_chan_del(chan, ECONNREFUSED);
6189 			l2cap_chan_unlock(chan);
6190 			continue;
6191 		}
6192 
6193 		dcid = __le16_to_cpu(rsp->dcid[i++]);
6194 		cmd_len -= sizeof(u16);
6195 
6196 		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
6197 
6198 		/* Check if dcid is already in use */
6199 		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
6200 			/* If a device receives a
6201 			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
6202 			 * already-assigned Destination CID, then both the
6203 			 * original channel and the new channel shall be
6204 			 * immediately discarded and not used.
6205 			 */
6206 			l2cap_chan_del(chan, ECONNREFUSED);
6207 			l2cap_chan_unlock(chan);
6208 			chan = __l2cap_get_chan_by_dcid(conn, dcid);
6209 			l2cap_chan_lock(chan);
6210 			l2cap_chan_del(chan, ECONNRESET);
6211 			l2cap_chan_unlock(chan);
6212 			continue;
6213 		}
6214 
6215 		switch (result) {
6216 		case L2CAP_CR_LE_AUTHENTICATION:
6217 		case L2CAP_CR_LE_ENCRYPTION:
6218 			/* If we already have MITM protection we can't do
6219 			 * anything.
6220 			 */
6221 			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
6222 				l2cap_chan_del(chan, ECONNREFUSED);
6223 				break;
6224 			}
6225 
6226 			sec_level = hcon->sec_level + 1;
6227 			if (chan->sec_level < sec_level)
6228 				chan->sec_level = sec_level;
6229 
6230 			/* We'll need to send a new Connect Request */
6231 			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
6232 
6233 			smp_conn_security(hcon, chan->sec_level);
6234 			break;
6235 
6236 		case L2CAP_CR_LE_BAD_PSM:
6237 			l2cap_chan_del(chan, ECONNREFUSED);
6238 			break;
6239 
6240 		default:
6241 			/* If dcid was not set it means the channel was refused */
6242 			if (!dcid) {
6243 				l2cap_chan_del(chan, ECONNREFUSED);
6244 				break;
6245 			}
6246 
6247 			chan->ident = 0;
6248 			chan->dcid = dcid;
6249 			chan->omtu = mtu;
6250 			chan->remote_mps = mps;
6251 			chan->tx_credits = credits;
6252 			l2cap_chan_ready(chan);
6253 			break;
6254 		}
6255 
6256 		l2cap_chan_unlock(chan);
6257 	}
6258 
6259 	mutex_unlock(&conn->chan_lock);
6260 
6261 	return err;
6262 }
6263 
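/* Handle an Enhanced Credit Based Reconfigure Request: apply the new
 * MTU/MPS to every listed channel, flagging an error result when the
 * peer tries to shrink the MTU, and answer with a single Reconfigure
 * Response.
 */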
6264 static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
6265 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6266 					 u8 *data)
6267 {
6268 	struct l2cap_ecred_reconf_req *req = (void *) data;
6269 	struct l2cap_ecred_reconf_rsp rsp;
6270 	u16 mtu, mps, result;
6271 	struct l2cap_chan *chan;
6272 	int i, num_scid;
6273 
6274 	if (!enable_ecred)
6275 		return -EINVAL;
6276 
6277 	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
6278 		result = L2CAP_CR_LE_INVALID_PARAMS;
6279 		goto respond;
6280 	}
6281 
6282 	mtu = __le16_to_cpu(req->mtu);
6283 	mps = __le16_to_cpu(req->mps);
6284 
6285 	BT_DBG("mtu %u mps %u", mtu, mps);
6286 
6287 	if (mtu < L2CAP_ECRED_MIN_MTU) {
6288 		result = L2CAP_RECONF_INVALID_MTU;
6289 		goto respond;
6290 	}
6291 
6292 	if (mps < L2CAP_ECRED_MIN_MPS) {
6293 		result = L2CAP_RECONF_INVALID_MPS;
6294 		goto respond;
6295 	}
6296 
6297 	cmd_len -= sizeof(*req);
6298 	num_scid = cmd_len / sizeof(u16);
6299 	result = L2CAP_RECONF_SUCCESS;
6300 
6301 	for (i = 0; i < num_scid; i++) {
6302 		u16 scid;
6303 
6304 		scid = __le16_to_cpu(req->scid[i]);
6305 		if (!scid)
6306 			return -EPROTO;
6307 
6308 		chan = __l2cap_get_chan_by_dcid(conn, scid);
6309 		if (!chan)
6310 			continue;
6311 
6312 		/* If the MTU value is decreased for any of the included
6313 		 * channels, then the receiver shall disconnect all
6314 		 * included channels.
6315 		 */
6316 		if (chan->omtu > mtu) {
6317 			BT_ERR("chan %p decreased MTU %u -> %u", chan,
6318 			       chan->omtu, mtu);
6319 			result = L2CAP_RECONF_INVALID_MTU;
6320 		}
6321 
6322 		chan->omtu = mtu;
6323 		chan->remote_mps = mps;
6324 	}
6325 
6326 respond:
6327 	rsp.result = cpu_to_le16(result);
6328 
6329 	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
6330 		       &rsp);
6331 
6332 	return 0;
6333 }
6334 
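/* Handle a Reconfigure Response: on any error result, drop every local
 * channel that was part of the request identified by cmd->ident.
 */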
6335 static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
6336 					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6337 					 u8 *data)
6338 {
6339 	struct l2cap_chan *chan, *tmp;
6340 	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
6341 	u16 result;
6342 
6343 	if (cmd_len < sizeof(*rsp))
6344 		return -EPROTO;
6345 
6346 	result = __le16_to_cpu(rsp->result);
6347 
6348 	BT_DBG("result 0x%4.4x", result);
6349 
6350 	if (!result)
6351 		return 0;
6352 
6353 	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
6354 		if (chan->ident != cmd->ident)
6355 			continue;
6356 
6357 		l2cap_chan_del(chan, ECONNRESET);
6358 	}
6359 
6360 	return 0;
6361 }
6362 
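/* The peer rejected one of our LE signaling commands: drop the channel
 * that was waiting on that ident, if any.
 */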
6363 static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
6364 				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6365 				       u8 *data)
6366 {
6367 	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
6368 	struct l2cap_chan *chan;
6369 
6370 	if (cmd_len < sizeof(*rej))
6371 		return -EPROTO;
6372 
6373 	mutex_lock(&conn->chan_lock);
6374 
6375 	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
6376 	if (!chan)
6377 		goto done;
6378 
6379 	chan = l2cap_chan_hold_unless_zero(chan);
6380 	if (!chan)
6381 		goto done;
6382 
6383 	l2cap_chan_lock(chan);
6384 	l2cap_chan_del(chan, ECONNREFUSED);
6385 	l2cap_chan_unlock(chan);
6386 	l2cap_chan_put(chan);
6387 
6388 done:
6389 	mutex_unlock(&conn->chan_lock);
6390 	return 0;
6391 }
6392 
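/* Dispatch a single LE signaling command to its handler.  Unknown
 * opcodes are reported back so the caller can send a Command Reject.
 */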
6393 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
6394 				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
6395 				   u8 *data)
6396 {
6397 	int err = 0;
6398 
6399 	switch (cmd->code) {
6400 	case L2CAP_COMMAND_REJ:
6401 		l2cap_le_command_rej(conn, cmd, cmd_len, data);
6402 		break;
6403 
6404 	case L2CAP_CONN_PARAM_UPDATE_REQ:
6405 		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
6406 		break;
6407 
6408 	case L2CAP_CONN_PARAM_UPDATE_RSP:
6409 		break;
6410 
6411 	case L2CAP_LE_CONN_RSP:
6412 		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
6413 		break;
6414 
6415 	case L2CAP_LE_CONN_REQ:
6416 		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
6417 		break;
6418 
6419 	case L2CAP_LE_CREDITS:
6420 		err = l2cap_le_credits(conn, cmd, cmd_len, data);
6421 		break;
6422 
6423 	case L2CAP_ECRED_CONN_REQ:
6424 		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
6425 		break;
6426 
6427 	case L2CAP_ECRED_CONN_RSP:
6428 		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
6429 		break;
6430 
6431 	case L2CAP_ECRED_RECONF_REQ:
6432 		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
6433 		break;
6434 
6435 	case L2CAP_ECRED_RECONF_RSP:
6436 		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
6437 		break;
6438 
6439 	case L2CAP_DISCONN_REQ:
6440 		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
6441 		break;
6442 
6443 	case L2CAP_DISCONN_RSP:
6444 		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
6445 		break;
6446 
6447 	default:
6448 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
6449 		err = -EINVAL;
6450 		break;
6451 	}
6452 
6453 	return err;
6454 }
6455 
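/* Receive path for the LE signaling channel: an LE signaling PDU
 * carries exactly one command, so parse it, dispatch it and send a
 * Command Reject if the handler fails.
 */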
6456 static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
6457 					struct sk_buff *skb)
6458 {
6459 	struct hci_conn *hcon = conn->hcon;
6460 	struct l2cap_cmd_hdr *cmd;
6461 	u16 len;
6462 	int err;
6463 
6464 	if (hcon->type != LE_LINK)
6465 		goto drop;
6466 
6467 	if (skb->len < L2CAP_CMD_HDR_SIZE)
6468 		goto drop;
6469 
6470 	cmd = (void *) skb->data;
6471 	skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6472 
6473 	len = le16_to_cpu(cmd->len);
6474 
6475 	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
6476 
6477 	if (len != skb->len || !cmd->ident) {
6478 		BT_DBG("corrupted command");
6479 		goto drop;
6480 	}
6481 
6482 	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
6483 	if (err) {
6484 		struct l2cap_cmd_rej_unk rej;
6485 
6486 		BT_ERR("Wrong link type (%d)", err);
6487 
6488 		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6489 		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
6490 			       sizeof(rej), &rej);
6491 	}
6492 
6493 drop:
6494 	kfree_skb(skb);
6495 }
6496 
6497 static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
6498 {
6499 	struct l2cap_cmd_rej_unk rej;
6500 
6501 	rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
6502 	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
6503 }
6504 
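/* Receive path for the BR/EDR signaling channel: unlike LE, a single
 * signaling PDU may carry several commands, so iterate over them and
 * reject any that are malformed.
 */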
6505 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
6506 				     struct sk_buff *skb)
6507 {
6508 	struct hci_conn *hcon = conn->hcon;
6509 	struct l2cap_cmd_hdr *cmd;
6510 	int err;
6511 
6512 	l2cap_raw_recv(conn, skb);
6513 
6514 	if (hcon->type != ACL_LINK)
6515 		goto drop;
6516 
6517 	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
6518 		u16 len;
6519 
6520 		cmd = (void *) skb->data;
6521 		skb_pull(skb, L2CAP_CMD_HDR_SIZE);
6522 
6523 		len = le16_to_cpu(cmd->len);
6524 
6525 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
6526 		       cmd->ident);
6527 
6528 		if (len > skb->len || !cmd->ident) {
6529 			BT_DBG("corrupted command");
6530 			l2cap_sig_send_rej(conn, cmd->ident);
6531 			skb_pull(skb, len > skb->len ? skb->len : len);
6532 			continue;
6533 		}
6534 
6535 		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
6536 		if (err) {
6537 			BT_ERR("Wrong link type (%d)", err);
6538 			l2cap_sig_send_rej(conn, cmd->ident);
6539 		}
6540 
6541 		skb_pull(skb, len);
6542 	}
6543 
6544 	if (skb->len > 0) {
6545 		BT_DBG("corrupted command");
6546 		l2cap_sig_send_rej(conn, 0);
6547 	}
6548 
6549 drop:
6550 	kfree_skb(skb);
6551 }
6552 
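/* If the channel uses a CRC16 FCS, strip the trailing FCS from the skb
 * and verify it against a CRC computed over the header and payload;
 * returns -EBADMSG on a mismatch so the caller can drop the frame.
 */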
6553 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6554 {
6555 	u16 our_fcs, rcv_fcs;
6556 	int hdr_size;
6557 
6558 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6559 		hdr_size = L2CAP_EXT_HDR_SIZE;
6560 	else
6561 		hdr_size = L2CAP_ENH_HDR_SIZE;
6562 
6563 	if (chan->fcs == L2CAP_FCS_CRC16) {
6564 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6565 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6566 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6567 
6568 		if (our_fcs != rcv_fcs)
6569 			return -EBADMSG;
6570 	}
6571 	return 0;
6572 }
6573 
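/* Send a frame carrying the F-bit: an RNR if we are locally busy,
 * otherwise any pending I-frames, followed by an RR if no other frame
 * ended up carrying the F-bit.
 */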
6574 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
6575 {
6576 	struct l2cap_ctrl control;
6577 
6578 	BT_DBG("chan %p", chan);
6579 
6580 	memset(&control, 0, sizeof(control));
6581 	control.sframe = 1;
6582 	control.final = 1;
6583 	control.reqseq = chan->buffer_seq;
6584 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
6585 
6586 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6587 		control.super = L2CAP_SUPER_RNR;
6588 		l2cap_send_sframe(chan, &control);
6589 	}
6590 
6591 	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
6592 	    chan->unacked_frames > 0)
6593 		__set_retrans_timer(chan);
6594 
6595 	/* Send pending iframes */
6596 	l2cap_ertm_send(chan);
6597 
6598 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
6599 	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
6600 		/* F-bit wasn't sent in an s-frame or i-frame yet, so
6601 		 * send it now.
6602 		 */
6603 		control.super = L2CAP_SUPER_RR;
6604 		l2cap_send_sframe(chan, &control);
6605 	}
6606 }
6607 
6608 static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
6609 			    struct sk_buff **last_frag)
6610 {
6611 	/* skb->len reflects data in skb as well as all fragments
6612 	 * skb->data_len reflects only data in fragments
6613 	 */
6614 	if (!skb_has_frag_list(skb))
6615 		skb_shinfo(skb)->frag_list = new_frag;
6616 
6617 	new_frag->next = NULL;
6618 
6619 	(*last_frag)->next = new_frag;
6620 	*last_frag = new_frag;
6621 
6622 	skb->len += new_frag->len;
6623 	skb->data_len += new_frag->len;
6624 	skb->truesize += new_frag->truesize;
6625 }
6626 
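/* Reassemble a segmented SDU from I-frames: unsegmented frames are
 * delivered directly, START/CONTINUE/END fragments are stitched
 * together and delivered once complete.  On error both the skb and any
 * partial SDU are freed.
 */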
6627 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
6628 				struct l2cap_ctrl *control)
6629 {
6630 	int err = -EINVAL;
6631 
6632 	switch (control->sar) {
6633 	case L2CAP_SAR_UNSEGMENTED:
6634 		if (chan->sdu)
6635 			break;
6636 
6637 		err = chan->ops->recv(chan, skb);
6638 		break;
6639 
6640 	case L2CAP_SAR_START:
6641 		if (chan->sdu)
6642 			break;
6643 
6644 		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
6645 			break;
6646 
6647 		chan->sdu_len = get_unaligned_le16(skb->data);
6648 		skb_pull(skb, L2CAP_SDULEN_SIZE);
6649 
6650 		if (chan->sdu_len > chan->imtu) {
6651 			err = -EMSGSIZE;
6652 			break;
6653 		}
6654 
6655 		if (skb->len >= chan->sdu_len)
6656 			break;
6657 
6658 		chan->sdu = skb;
6659 		chan->sdu_last_frag = skb;
6660 
6661 		skb = NULL;
6662 		err = 0;
6663 		break;
6664 
6665 	case L2CAP_SAR_CONTINUE:
6666 		if (!chan->sdu)
6667 			break;
6668 
6669 		append_skb_frag(chan->sdu, skb,
6670 				&chan->sdu_last_frag);
6671 		skb = NULL;
6672 
6673 		if (chan->sdu->len >= chan->sdu_len)
6674 			break;
6675 
6676 		err = 0;
6677 		break;
6678 
6679 	case L2CAP_SAR_END:
6680 		if (!chan->sdu)
6681 			break;
6682 
6683 		append_skb_frag(chan->sdu, skb,
6684 				&chan->sdu_last_frag);
6685 		skb = NULL;
6686 
6687 		if (chan->sdu->len != chan->sdu_len)
6688 			break;
6689 
6690 		err = chan->ops->recv(chan, chan->sdu);
6691 
6692 		if (!err) {
6693 			/* Reassembly complete */
6694 			chan->sdu = NULL;
6695 			chan->sdu_last_frag = NULL;
6696 			chan->sdu_len = 0;
6697 		}
6698 		break;
6699 	}
6700 
6701 	if (err) {
6702 		kfree_skb(skb);
6703 		kfree_skb(chan->sdu);
6704 		chan->sdu = NULL;
6705 		chan->sdu_last_frag = NULL;
6706 		chan->sdu_len = 0;
6707 	}
6708 
6709 	return err;
6710 }
6711 
6712 static int l2cap_resegment(struct l2cap_chan *chan)
6713 {
6714 	/* Placeholder */
6715 	return 0;
6716 }
6717 
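/* Tell the ERTM transmit state machine that the local receive side has
 * become busy or has cleared; a no-op for non-ERTM channels.
 */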
6718 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6719 {
6720 	u8 event;
6721 
6722 	if (chan->mode != L2CAP_MODE_ERTM)
6723 		return;
6724 
6725 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6726 	l2cap_tx(chan, NULL, NULL, event);
6727 }
6728 
6729 static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
6730 {
6731 	int err = 0;
6732 	/* Pass sequential frames to l2cap_reassemble_sdu()
6733 	 * until a gap is encountered.
6734 	 */
6735 
6736 	BT_DBG("chan %p", chan);
6737 
6738 	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6739 		struct sk_buff *skb;
6740 		BT_DBG("Searching for skb with txseq %d (queue len %d)",
6741 		       chan->buffer_seq, skb_queue_len(&chan->srej_q));
6742 
6743 		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
6744 
6745 		if (!skb)
6746 			break;
6747 
6748 		skb_unlink(skb, &chan->srej_q);
6749 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6750 		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
6751 		if (err)
6752 			break;
6753 	}
6754 
6755 	if (skb_queue_empty(&chan->srej_q)) {
6756 		chan->rx_state = L2CAP_RX_STATE_RECV;
6757 		l2cap_send_ack(chan);
6758 	}
6759 
6760 	return err;
6761 }
6762 
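/* Handle a received SREJ S-frame: retransmit the single requested
 * I-frame, within the max_tx retry limit, and track the P/F-bit
 * handshake via CONN_SREJ_ACT to avoid duplicate retransmissions.
 */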
6763 static void l2cap_handle_srej(struct l2cap_chan *chan,
6764 			      struct l2cap_ctrl *control)
6765 {
6766 	struct sk_buff *skb;
6767 
6768 	BT_DBG("chan %p, control %p", chan, control);
6769 
6770 	if (control->reqseq == chan->next_tx_seq) {
6771 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6772 		l2cap_send_disconn_req(chan, ECONNRESET);
6773 		return;
6774 	}
6775 
6776 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6777 
6778 	if (skb == NULL) {
6779 		BT_DBG("Seq %d not available for retransmission",
6780 		       control->reqseq);
6781 		return;
6782 	}
6783 
6784 	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6785 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6786 		l2cap_send_disconn_req(chan, ECONNRESET);
6787 		return;
6788 	}
6789 
6790 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6791 
6792 	if (control->poll) {
6793 		l2cap_pass_to_tx(chan, control);
6794 
6795 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
6796 		l2cap_retransmit(chan, control);
6797 		l2cap_ertm_send(chan);
6798 
6799 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6800 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
6801 			chan->srej_save_reqseq = control->reqseq;
6802 		}
6803 	} else {
6804 		l2cap_pass_to_tx_fbit(chan, control);
6805 
6806 		if (control->final) {
6807 			if (chan->srej_save_reqseq != control->reqseq ||
6808 			    !test_and_clear_bit(CONN_SREJ_ACT,
6809 						&chan->conn_state))
6810 				l2cap_retransmit(chan, control);
6811 		} else {
6812 			l2cap_retransmit(chan, control);
6813 			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6814 				set_bit(CONN_SREJ_ACT, &chan->conn_state);
6815 				chan->srej_save_reqseq = control->reqseq;
6816 			}
6817 		}
6818 	}
6819 }
6820 
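/* Handle a received REJ S-frame: retransmit all unacked I-frames
 * starting at the rejected sequence number, unless the retry limit for
 * that frame has already been reached.
 */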
6821 static void l2cap_handle_rej(struct l2cap_chan *chan,
6822 			     struct l2cap_ctrl *control)
6823 {
6824 	struct sk_buff *skb;
6825 
6826 	BT_DBG("chan %p, control %p", chan, control);
6827 
6828 	if (control->reqseq == chan->next_tx_seq) {
6829 		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
6830 		l2cap_send_disconn_req(chan, ECONNRESET);
6831 		return;
6832 	}
6833 
6834 	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6835 
6836 	if (chan->max_tx && skb &&
6837 	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
6838 		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
6839 		l2cap_send_disconn_req(chan, ECONNRESET);
6840 		return;
6841 	}
6842 
6843 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6844 
6845 	l2cap_pass_to_tx(chan, control);
6846 
6847 	if (control->final) {
6848 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6849 			l2cap_retransmit_all(chan, control);
6850 	} else {
6851 		l2cap_retransmit_all(chan, control);
6852 		l2cap_ertm_send(chan);
6853 		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6854 			set_bit(CONN_REJ_ACT, &chan->conn_state);
6855 	}
6856 }
6857 
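/* Classify an incoming I-frame sequence number relative to the receive
 * window and any outstanding SREJ state: expected, duplicate,
 * unexpected (a gap) or invalid.
 */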
6858 static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6859 {
6860 	BT_DBG("chan %p, txseq %d", chan, txseq);
6861 
6862 	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6863 	       chan->expected_tx_seq);
6864 
6865 	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6866 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6867 		    chan->tx_win) {
6868 			/* See notes below regarding "double poll" and
6869 			 * invalid packets.
6870 			 */
6871 			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6872 				BT_DBG("Invalid/Ignore - after SREJ");
6873 				return L2CAP_TXSEQ_INVALID_IGNORE;
6874 			} else {
6875 				BT_DBG("Invalid - in window after SREJ sent");
6876 				return L2CAP_TXSEQ_INVALID;
6877 			}
6878 		}
6879 
6880 		if (chan->srej_list.head == txseq) {
6881 			BT_DBG("Expected SREJ");
6882 			return L2CAP_TXSEQ_EXPECTED_SREJ;
6883 		}
6884 
6885 		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6886 			BT_DBG("Duplicate SREJ - txseq already stored");
6887 			return L2CAP_TXSEQ_DUPLICATE_SREJ;
6888 		}
6889 
6890 		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6891 			BT_DBG("Unexpected SREJ - not requested");
6892 			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6893 		}
6894 	}
6895 
6896 	if (chan->expected_tx_seq == txseq) {
6897 		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6898 		    chan->tx_win) {
6899 			BT_DBG("Invalid - txseq outside tx window");
6900 			return L2CAP_TXSEQ_INVALID;
6901 		} else {
6902 			BT_DBG("Expected");
6903 			return L2CAP_TXSEQ_EXPECTED;
6904 		}
6905 	}
6906 
6907 	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
6908 	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
6909 		BT_DBG("Duplicate - expected_tx_seq later than txseq");
6910 		return L2CAP_TXSEQ_DUPLICATE;
6911 	}
6912 
6913 	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6914 		/* A source of invalid packets is a "double poll" condition,
6915 		 * where delays cause us to send multiple poll packets.  If
6916 		 * the remote stack receives and processes both polls,
6917 		 * sequence numbers can wrap around in such a way that a
6918 		 * resent frame has a sequence number that looks like new data
6919 		 * with a sequence gap.  This would trigger an erroneous SREJ
6920 		 * request.
6921 		 *
6922 		 * Fortunately, this is impossible with a tx window that's
6923 		 * less than half of the maximum sequence number, which allows
6924 		 * invalid frames to be safely ignored.
6925 		 *
6926 		 * With tx window sizes greater than half of the tx window
6927 		 * maximum, the frame is invalid and cannot be ignored.  This
6928 		 * causes a disconnect.
6929 		 */
6930 
6931 		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6932 			BT_DBG("Invalid/Ignore - txseq outside tx window");
6933 			return L2CAP_TXSEQ_INVALID_IGNORE;
6934 		} else {
6935 			BT_DBG("Invalid - txseq outside tx window");
6936 			return L2CAP_TXSEQ_INVALID;
6937 		}
6938 	} else {
6939 		BT_DBG("Unexpected - txseq indicates missing frames");
6940 		return L2CAP_TXSEQ_UNEXPECTED;
6941 	}
6942 }
6943 
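/* ERTM receive state machine, RECV state: deliver in-sequence I-frames,
 * start SREJ recovery when a sequence gap is detected, and react to
 * RR/RNR/REJ/SREJ S-frames from the peer.
 */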
6944 static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6945 			       struct l2cap_ctrl *control,
6946 			       struct sk_buff *skb, u8 event)
6947 {
6948 	struct l2cap_ctrl local_control;
6949 	int err = 0;
6950 	bool skb_in_use = false;
6951 
6952 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6953 	       event);
6954 
6955 	switch (event) {
6956 	case L2CAP_EV_RECV_IFRAME:
6957 		switch (l2cap_classify_txseq(chan, control->txseq)) {
6958 		case L2CAP_TXSEQ_EXPECTED:
6959 			l2cap_pass_to_tx(chan, control);
6960 
6961 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6962 				BT_DBG("Busy, discarding expected seq %d",
6963 				       control->txseq);
6964 				break;
6965 			}
6966 
6967 			chan->expected_tx_seq = __next_seq(chan,
6968 							   control->txseq);
6969 
6970 			chan->buffer_seq = chan->expected_tx_seq;
6971 			skb_in_use = true;
6972 
6973 			/* l2cap_reassemble_sdu may free skb, hence invalidate
6974 			 * control, so make a copy in advance to use it after
6975 			 * l2cap_reassemble_sdu returns and to avoid the race
6976 			 * condition, for example:
6977 			 *
6978 			 * The current thread calls:
6979 			 *   l2cap_reassemble_sdu
6980 			 *     chan->ops->recv == l2cap_sock_recv_cb
6981 			 *       __sock_queue_rcv_skb
6982 			 * Another thread calls:
6983 			 *   bt_sock_recvmsg
6984 			 *     skb_recv_datagram
6985 			 *     skb_free_datagram
6986 			 * Then the current thread tries to access control, but
6987 			 * it was freed by skb_free_datagram.
6988 			 */
6989 			local_control = *control;
6990 			err = l2cap_reassemble_sdu(chan, skb, control);
6991 			if (err)
6992 				break;
6993 
6994 			if (local_control.final) {
6995 				if (!test_and_clear_bit(CONN_REJ_ACT,
6996 							&chan->conn_state)) {
6997 					local_control.final = 0;
6998 					l2cap_retransmit_all(chan, &local_control);
6999 					l2cap_ertm_send(chan);
7000 				}
7001 			}
7002 
7003 			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
7004 				l2cap_send_ack(chan);
7005 			break;
7006 		case L2CAP_TXSEQ_UNEXPECTED:
7007 			l2cap_pass_to_tx(chan, control);
7008 
7009 			/* Can't issue SREJ frames in the local busy state.
7010 			 * Drop this frame, it will be seen as missing
7011 			 * when local busy is exited.
7012 			 */
7013 			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
7014 				BT_DBG("Busy, discarding unexpected seq %d",
7015 				       control->txseq);
7016 				break;
7017 			}
7018 
7019 			/* There was a gap in the sequence, so an SREJ
7020 			 * must be sent for each missing frame.  The
7021 			 * current frame is stored for later use.
7022 			 */
7023 			skb_queue_tail(&chan->srej_q, skb);
7024 			skb_in_use = true;
7025 			BT_DBG("Queued %p (queue len %d)", skb,
7026 			       skb_queue_len(&chan->srej_q));
7027 
7028 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
7029 			l2cap_seq_list_clear(&chan->srej_list);
7030 			l2cap_send_srej(chan, control->txseq);
7031 
7032 			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
7033 			break;
7034 		case L2CAP_TXSEQ_DUPLICATE:
7035 			l2cap_pass_to_tx(chan, control);
7036 			break;
7037 		case L2CAP_TXSEQ_INVALID_IGNORE:
7038 			break;
7039 		case L2CAP_TXSEQ_INVALID:
7040 		default:
7041 			l2cap_send_disconn_req(chan, ECONNRESET);
7042 			break;
7043 		}
7044 		break;
7045 	case L2CAP_EV_RECV_RR:
7046 		l2cap_pass_to_tx(chan, control);
7047 		if (control->final) {
7048 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7049 
7050 			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
7051 			    !__chan_is_moving(chan)) {
7052 				control->final = 0;
7053 				l2cap_retransmit_all(chan, control);
7054 			}
7055 
7056 			l2cap_ertm_send(chan);
7057 		} else if (control->poll) {
7058 			l2cap_send_i_or_rr_or_rnr(chan);
7059 		} else {
7060 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7061 					       &chan->conn_state) &&
7062 			    chan->unacked_frames)
7063 				__set_retrans_timer(chan);
7064 
7065 			l2cap_ertm_send(chan);
7066 		}
7067 		break;
7068 	case L2CAP_EV_RECV_RNR:
7069 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7070 		l2cap_pass_to_tx(chan, control);
7071 		if (control && control->poll) {
7072 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7073 			l2cap_send_rr_or_rnr(chan, 0);
7074 		}
7075 		__clear_retrans_timer(chan);
7076 		l2cap_seq_list_clear(&chan->retrans_list);
7077 		break;
7078 	case L2CAP_EV_RECV_REJ:
7079 		l2cap_handle_rej(chan, control);
7080 		break;
7081 	case L2CAP_EV_RECV_SREJ:
7082 		l2cap_handle_srej(chan, control);
7083 		break;
7084 	default:
7085 		break;
7086 	}
7087 
7088 	if (skb && !skb_in_use) {
7089 		BT_DBG("Freeing %p", skb);
7090 		kfree_skb(skb);
7091 	}
7092 
7093 	return err;
7094 }
7095 
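/* ERTM receive state machine, SREJ_SENT state: buffer out-of-sequence
 * I-frames in srej_q while requesting retransmission of the missing
 * ones, returning to RECV once the gap has been filled.
 */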
7096 static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
7097 				    struct l2cap_ctrl *control,
7098 				    struct sk_buff *skb, u8 event)
7099 {
7100 	int err = 0;
7101 	u16 txseq = control->txseq;
7102 	bool skb_in_use = false;
7103 
7104 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7105 	       event);
7106 
7107 	switch (event) {
7108 	case L2CAP_EV_RECV_IFRAME:
7109 		switch (l2cap_classify_txseq(chan, txseq)) {
7110 		case L2CAP_TXSEQ_EXPECTED:
7111 			/* Keep frame for reassembly later */
7112 			l2cap_pass_to_tx(chan, control);
7113 			skb_queue_tail(&chan->srej_q, skb);
7114 			skb_in_use = true;
7115 			BT_DBG("Queued %p (queue len %d)", skb,
7116 			       skb_queue_len(&chan->srej_q));
7117 
7118 			chan->expected_tx_seq = __next_seq(chan, txseq);
7119 			break;
7120 		case L2CAP_TXSEQ_EXPECTED_SREJ:
7121 			l2cap_seq_list_pop(&chan->srej_list);
7122 
7123 			l2cap_pass_to_tx(chan, control);
7124 			skb_queue_tail(&chan->srej_q, skb);
7125 			skb_in_use = true;
7126 			BT_DBG("Queued %p (queue len %d)", skb,
7127 			       skb_queue_len(&chan->srej_q));
7128 
7129 			err = l2cap_rx_queued_iframes(chan);
7130 			if (err)
7131 				break;
7132 
7133 			break;
7134 		case L2CAP_TXSEQ_UNEXPECTED:
7135 			/* Got a frame that can't be reassembled yet.
7136 			 * Save it for later, and send SREJs to cover
7137 			 * the missing frames.
7138 			 */
7139 			skb_queue_tail(&chan->srej_q, skb);
7140 			skb_in_use = true;
7141 			BT_DBG("Queued %p (queue len %d)", skb,
7142 			       skb_queue_len(&chan->srej_q));
7143 
7144 			l2cap_pass_to_tx(chan, control);
7145 			l2cap_send_srej(chan, control->txseq);
7146 			break;
7147 		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
7148 			/* This frame was requested with an SREJ, but
7149 			 * some expected retransmitted frames are
7150 			 * missing.  Request retransmission of missing
7151 			 * SREJ'd frames.
7152 			 */
7153 			skb_queue_tail(&chan->srej_q, skb);
7154 			skb_in_use = true;
7155 			BT_DBG("Queued %p (queue len %d)", skb,
7156 			       skb_queue_len(&chan->srej_q));
7157 
7158 			l2cap_pass_to_tx(chan, control);
7159 			l2cap_send_srej_list(chan, control->txseq);
7160 			break;
7161 		case L2CAP_TXSEQ_DUPLICATE_SREJ:
7162 			/* We've already queued this frame.  Drop this copy. */
7163 			l2cap_pass_to_tx(chan, control);
7164 			break;
7165 		case L2CAP_TXSEQ_DUPLICATE:
7166 			/* Expecting a later sequence number, so this frame
7167 			 * was already received.  Ignore it completely.
7168 			 */
7169 			break;
7170 		case L2CAP_TXSEQ_INVALID_IGNORE:
7171 			break;
7172 		case L2CAP_TXSEQ_INVALID:
7173 		default:
7174 			l2cap_send_disconn_req(chan, ECONNRESET);
7175 			break;
7176 		}
7177 		break;
7178 	case L2CAP_EV_RECV_RR:
7179 		l2cap_pass_to_tx(chan, control);
7180 		if (control->final) {
7181 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7182 
7183 			if (!test_and_clear_bit(CONN_REJ_ACT,
7184 						&chan->conn_state)) {
7185 				control->final = 0;
7186 				l2cap_retransmit_all(chan, control);
7187 			}
7188 
7189 			l2cap_ertm_send(chan);
7190 		} else if (control->poll) {
7191 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7192 					       &chan->conn_state) &&
7193 			    chan->unacked_frames) {
7194 				__set_retrans_timer(chan);
7195 			}
7196 
7197 			set_bit(CONN_SEND_FBIT, &chan->conn_state);
7198 			l2cap_send_srej_tail(chan);
7199 		} else {
7200 			if (test_and_clear_bit(CONN_REMOTE_BUSY,
7201 					       &chan->conn_state) &&
7202 			    chan->unacked_frames)
7203 				__set_retrans_timer(chan);
7204 
7205 			l2cap_send_ack(chan);
7206 		}
7207 		break;
7208 	case L2CAP_EV_RECV_RNR:
7209 		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7210 		l2cap_pass_to_tx(chan, control);
7211 		if (control->poll) {
7212 			l2cap_send_srej_tail(chan);
7213 		} else {
7214 			struct l2cap_ctrl rr_control;
7215 			memset(&rr_control, 0, sizeof(rr_control));
7216 			rr_control.sframe = 1;
7217 			rr_control.super = L2CAP_SUPER_RR;
7218 			rr_control.reqseq = chan->buffer_seq;
7219 			l2cap_send_sframe(chan, &rr_control);
7220 		}
7221 
7222 		break;
7223 	case L2CAP_EV_RECV_REJ:
7224 		l2cap_handle_rej(chan, control);
7225 		break;
7226 	case L2CAP_EV_RECV_SREJ:
7227 		l2cap_handle_srej(chan, control);
7228 		break;
7229 	}
7230 
7231 	if (skb && !skb_in_use) {
7232 		BT_DBG("Freeing %p", skb);
7233 		kfree_skb(skb);
7234 	}
7235 
7236 	return err;
7237 }
7238 
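/* A channel move has finished: go back to the RECV state, switch the
 * connection MTU to the new controller's maximum and resegment any
 * pending data.
 */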
7239 static int l2cap_finish_move(struct l2cap_chan *chan)
7240 {
7241 	BT_DBG("chan %p", chan);
7242 
7243 	chan->rx_state = L2CAP_RX_STATE_RECV;
7244 
7245 	if (chan->hs_hcon)
7246 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7247 	else
7248 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7249 
7250 	return l2cap_resegment(chan);
7251 }
7252 
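/* WAIT_P state: after a channel move, wait for the peer's poll, rewind
 * the transmit queue to the peer's reqseq and answer with the F-bit
 * set.
 */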
7253 static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
7254 				 struct l2cap_ctrl *control,
7255 				 struct sk_buff *skb, u8 event)
7256 {
7257 	int err;
7258 
7259 	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
7260 	       event);
7261 
7262 	if (!control->poll)
7263 		return -EPROTO;
7264 
7265 	l2cap_process_reqseq(chan, control->reqseq);
7266 
7267 	if (!skb_queue_empty(&chan->tx_q))
7268 		chan->tx_send_head = skb_peek(&chan->tx_q);
7269 	else
7270 		chan->tx_send_head = NULL;
7271 
7272 	/* Rewind next_tx_seq to the point expected
7273 	 * by the receiver.
7274 	 */
7275 	chan->next_tx_seq = control->reqseq;
7276 	chan->unacked_frames = 0;
7277 
7278 	err = l2cap_finish_move(chan);
7279 	if (err)
7280 		return err;
7281 
7282 	set_bit(CONN_SEND_FBIT, &chan->conn_state);
7283 	l2cap_send_i_or_rr_or_rnr(chan);
7284 
7285 	if (event == L2CAP_EV_RECV_IFRAME)
7286 		return -EPROTO;
7287 
7288 	return l2cap_rx_state_recv(chan, control, NULL, event);
7289 }
7290 
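/* WAIT_F state: after a channel move, wait for the frame carrying the
 * F-bit, rewind the transmit queue, pick up the new controller's MTU
 * and resume normal reception.
 */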
7291 static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
7292 				 struct l2cap_ctrl *control,
7293 				 struct sk_buff *skb, u8 event)
7294 {
7295 	int err;
7296 
7297 	if (!control->final)
7298 		return -EPROTO;
7299 
7300 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
7301 
7302 	chan->rx_state = L2CAP_RX_STATE_RECV;
7303 	l2cap_process_reqseq(chan, control->reqseq);
7304 
7305 	if (!skb_queue_empty(&chan->tx_q))
7306 		chan->tx_send_head = skb_peek(&chan->tx_q);
7307 	else
7308 		chan->tx_send_head = NULL;
7309 
7310 	/* Rewind next_tx_seq to the point expected
7311 	 * by the receiver.
7312 	 */
7313 	chan->next_tx_seq = control->reqseq;
7314 	chan->unacked_frames = 0;
7315 
7316 	if (chan->hs_hcon)
7317 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7318 	else
7319 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7320 
7321 	err = l2cap_resegment(chan);
7322 
7323 	if (!err)
7324 		err = l2cap_rx_state_recv(chan, control, skb, event);
7325 
7326 	return err;
7327 }
7328 
7329 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7330 {
7331 	/* Make sure reqseq is for a packet that has been sent but not acked */
7332 	u16 unacked;
7333 
7334 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7335 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7336 }
7337 
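/* Top-level ERTM receive entry point: validate reqseq against the
 * unacked window and dispatch to the handler for the current rx_state.
 */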
7338 static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7339 		    struct sk_buff *skb, u8 event)
7340 {
7341 	int err = 0;
7342 
7343 	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
7344 	       control, skb, event, chan->rx_state);
7345 
7346 	if (__valid_reqseq(chan, control->reqseq)) {
7347 		switch (chan->rx_state) {
7348 		case L2CAP_RX_STATE_RECV:
7349 			err = l2cap_rx_state_recv(chan, control, skb, event);
7350 			break;
7351 		case L2CAP_RX_STATE_SREJ_SENT:
7352 			err = l2cap_rx_state_srej_sent(chan, control, skb,
7353 						       event);
7354 			break;
7355 		case L2CAP_RX_STATE_WAIT_P:
7356 			err = l2cap_rx_state_wait_p(chan, control, skb, event);
7357 			break;
7358 		case L2CAP_RX_STATE_WAIT_F:
7359 			err = l2cap_rx_state_wait_f(chan, control, skb, event);
7360 			break;
7361 		default:
7362 			/* shut it down */
7363 			break;
7364 		}
7365 	} else {
7366 		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
7367 		       control->reqseq, chan->next_tx_seq,
7368 		       chan->expected_ack_seq);
7369 		l2cap_send_disconn_req(chan, ECONNRESET);
7370 	}
7371 
7372 	return err;
7373 }
7374 
7375 static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
7376 			   struct sk_buff *skb)
7377 {
7378 	/* l2cap_reassemble_sdu may free skb and thereby invalidate control, so
7379 	 * store the txseq field in advance and use it after l2cap_reassemble_sdu
7380 	 * returns, avoiding a race condition such as the following:
7381 	 *
7382 	 * The current thread calls:
7383 	 *   l2cap_reassemble_sdu
7384 	 *     chan->ops->recv == l2cap_sock_recv_cb
7385 	 *       __sock_queue_rcv_skb
7386 	 * Another thread calls:
7387 	 *   bt_sock_recvmsg
7388 	 *     skb_recv_datagram
7389 	 *     skb_free_datagram
7390 	 * Then the current thread tries to access control, but it was freed by
7391 	 * skb_free_datagram.
7392 	 */
7393 	u16 txseq = control->txseq;
7394 
7395 	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
7396 	       chan->rx_state);
7397 
7398 	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
7399 		l2cap_pass_to_tx(chan, control);
7400 
7401 		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
7402 		       __next_seq(chan, chan->buffer_seq));
7403 
7404 		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
7405 
7406 		l2cap_reassemble_sdu(chan, skb, control);
7407 	} else {
7408 		if (chan->sdu) {
7409 			kfree_skb(chan->sdu);
7410 			chan->sdu = NULL;
7411 		}
7412 		chan->sdu_last_frag = NULL;
7413 		chan->sdu_len = 0;
7414 
7415 		if (skb) {
7416 			BT_DBG("Freeing %p", skb);
7417 			kfree_skb(skb);
7418 		}
7419 	}
7420 
7421 	chan->last_acked_seq = txseq;
7422 	chan->expected_tx_seq = __next_seq(chan, txseq);
7423 
7424 	return 0;
7425 }
7426 
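/* Handle an ERTM or streaming mode frame: check the FCS, validate the
 * control field and feed the frame into the RX state machine or the
 * streaming receive path.
 */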
7427 static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7428 {
7429 	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
7430 	u16 len;
7431 	u8 event;
7432 
7433 	__unpack_control(chan, skb);
7434 
7435 	len = skb->len;
7436 
7437 	/*
7438 	 * We can just drop the corrupted I-frame here.
7439 	 * The receiver will notice it is missing and start the proper
7440 	 * recovery procedure, asking for retransmission.
7441 	 */
7442 	if (l2cap_check_fcs(chan, skb))
7443 		goto drop;
7444 
7445 	if (!control->sframe && control->sar == L2CAP_SAR_START)
7446 		len -= L2CAP_SDULEN_SIZE;
7447 
7448 	if (chan->fcs == L2CAP_FCS_CRC16)
7449 		len -= L2CAP_FCS_SIZE;
7450 
7451 	if (len > chan->mps) {
7452 		l2cap_send_disconn_req(chan, ECONNRESET);
7453 		goto drop;
7454 	}
7455 
7456 	if (chan->ops->filter) {
7457 		if (chan->ops->filter(chan, skb))
7458 			goto drop;
7459 	}
7460 
7461 	if (!control->sframe) {
7462 		int err;
7463 
7464 		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
7465 		       control->sar, control->reqseq, control->final,
7466 		       control->txseq);
7467 
7468 		/* Validate F-bit - F=0 always valid, F=1 only
7469 		 * valid in TX WAIT_F
7470 		 */
7471 		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
7472 			goto drop;
7473 
7474 		if (chan->mode != L2CAP_MODE_STREAMING) {
7475 			event = L2CAP_EV_RECV_IFRAME;
7476 			err = l2cap_rx(chan, control, skb, event);
7477 		} else {
7478 			err = l2cap_stream_rx(chan, control, skb);
7479 		}
7480 
7481 		if (err)
7482 			l2cap_send_disconn_req(chan, ECONNRESET);
7483 	} else {
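		/* Map the S-frame super field (RR, REJ, RNR, SREJ) to the
		 * corresponding receive event.
		 */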
7484 		const u8 rx_func_to_event[4] = {
7485 			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
7486 			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
7487 		};
7488 
7489 		/* Only I-frames are expected in streaming mode */
7490 		if (chan->mode == L2CAP_MODE_STREAMING)
7491 			goto drop;
7492 
7493 		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
7494 		       control->reqseq, control->final, control->poll,
7495 		       control->super);
7496 
7497 		if (len != 0) {
7498 			BT_ERR("Trailing bytes: %d in sframe", len);
7499 			l2cap_send_disconn_req(chan, ECONNRESET);
7500 			goto drop;
7501 		}
7502 
7503 		/* Validate F and P bits */
7504 		if (control->final && (control->poll ||
7505 				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
7506 			goto drop;
7507 
7508 		event = rx_func_to_event[control->super];
7509 		if (l2cap_rx(chan, control, skb, event))
7510 			l2cap_send_disconn_req(chan, ECONNRESET);
7511 	}
7512 
7513 	return 0;
7514 
7515 drop:
7516 	kfree_skb(skb);
7517 	return 0;
7518 }
7519 
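/* Return credits to the sender once our rx_credits drop below
 * (imtu / mps) + 1, topping the count back up to that threshold.
 */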
7520 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7521 {
7522 	struct l2cap_conn *conn = chan->conn;
7523 	struct l2cap_le_credits pkt;
7524 	u16 return_credits;
7525 
7526 	return_credits = (chan->imtu / chan->mps) + 1;
7527 
7528 	if (chan->rx_credits >= return_credits)
7529 		return;
7530 
7531 	return_credits -= chan->rx_credits;
7532 
7533 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7534 
7535 	chan->rx_credits += return_credits;
7536 
7537 	pkt.cid     = cpu_to_le16(chan->scid);
7538 	pkt.credits = cpu_to_le16(return_credits);
7539 
7540 	chan->ident = l2cap_get_ident(conn);
7541 
7542 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7543 }
7544 
7545 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7546 {
7547 	int err;
7548 
7549 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7550 
7551 	/* Wait for recv to confirm reception before updating the credits */
7552 	err = chan->ops->recv(chan, skb);
7553 
7554 	/* Update credits whenever an SDU is received */
7555 	l2cap_chan_le_send_credits(chan);
7556 
7557 	return err;
7558 }
7559 
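/* Receive one LE/ECRED credit-based PDU: consume a credit, reassemble the
 * SDU from its fragments and deliver complete SDUs via l2cap_ecred_recv().
 */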
7560 static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
7561 {
7562 	int err;
7563 
7564 	if (!chan->rx_credits) {
7565 		BT_ERR("No credits to receive LE L2CAP data");
7566 		l2cap_send_disconn_req(chan, ECONNRESET);
7567 		return -ENOBUFS;
7568 	}
7569 
7570 	if (chan->imtu < skb->len) {
7571 		BT_ERR("Too big LE L2CAP PDU");
7572 		return -ENOBUFS;
7573 	}
7574 
7575 	chan->rx_credits--;
7576 	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
7577 
7578 	/* Update if the remote has run out of credits; this should only happen
7579 	 * if the remote is not using the entire MPS.
7580 	 */
7581 	if (!chan->rx_credits)
7582 		l2cap_chan_le_send_credits(chan);
7583 
7584 	err = 0;
7585 
7586 	if (!chan->sdu) {
7587 		u16 sdu_len;
7588 
7589 		sdu_len = get_unaligned_le16(skb->data);
7590 		skb_pull(skb, L2CAP_SDULEN_SIZE);
7591 
7592 		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
7593 		       sdu_len, skb->len, chan->imtu);
7594 
7595 		if (sdu_len > chan->imtu) {
7596 			BT_ERR("Too big LE L2CAP SDU length received");
7597 			err = -EMSGSIZE;
7598 			goto failed;
7599 		}
7600 
7601 		if (skb->len > sdu_len) {
7602 			BT_ERR("Too much LE L2CAP data received");
7603 			err = -EINVAL;
7604 			goto failed;
7605 		}
7606 
7607 		if (skb->len == sdu_len)
7608 			return l2cap_ecred_recv(chan, skb);
7609 
7610 		chan->sdu = skb;
7611 		chan->sdu_len = sdu_len;
7612 		chan->sdu_last_frag = skb;
7613 
7614 		/* Detect if remote is not able to use the selected MPS */
7615 		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
7616 			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
7617 
7618 			/* Adjust the number of credits */
7619 			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
7620 			chan->mps = mps_len;
7621 			l2cap_chan_le_send_credits(chan);
7622 		}
7623 
7624 		return 0;
7625 	}
7626 
7627 	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
7628 	       chan->sdu->len, skb->len, chan->sdu_len);
7629 
7630 	if (chan->sdu->len + skb->len > chan->sdu_len) {
7631 		BT_ERR("Too much LE L2CAP data received");
7632 		err = -EINVAL;
7633 		goto failed;
7634 	}
7635 
7636 	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
7637 	skb = NULL;
7638 
7639 	if (chan->sdu->len == chan->sdu_len) {
7640 		err = l2cap_ecred_recv(chan, chan->sdu);
7641 		if (!err) {
7642 			chan->sdu = NULL;
7643 			chan->sdu_last_frag = NULL;
7644 			chan->sdu_len = 0;
7645 		}
7646 	}
7647 
7648 failed:
7649 	if (err) {
7650 		kfree_skb(skb);
7651 		kfree_skb(chan->sdu);
7652 		chan->sdu = NULL;
7653 		chan->sdu_last_frag = NULL;
7654 		chan->sdu_len = 0;
7655 	}
7656 
7657 	/* We can't return an error here since we took care of the skb
7658 	 * freeing internally. An error return would cause the caller to
7659 	 * do a double-free of the skb.
7660 	 */
7661 	return 0;
7662 }
7663 
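/* Dispatch a data frame received on a dynamic or fixed channel CID
 * according to the channel's operating mode.
 */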
7664 static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
7665 			       struct sk_buff *skb)
7666 {
7667 	struct l2cap_chan *chan;
7668 
7669 	chan = l2cap_get_chan_by_scid(conn, cid);
7670 	if (!chan) {
7671 		if (cid == L2CAP_CID_A2MP) {
7672 			chan = a2mp_channel_create(conn, skb);
7673 			if (!chan) {
7674 				kfree_skb(skb);
7675 				return;
7676 			}
7677 
7678 			l2cap_chan_hold(chan);
7679 			l2cap_chan_lock(chan);
7680 		} else {
7681 			BT_DBG("unknown cid 0x%4.4x", cid);
7682 			/* Drop packet and return */
7683 			kfree_skb(skb);
7684 			return;
7685 		}
7686 	}
7687 
7688 	BT_DBG("chan %p, len %d", chan, skb->len);
7689 
7690 	/* If we receive data on a fixed channel before the info req/rsp
7691 	 * procedure is done, simply assume that the channel is supported
7692 	 * and mark it as ready.
7693 	 */
7694 	if (chan->chan_type == L2CAP_CHAN_FIXED)
7695 		l2cap_chan_ready(chan);
7696 
7697 	if (chan->state != BT_CONNECTED)
7698 		goto drop;
7699 
7700 	switch (chan->mode) {
7701 	case L2CAP_MODE_LE_FLOWCTL:
7702 	case L2CAP_MODE_EXT_FLOWCTL:
7703 		if (l2cap_ecred_data_rcv(chan, skb) < 0)
7704 			goto drop;
7705 
7706 		goto done;
7707 
7708 	case L2CAP_MODE_BASIC:
7709 		/* If the socket receive buffer overflows we drop data here,
7710 		 * which is *bad* because L2CAP has to be reliable.
7711 		 * But we don't have any other choice: L2CAP doesn't
7712 		 * provide a flow control mechanism. */
7713 
7714 		if (chan->imtu < skb->len) {
7715 			BT_ERR("Dropping L2CAP data: receive buffer overflow");
7716 			goto drop;
7717 		}
7718 
7719 		if (!chan->ops->recv(chan, skb))
7720 			goto done;
7721 		break;
7722 
7723 	case L2CAP_MODE_ERTM:
7724 	case L2CAP_MODE_STREAMING:
7725 		l2cap_data_rcv(chan, skb);
7726 		goto done;
7727 
7728 	default:
7729 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
7730 		break;
7731 	}
7732 
7733 drop:
7734 	kfree_skb(skb);
7735 
7736 done:
7737 	l2cap_chan_unlock(chan);
7738 	l2cap_chan_put(chan);
7739 }
7740 
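/* Deliver a connectionless (UCD) frame to the BR/EDR channel bound to the
 * given PSM.
 */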
7741 static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
7742 				  struct sk_buff *skb)
7743 {
7744 	struct hci_conn *hcon = conn->hcon;
7745 	struct l2cap_chan *chan;
7746 
7747 	if (hcon->type != ACL_LINK)
7748 		goto free_skb;
7749 
7750 	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
7751 					ACL_LINK);
7752 	if (!chan)
7753 		goto free_skb;
7754 
7755 	BT_DBG("chan %p, len %d", chan, skb->len);
7756 
7757 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
7758 		goto drop;
7759 
7760 	if (chan->imtu < skb->len)
7761 		goto drop;
7762 
7763 	/* Store remote BD_ADDR and PSM for msg_name */
7764 	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
7765 	bt_cb(skb)->l2cap.psm = psm;
7766 
7767 	if (!chan->ops->recv(chan, skb)) {
7768 		l2cap_chan_put(chan);
7769 		return;
7770 	}
7771 
7772 drop:
7773 	l2cap_chan_put(chan);
7774 free_skb:
7775 	kfree_skb(skb);
7776 }
7777 
7778 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7779 {
7780 	struct l2cap_hdr *lh = (void *) skb->data;
7781 	struct hci_conn *hcon = conn->hcon;
7782 	u16 cid, len;
7783 	__le16 psm;
7784 
7785 	if (hcon->state != BT_CONNECTED) {
7786 		BT_DBG("queueing pending rx skb");
7787 		skb_queue_tail(&conn->pending_rx, skb);
7788 		return;
7789 	}
7790 
7791 	skb_pull(skb, L2CAP_HDR_SIZE);
7792 	cid = __le16_to_cpu(lh->cid);
7793 	len = __le16_to_cpu(lh->len);
7794 
7795 	if (len != skb->len) {
7796 		kfree_skb(skb);
7797 		return;
7798 	}
7799 
7800 	/* Since we can't actively block incoming LE connections we must
7801 	 * at least ensure that we ignore incoming data from them.
7802 	 */
7803 	if (hcon->type == LE_LINK &&
7804 	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
7805 				   bdaddr_dst_type(hcon))) {
7806 		kfree_skb(skb);
7807 		return;
7808 	}
7809 
7810 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
7811 
7812 	switch (cid) {
7813 	case L2CAP_CID_SIGNALING:
7814 		l2cap_sig_channel(conn, skb);
7815 		break;
7816 
7817 	case L2CAP_CID_CONN_LESS:
7818 		psm = get_unaligned((__le16 *) skb->data);
7819 		skb_pull(skb, L2CAP_PSMLEN_SIZE);
7820 		l2cap_conless_channel(conn, psm, skb);
7821 		break;
7822 
7823 	case L2CAP_CID_LE_SIGNALING:
7824 		l2cap_le_sig_channel(conn, skb);
7825 		break;
7826 
7827 	default:
7828 		l2cap_data_channel(conn, cid, skb);
7829 		break;
7830 	}
7831 }
7832 
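/* Deliver frames that were queued on pending_rx while the underlying HCI
 * connection was still being established.
 */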
7833 static void process_pending_rx(struct work_struct *work)
7834 {
7835 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7836 					       pending_rx_work);
7837 	struct sk_buff *skb;
7838 
7839 	BT_DBG("");
7840 
7841 	while ((skb = skb_dequeue(&conn->pending_rx)))
7842 		l2cap_recv_frame(conn, skb);
7843 }
7844 
7845 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7846 {
7847 	struct l2cap_conn *conn = hcon->l2cap_data;
7848 	struct hci_chan *hchan;
7849 
7850 	if (conn)
7851 		return conn;
7852 
7853 	hchan = hci_chan_create(hcon);
7854 	if (!hchan)
7855 		return NULL;
7856 
7857 	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
7858 	if (!conn) {
7859 		hci_chan_del(hchan);
7860 		return NULL;
7861 	}
7862 
7863 	kref_init(&conn->ref);
7864 	hcon->l2cap_data = conn;
7865 	conn->hcon = hci_conn_get(hcon);
7866 	conn->hchan = hchan;
7867 
7868 	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7869 
7870 	switch (hcon->type) {
7871 	case LE_LINK:
7872 		if (hcon->hdev->le_mtu) {
7873 			conn->mtu = hcon->hdev->le_mtu;
7874 			break;
7875 		}
7876 		fallthrough;
7877 	default:
7878 		conn->mtu = hcon->hdev->acl_mtu;
7879 		break;
7880 	}
7881 
7882 	conn->feat_mask = 0;
7883 
7884 	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7885 
7886 	if (hcon->type == ACL_LINK &&
7887 	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
7888 		conn->local_fixed_chan |= L2CAP_FC_A2MP;
7889 
7890 	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
7891 	    (bredr_sc_enabled(hcon->hdev) ||
7892 	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
7893 		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7894 
7895 	mutex_init(&conn->ident_lock);
7896 	mutex_init(&conn->chan_lock);
7897 
7898 	INIT_LIST_HEAD(&conn->chan_l);
7899 	INIT_LIST_HEAD(&conn->users);
7900 
7901 	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
7902 
7903 	skb_queue_head_init(&conn->pending_rx);
7904 	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
7905 	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
7906 
7907 	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7908 
7909 	return conn;
7910 }
7911 
7912 static bool is_valid_psm(u16 psm, u8 dst_type) {
7913 	if (!psm)
7914 		return false;
7915 
7916 	if (bdaddr_type_is_le(dst_type))
7917 		return (psm <= 0x00ff);
7918 
7919 	/* PSM must be odd and lsb of upper byte must be 0 */
7920 	return ((psm & 0x0101) == 0x0001);
7921 }
7922 
7923 struct l2cap_chan_data {
7924 	struct l2cap_chan *chan;
7925 	struct pid *pid;
7926 	int count;
7927 };
7928 
7929 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7930 {
7931 	struct l2cap_chan_data *d = data;
7932 	struct pid *pid;
7933 
7934 	if (chan == d->chan)
7935 		return;
7936 
7937 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7938 		return;
7939 
7940 	pid = chan->ops->get_peer_pid(chan);
7941 
7942 	/* Only count deferred channels with the same PID/PSM */
7943 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7944 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7945 		return;
7946 
7947 	d->count++;
7948 }
7949 
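/* Initiate an outgoing connection on the given channel: validate the
 * PSM/CID and mode, create the HCI and L2CAP connections if needed and
 * start channel setup.
 */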
7950 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7951 		       bdaddr_t *dst, u8 dst_type)
7952 {
7953 	struct l2cap_conn *conn;
7954 	struct hci_conn *hcon;
7955 	struct hci_dev *hdev;
7956 	int err;
7957 
7958 	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
7959 	       dst, dst_type, __le16_to_cpu(psm), chan->mode);
7960 
7961 	hdev = hci_get_route(dst, &chan->src, chan->src_type);
7962 	if (!hdev)
7963 		return -EHOSTUNREACH;
7964 
7965 	hci_dev_lock(hdev);
7966 
7967 	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7968 	    chan->chan_type != L2CAP_CHAN_RAW) {
7969 		err = -EINVAL;
7970 		goto done;
7971 	}
7972 
7973 	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7974 		err = -EINVAL;
7975 		goto done;
7976 	}
7977 
7978 	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
7979 		err = -EINVAL;
7980 		goto done;
7981 	}
7982 
7983 	switch (chan->mode) {
7984 	case L2CAP_MODE_BASIC:
7985 		break;
7986 	case L2CAP_MODE_LE_FLOWCTL:
7987 		break;
7988 	case L2CAP_MODE_EXT_FLOWCTL:
7989 		if (!enable_ecred) {
7990 			err = -EOPNOTSUPP;
7991 			goto done;
7992 		}
7993 		break;
7994 	case L2CAP_MODE_ERTM:
7995 	case L2CAP_MODE_STREAMING:
7996 		if (!disable_ertm)
7997 			break;
7998 		fallthrough;
7999 	default:
8000 		err = -EOPNOTSUPP;
8001 		goto done;
8002 	}
8003 
8004 	switch (chan->state) {
8005 	case BT_CONNECT:
8006 	case BT_CONNECT2:
8007 	case BT_CONFIG:
8008 		/* Already connecting */
8009 		err = 0;
8010 		goto done;
8011 
8012 	case BT_CONNECTED:
8013 		/* Already connected */
8014 		err = -EISCONN;
8015 		goto done;
8016 
8017 	case BT_OPEN:
8018 	case BT_BOUND:
8019 		/* Can connect */
8020 		break;
8021 
8022 	default:
8023 		err = -EBADFD;
8024 		goto done;
8025 	}
8026 
8027 	/* Set destination address and psm */
8028 	bacpy(&chan->dst, dst);
8029 	chan->dst_type = dst_type;
8030 
8031 	chan->psm = psm;
8032 	chan->dcid = cid;
8033 
8034 	if (bdaddr_type_is_le(dst_type)) {
8035 		/* Convert from L2CAP channel address type to HCI address type
8036 		 */
8037 		if (dst_type == BDADDR_LE_PUBLIC)
8038 			dst_type = ADDR_LE_DEV_PUBLIC;
8039 		else
8040 			dst_type = ADDR_LE_DEV_RANDOM;
8041 
8042 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
8043 			hcon = hci_connect_le(hdev, dst, dst_type,
8044 					      chan->sec_level,
8045 					      HCI_LE_CONN_TIMEOUT,
8046 					      HCI_ROLE_SLAVE, NULL);
8047 		else
8048 			hcon = hci_connect_le_scan(hdev, dst, dst_type,
8049 						   chan->sec_level,
8050 						   HCI_LE_CONN_TIMEOUT,
8051 						   CONN_REASON_L2CAP_CHAN);
8052 
8053 	} else {
8054 		u8 auth_type = l2cap_get_auth_type(chan);
8055 		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
8056 				       CONN_REASON_L2CAP_CHAN);
8057 	}
8058 
8059 	if (IS_ERR(hcon)) {
8060 		err = PTR_ERR(hcon);
8061 		goto done;
8062 	}
8063 
8064 	conn = l2cap_conn_add(hcon);
8065 	if (!conn) {
8066 		hci_conn_drop(hcon);
8067 		err = -ENOMEM;
8068 		goto done;
8069 	}
8070 
8071 	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
8072 		struct l2cap_chan_data data;
8073 
8074 		data.chan = chan;
8075 		data.pid = chan->ops->get_peer_pid(chan);
8076 		data.count = 1;
8077 
8078 		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
8079 
8080 		/* Check that there aren't too many channels being connected */
8081 		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
8082 			hci_conn_drop(hcon);
8083 			err = -EPROTO;
8084 			goto done;
8085 		}
8086 	}
8087 
8088 	mutex_lock(&conn->chan_lock);
8089 	l2cap_chan_lock(chan);
8090 
8091 	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
8092 		hci_conn_drop(hcon);
8093 		err = -EBUSY;
8094 		goto chan_unlock;
8095 	}
8096 
8097 	/* Update source addr of the socket */
8098 	bacpy(&chan->src, &hcon->src);
8099 	chan->src_type = bdaddr_src_type(hcon);
8100 
8101 	__l2cap_chan_add(conn, chan);
8102 
8103 	/* l2cap_chan_add takes its own ref so we can drop this one */
8104 	hci_conn_drop(hcon);
8105 
8106 	l2cap_state_change(chan, BT_CONNECT);
8107 	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
8108 
8109 	/* Release chan->sport so that it can be reused by other
8110 	 * sockets (as it's only used for listening sockets).
8111 	 */
8112 	write_lock(&chan_list_lock);
8113 	chan->sport = 0;
8114 	write_unlock(&chan_list_lock);
8115 
8116 	if (hcon->state == BT_CONNECTED) {
8117 		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
8118 			__clear_chan_timer(chan);
8119 			if (l2cap_chan_check_security(chan, true))
8120 				l2cap_state_change(chan, BT_CONNECTED);
8121 		} else
8122 			l2cap_do_start(chan);
8123 	}
8124 
8125 	err = 0;
8126 
8127 chan_unlock:
8128 	l2cap_chan_unlock(chan);
8129 	mutex_unlock(&conn->chan_lock);
8130 done:
8131 	hci_dev_unlock(hdev);
8132 	hci_dev_put(hdev);
8133 	return err;
8134 }
8135 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8136 
8137 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8138 {
8139 	struct l2cap_conn *conn = chan->conn;
8140 	struct {
8141 		struct l2cap_ecred_reconf_req req;
8142 		__le16 scid;
8143 	} pdu;
8144 
8145 	pdu.req.mtu = cpu_to_le16(chan->imtu);
8146 	pdu.req.mps = cpu_to_le16(chan->mps);
8147 	pdu.scid    = cpu_to_le16(chan->scid);
8148 
8149 	chan->ident = l2cap_get_ident(conn);
8150 
8151 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8152 		       sizeof(pdu), &pdu);
8153 }
8154 
8155 int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
8156 {
8157 	if (chan->imtu > mtu)
8158 		return -EINVAL;
8159 
8160 	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);
8161 
8162 	chan->imtu = mtu;
8163 
8164 	l2cap_ecred_reconfigure(chan);
8165 
8166 	return 0;
8167 }
8168 
8169 /* ---- L2CAP interface with lower layer (HCI) ---- */
8170 
8171 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
8172 {
8173 	int exact = 0, lm1 = 0, lm2 = 0;
8174 	struct l2cap_chan *c;
8175 
8176 	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
8177 
8178 	/* Find listening sockets and check their link_mode */
8179 	read_lock(&chan_list_lock);
8180 	list_for_each_entry(c, &chan_list, global_l) {
8181 		if (c->state != BT_LISTEN)
8182 			continue;
8183 
8184 		if (!bacmp(&c->src, &hdev->bdaddr)) {
8185 			lm1 |= HCI_LM_ACCEPT;
8186 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8187 				lm1 |= HCI_LM_MASTER;
8188 			exact++;
8189 		} else if (!bacmp(&c->src, BDADDR_ANY)) {
8190 			lm2 |= HCI_LM_ACCEPT;
8191 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
8192 				lm2 |= HCI_LM_MASTER;
8193 		}
8194 	}
8195 	read_unlock(&chan_list_lock);
8196 
8197 	return exact ? lm1 : lm2;
8198 }
8199 
8200 /* Find the next fixed channel in BT_LISTEN state, continue iteration
8201  * from an existing channel in the list or from the beginning of the
8202  * global list (by passing NULL as first parameter).
8203  */
8204 static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
8205 						  struct hci_conn *hcon)
8206 {
8207 	u8 src_type = bdaddr_src_type(hcon);
8208 
8209 	read_lock(&chan_list_lock);
8210 
8211 	if (c)
8212 		c = list_next_entry(c, global_l);
8213 	else
8214 		c = list_entry(chan_list.next, typeof(*c), global_l);
8215 
8216 	list_for_each_entry_from(c, &chan_list, global_l) {
8217 		if (c->chan_type != L2CAP_CHAN_FIXED)
8218 			continue;
8219 		if (c->state != BT_LISTEN)
8220 			continue;
8221 		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
8222 			continue;
8223 		if (src_type != c->src_type)
8224 			continue;
8225 
8226 		c = l2cap_chan_hold_unless_zero(c);
8227 		read_unlock(&chan_list_lock);
8228 		return c;
8229 	}
8230 
8231 	read_unlock(&chan_list_lock);
8232 
8233 	return NULL;
8234 }
8235 
8236 static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
8237 {
8238 	struct hci_dev *hdev = hcon->hdev;
8239 	struct l2cap_conn *conn;
8240 	struct l2cap_chan *pchan;
8241 	u8 dst_type;
8242 
8243 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8244 		return;
8245 
8246 	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
8247 
8248 	if (status) {
8249 		l2cap_conn_del(hcon, bt_to_errno(status));
8250 		return;
8251 	}
8252 
8253 	conn = l2cap_conn_add(hcon);
8254 	if (!conn)
8255 		return;
8256 
8257 	dst_type = bdaddr_dst_type(hcon);
8258 
8259 	/* If device is blocked, do not create channels for it */
8260 	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
8261 		return;
8262 
8263 	/* Find fixed channels and notify them of the new connection. We
8264 	 * use multiple individual lookups, continuing each time where
8265 	 * we left off, because the list lock would prevent calling the
8266 	 * potentially sleeping l2cap_chan_lock() function.
8267 	 */
8268 	pchan = l2cap_global_fixed_chan(NULL, hcon);
8269 	while (pchan) {
8270 		struct l2cap_chan *chan, *next;
8271 
8272 		/* Client fixed channels should override server ones */
8273 		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
8274 			goto next;
8275 
8276 		l2cap_chan_lock(pchan);
8277 		chan = pchan->ops->new_connection(pchan);
8278 		if (chan) {
8279 			bacpy(&chan->src, &hcon->src);
8280 			bacpy(&chan->dst, &hcon->dst);
8281 			chan->src_type = bdaddr_src_type(hcon);
8282 			chan->dst_type = dst_type;
8283 
8284 			__l2cap_chan_add(conn, chan);
8285 		}
8286 
8287 		l2cap_chan_unlock(pchan);
8288 next:
8289 		next = l2cap_global_fixed_chan(pchan, hcon);
8290 		l2cap_chan_put(pchan);
8291 		pchan = next;
8292 	}
8293 
8294 	l2cap_conn_ready(conn);
8295 }
8296 
8297 int l2cap_disconn_ind(struct hci_conn *hcon)
8298 {
8299 	struct l2cap_conn *conn = hcon->l2cap_data;
8300 
8301 	BT_DBG("hcon %p", hcon);
8302 
8303 	if (!conn)
8304 		return HCI_ERROR_REMOTE_USER_TERM;
8305 	return conn->disc_reason;
8306 }
8307 
8308 static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
8309 {
8310 	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
8311 		return;
8312 
8313 	BT_DBG("hcon %p reason %d", hcon, reason);
8314 
8315 	l2cap_conn_del(hcon, bt_to_errno(reason));
8316 }
8317 
8318 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
8319 {
8320 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
8321 		return;
8322 
8323 	if (encrypt == 0x00) {
8324 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
8325 			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
8326 		} else if (chan->sec_level == BT_SECURITY_HIGH ||
8327 			   chan->sec_level == BT_SECURITY_FIPS)
8328 			l2cap_chan_close(chan, ECONNREFUSED);
8329 	} else {
8330 		if (chan->sec_level == BT_SECURITY_MEDIUM)
8331 			__clear_chan_timer(chan);
8332 	}
8333 }
8334 
8335 static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
8336 {
8337 	struct l2cap_conn *conn = hcon->l2cap_data;
8338 	struct l2cap_chan *chan;
8339 
8340 	if (!conn)
8341 		return;
8342 
8343 	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
8344 
8345 	mutex_lock(&conn->chan_lock);
8346 
8347 	list_for_each_entry(chan, &conn->chan_l, list) {
8348 		l2cap_chan_lock(chan);
8349 
8350 		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
8351 		       state_to_string(chan->state));
8352 
8353 		if (chan->scid == L2CAP_CID_A2MP) {
8354 			l2cap_chan_unlock(chan);
8355 			continue;
8356 		}
8357 
8358 		if (!status && encrypt)
8359 			chan->sec_level = hcon->sec_level;
8360 
8361 		if (!__l2cap_no_conn_pending(chan)) {
8362 			l2cap_chan_unlock(chan);
8363 			continue;
8364 		}
8365 
8366 		if (!status && (chan->state == BT_CONNECTED ||
8367 				chan->state == BT_CONFIG)) {
8368 			chan->ops->resume(chan);
8369 			l2cap_check_encryption(chan, encrypt);
8370 			l2cap_chan_unlock(chan);
8371 			continue;
8372 		}
8373 
8374 		if (chan->state == BT_CONNECT) {
8375 			if (!status && l2cap_check_enc_key_size(hcon))
8376 				l2cap_start_connection(chan);
8377 			else
8378 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8379 		} else if (chan->state == BT_CONNECT2 &&
8380 			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
8381 			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
8382 			struct l2cap_conn_rsp rsp;
8383 			__u16 res, stat;
8384 
8385 			if (!status && l2cap_check_enc_key_size(hcon)) {
8386 				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
8387 					res = L2CAP_CR_PEND;
8388 					stat = L2CAP_CS_AUTHOR_PEND;
8389 					chan->ops->defer(chan);
8390 				} else {
8391 					l2cap_state_change(chan, BT_CONFIG);
8392 					res = L2CAP_CR_SUCCESS;
8393 					stat = L2CAP_CS_NO_INFO;
8394 				}
8395 			} else {
8396 				l2cap_state_change(chan, BT_DISCONN);
8397 				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
8398 				res = L2CAP_CR_SEC_BLOCK;
8399 				stat = L2CAP_CS_NO_INFO;
8400 			}
8401 
8402 			rsp.scid   = cpu_to_le16(chan->dcid);
8403 			rsp.dcid   = cpu_to_le16(chan->scid);
8404 			rsp.result = cpu_to_le16(res);
8405 			rsp.status = cpu_to_le16(stat);
8406 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
8407 				       sizeof(rsp), &rsp);
8408 
8409 			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
8410 			    res == L2CAP_CR_SUCCESS) {
8411 				char buf[128];
8412 				set_bit(CONF_REQ_SENT, &chan->conf_state);
8413 				l2cap_send_cmd(conn, l2cap_get_ident(conn),
8414 					       L2CAP_CONF_REQ,
8415 					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
8416 					       buf);
8417 				chan->num_conf_req++;
8418 			}
8419 		}
8420 
8421 		l2cap_chan_unlock(chan);
8422 	}
8423 
8424 	mutex_unlock(&conn->chan_lock);
8425 }
8426 
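/* Entry point for ACL data from the HCI layer: reassemble L2CAP frames
 * from ACL start/continuation fragments and pass complete frames to
 * l2cap_recv_frame().
 */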
8427 void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
8428 {
8429 	struct l2cap_conn *conn = hcon->l2cap_data;
8430 	struct l2cap_hdr *hdr;
8431 	int len;
8432 
8433 	/* For an AMP controller do not create an l2cap conn */
8434 	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
8435 		goto drop;
8436 
8437 	if (!conn)
8438 		conn = l2cap_conn_add(hcon);
8439 
8440 	if (!conn)
8441 		goto drop;
8442 
8443 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
8444 
8445 	switch (flags) {
8446 	case ACL_START:
8447 	case ACL_START_NO_FLUSH:
8448 	case ACL_COMPLETE:
8449 		if (conn->rx_len) {
8450 			BT_ERR("Unexpected start frame (len %d)", skb->len);
8451 			kfree_skb(conn->rx_skb);
8452 			conn->rx_skb = NULL;
8453 			conn->rx_len = 0;
8454 			l2cap_conn_unreliable(conn, ECOMM);
8455 		}
8456 
8457 		/* A start fragment always begins with the Basic L2CAP header */
8458 		if (skb->len < L2CAP_HDR_SIZE) {
8459 			BT_ERR("Frame is too short (len %d)", skb->len);
8460 			l2cap_conn_unreliable(conn, ECOMM);
8461 			goto drop;
8462 		}
8463 
8464 		hdr = (struct l2cap_hdr *) skb->data;
8465 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
8466 
8467 		if (len == skb->len) {
8468 			/* Complete frame received */
8469 			l2cap_recv_frame(conn, skb);
8470 			return;
8471 		}
8472 
8473 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
8474 
8475 		if (skb->len > len) {
8476 			BT_ERR("Frame is too long (len %d, expected len %d)",
8477 			       skb->len, len);
8478 			l2cap_conn_unreliable(conn, ECOMM);
8479 			goto drop;
8480 		}
8481 
8482 		/* Allocate skb for the complete frame (with header) */
8483 		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
8484 		if (!conn->rx_skb)
8485 			goto drop;
8486 
8487 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
8488 					  skb->len);
8489 		conn->rx_len = len - skb->len;
8490 		break;
8491 
8492 	case ACL_CONT:
8493 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
8494 
8495 		if (!conn->rx_len) {
8496 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
8497 			l2cap_conn_unreliable(conn, ECOMM);
8498 			goto drop;
8499 		}
8500 
8501 		if (skb->len > conn->rx_len) {
8502 			BT_ERR("Fragment is too long (len %d, expected %d)",
8503 			       skb->len, conn->rx_len);
8504 			kfree_skb(conn->rx_skb);
8505 			conn->rx_skb = NULL;
8506 			conn->rx_len = 0;
8507 			l2cap_conn_unreliable(conn, ECOMM);
8508 			goto drop;
8509 		}
8510 
8511 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
8512 					  skb->len);
8513 		conn->rx_len -= skb->len;
8514 
8515 		if (!conn->rx_len) {
8516 			/* Complete frame received. l2cap_recv_frame
8517 			 * takes ownership of the skb so set the global
8518 			 * rx_skb pointer to NULL first.
8519 			 */
8520 			struct sk_buff *rx_skb = conn->rx_skb;
8521 			conn->rx_skb = NULL;
8522 			l2cap_recv_frame(conn, rx_skb);
8523 		}
8524 		break;
8525 	}
8526 
8527 drop:
8528 	kfree_skb(skb);
8529 }
8530 
8531 static struct hci_cb l2cap_cb = {
8532 	.name		= "L2CAP",
8533 	.connect_cfm	= l2cap_connect_cfm,
8534 	.disconn_cfm	= l2cap_disconn_cfm,
8535 	.security_cfm	= l2cap_security_cfm,
8536 };
8537 
8538 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8539 {
8540 	struct l2cap_chan *c;
8541 
8542 	read_lock(&chan_list_lock);
8543 
8544 	list_for_each_entry(c, &chan_list, global_l) {
8545 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8546 			   &c->src, c->src_type, &c->dst, c->dst_type,
8547 			   c->state, __le16_to_cpu(c->psm),
8548 			   c->scid, c->dcid, c->imtu, c->omtu,
8549 			   c->sec_level, c->mode);
8550 	}
8551 
8552 	read_unlock(&chan_list_lock);
8553 
8554 	return 0;
8555 }
8556 
8557 DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);
8558 
8559 static struct dentry *l2cap_debugfs;
8560 
8561 int __init l2cap_init(void)
8562 {
8563 	int err;
8564 
8565 	err = l2cap_init_sockets();
8566 	if (err < 0)
8567 		return err;
8568 
8569 	hci_register_cb(&l2cap_cb);
8570 
8571 	if (IS_ERR_OR_NULL(bt_debugfs))
8572 		return 0;
8573 
8574 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8575 					    NULL, &l2cap_debugfs_fops);
8576 
8577 	return 0;
8578 }
8579 
8580 void l2cap_exit(void)
8581 {
8582 	debugfs_remove(l2cap_debugfs);
8583 	hci_unregister_cb(&l2cap_cb);
8584 	l2cap_cleanup_sockets();
8585 }
8586 
8587 module_param(disable_ertm, bool, 0644);
8588 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
8589 
8590 module_param(enable_ecred, bool, 0644);
8591 MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8592