• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
45 #define LE_FLOWCTL_MAX_CREDITS 65535
46 
47 bool disable_ertm;
48 bool enable_ecred;
49 
50 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
51 
52 static LIST_HEAD(chan_list);
53 static DEFINE_RWLOCK(chan_list_lock);
54 
55 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
56 				       u8 code, u8 ident, u16 dlen, void *data);
57 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
58 			   void *data);
59 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
60 static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
61 
62 static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
63 		     struct sk_buff_head *skbs, u8 event);
64 static void l2cap_retrans_timeout(struct work_struct *work);
65 static void l2cap_monitor_timeout(struct work_struct *work);
66 static void l2cap_ack_timeout(struct work_struct *work);
67 
/* Map an HCI link type plus HCI address type to the corresponding
 * BDADDR_* socket-level address type.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	return bdaddr_type == ADDR_LE_DEV_PUBLIC ? BDADDR_LE_PUBLIC :
						   BDADDR_LE_RANDOM;
}
79 
/* Socket-level address type of the local (source) side of @hcon. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
84 
/* Socket-level address type of the remote (destination) side of @hcon. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
89 
90 /* ---- L2CAP channels ---- */
91 
/* Look up a channel on @conn by destination CID.
 * Caller must hold conn->chan_lock; no reference is taken.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		if (chan->dcid == cid)
			return chan;

	return NULL;
}
103 
/* Look up a channel on @conn by source CID.
 * Caller must hold conn->chan_lock; no reference is taken.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		if (chan->scid == cid)
			return chan;

	return NULL;
}
115 
/* Find channel with given SCID.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	/* On success the caller must l2cap_chan_unlock() and l2cap_chan_put() */
	return c;
}
136 
/* Find channel with given DCID.
 * Returns a reference locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	/* On success the caller must l2cap_chan_unlock() and l2cap_chan_put() */
	return c;
}
157 
/* Look up a channel on @conn by pending signalling command identifier.
 * Caller must hold conn->chan_lock; no reference is taken.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list)
		if (chan->ident == ident)
			return chan;

	return NULL;
}
169 
/* Find channel with given signalling command identifier.
 * Returns a reference locked channel (see l2cap_get_chan_by_scid).
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}
187 
/* Search the global channel list for a channel bound to @psm/@src.
 * BR/EDR and LE PSMs live in separate namespaces, so an entry only
 * matches when both sides are BR/EDR or both are LE.
 * Caller must hold chan_list_lock.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		bool want_bredr = (src_type == BDADDR_BREDR);
		bool have_bredr = (c->src_type == BDADDR_BREDR);

		if (want_bredr != have_bredr)
			continue;

		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}

	return NULL;
}
205 
/* Bind @chan to a PSM. A non-zero @psm is used verbatim if free;
 * @psm == 0 requests automatic allocation from the dynamic range.
 * Returns 0 on success, -EADDRINUSE if the requested PSM is taken,
 * or -EINVAL if the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			/* BR/EDR dynamic PSMs are odd values; step by 2 */
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
250 
/* Bind @chan directly to a fixed source CID (e.g. ATT, SMP).
 * Always succeeds; returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
265 
l2cap_alloc_cid(struct l2cap_conn * conn)266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
267 {
268 	u16 cid, dyn_end;
269 
270 	if (conn->hcon->type == LE_LINK)
271 		dyn_end = L2CAP_CID_LE_DYN_END;
272 	else
273 		dyn_end = L2CAP_CID_DYN_END;
274 
275 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 		if (!__l2cap_get_chan_by_scid(conn, cid))
277 			return cid;
278 	}
279 
280 	return 0;
281 }
282 
/* Transition @chan to @state and notify the channel owner via the
 * state_change callback (with no error).
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
291 
l2cap_state_change_and_error(struct l2cap_chan * chan,int state,int err)292 static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
293 						int state, int err)
294 {
295 	chan->state = state;
296 	chan->ops->state_change(chan, chan->state, err);
297 }
298 
/* Report @err to the channel owner without changing the state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
303 
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending (monitor supersedes retransmission) or the timeout
 * is configured as zero.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
312 
/* Arm the ERTM monitor timer; the retransmission timer is stopped
 * first since only one of the two runs at a time.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
321 
l2cap_ertm_seq_in_queue(struct sk_buff_head * head,u16 seq)322 static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
323 					       u16 seq)
324 {
325 	struct sk_buff *skb;
326 
327 	skb_queue_walk(head, skb) {
328 		if (bt_cb(skb)->l2cap.txseq == seq)
329 			return skb;
330 	}
331 
332 	return NULL;
333 }
334 
335 /* ---- L2CAP sequence number lists ---- */
336 
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338  * SREJ requests that are received and for frames that are to be
339  * retransmitted. These seq_list functions implement a singly-linked
340  * list in an array, where membership in the list can also be checked
341  * in constant time. Items can also be added to the tail of the list
342  * and removed from the head in constant time, without further memory
343  * allocs or frees.
344  */
345 
/* Allocate and reset a sequence list sized for @size entries.
 * Returns 0 on success or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	/* Power-of-two size makes "seq & mask" a cheap slot lookup */
	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
368 
/* Release the backing array of @seq_list (kfree(NULL) is a no-op). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
373 
l2cap_seq_list_contains(struct l2cap_seq_list * seq_list,u16 seq)374 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
375 					   u16 seq)
376 {
377 	/* Constant-time check for list membership */
378 	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
379 }
380 
/* Remove and return the sequence number at the head of @seq_list.
 * The popped slot is re-marked CLEAR so membership checks stay valid;
 * popping the last element resets the list to the empty state.
 * Caller must ensure the list is non-empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the next link before clearing the slot */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
396 
/* Empty @seq_list, clearing every slot of the backing array. */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	/* Already empty - nothing to wipe */
	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
410 
/* Append @seq to the tail of @seq_list; duplicates are ignored so the
 * list stays a set.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;	/* first element */
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
428 
/* Delayed-work handler for the channel timer: close the channel with
 * an error that depends on the state it timed out in.
 * Runs with conn->chan_lock then the channel lock held; drops the
 * reference that __set_chan_timer() took when scheduling this work.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
461 
/* Allocate and initialise a new L2CAP channel with a single kref,
 * add it to the global channel list, and return it in BT_OPEN state.
 * Returns NULL on allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	/* Initial reference; released by l2cap_chan_put() */
	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
498 
/* kref release callback: unlink the channel from the global list and
 * free it. Called when the last reference is dropped via
 * l2cap_chan_put().
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
511 
/* Take an additional reference on @c; caller must already hold one. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_get(&c->kref);
}
518 
/* Take a reference on @c only if its refcount has not dropped to zero.
 * Returns @c on success or NULL if the channel is already being freed.
 */
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	if (!kref_get_unless_zero(&c->kref))
		return NULL;

	return c;
}
528 
/* Drop a reference on @c; frees it via l2cap_chan_destroy() when the
 * count reaches zero.
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
536 
/* Reset @chan's negotiable parameters (ERTM windows, timeouts,
 * security level, FCS) to the spec defaults prior to configuration.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	/* Cleared again in l2cap_chan_ready() once configuration is done */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
557 
/* Initialise LE credit-based flow control state on @chan, seeding our
 * transmit credits from the peer's @tx_credits.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}
571 
/* Initialise enhanced credit-based (ECRED) flow control: same as LE
 * flow control but with the spec-mandated minimum MPS enforced.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* Recompute credits for the adjusted MPS */
		chan->rx_credits = (chan->imtu / chan->mps) + 1;
	}
}
582 
/* Attach @chan to @conn: assign CIDs/MTU according to the channel
 * type, take a channel reference and (when applicable) an hci_conn
 * reference, and link the channel into the connection's list.
 * Caller must hold conn->chan_lock.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference held by conn->chan_l; dropped in l2cap_chan_del() */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
634 
/* Locked wrapper around __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
641 
/* Detach @chan from its connection and tear down mode-specific state
 * (timers, retransmission queues, sequence lists), reporting @err to
 * the owner. Drops the references taken in __l2cap_chan_add().
 * Caller must hold the channel lock and conn->chan_lock (if attached).
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		/* Drop the reference held by conn->chan_l */
		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	/* Mode-specific state below only exists once config completed */
	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);
		fallthrough;

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
710 
__l2cap_chan_list_id(struct l2cap_conn * conn,u16 id,l2cap_chan_func_t func,void * data)711 static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
712 				 l2cap_chan_func_t func, void *data)
713 {
714 	struct l2cap_chan *chan, *l;
715 
716 	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
717 		if (chan->ident == id)
718 			func(chan, data);
719 	}
720 }
721 
__l2cap_chan_list(struct l2cap_conn * conn,l2cap_chan_func_t func,void * data)722 static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
723 			      void *data)
724 {
725 	struct l2cap_chan *chan;
726 
727 	list_for_each_entry(chan, &conn->chan_l, list) {
728 		func(chan, data);
729 	}
730 }
731 
l2cap_chan_list(struct l2cap_conn * conn,l2cap_chan_func_t func,void * data)732 void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
733 		     void *data)
734 {
735 	if (!conn)
736 		return;
737 
738 	mutex_lock(&conn->chan_lock);
739 	__l2cap_chan_list(conn, func, data);
740 	mutex_unlock(&conn->chan_lock);
741 }
742 
743 EXPORT_SYMBOL_GPL(l2cap_chan_list);
744 
/* Work handler: after an LE identity-address resolution, copy the
 * hci_conn's (possibly updated) destination address and type into
 * every channel on the connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
763 
/* Reject a pending LE credit-based connection request: move the
 * channel to BT_DISCONN and send an LE Connection Response whose
 * result depends on whether setup was deferred (authorization
 * pending) or the PSM was simply not accepted.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_LE_AUTHORIZATION;
	else
		result = L2CAP_CR_LE_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
786 
/* Reject a pending enhanced credit-based connection: move to
 * BT_DISCONN and let the deferred ECRED response path send the reply.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
793 
/* Reject a pending BR/EDR connection request: move the channel to
 * BT_DISCONN and send a Connection Response with a security-block or
 * bad-PSM result.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	/* Note: rsp fields are from the peer's point of view */
	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
814 
/* Close @chan with error @reason, taking the action appropriate to
 * its current state: tear down listeners, send a Disconnect Request
 * for established conn-oriented channels, reject half-open incoming
 * connections, or simply delete the channel.
 * Caller must hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			/* Give the peer time to answer before timing out */
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ECRED response is deferred; skip del */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
865 
/* Translate the channel type and requested security level into the
 * HCI authentication requirement to use for pairing. May raise
 * sec_level from LOW to SDP for the SDP/3DSP PSMs as a side effect.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
917 
/* Service level security */
/* Ensure the link meets @chan's security level: SMP pairing on LE,
 * HCI authentication/encryption on BR/EDR. Return value follows
 * smp_conn_security()/hci_conn_security() conventions.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
932 
/* Allocate the next signalling command identifier for @conn,
 * cycling through 1..128 under conn->ident_lock.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
954 
l2cap_send_cmd(struct l2cap_conn * conn,u8 ident,u8 code,u16 len,void * data)955 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
956 			   void *data)
957 {
958 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
959 	u8 flags;
960 
961 	BT_DBG("code 0x%2.2x", code);
962 
963 	if (!skb)
964 		return;
965 
966 	/* Use NO_FLUSH if supported or we have an LE link (which does
967 	 * not support auto-flushing packets) */
968 	if (lmp_no_flush_capable(conn->hcon->hdev) ||
969 	    conn->hcon->type == LE_LINK)
970 		flags = ACL_START_NO_FLUSH;
971 	else
972 		flags = ACL_START;
973 
974 	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
975 	skb->priority = HCI_PRIO_MAX;
976 
977 	hci_send_acl(conn->hchan, skb, flags);
978 }
979 
__chan_is_moving(struct l2cap_chan * chan)980 static bool __chan_is_moving(struct l2cap_chan *chan)
981 {
982 	return chan->move_state != L2CAP_MOVE_STABLE &&
983 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
984 }
985 
/* Transmit @skb on @chan, routing to the high-speed (AMP) logical
 * link when one exists and no channel move is in flight, otherwise
 * over the BR/EDR or LE ACL link with appropriate flush flags.
 * Consumes @skb in all paths.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);	/* AMP link gone; drop the frame */

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1017 
/* Decode a 16-bit ERTM enhanced control field into @control.
 * Fields that do not apply to the frame type (S- vs I-frame) are
 * zeroed so stale values never leak through.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
1041 
/* Decode a 32-bit extended control field into @control, zeroing the
 * fields that do not apply to the decoded frame type.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	bool is_sframe = !!(ext & L2CAP_EXT_CTRL_FRAME_TYPE);

	/* Fields common to S-frames and I-frames */
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
	control->sframe = is_sframe ? 1 : 0;

	if (is_sframe) {
		/* S-frame: supervisory fields only */
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-frame: segmentation and tx sequence fields only */
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
		control->poll = 0;
		control->super = 0;
	}
}
1065 
__unpack_control(struct l2cap_chan * chan,struct sk_buff * skb)1066 static inline void __unpack_control(struct l2cap_chan *chan,
1067 				    struct sk_buff *skb)
1068 {
1069 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1070 		__unpack_extended_control(get_unaligned_le32(skb->data),
1071 					  &bt_cb(skb)->l2cap);
1072 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1073 	} else {
1074 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1075 					  &bt_cb(skb)->l2cap);
1076 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1077 	}
1078 }
1079 
__pack_extended_control(struct l2cap_ctrl * control)1080 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1081 {
1082 	u32 packed;
1083 
1084 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1085 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1086 
1087 	if (control->sframe) {
1088 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1089 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1090 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1091 	} else {
1092 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1093 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1094 	}
1095 
1096 	return packed;
1097 }
1098 
__pack_enhanced_control(struct l2cap_ctrl * control)1099 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1100 {
1101 	u16 packed;
1102 
1103 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1104 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1105 
1106 	if (control->sframe) {
1107 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1108 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1109 		packed |= L2CAP_CTRL_FRAME_TYPE;
1110 	} else {
1111 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1112 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1113 	}
1114 
1115 	return packed;
1116 }
1117 
__pack_control(struct l2cap_chan * chan,struct l2cap_ctrl * control,struct sk_buff * skb)1118 static inline void __pack_control(struct l2cap_chan *chan,
1119 				  struct l2cap_ctrl *control,
1120 				  struct sk_buff *skb)
1121 {
1122 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1123 		put_unaligned_le32(__pack_extended_control(control),
1124 				   skb->data + L2CAP_HDR_SIZE);
1125 	} else {
1126 		put_unaligned_le16(__pack_enhanced_control(control),
1127 				   skb->data + L2CAP_HDR_SIZE);
1128 	}
1129 }
1130 
__ertm_hdr_size(struct l2cap_chan * chan)1131 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1132 {
1133 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1134 		return L2CAP_EXT_HDR_SIZE;
1135 	else
1136 		return L2CAP_ENH_HDR_SIZE;
1137 }
1138 
/* Allocate and fill a complete supervisory-frame PDU for @chan carrying
 * the already-packed @control field. Returns ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	int hlen = __ertm_hdr_size(chan);
	struct sk_buff *skb;
	struct l2cap_hdr *lh;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic header; len excludes the basic header itself */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	/* Control field, 32 bits when extended control is in use */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	/* FCS covers everything accumulated so far */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(crc16(0, (u8 *)skb->data, skb->len),
				   skb_put(skb, L2CAP_FCS_SIZE));

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1171 
/* Build and transmit a single supervisory (S) frame described by
 * @control, updating the channel's F-bit, RNR and ack bookkeeping on
 * the way. Silently ignored for I-frame control blocks or while an AMP
 * channel move is in progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	/* Don't transmit while the channel is being moved between links */
	if (__chan_is_moving(chan))
		return;

	/* A pending F-bit is piggybacked on any non-poll S-frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	/* Track whether we last told the peer we were busy (RNR) */
	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Everything but SREJ acknowledges up to reqseq, so a pending
	 * delayed ack becomes unnecessary.
	 */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1212 
/* Send an RR or RNR supervisory frame acknowledging buffer_seq; RNR is
 * chosen while the local side is busy, RR otherwise.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control = {
		.sframe = 1,
		.poll = poll,
	};

	BT_DBG("chan %p, poll %d", chan, poll);

	/* Advertise the local busy state to the peer */
	control.super = test_bit(CONN_LOCAL_BUSY, &chan->conn_state) ?
			L2CAP_SUPER_RNR : L2CAP_SUPER_RR;
	control.reqseq = chan->buffer_seq;

	l2cap_send_sframe(chan, &control);
}
1231 
__l2cap_no_conn_pending(struct l2cap_chan * chan)1232 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1233 {
1234 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1235 		return true;
1236 
1237 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1238 }
1239 
/* Decide whether this channel should try an AMP (high-speed) link:
 * both ends must advertise A2MP support, at least one powered-up
 * non-BR/EDR AMP controller must exist locally, and the channel policy
 * must prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	/* Scan for any powered-up AMP controller on this host */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1267 
/* Placeholder for Extended Flow Specification validation; currently all
 * EFS parameters are accepted unconditionally.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1273 
l2cap_send_conn_req(struct l2cap_chan * chan)1274 void l2cap_send_conn_req(struct l2cap_chan *chan)
1275 {
1276 	struct l2cap_conn *conn = chan->conn;
1277 	struct l2cap_conn_req req;
1278 
1279 	req.scid = cpu_to_le16(chan->scid);
1280 	req.psm  = chan->psm;
1281 
1282 	chan->ident = l2cap_get_ident(conn);
1283 
1284 	set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1285 
1286 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1287 }
1288 
/* Send an L2CAP Create Channel Request for @chan targeting the AMP
 * controller identified by @amp_id.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req = {
		.scid   = cpu_to_le16(chan->scid),
		.psm    = chan->psm,
		.amp_id = amp_id,
	};

	/* Remember the ident so the response can be matched back */
	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1301 
/* Prepare an ERTM channel for an AMP channel move: stop all ERTM
 * timers, mark previously sent frames for retransmission and reset the
 * tx/rx state machines into their move states. No-op for non-ERTM
 * channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames sent at least once are rewound to a single retry; the
	 * first never-sent frame ends the scan.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	/* Drop all pending retransmission/SREJ bookkeeping */
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move completes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1336 
l2cap_move_done(struct l2cap_chan * chan)1337 static void l2cap_move_done(struct l2cap_chan *chan)
1338 {
1339 	u8 move_role = chan->move_role;
1340 	BT_DBG("chan %p", chan);
1341 
1342 	chan->move_state = L2CAP_MOVE_STABLE;
1343 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
1344 
1345 	if (chan->mode != L2CAP_MODE_ERTM)
1346 		return;
1347 
1348 	switch (move_role) {
1349 	case L2CAP_MOVE_ROLE_INITIATOR:
1350 		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1351 		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1352 		break;
1353 	case L2CAP_MOVE_ROLE_RESPONDER:
1354 		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1355 		break;
1356 	}
1357 }
1358 
l2cap_chan_ready(struct l2cap_chan * chan)1359 static void l2cap_chan_ready(struct l2cap_chan *chan)
1360 {
1361 	/* The channel may have already been flagged as connected in
1362 	 * case of receiving data before the L2CAP info req/rsp
1363 	 * procedure is complete.
1364 	 */
1365 	if (chan->state == BT_CONNECTED)
1366 		return;
1367 
1368 	/* This clears all conf flags, including CONF_NOT_COMPLETE */
1369 	chan->conf_state = 0;
1370 	__clear_chan_timer(chan);
1371 
1372 	switch (chan->mode) {
1373 	case L2CAP_MODE_LE_FLOWCTL:
1374 	case L2CAP_MODE_EXT_FLOWCTL:
1375 		if (!chan->tx_credits)
1376 			chan->ops->suspend(chan);
1377 		break;
1378 	}
1379 
1380 	chan->state = BT_CONNECTED;
1381 
1382 	chan->ops->ready(chan);
1383 }
1384 
l2cap_le_connect(struct l2cap_chan * chan)1385 static void l2cap_le_connect(struct l2cap_chan *chan)
1386 {
1387 	struct l2cap_conn *conn = chan->conn;
1388 	struct l2cap_le_conn_req req;
1389 
1390 	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1391 		return;
1392 
1393 	if (!chan->imtu)
1394 		chan->imtu = chan->conn->mtu;
1395 
1396 	l2cap_le_flowctl_init(chan, 0);
1397 
1398 	memset(&req, 0, sizeof(req));
1399 	req.psm     = chan->psm;
1400 	req.scid    = cpu_to_le16(chan->scid);
1401 	req.mtu     = cpu_to_le16(chan->imtu);
1402 	req.mps     = cpu_to_le16(chan->mps);
1403 	req.credits = cpu_to_le16(chan->rx_credits);
1404 
1405 	chan->ident = l2cap_get_ident(conn);
1406 
1407 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1408 		       sizeof(req), &req);
1409 }
1410 
/* Scratch state for building one ECRED connection request that
 * aggregates multiple deferred channels into a single PDU.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req req;
		__le16 scid[5];		/* up to 5 source CIDs per request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* channel that initiated the request */
	struct pid *pid;		/* owner PID; only same-PID channels join */
	int count;			/* number of scid[] entries filled in */
};
1420 
/* __l2cap_chan_list() iterator: fold another deferred channel into the
 * aggregated ECRED connection request being assembled in @data,
 * provided it matches the initiating channel's PID and PSM and is in a
 * state where it can join the request.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	/* Skip the channel that initiated the request */
	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}
1452 
/* Send an enhanced credit-based (ECRED) connection request for @chan,
 * batching in other deferred channels with the same PID/PSM so that one
 * request covers several channels. Sent at most once per channel;
 * deferred channels are picked up by the initiating channel instead.
 */
static void l2cap_ecred_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_conn_data data;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	memset(&data, 0, sizeof(data));
	data.pdu.req.psm     = chan->psm;
	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.req.mps     = cpu_to_le16(chan->mps);
	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.scid[0]     = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	data.count = 1;
	data.chan = chan;
	data.pid = chan->ops->get_peer_pid(chan);

	/* Pull in every matching deferred channel on this connection */
	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);

	/* PDU size scales with how many channels joined the request */
	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
		       &data.pdu);
}
1485 
l2cap_le_start(struct l2cap_chan * chan)1486 static void l2cap_le_start(struct l2cap_chan *chan)
1487 {
1488 	struct l2cap_conn *conn = chan->conn;
1489 
1490 	if (!smp_conn_security(conn->hcon, chan->sec_level))
1491 		return;
1492 
1493 	if (!chan->psm) {
1494 		l2cap_chan_ready(chan);
1495 		return;
1496 	}
1497 
1498 	if (chan->state == BT_CONNECT) {
1499 		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
1500 			l2cap_ecred_connect(chan);
1501 		else
1502 			l2cap_le_connect(chan);
1503 	}
1504 }
1505 
l2cap_start_connection(struct l2cap_chan * chan)1506 static void l2cap_start_connection(struct l2cap_chan *chan)
1507 {
1508 	if (__amp_capable(chan)) {
1509 		BT_DBG("chan %p AMP capable: discover AMPs", chan);
1510 		a2mp_discover_amp(chan);
1511 	} else if (chan->conn->hcon->type == LE_LINK) {
1512 		l2cap_le_start(chan);
1513 	} else {
1514 		l2cap_send_conn_req(chan);
1515 	}
1516 }
1517 
l2cap_request_info(struct l2cap_conn * conn)1518 static void l2cap_request_info(struct l2cap_conn *conn)
1519 {
1520 	struct l2cap_info_req req;
1521 
1522 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1523 		return;
1524 
1525 	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1526 
1527 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1528 	conn->info_ident = l2cap_get_ident(conn);
1529 
1530 	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1531 
1532 	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1533 		       sizeof(req), &req);
1534 }
1535 
l2cap_check_enc_key_size(struct hci_conn * hcon)1536 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1537 {
1538 	/* The minimum encryption key size needs to be enforced by the
1539 	 * host stack before establishing any L2CAP connections. The
1540 	 * specification in theory allows a minimum of 1, but to align
1541 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1542 	 *
1543 	 * This check might also be called for unencrypted connections
1544 	 * that have no key size requirements. Ensure that the link is
1545 	 * actually encrypted before enforcing a key size.
1546 	 */
1547 	int min_key_size = hcon->hdev->min_enc_key_size;
1548 
1549 	/* On FIPS security level, key size must be 16 bytes */
1550 	if (hcon->sec_level == BT_SECURITY_FIPS)
1551 		min_key_size = 16;
1552 
1553 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1554 		hcon->enc_key_size >= min_key_size);
1555 }
1556 
l2cap_do_start(struct l2cap_chan * chan)1557 static void l2cap_do_start(struct l2cap_chan *chan)
1558 {
1559 	struct l2cap_conn *conn = chan->conn;
1560 
1561 	if (conn->hcon->type == LE_LINK) {
1562 		l2cap_le_start(chan);
1563 		return;
1564 	}
1565 
1566 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1567 		l2cap_request_info(conn);
1568 		return;
1569 	}
1570 
1571 	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1572 		return;
1573 
1574 	if (!l2cap_chan_check_security(chan, true) ||
1575 	    !__l2cap_no_conn_pending(chan))
1576 		return;
1577 
1578 	if (l2cap_check_enc_key_size(conn->hcon))
1579 		l2cap_start_connection(chan);
1580 	else
1581 		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
1582 }
1583 
/* Non-zero when @mode is advertised both locally and in the remote
 * @feat_mask; ERTM/streaming are only offered locally unless disabled.
 */
static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
{
	u32 mask = l2cap_feat_mask;

	if (!disable_ertm)
		mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;

	if (mode == L2CAP_MODE_ERTM)
		return L2CAP_FEAT_ERTM & feat_mask & mask;

	if (mode == L2CAP_MODE_STREAMING)
		return L2CAP_FEAT_STREAMING & feat_mask & mask;

	return 0x00;
}
1599 
/* Initiate disconnection of @chan: stop ERTM timers, send an L2CAP
 * Disconnect Request to the peer (A2MP channels skip the request) and
 * move the channel to BT_DISCONN with @err as the reason.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* A connected ERTM channel has timers that must not fire again */
	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	/* A2MP channels change state without a disconnect exchange */
	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1626 
1627 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and push each one forward now that the
 * information request/response exchange has finished: start outgoing
 * connections (BT_CONNECT) and answer incoming ones (BT_CONNECT2).
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Connectionless/fixed channels are ready immediately */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close channels that insist on a mode the link
			 * does not support.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let the owner accept or reject */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Security procedures still in progress */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Connection accepted: start configuration */
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1707 
/* LE-specific setup once a connection is ready: start security for
 * outgoing pairing and, when acting as peripheral, ask the central to
 * update out-of-range connection parameters.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1740 
/* Drive every channel on @conn forward once the underlying HCI link is
 * established, then release any rx frames queued while the connection
 * was still being set up.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* BR/EDR links start the feature-mask exchange right away */
	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels are managed through their own state machine */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Fixed channels wait for the feature exchange */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames received before the connection became ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1781 
/* Notify sockets that we cannot guarantee reliability anymore */
/* Set @err on every channel of @conn that requested guaranteed
 * reliability (FLAG_FORCE_RELIABLE); other channels are left alone.
 */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1798 
/* Info-request timer expiry: mark the feature exchange as done even
 * though no response arrived, then resume starting channels.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1809 
1810 /*
1811  * l2cap_user
1812  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1813  * callback is called during registration. The ->remove callback is called
1814  * during unregistration.
1815  * An l2cap_user object can either be explicitly unregistered or when the
1816  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1817  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1818  * External modules must own a reference to the l2cap_conn object if they intend
1819  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1820  * any time if they don't.
1821  */
1822 
/* Register @user on @conn and invoke its ->probe() callback.
 * Returns 0 on success, -EINVAL if @user is already registered,
 * -ENODEV if the connection is already being torn down, or the
 * ->probe() error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1860 
l2cap_unregister_user(struct l2cap_conn * conn,struct l2cap_user * user)1861 void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1862 {
1863 	struct hci_dev *hdev = conn->hcon->hdev;
1864 
1865 	hci_dev_lock(hdev);
1866 
1867 	if (list_empty(&user->list))
1868 		goto out_unlock;
1869 
1870 	list_del_init(&user->list);
1871 	user->remove(conn, user);
1872 
1873 out_unlock:
1874 	hci_dev_unlock(hdev);
1875 }
1876 EXPORT_SYMBOL(l2cap_unregister_user);
1877 
l2cap_unregister_all_users(struct l2cap_conn * conn)1878 static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1879 {
1880 	struct l2cap_user *user;
1881 
1882 	while (!list_empty(&conn->users)) {
1883 		user = list_first_entry(&conn->users, struct l2cap_user, list);
1884 		list_del_init(&user->list);
1885 		user->remove(conn, user);
1886 	}
1887 }
1888 
/* Tear down the L2CAP layer of @hcon: cancel pending work, detach all
 * users, close every channel with @err, and drop the l2cap_conn
 * reference the hci_conn held.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so it survives l2cap_chan_del() */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	/* The info timer is only armed after a feature request was sent */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1944 
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn and free the structure itself.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1952 
/* Take a reference on @conn; release with l2cap_conn_put() */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
1959 
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * last reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1965 
1966 /* ---- Socket interface ---- */
1967 
1968 /* Find socket with psm and source / destination bdaddr.
1969  * Returns closest match.
1970  */
/* Find a global channel in @state listening on @psm that best matches
 * the given source/destination addresses and link type. An exact
 * address match wins immediately; otherwise the closest wildcard
 * (BDADDR_ANY) match is used. The returned channel has its refcount
 * raised (via l2cap_chan_hold_unless_zero) and must be released with
 * l2cap_chan_put(); NULL when nothing matches.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* The channel's source address type must match the link */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
2021 
l2cap_monitor_timeout(struct work_struct * work)2022 static void l2cap_monitor_timeout(struct work_struct *work)
2023 {
2024 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2025 					       monitor_timer.work);
2026 
2027 	BT_DBG("chan %p", chan);
2028 
2029 	l2cap_chan_lock(chan);
2030 
2031 	if (!chan->conn) {
2032 		l2cap_chan_unlock(chan);
2033 		l2cap_chan_put(chan);
2034 		return;
2035 	}
2036 
2037 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
2038 
2039 	l2cap_chan_unlock(chan);
2040 	l2cap_chan_put(chan);
2041 }
2042 
l2cap_retrans_timeout(struct work_struct * work)2043 static void l2cap_retrans_timeout(struct work_struct *work)
2044 {
2045 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2046 					       retrans_timer.work);
2047 
2048 	BT_DBG("chan %p", chan);
2049 
2050 	l2cap_chan_lock(chan);
2051 
2052 	if (!chan->conn) {
2053 		l2cap_chan_unlock(chan);
2054 		l2cap_chan_put(chan);
2055 		return;
2056 	}
2057 
2058 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2059 	l2cap_chan_unlock(chan);
2060 	l2cap_chan_put(chan);
2061 }
2062 
l2cap_streaming_send(struct l2cap_chan * chan,struct sk_buff_head * skbs)2063 static void l2cap_streaming_send(struct l2cap_chan *chan,
2064 				 struct sk_buff_head *skbs)
2065 {
2066 	struct sk_buff *skb;
2067 	struct l2cap_ctrl *control;
2068 
2069 	BT_DBG("chan %p, skbs %p", chan, skbs);
2070 
2071 	if (__chan_is_moving(chan))
2072 		return;
2073 
2074 	skb_queue_splice_tail_init(skbs, &chan->tx_q);
2075 
2076 	while (!skb_queue_empty(&chan->tx_q)) {
2077 
2078 		skb = skb_dequeue(&chan->tx_q);
2079 
2080 		bt_cb(skb)->l2cap.retries = 1;
2081 		control = &bt_cb(skb)->l2cap;
2082 
2083 		control->reqseq = 0;
2084 		control->txseq = chan->next_tx_seq;
2085 
2086 		__pack_control(chan, control, skb);
2087 
2088 		if (chan->fcs == L2CAP_FCS_CRC16) {
2089 			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
2090 			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
2091 		}
2092 
2093 		l2cap_do_send(chan, skb);
2094 
2095 		BT_DBG("Sent txseq %u", control->txseq);
2096 
2097 		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
2098 		chan->frames_sent++;
2099 	}
2100 }
2101 
/* Transmit as many queued I-frames as ERTM allows, stopping when the
 * queue is drained, the remote TX window fills, or the TX state
 * machine leaves the XMIT state.  Returns the number of frames sent,
 * 0 when sending is not currently possible, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Remote signalled busy (RNR); keep frames queued for later */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	/* No data may flow while the channel is being moved */
	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggy-back a pending F-bit on this frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame acknowledges everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the original skb stays on tx_q
		 * in case it must be retransmitted.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2171 
/* Retransmit every I-frame whose sequence number is on
 * chan->retrans_list.  A fresh REQSEQ (and possibly an F-bit) is
 * patched into a private copy of each frame before sending; the
 * channel is disconnected once a frame exceeds max_tx retries.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		/* Work on a local copy so the control stored in the queued
		 * skb stays untouched.
		 */
		control = bt_cb(skb)->l2cap;

		/* max_tx == 0 means unlimited retransmissions */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack piggy-backed on the retransmission */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2252 
/* Queue a single sequence number (control->reqseq) for
 * retransmission and kick the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2261 
/* Rebuild retrans_list with every unacked frame from control->reqseq
 * (or tx_send_head, whichever comes first in tx_q) up to but not
 * including tx_send_head, then resend them all.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A P-bit in the trigger frame requires an F-bit in our reply */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* First find the starting frame ... */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* ... then queue everything already sent after it */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2295 
/* Acknowledge received I-frames.  When locally busy, an RNR is sent
 * instead.  Otherwise pending I-frames (which carry implicit acks)
 * are flushed first; an explicit RR is only emitted once the unacked
 * receive window is 3/4 full, else the ack timer is (re)armed.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	/* Number of received frames not yet acknowledged */
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack to the ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2345 
/* Copy @len bytes from the user iovec in @msg into @skb: the first
 * @count bytes go into the skb's linear area, the rest into newly
 * allocated continuation fragments (each at most conn->mtu bytes)
 * chained on skb's frag_list.  Returns the number of bytes copied or
 * a negative errno; on error the caller frees @skb, which also frees
 * any fragments already linked to it.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		/* Link before copying so a copy failure still leaves the
		 * fragment owned by (and freed with) @skb.
		 */
		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Keep the parent skb's length accounting in sync */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2389 
/* Build a connectionless-channel PDU: L2CAP header, the 2-byte PSM,
 * then @len payload bytes copied from @msg (overflow beyond the HCI
 * MTU goes into continuation fragments).  Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* Payload that fits in the first (header-carrying) fragment */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2421 
/* Build a basic-mode PDU: L2CAP header followed by @len payload bytes
 * copied from @msg; payload beyond the first HCI fragment is chained
 * as continuation fragments by l2cap_skbuff_fromiovec().  Returns the
 * skb or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_hdr *hdr;
	struct sk_buff *pdu;
	int first, err;

	BT_DBG("chan %p len %zu", chan, len);

	/* Payload that fits in the first (header-carrying) fragment */
	first = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	pdu = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, first,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(pdu))
		return pdu;

	/* Create L2CAP header */
	hdr = skb_put(pdu, L2CAP_HDR_SIZE);
	hdr->cid = cpu_to_le16(chan->dcid);
	hdr->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, first, pdu);
	if (unlikely(err < 0)) {
		kfree_skb(pdu);
		return ERR_PTR(err);
	}

	return pdu;
}
2451 
/* Build a single ERTM/streaming I-frame PDU.  Reserves room for the
 * enhanced or extended control field, an SDU-length field when
 * @sdulen is non-zero (first fragment of a segmented SDU), and an FCS
 * trailer when CRC16 is in use.  The control field is zero-filled
 * here and populated at transmit time.  Returns the skb or an
 * ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	/* PDU length covers the payload plus everything after the basic
	 * header (control, optional SDU length, FCS).
	 */
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2505 
/* Split an outgoing SDU from @msg into one or more I-frame PDUs
 * queued on @seg_queue.  A single-PDU SDU is marked UNSEGMENTED; a
 * multi-PDU SDU gets START (carrying the total SDU length), then
 * CONTINUE and finally END markers.  On error @seg_queue is purged
 * and a negative errno returned.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START PDU carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2572 
/* Build a single LE flow-control PDU: L2CAP header, an SDU-length
 * field when @sdulen is non-zero (first PDU of an SDU), then @len
 * payload bytes from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	/* Payload that fits in the first (header-carrying) fragment */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2615 
/* Split an outgoing SDU into LE flow-control PDUs queued on
 * @seg_queue.  Only the first PDU carries the 2-byte SDU length, so
 * subsequent PDUs gain L2CAP_SDULEN_SIZE bytes of payload budget.  On
 * error @seg_queue is purged and a negative errno returned.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	/* First PDU payload: remote MPS minus the SDU-length field */
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* After the first PDU: no SDU-length field, larger budget */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2651 
l2cap_le_flowctl_send(struct l2cap_chan * chan)2652 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2653 {
2654 	int sent = 0;
2655 
2656 	BT_DBG("chan %p", chan);
2657 
2658 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2659 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2660 		chan->tx_credits--;
2661 		sent++;
2662 	}
2663 
2664 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2665 	       skb_queue_len(&chan->tx_q));
2666 }
2667 
/* Send @len bytes from @msg on @chan, segmenting and queueing
 * according to the channel mode.  Returns @len on success or a
 * negative errno (-ENOTCONN, -EMSGSIZE, -EBADFD, or a
 * segmentation/copy error).
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have gone down while segmenting; drop the
		 * freshly built PDUs in that case.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: tell the owner to stop feeding us data */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2771 
/* Send SREJ S-frames for every missing sequence number between
 * expected_tx_seq and @txseq (the just-received out-of-order frame),
 * skipping frames already buffered in srej_q, and record each
 * requested seq on srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2794 
l2cap_send_srej_tail(struct l2cap_chan * chan)2795 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2796 {
2797 	struct l2cap_ctrl control;
2798 
2799 	BT_DBG("chan %p", chan);
2800 
2801 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2802 		return;
2803 
2804 	memset(&control, 0, sizeof(control));
2805 	control.sframe = 1;
2806 	control.super = L2CAP_SUPER_SREJ;
2807 	control.reqseq = chan->srej_list.tail;
2808 	l2cap_send_sframe(chan, &control);
2809 }
2810 
/* Re-send SREJ S-frames for all entries on srej_list, cycling each
 * entry back onto the list; an entry equal to @txseq is consumed
 * (that frame has now arrived) and the walk stops there.  A snapshot
 * of the initial head limits the walk to a single pass.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2836 
/* Process a received acknowledgment: free every frame on tx_q whose
 * txseq lies in [expected_ack_seq, @reqseq), advance
 * expected_ack_seq, and stop the retransmission timer once nothing
 * remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack was already processed */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2868 
/* Abandon SREJ-based recovery: forget pending retransmission
 * requests, drop the out-of-order frames buffered in srej_q, rewind
 * expected_tx_seq to buffer_seq and fall back to the plain RECV
 * state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2878 
/* TX state machine handler for the XMIT state (normal transmission):
 * queued data is sent immediately, and poll/retransmission events
 * move the channel into WAIT_F.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		/* Queue the new frames and send what the window allows */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Being busy, this sends an RNR instead of an RR */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* An RNR went out while busy; poll with an RR so
			 * the remote learns we can receive again, and wait
			 * for the final bit.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll the remote for acks */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2950 
/* TX state machine handler for the WAIT_F state: a poll (P=1) frame
 * is outstanding and we are waiting for a reply with the F-bit set.
 * New data is queued but not transmitted until the F-bit arrives.
 *
 * Fix: the debug format string was malformed ("0x2.2%x"), printing a
 * literal "0x2.2" instead of the state in zero-padded hex; corrected
 * to "0x%2.2x".
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* An RNR went out while busy; poll with an RR so
			 * the remote learns we can receive again.
			 */
			struct l2cap_ctrl local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		fallthrough;

	case L2CAP_EV_RECV_FBIT:
		/* The awaited F-bit arrived: resume normal transmission */
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* No F-bit before the monitor timer expired: re-poll up
		 * to max_tx times (0 = unlimited), then give up and
		 * disconnect.
		 */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
3027 
/* Dispatch a TX state-machine event to the handler for the channel's
 * current transmit state; events in any other state are ignored.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	if (chan->tx_state == L2CAP_TX_STATE_XMIT)
		l2cap_tx_state_xmit(chan, control, skbs, event);
	else if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
		l2cap_tx_state_wait_f(chan, control, skbs, event);
	/* Any other state: ignore the event */
}
3046 
/* Feed the REQSEQ and F-bit of a received frame into the TX state
 * machine (acknowledges outstanding frames).
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
3053 
/* Feed only the F-bit of a received frame into the TX state machine
 * (no REQSEQ processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
3060 
3061 /* Copy frame to all raw sockets on that connection */
l2cap_raw_recv(struct l2cap_conn * conn,struct sk_buff * skb)3062 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3063 {
3064 	struct sk_buff *nskb;
3065 	struct l2cap_chan *chan;
3066 
3067 	BT_DBG("conn %p", conn);
3068 
3069 	mutex_lock(&conn->chan_lock);
3070 
3071 	list_for_each_entry(chan, &conn->chan_l, list) {
3072 		if (chan->chan_type != L2CAP_CHAN_RAW)
3073 			continue;
3074 
3075 		/* Don't send frame to the channel it came from */
3076 		if (bt_cb(skb)->l2cap.chan == chan)
3077 			continue;
3078 
3079 		nskb = skb_clone(skb, GFP_KERNEL);
3080 		if (!nskb)
3081 			continue;
3082 		if (chan->ops->recv(chan, nskb))
3083 			kfree_skb(nskb);
3084 	}
3085 
3086 	mutex_unlock(&conn->chan_lock);
3087 }
3088 
3089 /* ---- L2CAP signalling commands ---- */
/* Allocate and build a signalling command skb: L2CAP header (CID
 * picked by link type), command header, then @dlen bytes of @data.
 * Payload beyond the first conn->mtu bytes is chained as continuation
 * fragments.  Returns NULL on allocation failure or if the MTU cannot
 * even hold the headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Whatever payload fits after the headers goes into the
		 * first skb.
		 */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the fragment chain along with the head skb */
	kfree_skb(skb);
	return NULL;
}
3155 
/* Extract one configuration option from *ptr and advance *ptr past it.
 *
 * @type/@olen receive the option type and length.  For 1/2/4-byte
 * options, @val receives the little-endian decoded value; for any
 * other length, @val receives a pointer to the raw option payload.
 *
 * Returns the total number of bytes consumed.  NOTE(review): no bounds
 * checking is done here — callers are expected to validate @olen
 * against the expected size for each option type before using @val.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer instead */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3189 
/* Append one configuration option at *ptr and advance *ptr past it.
 *
 * @size is the space remaining in the output buffer; if the option
 * (header + @len payload) does not fit, nothing is written.  Values of
 * 1/2/4 bytes are encoded little-endian; any other @len copies @len
 * bytes from the buffer that @val points at.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	/* Silently drop the option rather than overflow the buffer */
	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: @val is a pointer to the payload */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3222 
/* Append an Extended Flow Specification option for the channel's mode.
 * ERTM channels advertise their locally configured service parameters;
 * streaming channels use best-effort defaults.  Other modes add no
 * option at all.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	if (chan->mode == L2CAP_MODE_ERTM) {
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
	} else if (chan->mode == L2CAP_MODE_STREAMING) {
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
	} else {
		/* EFS only applies to ERTM and streaming modes */
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3253 
/* Delayed-work handler for the ERTM acknowledgment timer.
 *
 * If any received frames are still unacknowledged, send an RR/RNR
 * S-frame to ack them.  Drops the channel reference taken when the
 * timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3273 
l2cap_ertm_init(struct l2cap_chan * chan)3274 int l2cap_ertm_init(struct l2cap_chan *chan)
3275 {
3276 	int err;
3277 
3278 	chan->next_tx_seq = 0;
3279 	chan->expected_tx_seq = 0;
3280 	chan->expected_ack_seq = 0;
3281 	chan->unacked_frames = 0;
3282 	chan->buffer_seq = 0;
3283 	chan->frames_sent = 0;
3284 	chan->last_acked_seq = 0;
3285 	chan->sdu = NULL;
3286 	chan->sdu_last_frag = NULL;
3287 	chan->sdu_len = 0;
3288 
3289 	skb_queue_head_init(&chan->tx_q);
3290 
3291 	chan->local_amp_id = AMP_ID_BREDR;
3292 	chan->move_id = AMP_ID_BREDR;
3293 	chan->move_state = L2CAP_MOVE_STABLE;
3294 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
3295 
3296 	if (chan->mode != L2CAP_MODE_ERTM)
3297 		return 0;
3298 
3299 	chan->rx_state = L2CAP_RX_STATE_RECV;
3300 	chan->tx_state = L2CAP_TX_STATE_XMIT;
3301 
3302 	skb_queue_head_init(&chan->srej_q);
3303 
3304 	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3305 	if (err < 0)
3306 		return err;
3307 
3308 	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3309 	if (err < 0)
3310 		l2cap_seq_list_free(&chan->srej_list);
3311 
3312 	return err;
3313 }
3314 
/* Pick the channel mode to use: keep ERTM/streaming if the remote
 * supports it, otherwise fall back to basic mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	if ((mode == L2CAP_MODE_STREAMING || mode == L2CAP_MODE_ERTM) &&
	    l2cap_mode_supported(mode, remote_feat_mask))
		return mode;

	return L2CAP_MODE_BASIC;
}
3327 
__l2cap_ews_supported(struct l2cap_conn * conn)3328 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3329 {
3330 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3331 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3332 }
3333 
__l2cap_efs_supported(struct l2cap_conn * conn)3334 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3335 {
3336 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3337 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3338 }
3339 
__l2cap_set_ertm_timeouts(struct l2cap_chan * chan,struct l2cap_conf_rfc * rfc)3340 static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3341 				      struct l2cap_conf_rfc *rfc)
3342 {
3343 	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
3344 		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3345 
3346 		/* Class 1 devices have must have ERTM timeouts
3347 		 * exceeding the Link Supervision Timeout.  The
3348 		 * default Link Supervision Timeout for AMP
3349 		 * controllers is 10 seconds.
3350 		 *
3351 		 * Class 1 devices use 0xffffffff for their
3352 		 * best-effort flush timeout, so the clamping logic
3353 		 * will result in a timeout that meets the above
3354 		 * requirement.  ERTM timeouts are 16-bit values, so
3355 		 * the maximum timeout is 65.535 seconds.
3356 		 */
3357 
3358 		/* Convert timeout to milliseconds and round */
3359 		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3360 
3361 		/* This is the recommended formula for class 2 devices
3362 		 * that start ERTM timers when packets are sent to the
3363 		 * controller.
3364 		 */
3365 		ertm_to = 3 * ertm_to + 500;
3366 
3367 		if (ertm_to > 0xffff)
3368 			ertm_to = 0xffff;
3369 
3370 		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3371 		rfc->monitor_timeout = rfc->retrans_timeout;
3372 	} else {
3373 		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3374 		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3375 	}
3376 }
3377 
l2cap_txwin_setup(struct l2cap_chan * chan)3378 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3379 {
3380 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3381 	    __l2cap_ews_supported(chan->conn)) {
3382 		/* use extended control field */
3383 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3384 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3385 	} else {
3386 		chan->tx_win = min_t(u16, chan->tx_win,
3387 				     L2CAP_DEFAULT_TX_WINDOW);
3388 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3389 	}
3390 	chan->ack_win = chan->tx_win;
3391 }
3392 
l2cap_mtu_auto(struct l2cap_chan * chan)3393 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3394 {
3395 	struct hci_conn *conn = chan->conn->hcon;
3396 
3397 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3398 
3399 	/* The 2-DH1 packet has between 2 and 56 information bytes
3400 	 * (including the 2-byte payload header)
3401 	 */
3402 	if (!(conn->pkt_type & HCI_2DH1))
3403 		chan->imtu = 54;
3404 
3405 	/* The 3-DH1 packet has between 2 and 85 information bytes
3406 	 * (including the 2-byte payload header)
3407 	 */
3408 	if (!(conn->pkt_type & HCI_3DH1))
3409 		chan->imtu = 83;
3410 
3411 	/* The 2-DH3 packet has between 2 and 369 information bytes
3412 	 * (including the 2-byte payload header)
3413 	 */
3414 	if (!(conn->pkt_type & HCI_2DH3))
3415 		chan->imtu = 367;
3416 
3417 	/* The 3-DH3 packet has between 2 and 554 information bytes
3418 	 * (including the 2-byte payload header)
3419 	 */
3420 	if (!(conn->pkt_type & HCI_3DH3))
3421 		chan->imtu = 552;
3422 
3423 	/* The 2-DH5 packet has between 2 and 681 information bytes
3424 	 * (including the 2-byte payload header)
3425 	 */
3426 	if (!(conn->pkt_type & HCI_2DH5))
3427 		chan->imtu = 679;
3428 
3429 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3430 	 * (including the 2-byte payload header)
3431 	 */
3432 	if (!(conn->pkt_type & HCI_3DH5))
3433 		chan->imtu = 1021;
3434 }
3435 
/* Build an outgoing Configuration Request for @chan into @data.
 *
 * Selects the channel mode (possibly downgrading based on the remote
 * feature mask), then emits MTU, RFC, EFS, EWS and FCS options as
 * appropriate for the selected mode.  @data_size bounds the output;
 * l2cap_add_conf_opt() drops options that would not fit.
 *
 * Returns the number of bytes written (request header + options).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Only (re-)select the mode on the very first request */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A "state 2" device insists on its configured mode */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	/* Only send an MTU option when differing from the default */
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only state basic mode explicitly if the remote could
		 * otherwise assume ERTM/streaming
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Bound the PDU size by what fits in the connection MTU
		 * with extended header, SDU length and FCS overhead
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Full window goes in the EWS option; the RFC option
		 * above is clamped to the standard window
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3561 
/* Parse the peer's stored Configuration Request (chan->conf_req) and
 * build our Configuration Response into @data.
 *
 * First pass decodes each option, validating its length (malformed
 * lengths are silently ignored per the existing convention).  Unknown
 * non-hint options produce L2CAP_CONF_UNKNOWN.  The function then
 * reconciles the requested mode with ours and emits the accepted or
 * adjusted option values in the response.
 *
 * Returns the number of bytes written, or -ECONNREFUSED when the
 * request is incompatible and the connection should be refused.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		if (len < 0)
			break;

		/* Hint options may be ignored if not understood */
		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but not acted upon */
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* Extended windows need A2MP support locally */
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;
			/* Echo unknown option types back in the response */
			result = L2CAP_CONF_UNKNOWN;
			l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr);
			break;
		}
	}

	/* Only renegotiate the mode early in the configuration exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State 2 devices do not accept a different mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Second round of mode disagreement means refusal */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			/* Service types must match unless one side is
			 * NO_TRAFFIC
			 */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp remote PDU size to our connection MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3787 
/* Parse the peer's Configuration Response (@rsp, @len bytes) and build
 * the follow-up Configuration Request into @data.
 *
 * Each option the peer adjusted is re-emitted with our accepted value.
 * On success (or pending), the negotiated ERTM/streaming parameters
 * are committed to @chan.  *result may be downgraded to UNACCEPT if
 * the peer's MTU is below the minimum.
 *
 * Returns the number of bytes written, or -ECONNREFUSED when the
 * response forces an incompatible mode.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* A "state 2" device cannot change its mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Service types must be compatible */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* A basic-mode channel cannot be upgraded by the response */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3905 
/* Build a minimal Configuration Response (no options) with the given
 * @result and @flags.  Returns the number of bytes written, i.e. the
 * response header size (ptr never advances past rsp->data here).
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(flags);

	return ptr - data;
}
3920 
__l2cap_le_connect_rsp_defer(struct l2cap_chan * chan)3921 void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3922 {
3923 	struct l2cap_le_conn_rsp rsp;
3924 	struct l2cap_conn *conn = chan->conn;
3925 
3926 	BT_DBG("chan %p", chan);
3927 
3928 	rsp.dcid    = cpu_to_le16(chan->scid);
3929 	rsp.mtu     = cpu_to_le16(chan->imtu);
3930 	rsp.mps     = cpu_to_le16(chan->mps);
3931 	rsp.credits = cpu_to_le16(chan->rx_credits);
3932 	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
3933 
3934 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3935 		       &rsp);
3936 }
3937 
/* Channel-list callback: tally how many channels sharing an ident are
 * still pending accept.  *data counts BT_CONNECT2 channels; any
 * channel in another non-connected state marks the whole set refused
 * (-ECONNREFUSED), after which further channels are ignored.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Stop once refused, and skip locally initiated channels */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* Still pending accept: count it */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* Neither connected nor pending accept: refused */
		*result = -ECONNREFUSED;
	}
}
3958 
/* Accumulator for building a deferred enhanced-credit connection
 * response covering multiple channels.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		/* Room for one DCID per channel in the request */
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	int count;	/* number of DCIDs filled in so far */
};
3966 
l2cap_ecred_rsp_defer(struct l2cap_chan * chan,void * data)3967 static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
3968 {
3969 	struct l2cap_ecred_rsp_data *rsp = data;
3970 
3971 	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
3972 		return;
3973 
3974 	/* Reset ident so only one response is sent */
3975 	chan->ident = 0;
3976 
3977 	/* Include all channels pending with the same ident */
3978 	if (!rsp->pdu.rsp.result)
3979 		rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
3980 	else
3981 		l2cap_chan_del(chan, ECONNRESET);
3982 }
3983 
/* Send the deferred enhanced-credit connection response for all
 * channels sharing @chan's ident.
 *
 * First pass (l2cap_ecred_list_defer) verifies that no channel is
 * still pending accept; if any is, sending is postponed.  If any
 * channel was refused, the whole response carries an authorization
 * error.  Second pass (l2cap_ecred_rsp_defer) collects the DCIDs and
 * clears each channel's ident so the response is only sent once.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* No ident means the response was already sent */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	/* Some channel is still pending accept: try again later */
	if (result > 0)
		return;

	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
4019 
/* Send the deferred BR/EDR connection (or AMP create-channel) response
 * for a channel that was left pending, then kick off configuration by
 * sending our first Configuration Request exactly once.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	/* AMP channels answer with a Create Channel Response instead */
	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	/* Only send the initial config request once */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
	chan->num_conf_req++;
}
4048 
/* Extract RFC and extended-window parameters from a Configuration
 * Response (@rsp, @len bytes) and commit them to @chan.
 *
 * Only meaningful for ERTM/streaming channels; other modes return
 * immediately.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* The ack window comes from EWS with extended control,
		 * from the RFC txwin_size otherwise
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
4104 
/* Handle an incoming Command Reject.
 *
 * If the reject answers our outstanding information request (matching
 * ident), treat the feature-mask exchange as complete and resume
 * connection setup.  Returns 0, or -EPROTO on a truncated PDU.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
4129 
/* Handle an incoming Connection Request (also reached for an AMP
 * Create Channel Request via l2cap_create_channel_req).
 *
 * Looks up a channel listening on the requested PSM, runs security and
 * CID sanity checks, creates the new channel and sends a response with
 * rsp_code (L2CAP_CONN_RSP or L2CAP_CREATE_CHAN_RSP). Returns the new
 * channel, or NULL if the request was rejected.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace will accept/reject via defer */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedure still in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature-mask exchange not finished yet; stay pending */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Start the information request exchange so pending
		 * channels can proceed once the feature mask is known.
		 */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		/* Connection accepted; start configuration right away */
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4272 
/* Handle an incoming Connection Request command. */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	/* Tell the management interface about the connection the first
	 * time an L2CAP channel is requested on this link.
	 */
	hci_dev_lock(hdev);
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, hcon, NULL, 0);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
4291 
/* Handle an incoming Connection Response / Create Channel Response.
 *
 * Matches the response to a local channel (by source CID when present,
 * otherwise by the ident of the outstanding request), then drives the
 * channel into configuration on success, keeps it pending, or deletes
 * it on any other result.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* On success the peer must allocate its CID from the dynamic
	 * range.
	 */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	/* Take a reference only if the channel is not already being
	 * torn down.
	 */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Reject a dcid already in use on this connection */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send our config request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4380 
set_default_fcs(struct l2cap_chan * chan)4381 static inline void set_default_fcs(struct l2cap_chan *chan)
4382 {
4383 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4384 	 * sides request it.
4385 	 */
4386 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4387 		chan->fcs = L2CAP_FCS_NONE;
4388 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4389 		chan->fcs = L2CAP_FCS_CRC16;
4390 }
4391 
/* Send a successful Configuration Response and mark the local side of
 * the configuration as complete.
 */
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;
	int rsp_len;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	rsp_len = l2cap_build_conf_rsp(chan, data, L2CAP_CONF_SUCCESS, flags);
	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP, rsp_len, data);
}
4407 
/* Send a Command Reject with reason "invalid CID in request", echoing
 * the offending source/destination CIDs back to the peer.
 */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej;

	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
	/* Use cpu_to_le16 (not the reserved __cpu_to_le16 form) for
	 * consistency with the rest of this file.
	 */
	rej.scid = cpu_to_le16(scid);
	rej.dcid = cpu_to_le16(dcid);

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
4419 
/* Handle an incoming Configuration Request for a channel.
 *
 * Option data may be split across several requests (continuation flag
 * set); fragments are accumulated in chan->conf_req and only parsed
 * once the final fragment arrives.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only acceptable in these states */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: bring the channel up */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Our own config request has not gone out yet; send it now */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4531 
/* Handle an incoming Configuration Response to one of our requests.
 *
 * On success the exchange continues or completes; on UNKNOWN/UNACCEPT
 * we retry with adjusted options a bounded number of times; any other
 * result aborts the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Record the RFC/EWS options the peer accepted */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP channel: finish once the logical
				 * link comes up
				 */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNKNOWN:
	case L2CAP_CONF_UNACCEPT:
		/* Renegotiate, bounded by the response counter to avoid
		 * looping forever
		 */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: bring the channel up */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4646 
/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response and tear the addressed channel down.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* Returns the channel locked and with a reference held */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Re-acquire the channel lock only after taking conn->chan_lock
	 * (the ordering l2cap_chan_del() expects)
	 */
	l2cap_chan_unlock(chan);
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNRESET);
	mutex_unlock(&conn->chan_lock);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4689 
/* Handle an incoming Disconnection Response: the peer acknowledged our
 * Disconnection Request, so finish tearing the channel down.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* Returns the channel locked and with a reference held.
	 * Kernel style: no braces around the single-statement body
	 * (matches the identical check in l2cap_config_rsp).
	 */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	/* Only a channel we asked to disconnect should see this */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return 0;
	}

	/* Re-acquire the channel lock only after taking conn->chan_lock
	 * (the ordering l2cap_chan_del() expects)
	 */
	l2cap_chan_unlock(chan);
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, 0);
	mutex_unlock(&conn->chan_lock);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4730 
/* Handle an incoming Information Request by answering with the
 * requested data: the feature mask, the fixed channel map, or a
 * "not supported" response for anything else.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	switch (type) {
	case L2CAP_IT_FEAT_MASK: {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING |
				     L2CAP_FEAT_FCS;
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW |
				     L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	case L2CAP_IT_FIXED_CHAN: {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	default: {
		struct l2cap_info_rsp rsp;

		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
		break;
	}
	}

	return 0;
}
4781 
/* Handle an incoming Information Response during the post-connect
 * feature exchange.
 *
 * A feature-mask answer may trigger a follow-up fixed-channels query;
 * once the exchange is complete, pending channels are started.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer cannot answer; finish the exchange anyway */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Follow up by querying the fixed channel map */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4844 
/* Handle an incoming AMP Create Channel Request.
 *
 * Controller id 0 (AMP_ID_BREDR) is treated as a plain BR/EDR connect;
 * otherwise the AMP controller id is validated and the new channel is
 * associated with the existing high-speed link.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	/* Only valid when we advertise A2MP support */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP link to the same peer must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* FCS is disabled for AMP-created channels */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4921 
/* Send a Move Channel Request for this channel to the given AMP
 * controller and arm the move timeout.
 */
static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_move_chan_req req = {
		.icid = cpu_to_le16(chan->scid),
		.dest_amp_id = dest_amp_id,
	};

	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);

	/* Remember the ident so the response can be matched back */
	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
		       &req);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}
4940 
/* Send a Move Channel Response with the given result, reusing the
 * ident saved from the peer's Move Channel Request.
 */
static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
{
	struct l2cap_move_chan_rsp rsp = {
		.icid = cpu_to_le16(chan->dcid),
		.result = cpu_to_le16(result),
	};

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
		       sizeof(rsp), &rsp);
}
4953 
/* Send a Move Channel Confirmation with a fresh ident and arm the
 * move timeout while waiting for the confirmation response.
 */
static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_move_chan_cfm cfm = {
		.icid = cpu_to_le16(chan->scid),
		.result = cpu_to_le16(result),
	};

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
		       sizeof(cfm), &cfm);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}
4970 
/* Send an unconfirmed Move Channel Confirmation for a bare icid (used
 * when there is no channel to attach the confirmation to).
 */
static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
{
	struct l2cap_move_chan_cfm cfm = {
		.icid = cpu_to_le16(icid),
		.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED),
	};

	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
		       sizeof(cfm), &cfm);
}
4983 
/* Acknowledge a Move Channel Confirmation for the given icid. */
static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
					 u16 icid)
{
	struct l2cap_move_chan_cfm_rsp rsp = {
		.icid = cpu_to_le16(icid),
	};

	BT_DBG("icid 0x%4.4x", icid);

	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
}
4994 
__release_logical_link(struct l2cap_chan * chan)4995 static void __release_logical_link(struct l2cap_chan *chan)
4996 {
4997 	chan->hs_hchan = NULL;
4998 	chan->hs_hcon = NULL;
4999 
5000 	/* Placeholder - release the logical link */
5001 }
5002 
l2cap_logical_fail(struct l2cap_chan * chan)5003 static void l2cap_logical_fail(struct l2cap_chan *chan)
5004 {
5005 	/* Logical link setup failed */
5006 	if (chan->state != BT_CONNECTED) {
5007 		/* Create channel failure, disconnect */
5008 		l2cap_send_disconn_req(chan, ECONNRESET);
5009 		return;
5010 	}
5011 
5012 	switch (chan->move_role) {
5013 	case L2CAP_MOVE_ROLE_RESPONDER:
5014 		l2cap_move_done(chan);
5015 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
5016 		break;
5017 	case L2CAP_MOVE_ROLE_INITIATOR:
5018 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
5019 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
5020 			/* Remote has only sent pending or
5021 			 * success responses, clean up
5022 			 */
5023 			l2cap_move_done(chan);
5024 		}
5025 
5026 		/* Other amp move states imply that the move
5027 		 * has already aborted
5028 		 */
5029 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5030 		break;
5031 	}
5032 }
5033 
l2cap_logical_finish_create(struct l2cap_chan * chan,struct hci_chan * hchan)5034 static void l2cap_logical_finish_create(struct l2cap_chan *chan,
5035 					struct hci_chan *hchan)
5036 {
5037 	struct l2cap_conf_rsp rsp;
5038 
5039 	chan->hs_hchan = hchan;
5040 	chan->hs_hcon->l2cap_data = chan->conn;
5041 
5042 	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
5043 
5044 	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
5045 		int err;
5046 
5047 		set_default_fcs(chan);
5048 
5049 		err = l2cap_ertm_init(chan);
5050 		if (err < 0)
5051 			l2cap_send_disconn_req(chan, -err);
5052 		else
5053 			l2cap_chan_ready(chan);
5054 	}
5055 }
5056 
/* A logical link for an in-progress channel move has come up; attach
 * it and advance the move state machine.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer until the local receive side unblocks */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
5090 
5091 /* Call with chan locked */
l2cap_logical_cfm(struct l2cap_chan * chan,struct hci_chan * hchan,u8 status)5092 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5093 		       u8 status)
5094 {
5095 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5096 
5097 	if (status) {
5098 		l2cap_logical_fail(chan);
5099 		__release_logical_link(chan);
5100 		return;
5101 	}
5102 
5103 	if (chan->state != BT_CONNECTED) {
5104 		/* Ignore logical link if channel is on BR/EDR */
5105 		if (chan->local_amp_id != AMP_ID_BREDR)
5106 			l2cap_logical_finish_create(chan, hchan);
5107 	} else {
5108 		l2cap_logical_finish_move(chan, hchan);
5109 	}
5110 }
5111 
l2cap_move_start(struct l2cap_chan * chan)5112 void l2cap_move_start(struct l2cap_chan *chan)
5113 {
5114 	BT_DBG("chan %p", chan);
5115 
5116 	if (chan->local_amp_id == AMP_ID_BREDR) {
5117 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5118 			return;
5119 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5120 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5121 		/* Placeholder - start physical link setup */
5122 	} else {
5123 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5124 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5125 		chan->move_id = 0;
5126 		l2cap_move_setup(chan);
5127 		l2cap_send_move_chan_req(chan, 0);
5128 	}
5129 }
5130 
/* Continue channel creation once the AMP physical-link outcome is
 * known.
 *
 * For an outgoing channel (BT_CONNECT), either send a Create Channel
 * Request on the chosen AMP or revert to a plain BR/EDR connect. For
 * an incoming channel, send the deferred Create Channel Response and,
 * on success, start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is disabled for AMP-created channels */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Accepted: move straight to configuration */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}
5182 
/* Kick off an initiator-role channel move: prepare the channel for
 * moving, record the destination controller, and send the Move
 * Channel Request to the peer.
 */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	l2cap_move_setup(chan);

	chan->move_state = L2CAP_MOVE_WAIT_RSP;
	chan->move_id = local_amp_id;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
5192 
/* Responder-side continuation of a channel move once the physical
 * link outcome is known: answer the pending Move Channel Request
 * according to logical link availability.
 *
 * NOTE: obtaining the hci_chan is still a placeholder, so hchan is
 * currently always NULL and the not-allowed response is sent; the
 * 'result' parameter is unused until that is implemented.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
5217 
l2cap_do_move_cancel(struct l2cap_chan * chan,int result)5218 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5219 {
5220 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5221 		u8 rsp_result;
5222 		if (result == -EINVAL)
5223 			rsp_result = L2CAP_MR_BAD_ID;
5224 		else
5225 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5226 
5227 		l2cap_send_move_chan_rsp(chan, rsp_result);
5228 	}
5229 
5230 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5231 	chan->move_state = L2CAP_MOVE_STABLE;
5232 
5233 	/* Restart data transmission */
5234 	l2cap_ertm_send(chan);
5235 }
5236 
/* Invoke with locked chan
 *
 * Physical link confirmation: dispatch the outcome of an AMP physical
 * link attempt.  A channel that is not yet connected is completing
 * creation; a connected channel is in the middle of a move, handled
 * according to our move role.
 *
 * @chan:   affected channel (locked by caller)
 * @result: L2CAP_MR_SUCCESS or a failure/move-result code
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel is being torn down; nothing to complete */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
		return;

	if (chan->state != BT_CONNECTED) {
		/* Not connected yet: channel creation, not a move */
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			/* No recognized role: abort the move */
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
5268 
/* Handle an incoming Move Channel Request.  Validates the request
 * (A2MP support, channel mode/policy, destination controller, move
 * collision) and, if acceptable, switches the channel into responder
 * role before replying with the chosen result code.
 *
 * Returns 0 on handled (including negative responses), -EPROTO on a
 * malformed PDU, -EINVAL when A2MP is not supported locally.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	/* On success the channel is returned locked with a reference
	 * held; both are released at send_move_response below.
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown channel: reply negatively by icid only */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Remember the request ident so the response can match it */
	chan->ident = cmd->ident;

	/* Moves only apply to dynamic channels in ERTM or streaming
	 * mode whose policy allows AMP.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Destination must be a powered-up local AMP controller */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5366 
/* Advance an initiator-side channel move after a successful or
 * pending Move Channel Response, driven by the channel's current
 * move_state.  Any unexpected state aborts the move.
 *
 * @conn:   signaling connection the response arrived on
 * @icid:   initiator CID identifying the moving channel
 * @result: L2CAP_MR_SUCCESS or L2CAP_MR_PEND from the response
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* Returns the channel locked with a reference held; released
	 * at the bottom of this function.
	 */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* No such channel: confirm by icid so the peer can finish */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* Peer needs more time: re-arm with the extended timeout */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5457 
/* Handle a failed Move Channel Response.  On a collision the losing
 * initiator flips to responder role; any other failure cancels the
 * move.  Either way an unconfirmed Move Channel Confirm is sent.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan = l2cap_get_chan_by_ident(conn, ident);

	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		if (result == L2CAP_MR_COLLISION) {
			/* Lost the collision: the peer drives the move now */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5487 
/* Handle a Move Channel Response: success/pending results continue
 * the move, any other result fails it.
 */
static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
				  struct l2cap_cmd_hdr *cmd,
				  u16 cmd_len, void *data)
{
	struct l2cap_move_chan_rsp *rsp = data;
	u16 icid, result;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	switch (result) {
	case L2CAP_MR_SUCCESS:
	case L2CAP_MR_PEND:
		l2cap_move_continue(conn, icid, result);
		break;
	default:
		l2cap_move_fail(conn, cmd->ident, icid, result);
		break;
	}

	return 0;
}
5510 
/* Handle a Move Channel Confirm from the move initiator.  If we were
 * waiting for it, commit (or roll back) the controller switch and
 * finish the move.  A Confirm Response is always sent, even for an
 * unknown icid, as the code below does unconditionally.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	/* Returns the channel locked and referenced; released below */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			/* Commit the switch to the destination controller */
			chan->local_amp_id = chan->move_id;
			/* Back on BR/EDR the AMP logical link is unused */
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			/* Unconfirmed: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5553 
/* Handle a Move Channel Confirm Response: the peer acknowledged our
 * confirm, so commit the controller switch and finish the move.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state != L2CAP_MOVE_WAIT_CONFIRM_RSP)
		goto done;

	/* Move fully acknowledged: the channel now lives on the
	 * destination controller.
	 */
	chan->local_amp_id = chan->move_id;

	/* Back on BR/EDR: the AMP logical link is no longer needed */
	if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
		__release_logical_link(chan);

	l2cap_move_done(chan);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5589 
/* Handle an LE Connection Parameter Update Request.  Only valid when
 * we are the central (HCI_ROLE_MASTER); the requested parameters are
 * validated, a response is sent, and accepted parameters are applied
 * to the link and reported to the management interface.
 *
 * Returns 0 on handled, -EINVAL if we are not central, -EPROTO on a
 * malformed PDU.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the central may apply connection parameter updates */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	/* Accept only parameter sets the HCI layer considers valid */
	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the new parameters and let mgmt decide
		 * whether to persist them.
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5639 
/* Handle an LE Credit Based Connection Response for a request we sent.
 * On success the channel is completed with the peer's parameters; on
 * an authentication/encryption failure the security level is raised
 * and a retry is triggered via SMP; any other result kills the channel.
 *
 * Returns 0 on success, -EPROTO on a malformed or out-of-range PDU,
 * -EBADSLT when no matching pending channel (or a duplicate dcid) is
 * found.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* A successful response must carry at least the minimum LE CoC
	 * MTU/MPS (23) and a dcid in the LE dynamic range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* Match the response to the pending request by command ident */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Reject a dcid already used by another channel */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise security one level above the link's current one */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		/* Any other result refuses the connection */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5726 
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Only some handlers can fail in a way the caller should know about;
 * the others return void (or their result is deliberately ignored) and
 * leave err at 0.  Unknown opcodes are rejected with -EINVAL.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	/* Connect and Create Channel responses share a handler */
	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the received payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		/* Nothing to do for an echo response */
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5806 
/* Handle an incoming LE Credit Based Connection Request.  Validates
 * the SPSM, MTU/MPS and scid, looks for a listening channel on the
 * psm, performs the security check, and either creates the new
 * channel (possibly deferred to userspace) or sends a negative
 * response.
 *
 * Returns 0 on handled, -EPROTO on a malformed PDU.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* LE CoC minimum MTU and MPS are both 23 */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Refuse when the link security is below the listener's level */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	/* Populate the new channel from the link and the request */
	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred: the response is sent later by the accepting socket */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		/* Negative response: mtu/mps carry no information */
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5945 
/* Handle an LE Flow Control Credit packet: add the peer's credits to
 * the channel's tx budget and resume transmission.  A peer pushing the
 * total past LE_FLOWCTL_MAX_CREDITS violates the spec and gets the
 * channel disconnected.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt = (struct l2cap_le_credits *) data;
	struct l2cap_chan *chan;
	u16 cid, credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	if (credits > LE_FLOWCTL_MAX_CREDITS - chan->tx_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5992 
/* Handle an incoming Enhanced Credit Based Connection Request, which
 * may open up to L2CAP_ECRED_MAX_CID channels at once.  Each scid is
 * validated and accepted/refused individually; the response carries
 * one dcid slot per requested channel (0 for refused ones).
 *
 * Returns 0 on handled, -EINVAL when ecred support is disabled.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 dcid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* Zero the response PDU before any "goto response": several of
	 * the early error paths below jump there before any field is
	 * filled in, yet the whole rsp header is sent to the peer.
	 * Zeroing here (instead of just before the channel loop)
	 * prevents leaking uninitialized stack bytes in mtu/mps/credits.
	 */
	memset(&pdu, 0, sizeof(pdu));

	/* The payload must be the fixed header plus a whole number of
	 * 16-bit scids.
	 */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Refuse all channels if link security is insufficient */
	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		/* Default to "refused" (dcid 0) for this slot */
		pdu.dcid[i] = 0x0000;
		len += sizeof(*pdu.dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response: local parameters are shared by all
		 * channels of this request, fill them in once.
		 */
		if (!pdu.rsp.credits) {
			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
			pdu.rsp.mps = cpu_to_le16(chan->mps);
			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
		}

		pdu.dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			/* Userspace will accept later; response is
			 * sent from the defer path, not here.
			 */
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

response:
	pdu.rsp.result = cpu_to_le16(result);

	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(pdu.rsp) + len, &pdu);

	return 0;
}
6148 
/* Handle an Enhanced Credit Based Connection Response for a request
 * we sent.  Walks all channels pending on this command ident and, per
 * channel, consumes one dcid slot from the response, then completes,
 * retries (security), or tears down the channel based on the result.
 *
 * Returns 0 on handled, -EPROTO on a malformed PDU.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	mutex_lock(&conn->chan_lock);

	/* From here on cmd_len tracks the remaining dcid bytes */
	cmd_len -= sizeof(*rsp);

	/* _safe iteration: l2cap_chan_del() removes entries from the list */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels still pending on this exact request */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise security and retry via SMP */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Accepted: complete the channel with the
			 * peer's parameters.
			 */
			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return err;
}
6262 
/* Handle L2CAP_CREDIT_BASED_RECONFIGURE_REQ.
 *
 * The peer asks to change the MTU/MPS of one or more established
 * enhanced-credit-based channels.  Per the spec the MTU may never be
 * decreased; a decrease is reported via L2CAP_RECONF_INVALID_MTU.
 * Always answers with an L2CAP_ECRED_RECONF_RSP carrying the result,
 * except on the hard protocol-error paths that return a negative errno
 * (ecred disabled, or an SCID of 0) so the caller sends a Command
 * Reject instead.
 */
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_ecred_reconf_req *req = (void *) data;
	struct l2cap_ecred_reconf_rsp rsp;
	u16 mtu, mps, result;
	struct l2cap_chan *chan;
	int i, num_scid;

	if (!enable_ecred)
		return -EINVAL;

	/* The payload must hold the fixed header plus a whole number of
	 * 16-bit SCIDs.  The subtraction is parenthesized deliberately:
	 * '%' binds tighter than '-', so without the parentheses the
	 * remainder of the SCID-list length would never be tested.
	 */
	if (cmd_len < sizeof(*req) ||
	    (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto respond;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	BT_DBG("mtu %u mps %u", mtu, mps);

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = L2CAP_RECONF_INVALID_MTU;
		goto respond;
	}

	if (mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_RECONF_INVALID_MPS;
		goto respond;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);
	result = L2CAP_RECONF_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid;

		scid = __le16_to_cpu(req->scid[i]);
		if (!scid)
			return -EPROTO;

		/* Unknown SCIDs are silently skipped */
		chan = __l2cap_get_chan_by_dcid(conn, scid);
		if (!chan)
			continue;

		/* If the MTU value is decreased for any of the included
		 * channels, then the receiver shall disconnect all
		 * included channels.
		 */
		if (chan->omtu > mtu) {
			BT_ERR("chan %p decreased MTU %u -> %u", chan,
			       chan->omtu, mtu);
			result = L2CAP_RECONF_INVALID_MTU;
		}

		chan->omtu = mtu;
		chan->remote_mps = mps;
	}

respond:
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
		       &rsp);

	return 0;
}
6333 
/* Handle L2CAP_CREDIT_BASED_RECONFIGURE_RSP.
 *
 * A successful result needs no action (our own parameters were already
 * updated when the request was sent).  On any failure, every channel
 * still tagged with the request's ident is torn down.
 *
 * Returns 0, or -EPROTO if the response is truncated.
 */
static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_chan *chan, *tmp;
	/* NOTE(review): the response is parsed with the (larger) connect
	 * response layout, so both the length check and the offset of
	 * 'result' follow that struct rather than the 2-byte
	 * l2cap_ecred_reconf_rsp - confirm this is intentional.
	 */
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	u16 result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	result = __le16_to_cpu(rsp->result);

	/* Log the host-order value, not the raw little-endian field */
	BT_DBG("result 0x%4.4x", result);

	if (!result)
		return 0;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		if (chan->ident != cmd->ident)
			continue;

		l2cap_chan_del(chan, ECONNRESET);
	}

	return 0;
}
6361 
/* Handle an incoming LE Command Reject: the peer did not understand a
 * request we sent, so the channel waiting on that ident (if any, and
 * if still alive) is torn down with ECONNREFUSED.
 *
 * Returns 0, or -EPROTO if the reject payload is truncated.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	mutex_lock(&conn->chan_lock);

	/* Look up the pending channel and take a reference on it, unless
	 * its refcount already dropped to zero.
	 */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (chan)
		chan = l2cap_chan_hold_unless_zero(chan);

	if (chan) {
		l2cap_chan_lock(chan);
		l2cap_chan_del(chan, ECONNREFUSED);
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);
	return 0;
}
6391 
/* Dispatch a single LE signaling command to its handler.
 *
 * Only request-style handlers propagate their error so the caller can
 * answer with a Command Reject; failures of reject/response handlers
 * are deliberately ignored.  Unknown opcodes yield -EINVAL.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* No action required on the response */
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
6454 
/* Process a PDU received on the LE signaling channel (fixed CID).
 *
 * Unlike BR/EDR, an LE signaling PDU carries exactly one command, so
 * the command length must match the remaining payload exactly.
 * Malformed PDUs are silently dropped; commands whose handler fails
 * are answered with a Command Reject (not understood).  Consumes the
 * skb in all cases.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	/* LE signaling is only valid on an LE link */
	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* The single command must span the whole PDU; ident 0 is reserved */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text is misleading - err here means
		 * the command failed or was unknown, not a link-type issue.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
6495 
/* Answer a malformed or unhandled signaling command with a Command
 * Reject whose reason is "command not understood".
 */
static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident)
{
	struct l2cap_cmd_rej_unk rej = {
		.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD),
	};

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
6503 
/* Process a BR/EDR signaling PDU (which, unlike LE, may contain
 * several concatenated commands).  Each well-formed command is passed
 * to l2cap_bredr_sig_cmd(); malformed or failing commands are answered
 * with a Command Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Mirror the raw signaling traffic to raw sockets first */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Truncated command or reserved ident 0: reject it and
		 * skip whatever part of it is actually present.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			l2cap_sig_send_rej(conn, cmd->ident);
			skb_pull(skb, len > skb->len ? skb->len : len);
			continue;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			BT_ERR("Wrong link type (%d)", err);
			l2cap_sig_send_rej(conn, cmd->ident);
		}

		skb_pull(skb, len);
	}

	/* Trailing bytes too short to form a command header */
	if (skb->len > 0) {
		BT_DBG("corrupted command");
		l2cap_sig_send_rej(conn, 0);
	}

drop:
	kfree_skb(skb);
}
6551 
l2cap_check_fcs(struct l2cap_chan * chan,struct sk_buff * skb)6552 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6553 {
6554 	u16 our_fcs, rcv_fcs;
6555 	int hdr_size;
6556 
6557 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6558 		hdr_size = L2CAP_EXT_HDR_SIZE;
6559 	else
6560 		hdr_size = L2CAP_ENH_HDR_SIZE;
6561 
6562 	if (chan->fcs == L2CAP_FCS_CRC16) {
6563 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6564 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6565 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6566 
6567 		if (our_fcs != rcv_fcs)
6568 			return -EBADMSG;
6569 	}
6570 	return 0;
6571 }
6572 
/* Answer a poll (P=1) from the peer with a frame carrying F=1: an RNR
 * when locally busy, otherwise preferably a pending I-frame, falling
 * back to an RR S-frame if no I-frame carried the F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Peer was busy but has recovered: restart the retransmission
	 * timer if frames are still outstanding.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
6606 
/* Chain @new_frag onto @skb's frag_list and advance *last_frag to it,
 * updating the head skb's accounting fields.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	/* For the very first fragment *last_frag is the head skb itself,
	 * so this also writes the head's ->next - appears harmless here
	 * since the head is not sitting on a queue at this point.
	 */
	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
6625 
/* Rebuild an SDU from one or more I-frames according to the SAR
 * (segmentation and reassembly) bits in @control.
 *
 * On success the skb is consumed: either delivered via
 * chan->ops->recv() or linked into the partially assembled chan->sdu.
 * On any error both the incoming skb and any partial SDU are freed and
 * the reassembly state is reset.
 *
 * Returns 0 on success, -EINVAL on a SAR sequence violation, -EMSGSIZE
 * when the announced SDU length exceeds the channel MTU, or the error
 * returned by chan->ops->recv().
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* No reassembly may be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The start fragment carries the total SDU length */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must not already hold the whole SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb ownership moved into the reassembly state */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must leave the SDU still incomplete */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The end fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* Drop both the fragment and any partial SDU */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6710 
/* Placeholder for resegmenting queued data after the connection MTU
 * changed (e.g. following a channel move); currently always succeeds.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
6716 
l2cap_chan_busy(struct l2cap_chan * chan,int busy)6717 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6718 {
6719 	u8 event;
6720 
6721 	if (chan->mode != L2CAP_MODE_ERTM)
6722 		return;
6723 
6724 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6725 	l2cap_tx(chan, NULL, NULL, event);
6726 }
6727 
/* Drain the SREJ queue by delivering frames in sequence order until a
 * gap is hit or local busy is raised.  Once the queue is empty the
 * channel returns to the default RECV state and acks the peer.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Next in-order frame not queued yet: stop at the gap */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6761 
/* Handle a received SREJ S-frame: retransmit the single I-frame the
 * peer asked for, honoring the P/F bits and the SREJ-actioned flag so
 * the same frame is not resent twice.  Disconnects the channel on an
 * invalid reqseq or when the retry limit is exceeded.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq refers to a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* Poll demands a final: retransmit with F=1 and remember
		 * which reqseq we already acted on.
		 */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit if this final matches an
			 * SREJ we already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6819 
/* Handle a received REJ S-frame: retransmit all unacked I-frames
 * starting at the peer's reqseq, honoring the F bit and the
 * REJ-actioned flag so a retransmission burst is not repeated.
 * Disconnects on an invalid reqseq or an exceeded retry limit.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq equal to next_tx_seq refers to a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this final doesn't answer a REJ we
		 * already acted on.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6856 
/* Classify an incoming I-frame sequence number relative to the receive
 * window and any outstanding SREJ state.  The returned L2CAP_TXSEQ_*
 * value drives the ERTM receive state machines.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6942 
/* ERTM receive state machine, default RECV state: in-sequence
 * reception of I-frames plus handling of RR/RNR/REJ/SREJ S-frames.
 * Frames that are delivered upward or queued set skb_in_use; any other
 * skb is freed before returning.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			/* From here on only the copied control is safe */
			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Plain RR: peer recovered from busy, rearm the
			 * retransmission timer if frames are outstanding.
			 */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	/* Anything neither delivered nor queued is dropped here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7094 
/* ERTM receive state machine, SREJ_SENT state: one or more SREJs are
 * outstanding, so incoming I-frames are collected in srej_q until the
 * missing frames arrive.  As in RECV, frames that are queued set
 * skb_in_use and every other skb is freed before returning.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we asked for first arrived:
			 * take it off the pending-SREJ list and try to
			 * drain the queue in order.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Answer the poll by re-requesting the newest
			 * outstanding SREJ with F=1.
			 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	/* Anything not queued for reassembly is dropped here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7237 
l2cap_finish_move(struct l2cap_chan * chan)7238 static int l2cap_finish_move(struct l2cap_chan *chan)
7239 {
7240 	BT_DBG("chan %p", chan);
7241 
7242 	chan->rx_state = L2CAP_RX_STATE_RECV;
7243 
7244 	if (chan->hs_hcon)
7245 		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
7246 	else
7247 		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
7248 
7249 	return l2cap_resegment(chan);
7250 }
7251 
/* ERTM receive state machine, WAIT_P state (channel move): wait for
 * the peer's poll (P=1), rewind the transmit side to the peer's
 * reqseq, complete the move, answer the poll with F=1 and then process
 * the event through the normal RECV handler.  I-frames are a protocol
 * error in this state.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	/* Fall through to the RECV state machine (skb already consumed) */
	return l2cap_rx_state_recv(chan, control, NULL, event);
}
7289 
/* Handle an incoming frame while in the WAIT_F ERTM rx state (remote
 * side moved the channel; we polled and are waiting for a frame with
 * the F bit set).
 *
 * A frame without the F bit is a protocol error.  On the final
 * response the remote-busy condition is cleared, the tx queue is
 * rewound to the receiver's reqseq, the MTU is refreshed for the new
 * controller and queued data is resegmented before the frame itself is
 * processed in the normal RECV state.
 *
 * Returns 0 on success or a negative errno; the caller disconnects the
 * channel on error.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Pick the MTU of whichever controller carries the channel now */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
7327 
__valid_reqseq(struct l2cap_chan * chan,u16 reqseq)7328 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7329 {
7330 	/* Make sure reqseq is for a packet that has been sent but not acked */
7331 	u16 unacked;
7332 
7333 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7334 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7335 }
7336 
/* ERTM receive-side state machine entry point.
 *
 * Validates control->reqseq (it must only acknowledge frames that are
 * actually outstanding) and dispatches the event to the handler for
 * the current rx_state.  An invalid reqseq is a protocol violation and
 * tears the channel down with ECONNRESET.
 *
 * Returns 0 on success or a negative errno propagated from the state
 * handlers; the caller disconnects the channel on error.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (__valid_reqseq(chan, control->reqseq)) {
		switch (chan->rx_state) {
		case L2CAP_RX_STATE_RECV:
			err = l2cap_rx_state_recv(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_SREJ_SENT:
			err = l2cap_rx_state_srej_sent(chan, control, skb,
						       event);
			break;
		case L2CAP_RX_STATE_WAIT_P:
			err = l2cap_rx_state_wait_p(chan, control, skb, event);
			break;
		case L2CAP_RX_STATE_WAIT_F:
			err = l2cap_rx_state_wait_f(chan, control, skb, event);
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return err;
}
7373 
/* Streaming-mode rx path: frames are never retransmitted, so an
 * out-of-sequence txseq simply discards any partially reassembled SDU
 * and drops the frame, while the expected sequence counters are always
 * advanced to resynchronize on the received txseq.  Always returns 0;
 * skb ownership is consumed on all paths.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %u->%u", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Unexpected txseq: throw away any SDU under reassembly */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize on the received frame regardless of outcome */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
7425 
/* Validate and dispatch an ERTM/streaming-mode PDU: checks the FCS,
 * the payload length against the negotiated MPS and the F/P bit
 * combinations, then routes I-frames to the rx state machine (or the
 * streaming path) and S-frames to the corresponding RR/REJ/RNR/SREJ
 * event handler.  Consumes the skb on all paths and always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Exclude SDU-length and FCS fields from the payload length */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Give the channel owner a chance to veto the frame */
	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the 2-bit super field to the rx event code */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
7518 
/* Top up the remote sender's credits on an LE/enhanced credit-based
 * channel.  The target credit level is imtu/mps + 1 (enough PDUs to
 * carry one full SDU); if the current rx_credits are already at or
 * above that level nothing is sent, otherwise an LE Flow Control
 * Credit packet returns the difference.
 */
static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_credits pkt;
	u16 return_credits;

	return_credits = (chan->imtu / chan->mps) + 1;

	if (chan->rx_credits >= return_credits)
		return;

	/* Only send the shortfall, not the full target */
	return_credits -= chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;

	pkt.cid     = cpu_to_le16(chan->scid);
	pkt.credits = cpu_to_le16(return_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}
7543 
/* Deliver a fully reassembled SDU on a credit-based channel to the
 * channel owner, then replenish the sender's credits.  Returns the
 * owner's recv() result; credits are returned even if recv fails so
 * the flow-control accounting stays consistent.
 */
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);

	/* Wait recv to confirm reception before updating the credits */
	err = chan->ops->recv(chan, skb);

	/* Update credits whenever an SDU is received */
	l2cap_chan_le_send_credits(chan);

	return err;
}
7558 
/* Receive one PDU on an LE/enhanced credit-based channel: account for
 * the consumed credit, then either start a new SDU (first PDU carries
 * a 16-bit SDU length header), deliver a single-PDU SDU immediately,
 * or append a continuation fragment to the SDU under reassembly.
 *
 * skb ownership: consumed on every path except the early -ENOBUFS
 * returns, where the caller frees it.  Reassembly errors free both the
 * fragment and the partial SDU but are reported as 0 to prevent the
 * caller from double-freeing (see comment at the end).
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU starts with the total SDU length */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Whole SDU in one PDU: deliver without buffering */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* Fragment is now owned by the SDU chain; don't free it below */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
7662 
/* Route an incoming data PDU to the channel identified by its source
 * CID and dispatch it according to the channel mode.  Unknown CIDs
 * (except A2MP, which may create its channel on demand) drop the
 * packet.  l2cap_get_chan_by_scid() returns the channel held and
 * locked; the hold/lock pair is released at 'done'.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			/* Match the hold+lock that get_chan_by_scid gives */
			l2cap_chan_hold(chan);
			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Consumes the skb on all paths */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
7739 
/* Deliver a connectionless (group) PDU to the global channel listening
 * on the given PSM over an ACL link.  The remote address and PSM are
 * stashed in the skb control block so recvmsg() can report msg_name.
 * The channel reference from l2cap_global_chan_by_psm() is dropped on
 * every path; the skb is freed unless recv() consumed it.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
7776 
/* Entry point for a complete L2CAP frame from HCI: validate the basic
 * header, then demultiplex by CID to the signaling, connectionless or
 * data-channel handlers.  Frames arriving before the HCI connection is
 * fully up are queued and replayed by process_pending_rx().
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	/* lh still points at the header bytes after the pull */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
7831 
process_pending_rx(struct work_struct * work)7832 static void process_pending_rx(struct work_struct *work)
7833 {
7834 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7835 					       pending_rx_work);
7836 	struct sk_buff *skb;
7837 
7838 	BT_DBG("");
7839 
7840 	while ((skb = skb_dequeue(&conn->pending_rx)))
7841 		l2cap_recv_frame(conn, skb);
7842 }
7843 
/* Create (or return the existing) L2CAP connection object for an HCI
 * connection: allocates the l2cap_conn, attaches an HCI channel, picks
 * the MTU from the controller (LE MTU if set for LE links, otherwise
 * the ACL MTU), advertises the locally supported fixed channels and
 * initializes locks, lists and deferred work.  Returns NULL on
 * allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	/* Already set up by a previous caller */
	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		fallthrough;
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* A2MP is only offered over ACL when high speed is enabled */
	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	/* BR/EDR SMP requires LE support plus secure connections (or an
	 * explicit debug override)
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7910 
is_valid_psm(u16 psm,u8 dst_type)7911 static bool is_valid_psm(u16 psm, u8 dst_type)
7912 {
7913 	if (!psm)
7914 		return false;
7915 
7916 	if (bdaddr_type_is_le(dst_type))
7917 		return (psm <= 0x00ff);
7918 
7919 	/* PSM must be odd and lsb of upper byte must be 0 */
7920 	return ((psm & 0x0101) == 0x0001);
7921 }
7922 
/* Iteration context for l2cap_chan_by_pid(): counts channels that
 * match the reference channel's PSM and owning PID.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* reference channel (excluded) */
	struct pid *pid;		/* peer PID to match */
	int count;			/* matching channels found */
};
7928 
/* l2cap_chan_list() callback: count deferred ECRED channels to the
 * same PSM from the same PID, so l2cap_chan_connect() can cap how many
 * SCIDs one ECRED connect request may carry.
 */
static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
{
	struct l2cap_chan_data *d = data;
	struct pid *pid;

	/* Don't count the channel being connected itself */
	if (chan == d->chan)
		return;

	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only count deferred channels with the same PID/PSM */
	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	d->count++;
}
7949 
/* Connect an L2CAP channel to a remote device.
 *
 * Validates the PSM/CID against the channel type and destination
 * address type, checks the requested mode is enabled, establishes (or
 * reuses) the underlying HCI connection — LE direct/scan connect or
 * ACL — then adds the channel to the connection and kicks off the
 * L2CAP-level connect once the link is up.
 *
 * @chan:     channel to connect (BT_OPEN or BT_BOUND)
 * @psm:      destination PSM (little-endian), or 0 for fixed channels
 * @cid:      destination CID for fixed channels, or 0
 * @dst:      remote address
 * @dst_type: remote address type (BDADDR_*)
 *
 * Returns 0 on success (connect in progress or already connecting),
 * -EISCONN if already connected, or a negative errno on failure.
 * Called with no locks held; takes hdev lock, conn->chan_lock and the
 * channel lock internally.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* RAW channels may connect with neither PSM nor CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are disabled via module parameters */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we cannot scan, so connect directly;
		 * otherwise connect via passive scanning
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type, false,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* The requested fixed CID must not already be in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8136 
/* Send an ECRED Reconfigure Request advertising the channel's current
 * MTU and MPS for its single SCID.  The request PDU is followed by the
 * list of affected CIDs, hence the wrapper struct with one scid.
 */
static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct {
		struct l2cap_ecred_reconf_req req;
		__le16 scid;
	} pdu;

	pdu.req.mtu = cpu_to_le16(chan->imtu);
	pdu.req.mps = cpu_to_le16(chan->mps);
	pdu.scid    = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
		       sizeof(pdu), &pdu);
}
8154 
/* Raise the channel's receive MTU and notify the peer via an ECRED
 * reconfigure request.  The MTU may only grow; shrinking it returns
 * -EINVAL.  Returns 0 on success.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	if (mtu < chan->imtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;
	l2cap_ecred_reconfigure(chan);

	return 0;
}
8168 
8169 /* ---- L2CAP interface with lower layer (HCI) ---- */
8170 
/* HCI callback for an incoming ACL connect request: scan the global
 * channel list for listening sockets and build the accept/role-switch
 * link-mode mask.  Sockets bound to this adapter's own address take
 * precedence over BDADDR_ANY wildcard listeners.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	/* Exact-address listeners win over wildcard listeners */
	return exact ? lm1 : lm2;
}
8199 
8200 /* Find the next fixed channel in BT_LISTEN state, continue iteration
8201  * from an existing channel in the list or from the beginning of the
8202  * global list (by passing NULL as first parameter).
8203  */
/* Find the next listening fixed channel matching the connection's
 * source address and address type.  Continues from 'c' (exclusive) or
 * from the start of the global list when c is NULL.  The returned
 * channel is held via l2cap_chan_hold_unless_zero(); the caller must
 * l2cap_chan_put() it.  Returns NULL when no further match exists.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Skip channels already being torn down (refcount zero) */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
8235 
/* HCI callback: an ACL or LE connection completed.  On failure the
 * L2CAP connection is torn down; on success the l2cap_conn is created,
 * every listening fixed channel gets a new child channel attached to
 * the connection, and pending channels are started.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the hold on the current entry */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
8296 
l2cap_disconn_ind(struct hci_conn * hcon)8297 int l2cap_disconn_ind(struct hci_conn *hcon)
8298 {
8299 	struct l2cap_conn *conn = hcon->l2cap_data;
8300 
8301 	BT_DBG("hcon %p", hcon);
8302 
8303 	if (!conn)
8304 		return HCI_ERROR_REMOTE_USER_TERM;
8305 	return conn->disc_reason;
8306 }
8307 
/* HCI callback: the ACL/LE link went down — tear down the L2CAP
 * connection and all of its channels with the translated errno.
 */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
8317 
/* React to an encryption change on a connection-oriented channel.
 * Losing encryption gives MEDIUM-security channels a grace timer and
 * closes HIGH/FIPS channels immediately; regaining encryption cancels
 * the grace timer for MEDIUM channels.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	if (encrypt == 0x00) {
		if (chan->sec_level == BT_SECURITY_MEDIUM) {
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		} else if (chan->sec_level == BT_SECURITY_HIGH ||
			   chan->sec_level == BT_SECURITY_FIPS)
			l2cap_chan_close(chan, ECONNREFUSED);
	} else {
		if (chan->sec_level == BT_SECURITY_MEDIUM)
			__clear_chan_timer(chan);
	}
}
8334 
/* HCI callback: authentication/encryption completed (status) with the
 * new encryption state (encrypt).  Walks every channel on the
 * connection and, depending on its state, resumes it, starts the
 * L2CAP connect, or answers a pending incoming connect request —
 * enforcing the minimum encryption key size in both directions.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* A2MP channels do not follow the normal security flow */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		/* Skip channels that already have a pending request */
		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Kick off configuration right after a success rsp */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
8426 
8427 /* Append fragment into frame respecting the maximum len of rx_skb */
/* Append fragment into frame respecting the maximum len of rx_skb.
 * On the first fragment a reassembly buffer of @len bytes is allocated
 * and conn->rx_len is initialised to the expected total.
 * Returns the number of bytes consumed from @skb, or -ENOMEM.
 */
static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb,
			   u16 len)
{
	u16 copy;

	if (!conn->rx_skb) {
		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			return -ENOMEM;
		/* Remember how many bytes are still expected */
		conn->rx_len = len;
	}

	/* Never copy more than this fragment actually carries */
	copy = min_t(u16, len, skb->len);
	skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, copy), copy);
	skb_pull(skb, copy);
	conn->rx_len -= copy;

	return copy;
}
8448 
/* Finish reading the 16-bit L2CAP length field of the frame being
 * reassembled in conn->rx_skb and, once known, ensure rx_skb can hold the
 * whole frame, reallocating it when the initial guess was too small.
 * Returns bytes consumed from @skb, or a negative errno.
 */
static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *rx_skb;
	int len;

	/* Append just enough to complete the header */
	len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len);

	/* If header could not be read just continue */
	if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE)
		return len;

	rx_skb = conn->rx_skb;
	len = get_unaligned_le16(rx_skb->data);

	/* Check if rx_skb has enough space to received all fragments */
	if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) {
		/* Update expected len */
		conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE);
		return L2CAP_LEN_SIZE;
	}

	/* Reset conn->rx_skb since it will need to be reallocated in order to
	 * fit all fragments.
	 */
	conn->rx_skb = NULL;

	/* Reallocates rx_skb using the exact expected length; with rx_skb
	 * detached above, l2cap_recv_frag() allocates a fresh buffer and
	 * copies the already-received bytes out of the old one.
	 */
	len = l2cap_recv_frag(conn, rx_skb,
			      len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
	kfree_skb(rx_skb);

	return len;
}
8483 
l2cap_recv_reset(struct l2cap_conn * conn)8484 static void l2cap_recv_reset(struct l2cap_conn *conn)
8485 {
8486 	kfree_skb(conn->rx_skb);
8487 	conn->rx_skb = NULL;
8488 	conn->rx_len = 0;
8489 }
8490 
/* HCI entry point for inbound ACL data: reassembles HCI fragments into
 * complete L2CAP frames and hands them to l2cap_recv_frame().
 * Takes ownership of @skb: every path either copies it into conn->rx_skb,
 * transfers it to l2cap_recv_frame(), or frees it at the drop label.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A start frame while a reassembly is in progress means the
		 * previous frame was truncated - discard it.
		 */
		if (conn->rx_skb) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment may not contain the L2CAP length so just
		 * copy the initial byte when that happens and use conn->mtu as
		 * expected length.
		 */
		if (skb->len < L2CAP_LEN_SIZE) {
			l2cap_recv_frag(conn, skb, conn->mtu);
			break;
		}

		len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %u", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %u, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		if (l2cap_recv_frag(conn, skb, len) < 0)
			goto drop;

		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);

		/* Continuation without a start frame cannot be reassembled */
		if (!conn->rx_skb) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Complete the L2CAP length if it has not been read */
		if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
			if (l2cap_recv_len(conn, skb) < 0) {
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}

			/* Header still could not be read just continue */
			if (conn->rx_skb->len < L2CAP_LEN_SIZE)
				break;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %u, expected %u)",
			       skb->len, conn->rx_len);
			l2cap_recv_reset(conn);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Append fragment into frame (with header) */
		l2cap_recv_frag(conn, skb, skb->len);

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

	/* Fragments were copied into rx_skb (or rejected), so the original
	 * skb is always freed here; full frames returned earlier.
	 */
drop:
	kfree_skb(skb);
}
8597 
/* Callbacks registered with the HCI core so L2CAP gets notified of ACL/LE
 * link connect, disconnect and security (encryption/authentication) events.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
8604 
l2cap_debugfs_show(struct seq_file * f,void * p)8605 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8606 {
8607 	struct l2cap_chan *c;
8608 
8609 	read_lock(&chan_list_lock);
8610 
8611 	list_for_each_entry(c, &chan_list, global_l) {
8612 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8613 			   &c->src, c->src_type, &c->dst, c->dst_type,
8614 			   c->state, __le16_to_cpu(c->psm),
8615 			   c->scid, c->dcid, c->imtu, c->omtu,
8616 			   c->sec_level, c->mode);
8617 	}
8618 
8619 	read_unlock(&chan_list_lock);
8620 
8621 	return 0;
8622 }
8623 
/* Generates l2cap_debugfs_fops wrapping l2cap_debugfs_show() */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* Dentry of the "l2cap" debugfs file; removed again in l2cap_exit() */
static struct dentry *l2cap_debugfs;
8627 
l2cap_init(void)8628 int __init l2cap_init(void)
8629 {
8630 	int err;
8631 
8632 	err = l2cap_init_sockets();
8633 	if (err < 0)
8634 		return err;
8635 
8636 	hci_register_cb(&l2cap_cb);
8637 
8638 	if (IS_ERR_OR_NULL(bt_debugfs))
8639 		return 0;
8640 
8641 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8642 					    NULL, &l2cap_debugfs_fops);
8643 
8644 	return 0;
8645 }
8646 
/* L2CAP subsystem teardown: undoes l2cap_init() in reverse order. */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
8653 
/* Runtime-tunable module parameters (world-readable, root-writable) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8659