• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5    Copyright (C) 2010 Google Inc.
6    Copyright (C) 2011 ProFUSION Embedded Systems
7    Copyright (c) 2012 Code Aurora Forum.  All rights reserved.
8 
9    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 
11    This program is free software; you can redistribute it and/or modify
12    it under the terms of the GNU General Public License version 2 as
13    published by the Free Software Foundation;
14 
15    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 
24    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26    SOFTWARE IS DISCLAIMED.
27 */
28 
29 /* Bluetooth L2CAP core. */
30 
31 #include <linux/module.h>
32 
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
35 #include <linux/filter.h>
36 
37 #include <net/bluetooth/bluetooth.h>
38 #include <net/bluetooth/hci_core.h>
39 #include <net/bluetooth/l2cap.h>
40 
41 #include "smp.h"
42 #include "a2mp.h"
43 #include "amp.h"
44 
/* Maximum credit count a peer may hold in LE credit based flow control. */
#define LE_FLOWCTL_MAX_CREDITS 65535

/* Runtime knobs: opt out of ERTM / opt in to enhanced credit based
 * flow control.  Presumably exposed as module parameters -- the
 * module_param declarations are not visible in this chunk.
 */
bool disable_ertm;
bool enable_ecred;

/* Feature mask advertised to peers (fixed channels + unicast
 * connectionless data).
 */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;

/* Global list of every L2CAP channel, protected by chan_list_lock. */
static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

/* Forward declarations for helpers defined later in this file. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);
static void l2cap_retrans_timeout(struct work_struct *work);
static void l2cap_monitor_timeout(struct work_struct *work);
static void l2cap_ack_timeout(struct work_struct *work);
/* Map an HCI link type / address type pair to a bdaddr type.  Anything
 * that is not an LE link is BR/EDR.
 */
static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
{
	if (link_type != LE_LINK)
		return BDADDR_BREDR;

	return (bdaddr_type == ADDR_LE_DEV_PUBLIC) ? BDADDR_LE_PUBLIC :
						     BDADDR_LE_RANDOM;
}
79 
bdaddr_src_type(struct hci_conn * hcon)80 static inline u8 bdaddr_src_type(struct hci_conn *hcon)
81 {
82 	return bdaddr_type(hcon->type, hcon->src_type);
83 }
84 
bdaddr_dst_type(struct hci_conn * hcon)85 static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
86 {
87 	return bdaddr_type(hcon->type, hcon->dst_type);
88 }
89 
90 /* ---- L2CAP channels ---- */
91 
/* Look up a channel on @conn by destination CID.  Caller must hold
 * conn->chan_lock; no reference is taken on the returned channel.
 */
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->dcid == cid)
			return chan;
	}

	return NULL;
}
103 
/* Look up a channel on @conn by source CID.  Caller must hold
 * conn->chan_lock; no reference is taken on the returned channel.
 */
static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->scid == cid)
			return chan;
	}

	return NULL;
}
115 
/* Find channel with given SCID.
 * Returns a reference locked channel.
 *
 * The caller owns the returned reference and the channel lock: it must
 * call l2cap_chan_unlock() and l2cap_chan_put() when done.  Returns
 * NULL if no channel matches or the channel's refcount already hit 0.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}
136 
/* Find channel with given DCID.
 * Returns a reference locked channel.
 *
 * Same contract as l2cap_get_chan_by_scid(): the caller must release
 * with l2cap_chan_unlock() and l2cap_chan_put().  NULL on miss or if
 * the channel's refcount already dropped to 0.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}
157 
/* Look up a channel on @conn by signalling command identifier.  Caller
 * must hold conn->chan_lock; no reference is taken.
 */
static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *chan;

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->ident == ident)
			return chan;
	}

	return NULL;
}
169 
/* Find channel with given command ident.
 * Returns a reference locked channel; caller releases with
 * l2cap_chan_unlock() and l2cap_chan_put().  NULL on miss or if the
 * channel's refcount already dropped to 0.
 */
static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c) {
		/* Only lock if chan reference is not 0 */
		c = l2cap_chan_hold_unless_zero(c);
		if (c)
			l2cap_chan_lock(c);
	}
	mutex_unlock(&conn->chan_lock);

	return c;
}
187 
/* Find a channel in the global list bound to @psm and source address
 * @src on the same transport (BR/EDR vs LE) as @src_type.  Caller must
 * hold chan_list_lock; no reference is taken.
 */
static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src,
						      u8 src_type)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		/* Skip channels on the other transport. */
		if ((src_type == BDADDR_BREDR) != (c->src_type == BDADDR_BREDR))
			continue;

		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}

	return NULL;
}
205 
/* Bind @chan to @psm, or pick a free dynamic PSM when @psm is 0.
 *
 * Uniqueness against other channels bound to the same source address
 * and transport is checked under chan_list_lock.  Dynamic allocation
 * scans L2CAP_PSM_DYN_START..L2CAP_PSM_AUTO_END in steps of 2 for
 * BR/EDR (valid BR/EDR PSMs are odd -- per spec, confirm) and
 * L2CAP_PSM_LE_DYN_START..L2CAP_PSM_LE_DYN_END in steps of 1 for LE.
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is taken, or
 * -EINVAL when the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p, start, end, incr;

		if (chan->src_type == BDADDR_BREDR) {
			start = L2CAP_PSM_DYN_START;
			end = L2CAP_PSM_AUTO_END;
			incr = 2;
		} else {
			start = L2CAP_PSM_LE_DYN_START;
			end = L2CAP_PSM_LE_DYN_END;
			incr = 1;
		}

		err = -EINVAL;
		for (p = start; p <= end; p += incr)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src,
							 chan->src_type)) {
				chan->psm   = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
250 
/* Turn @chan into a fixed channel with source CID @scid.  Always
 * succeeds and returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan,  __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->chan_type = L2CAP_CHAN_FIXED;
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
265 
l2cap_alloc_cid(struct l2cap_conn * conn)266 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
267 {
268 	u16 cid, dyn_end;
269 
270 	if (conn->hcon->type == LE_LINK)
271 		dyn_end = L2CAP_CID_LE_DYN_END;
272 	else
273 		dyn_end = L2CAP_CID_DYN_END;
274 
275 	for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
276 		if (!__l2cap_get_chan_by_scid(conn, cid))
277 			return cid;
278 	}
279 
280 	return 0;
281 }
282 
/* Move @chan to @state and notify the owner via the state_change
 * callback with no error.
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
291 
/* Like l2cap_state_change() but also propagates @err to the owner's
 * state_change callback.
 */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
298 
/* Report @err to the channel owner without changing the state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
303 
/* Arm the ERTM retransmission timer, unless the monitor timer is
 * already pending (monitor takes precedence) or retrans_timeout is 0
 * (timer disabled).
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
312 
/* Arm the ERTM monitor timer (and stop the retransmission timer, which
 * it supersedes).  A zero monitor_timeout disables the timer.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
321 
/* Return the queued frame whose tx sequence number is @seq, or NULL if
 * no such frame is on @head.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *cur;

	skb_queue_walk(head, cur) {
		if (bt_cb(cur)->l2cap.txseq == seq)
			return cur;
	}

	return NULL;
}
334 
335 /* ---- L2CAP sequence number lists ---- */
336 
337 /* For ERTM, ordered lists of sequence numbers must be tracked for
338  * SREJ requests that are received and for frames that are to be
339  * retransmitted. These seq_list functions implement a singly-linked
340  * list in an array, where membership in the list can also be checked
341  * in constant time. Items can also be added to the tail of the list
342  * and removed from the head in constant time, without further memory
343  * allocs or frees.
344  */
345 
/* Allocate and reset a sequence list sized for @size entries.
 * Returns 0 on success or -ENOMEM.
 */
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t nelem, idx;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	nelem = roundup_pow_of_two(size);

	seq_list->list = kmalloc_array(nelem, sizeof(u16), GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = nelem - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;

	for (idx = 0; idx < nelem; idx++)
		seq_list->list[idx] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}
368 
/* Release the backing array allocated by l2cap_seq_list_init(). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
373 
/* True if @seq is currently a member of the list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
380 
/* Remove and return the sequence number at the head of the list.
 *
 * The head slot is cleared and head advances to the next linked entry;
 * when the popped entry was the tail, the list is reset to empty.
 * Callers are presumably expected to check the list is non-empty
 * first -- no guard here (TODO confirm against callers).
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	/* Advance head to the entry the old head pointed at. */
	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		/* Popped the last entry: list is now empty. */
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
396 
l2cap_seq_list_clear(struct l2cap_seq_list * seq_list)397 static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
398 {
399 	u16 i;
400 
401 	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
402 		return;
403 
404 	for (i = 0; i <= seq_list->mask; i++)
405 		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
406 
407 	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
408 	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
409 }
410 
/* Append @seq to the tail of the list; duplicates are ignored. */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	/* Already a member: nothing to do. */
	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		/* Empty list: @seq becomes the head too. */
		seq_list->head = seq;
	else
		/* Link the old tail to @seq. */
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
428 
/* Delayed-work handler for the channel timer: close the channel with a
 * state-dependent reason.  conn->chan_lock is taken before the channel
 * lock, matching the locking order used elsewhere in this file.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
	 * this work. No need to call l2cap_chan_hold(chan) here again.
	 */
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	/* Balances the hold taken when the timer was scheduled. */
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);
}
461 
/* Allocate and initialise a new channel in BT_OPEN state.
 *
 * The channel starts with one reference (kref_init) and is linked into
 * the global chan_list.  Uses GFP_ATOMIC, so presumably callable from
 * non-sleeping context -- TODO confirm callers.  Returns NULL on
 * allocation failure.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	skb_queue_head_init(&chan->tx_q);
	skb_queue_head_init(&chan->srej_q);
	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
498 
/* kref release callback: unlink the channel from the global list and
 * free it.  Invoked from l2cap_chan_put() when the last reference is
 * dropped.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
511 
l2cap_chan_hold(struct l2cap_chan * c)512 void l2cap_chan_hold(struct l2cap_chan *c)
513 {
514 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
515 
516 	kref_get(&c->kref);
517 }
518 
/* Take a reference only if the refcount has not already dropped to 0.
 * Returns @c on success or NULL if the channel is being destroyed --
 * used by the lookup helpers to avoid resurrecting a dying channel.
 */
struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref));

	if (!kref_get_unless_zero(&c->kref))
		return NULL;

	return c;
}
528 
l2cap_chan_put(struct l2cap_chan * c)529 void l2cap_chan_put(struct l2cap_chan *c)
530 {
531 	BT_DBG("chan %p orig refcnt %d", c, kref_read(&c->kref));
532 
533 	kref_put(&c->kref, l2cap_chan_destroy);
534 }
535 EXPORT_SYMBOL_GPL(l2cap_chan_put);
536 
/* Reset a channel to protocol default parameters (FCS, ERTM window and
 * timer values, security level, flush timeout) and re-mark it as not
 * yet configured.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs  = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

	chan->conf_state = 0;
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
557 
/* Initialise LE credit based flow control state: reset SDU reassembly,
 * set the peer's initial TX credits and compute our MPS and RX credits.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = tx_credits;
	/* Derive MPS from connection MTU to stop HCI fragmentation */
	chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE);
	/* Give enough credits for a full packet */
	chan->rx_credits = (chan->imtu / chan->mps) + 1;

	skb_queue_head_init(&chan->tx_q);
}
571 
/* Initialise enhanced credit based flow control state on top of the LE
 * flow control defaults, enforcing the ECRED minimum MPS.
 */
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
{
	l2cap_le_flowctl_init(chan, tx_credits);

	/* L2CAP implementations shall support a minimum MPS of 64 octets */
	if (chan->mps < L2CAP_ECRED_MIN_MPS) {
		chan->mps = L2CAP_ECRED_MIN_MPS;
		/* Recompute RX credits for the adjusted MPS. */
		chan->rx_credits = (chan->imtu / chan->mps) + 1;
	}
}
582 
/* Attach @chan to @conn; the caller must hold conn->chan_lock (see
 * l2cap_chan_add()).  Assigns CIDs and MTU according to the channel
 * type, sets local QoS defaults, takes a channel reference for the
 * list membership and -- when required -- a reference on the
 * underlying hci_conn.
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* Local extended flow spec defaults (best effort service). */
	chan->local_id		= L2CAP_BESTEFFORT_ID;
	chan->local_stype	= L2CAP_SERV_BESTEFFORT;
	chan->local_msdu	= L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime	= L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat	= L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to	= L2CAP_EFS_DEFAULT_FLUSH_TO;

	/* Reference held for membership on conn->chan_l; dropped in
	 * l2cap_chan_del().
	 */
	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
634 
/* Locked wrapper for __l2cap_chan_add(). */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
641 
l2cap_chan_del(struct l2cap_chan * chan,int err)642 void l2cap_chan_del(struct l2cap_chan *chan, int err)
643 {
644 	struct l2cap_conn *conn = chan->conn;
645 
646 	__clear_chan_timer(chan);
647 
648 	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
649 	       state_to_string(chan->state));
650 
651 	chan->ops->teardown(chan, err);
652 
653 	if (conn) {
654 		struct amp_mgr *mgr = conn->hcon->amp_mgr;
655 		/* Delete from channel list */
656 		list_del(&chan->list);
657 
658 		l2cap_chan_put(chan);
659 
660 		chan->conn = NULL;
661 
662 		/* Reference was only held for non-fixed channels or
663 		 * fixed channels that explicitly requested it using the
664 		 * FLAG_HOLD_HCI_CONN flag.
665 		 */
666 		if (chan->chan_type != L2CAP_CHAN_FIXED ||
667 		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
668 			hci_conn_drop(conn->hcon);
669 
670 		if (mgr && mgr->bredr_chan == chan)
671 			mgr->bredr_chan = NULL;
672 	}
673 
674 	if (chan->hs_hchan) {
675 		struct hci_chan *hs_hchan = chan->hs_hchan;
676 
677 		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
678 		amp_disconnect_logical_link(hs_hchan);
679 	}
680 
681 	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
682 		return;
683 
684 	switch(chan->mode) {
685 	case L2CAP_MODE_BASIC:
686 		break;
687 
688 	case L2CAP_MODE_LE_FLOWCTL:
689 	case L2CAP_MODE_EXT_FLOWCTL:
690 		skb_queue_purge(&chan->tx_q);
691 		break;
692 
693 	case L2CAP_MODE_ERTM:
694 		__clear_retrans_timer(chan);
695 		__clear_monitor_timer(chan);
696 		__clear_ack_timer(chan);
697 
698 		skb_queue_purge(&chan->srej_q);
699 
700 		l2cap_seq_list_free(&chan->srej_list);
701 		l2cap_seq_list_free(&chan->retrans_list);
702 		fallthrough;
703 
704 	case L2CAP_MODE_STREAMING:
705 		skb_queue_purge(&chan->tx_q);
706 		break;
707 	}
708 
709 	return;
710 }
711 EXPORT_SYMBOL_GPL(l2cap_chan_del);
712 
/* Invoke @func on every channel of @conn whose ident matches @id.  The
 * _safe iterator tolerates @func unlinking the current channel.
 */
static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id,
				 l2cap_chan_func_t func, void *data)
{
	struct l2cap_chan *cur, *tmp;

	list_for_each_entry_safe(cur, tmp, &conn->chan_l, list) {
		if (cur->ident != id)
			continue;

		func(cur, data);
	}
}
723 
/* Invoke @func on every channel of @conn.  Caller must hold
 * conn->chan_lock.
 */
static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
			      void *data)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list)
		func(c, data);
}
733 
/* Run @func on every channel of @conn while holding conn->chan_lock.
 * A NULL @conn is tolerated and treated as a no-op.
 */
void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func,
		     void *data)
{
	if (!conn)
		return;

	mutex_lock(&conn->chan_lock);
	__l2cap_chan_list(conn, func, data);
	mutex_unlock(&conn->chan_lock);
}
EXPORT_SYMBOL_GPL(l2cap_chan_list);
746 
/* Work handler: after the HCI connection's identity address changed,
 * copy the new destination address and type into every channel on the
 * connection.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
765 
l2cap_chan_le_connect_reject(struct l2cap_chan * chan)766 static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
767 {
768 	struct l2cap_conn *conn = chan->conn;
769 	struct l2cap_le_conn_rsp rsp;
770 	u16 result;
771 
772 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
773 		result = L2CAP_CR_LE_AUTHORIZATION;
774 	else
775 		result = L2CAP_CR_LE_BAD_PSM;
776 
777 	l2cap_state_change(chan, BT_DISCONN);
778 
779 	rsp.dcid    = cpu_to_le16(chan->scid);
780 	rsp.mtu     = cpu_to_le16(chan->imtu);
781 	rsp.mps     = cpu_to_le16(chan->mps);
782 	rsp.credits = cpu_to_le16(chan->rx_credits);
783 	rsp.result  = cpu_to_le16(result);
784 
785 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
786 		       &rsp);
787 }
788 
/* Reject a pending enhanced credit based connection: move to BT_DISCONN
 * and send the deferred ECRED connection response.
 */
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
{
	l2cap_state_change(chan, BT_DISCONN);

	__l2cap_ecred_conn_rsp_defer(chan);
}
795 
l2cap_chan_connect_reject(struct l2cap_chan * chan)796 static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
797 {
798 	struct l2cap_conn *conn = chan->conn;
799 	struct l2cap_conn_rsp rsp;
800 	u16 result;
801 
802 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
803 		result = L2CAP_CR_SEC_BLOCK;
804 	else
805 		result = L2CAP_CR_BAD_PSM;
806 
807 	l2cap_state_change(chan, BT_DISCONN);
808 
809 	rsp.scid   = cpu_to_le16(chan->dcid);
810 	rsp.dcid   = cpu_to_le16(chan->scid);
811 	rsp.result = cpu_to_le16(result);
812 	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
813 
814 	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
815 }
816 
/* Close @chan according to its current state:
 * - BT_LISTEN: tear down only.
 * - BT_CONNECTED/BT_CONFIG: connection-oriented channels send a
 *   disconnect request and (re)arm the channel timer; others are
 *   deleted immediately.
 * - BT_CONNECT2: reject the not-yet-accepted incoming request
 *   (transport/mode specific), then delete the channel.
 * - BT_CONNECT/BT_DISCONN: delete the channel.
 * - anything else: tear down only.
 * Caller presumably holds the channel lock -- TODO confirm callers.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK) {
				switch (chan->mode) {
				case L2CAP_MODE_LE_FLOWCTL:
					l2cap_chan_le_connect_reject(chan);
					break;
				case L2CAP_MODE_EXT_FLOWCTL:
					l2cap_chan_ecred_connect_reject(chan);
					/* ECRED reject path skips the delete below. */
					return;
				}
			}
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
867 
/* Map the channel type and security level to an HCI authentication
 * requirement.
 *
 * Side effect: for connectionless 3DSP and connection-oriented SDP
 * channels, a BT_SECURITY_LOW level is raised to BT_SECURITY_SDP
 * before the mapping is applied.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		/* Raw (signalling-only) channels use dedicated bonding. */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* Non-SDP connection-oriented channels fall back to the
		 * general bonding rules below.
		 */
		fallthrough;

	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
919 
/* Service level security */
/* Request the channel's required security level on the underlying
 * link: SMP for LE links, HCI authentication/encryption for BR/EDR.
 * Return value comes from smp_conn_security()/hci_conn_security().
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
934 
/* Allocate the next signalling command identifier for @conn.
 * Identifiers cycle through 1..128 under conn->ident_lock, so 0 is
 * never returned.
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
956 
/* Build an L2CAP signalling PDU and queue it on the connection's HCI
 * channel at maximum priority.  Silently drops the command if the
 * skb allocation in l2cap_build_cmd() fails.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
981 
__chan_is_moving(struct l2cap_chan * chan)982 static bool __chan_is_moving(struct l2cap_chan *chan)
983 {
984 	return chan->move_state != L2CAP_MOVE_STABLE &&
985 	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
986 }
987 
/* Transmit @skb on the channel's data path.
 *
 * If the channel currently has a high-speed (AMP) connection and no
 * move is in progress, send on the AMP logical link (or drop the frame
 * if the AMP HCI channel is gone).  Otherwise send on the primary ACL
 * link with the appropriate flush flags.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
1019 
/* Decode a 16-bit ERTM enhanced control field into @control.  Fields
 * that do not exist for the decoded frame type are zeroed.
 */
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
1043 
/* Decode a 32-bit extended (ERTM, extended window) control field into
 * @control. Fields that do not exist for the frame type are zeroed.
 */
static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	bool sframe = !!(ext & L2CAP_EXT_CTRL_FRAME_TYPE);

	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
	control->sframe = sframe ? 1 : 0;

	if (sframe) {
		/* S-Frame: supervisory fields only */
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame: segmentation and sequence fields only */
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
		control->poll = 0;
		control->super = 0;
	}
}
1067 
__unpack_control(struct l2cap_chan * chan,struct sk_buff * skb)1068 static inline void __unpack_control(struct l2cap_chan *chan,
1069 				    struct sk_buff *skb)
1070 {
1071 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1072 		__unpack_extended_control(get_unaligned_le32(skb->data),
1073 					  &bt_cb(skb)->l2cap);
1074 		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
1075 	} else {
1076 		__unpack_enhanced_control(get_unaligned_le16(skb->data),
1077 					  &bt_cb(skb)->l2cap);
1078 		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
1079 	}
1080 }
1081 
__pack_extended_control(struct l2cap_ctrl * control)1082 static u32 __pack_extended_control(struct l2cap_ctrl *control)
1083 {
1084 	u32 packed;
1085 
1086 	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
1087 	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
1088 
1089 	if (control->sframe) {
1090 		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
1091 		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
1092 		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
1093 	} else {
1094 		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
1095 		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
1096 	}
1097 
1098 	return packed;
1099 }
1100 
__pack_enhanced_control(struct l2cap_ctrl * control)1101 static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
1102 {
1103 	u16 packed;
1104 
1105 	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1106 	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1107 
1108 	if (control->sframe) {
1109 		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1110 		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1111 		packed |= L2CAP_CTRL_FRAME_TYPE;
1112 	} else {
1113 		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1114 		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1115 	}
1116 
1117 	return packed;
1118 }
1119 
__pack_control(struct l2cap_chan * chan,struct l2cap_ctrl * control,struct sk_buff * skb)1120 static inline void __pack_control(struct l2cap_chan *chan,
1121 				  struct l2cap_ctrl *control,
1122 				  struct sk_buff *skb)
1123 {
1124 	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1125 		put_unaligned_le32(__pack_extended_control(control),
1126 				   skb->data + L2CAP_HDR_SIZE);
1127 	} else {
1128 		put_unaligned_le16(__pack_enhanced_control(control),
1129 				   skb->data + L2CAP_HDR_SIZE);
1130 	}
1131 }
1132 
__ertm_hdr_size(struct l2cap_chan * chan)1133 static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1134 {
1135 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1136 		return L2CAP_EXT_HDR_SIZE;
1137 	else
1138 		return L2CAP_ENH_HDR_SIZE;
1139 }
1140 
/* Allocate and build an S-Frame PDU carrying the already-packed
 * @control field for @chan: basic header, 16/32-bit control field and
 * (if the channel uses CRC16) a trailing FCS computed over the whole
 * frame. Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic header: payload length excludes the header itself */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers header + control field written above */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	/* S-Frames are control traffic: send at highest priority */
	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1173 
/* Build and transmit an ERTM S-Frame described by @control, updating
 * the related connection state (F-bit piggybacking, RNR-sent tracking,
 * last acked sequence / ack timer) as a side effect. No-op if @control
 * is not an S-Frame or a channel move is in progress.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* Piggyback a pending Final bit on any non-poll S-Frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	/* Any frame but SREJ acknowledges up to reqseq */
	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1214 
/* Send a Receiver Ready S-Frame, or Receiver Not Ready if we are
 * locally busy, acknowledging up to chan->buffer_seq. @poll sets the
 * P-bit to solicit a response with the F-bit set.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
1233 
__l2cap_no_conn_pending(struct l2cap_chan * chan)1234 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
1235 {
1236 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1237 		return true;
1238 
1239 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
1240 }
1241 
/* Decide whether @chan may be created on an AMP controller: both sides
 * must support the A2MP fixed channel, at least one non-BR/EDR AMP
 * controller must be up, and the channel policy must prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	/* Scan registered controllers for a powered-up AMP */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1269 
/* Validate the channel's Extended Flow Specification parameters.
 * Currently a stub that accepts everything; kept so callers have a
 * single hook once real EFS validation is implemented.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1275 
/* Send an L2CAP Connection Request for @chan on its connection,
 * allocating a fresh command ident and marking the connect as pending
 * so a duplicate request is not issued before the response arrives.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm  = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1290 
/* Send an L2CAP Create Channel Request for @chan, asking the peer to
 * create the channel on the AMP controller identified by @amp_id.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_create_chan_req req = {
		.scid   = cpu_to_le16(chan->scid),
		.psm    = chan->psm,
		.amp_id = amp_id,
	};

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1303 
/* Prepare an ERTM channel for an AMP channel move: stop all ERTM
 * timers, reset retransmission bookkeeping and park the TX/RX state
 * machines in their move states. No-op for non-ERTM channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Frames already sent at least once are reset to a single try;
	 * the first never-sent frame marks the end of that prefix.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move completes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1338 
/* Finish an AMP channel move: return the move state machine to stable
 * and, for ERTM channels, resynchronize with the peer — the initiator
 * polls (P-bit) and waits for the F-bit, the responder waits for the
 * peer's poll.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1360 
/* Transition @chan to BT_CONNECTED and notify its owner via ops->ready.
 * For credit-based modes the owner is first suspended if there are no
 * TX credits yet.
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* No credits to send with yet: block the owner's TX path */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);
		break;
	}

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1386 
/* Send an LE Credit Based Connection Request for @chan. Guarded by
 * FLAG_LE_CONN_REQ_SENT so the request is issued at most once per
 * channel; initializes LE flow control state before sending.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	/* Default the incoming MTU to the link MTU if unset */
	if (!chan->imtu)
		chan->imtu = chan->conn->mtu;

	l2cap_le_flowctl_init(chan, 0);

	req.psm     = chan->psm;
	req.scid    = cpu_to_le16(chan->scid);
	req.mtu     = cpu_to_le16(chan->imtu);
	req.mps     = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1411 
/* Accumulator used while building an Enhanced Credit Based Connection
 * Request that may batch several deferred channels into one PDU.
 */
struct l2cap_ecred_conn_data {
	struct {
		struct l2cap_ecred_conn_req req;
		__le16 scid[5];	/* up to 5 additional SCIDs in one request */
	} __packed pdu;
	struct l2cap_chan *chan;	/* initiating channel */
	struct pid *pid;		/* peer PID; batched chans must match */
	int count;			/* number of SCIDs filled in so far */
};
1421 
/* Channel-list callback used by l2cap_ecred_connect(): fold another
 * deferred channel into the ECRED request being built in @data if it
 * matches the initiating channel's PSM and peer PID and is still in
 * BT_CONNECT with no ident assigned.
 */
static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_conn_data *conn = data;
	struct pid *pid;

	/* Skip the channel that initiated the request */
	if (chan == conn->chan)
		return;

	if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags))
		return;

	pid = chan->ops->get_peer_pid(chan);

	/* Only add deferred channels with the same PID/PSM */
	if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident ||
	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
		return;

	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	l2cap_ecred_init(chan, 0);

	/* Set the same ident so we can match on the rsp */
	chan->ident = conn->chan->ident;

	/* Include all channels deferred */
	conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid);

	conn->count++;
}
1453 
l2cap_ecred_connect(struct l2cap_chan * chan)1454 static void l2cap_ecred_connect(struct l2cap_chan *chan)
1455 {
1456 	struct l2cap_conn *conn = chan->conn;
1457 	struct l2cap_ecred_conn_data data;
1458 
1459 	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
1460 		return;
1461 
1462 	if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
1463 		return;
1464 
1465 	l2cap_ecred_init(chan, 0);
1466 
1467 	memset(&data, 0, sizeof(data));
1468 	data.pdu.req.psm     = chan->psm;
1469 	data.pdu.req.mtu     = cpu_to_le16(chan->imtu);
1470 	data.pdu.req.mps     = cpu_to_le16(chan->mps);
1471 	data.pdu.req.credits = cpu_to_le16(chan->rx_credits);
1472 	data.pdu.scid[0]     = cpu_to_le16(chan->scid);
1473 
1474 	chan->ident = l2cap_get_ident(conn);
1475 	data.pid = chan->ops->get_peer_pid(chan);
1476 
1477 	data.count = 1;
1478 	data.chan = chan;
1479 	data.pid = chan->ops->get_peer_pid(chan);
1480 
1481 	__l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data);
1482 
1483 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
1484 		       sizeof(data.pdu.req) + data.count * sizeof(__le16),
1485 		       &data.pdu);
1486 }
1487 
/* Start an LE channel once link security allows it: fixed channels
 * (no PSM) become ready immediately; connecting channels issue either
 * an ECRED or an LE credit based connection request depending on mode.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	/* Security elevation may still be in progress; we are called
	 * again when it completes.
	 */
	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT) {
		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
			l2cap_ecred_connect(chan);
		else
			l2cap_le_connect(chan);
	}
}
1507 
/* Kick off channel establishment on the appropriate transport:
 * AMP discovery for AMP-capable channels, the LE path for LE links,
 * or a plain BR/EDR Connection Request otherwise.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else if (chan->conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
1519 
/* Send an Information Request for the peer's feature mask, at most once
 * per connection, arming the info timer so the exchange cannot stall
 * channel setup forever.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	struct l2cap_info_req req;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1537 
l2cap_check_enc_key_size(struct hci_conn * hcon)1538 static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1539 {
1540 	/* The minimum encryption key size needs to be enforced by the
1541 	 * host stack before establishing any L2CAP connections. The
1542 	 * specification in theory allows a minimum of 1, but to align
1543 	 * BR/EDR and LE transports, a minimum of 7 is chosen.
1544 	 *
1545 	 * This check might also be called for unencrypted connections
1546 	 * that have no key size requirements. Ensure that the link is
1547 	 * actually encrypted before enforcing a key size.
1548 	 */
1549 	return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
1550 		hcon->enc_key_size >= hcon->hdev->min_enc_key_size);
1551 }
1552 
/* Drive channel setup for @chan. LE links go straight to the LE path;
 * BR/EDR links first complete the feature-mask info exchange, then the
 * security and key-size checks, before the connection is started.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	/* Feature mask not requested yet: start the exchange; setup
	 * resumes when the info response (or timeout) arrives.
	 */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (!l2cap_chan_check_security(chan, true) ||
	    !__l2cap_no_conn_pending(chan))
		return;

	/* Weak encryption key: arm the disconnect timer instead of
	 * proceeding with the connection.
	 */
	if (l2cap_check_enc_key_size(conn->hcon))
		l2cap_start_connection(chan);
	else
		__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
}
1579 
l2cap_mode_supported(__u8 mode,__u32 feat_mask)1580 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1581 {
1582 	u32 local_feat_mask = l2cap_feat_mask;
1583 	if (!disable_ertm)
1584 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1585 
1586 	switch (mode) {
1587 	case L2CAP_MODE_ERTM:
1588 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1589 	case L2CAP_MODE_STREAMING:
1590 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1591 	default:
1592 		return 0x00;
1593 	}
1594 }
1595 
/* Send a Disconnection Request for @chan and move it to BT_DISCONN
 * with @err recorded. ERTM timers are stopped first; A2MP channels
 * have no signalling disconnect and only change state.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1622 
1623 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and push its setup forward, typically
 * called once the feature-mask info exchange completes:
 * - connectionless channels become ready immediately;
 * - BT_CONNECT channels are security/key-size checked and then either
 *   started or closed;
 * - BT_CONNECT2 (incoming, awaiting accept) channels get a Connection
 *   Response and, on success, an initial Configure Request.
 * Runs with conn->chan_lock held and each channel locked in turn.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Mode unsupported by the peer and the device
			 * cannot fall back: close the channel.
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			if (l2cap_check_enc_key_size(conn->hcon))
				l2cap_start_connection(chan);
			else
				l2cap_chan_close(chan, ECONNREFUSED);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Wait for userspace to accept */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				/* Authentication still pending */
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Begin configuration of the accepted channel */
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1703 
/* LE-specific part of connection-ready handling: elevate security for
 * outgoing pairing and, as peripheral, request a connection parameter
 * update if the current interval is outside the configured bounds.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE peripheral connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1736 
/* Called when the underlying HCI link is up: start the feature-mask
 * exchange on ACL links, walk all channels to progress their setup,
 * run LE-specific ready handling, and release any RX frames queued
 * before the connection was ready.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels follow their own setup procedure */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless channels only wait for the
			 * feature-mask exchange to finish.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	/* Process frames that arrived before the conn was ready */
	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1777 
/* Notify sockets that we cannot guarantee reliability anymore */
/* Propagate @err to every channel on @conn that demanded a reliable
 * link (FLAG_FORCE_RELIABLE); other channels are left untouched.
 */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1794 
l2cap_info_timeout(struct work_struct * work)1795 static void l2cap_info_timeout(struct work_struct *work)
1796 {
1797 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1798 					       info_timer.work);
1799 
1800 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1801 	conn->info_ident = 0;
1802 
1803 	l2cap_conn_start(conn);
1804 }
1805 
1806 /*
1807  * l2cap_user
1808  * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1809  * callback is called during registration. The ->remove callback is called
1810  * during unregistration.
1811  * An l2cap_user object can either be explicitly unregistered or when the
1812  * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1813  * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1814  * External modules must own a reference to the l2cap_conn object if they intend
1815  * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1816  * any time if they don't.
1817  */
1818 
/* Register @user on @conn, invoking its ->probe callback. Returns
 * -EINVAL if the user is already registered, -ENODEV if the connection
 * has already been torn down, or the probe callback's error.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	/* Non-empty list node means the user is already registered */
	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
1855 EXPORT_SYMBOL(l2cap_register_user);
1856 
/* Unregister @user from @conn and invoke its ->remove callback.
 * Safe to call for a user that was never (or already un-) registered:
 * an empty list node makes this a no-op.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
1872 EXPORT_SYMBOL(l2cap_unregister_user);
1873 
/* Unregister every user on @conn, invoking each ->remove callback.
 * Pops entries one at a time (rather than a list walk) so the list
 * stays consistent even if a remove callback touches it.
 */
static void l2cap_unregister_all_users(struct l2cap_conn *conn)
{
	struct l2cap_user *user;

	while (!list_empty(&conn->users)) {
		user = list_first_entry(&conn->users, struct l2cap_user, list);
		list_del_init(&user->list);
		user->remove(conn, user);
	}
}
1884 
/* Tear down the L2CAP connection attached to @hcon, failing all of its
 * channels with @err. Cancels pending work, unregisters users, kills
 * every channel, drops the hci_chan and finally releases the conn
 * reference. Teardown order here is deliberate; see inline comments.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the chan so close() can run after it is unlinked */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		chan->ops->close(chan);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	/* Detach from the hci_conn before the final reference drop */
	hcon->l2cap_data = NULL;
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1940 
/* kref release callback: drop the hci_conn reference held by the
 * l2cap_conn and free the structure itself.
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1948 
/* Take a reference on @conn and return it, for convenient chaining. */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
1955 
/* Drop a reference on @conn; frees it via l2cap_conn_free() when the
 * last reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
1960 EXPORT_SYMBOL(l2cap_conn_put);
1961 
1962 /* ---- Socket interface ---- */
1963 
1964 /* Find socket with psm and source / destination bdaddr.
1965  * Returns closest match.
1966  */
/* Find a global (listening) channel matching @psm, source/destination
 * addresses and link type. An exact src+dst match wins immediately;
 * otherwise the closest wildcard (BDADDR_ANY) match is returned. The
 * returned channel has its refcount raised via
 * l2cap_chan_hold_unless_zero(); NULL if nothing matched or the match
 * was already being freed.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src,
						   bdaddr_t *dst,
						   u8 link_type)
{
	struct l2cap_chan *c, *tmp, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry_safe(c, tmp, &chan_list, global_l) {
		if (state && c->state != state)
			continue;

		/* Address type must match the requested transport */
		if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
			continue;

		if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
			continue;

		if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match. */
			src_match = !bacmp(&c->src, src);
			dst_match = !bacmp(&c->dst, dst);
			if (src_match && dst_match) {
				/* Skip channels already going away */
				if (!l2cap_chan_hold_unless_zero(c))
					continue;

				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match */
			src_any = !bacmp(&c->src, BDADDR_ANY);
			dst_any = !bacmp(&c->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	if (c1)
		c1 = l2cap_chan_hold_unless_zero(c1);

	read_unlock(&chan_list_lock);

	return c1;
}
2017 
/* ERTM monitor timer handler: feed a MONITOR_TO event into the TX
 * state machine, unless the channel has already been detached from its
 * connection. Always drops the reference taken when the timer was armed.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
2038 
l2cap_retrans_timeout(struct work_struct * work)2039 static void l2cap_retrans_timeout(struct work_struct *work)
2040 {
2041 	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2042 					       retrans_timer.work);
2043 
2044 	BT_DBG("chan %p", chan);
2045 
2046 	l2cap_chan_lock(chan);
2047 
2048 	if (!chan->conn) {
2049 		l2cap_chan_unlock(chan);
2050 		l2cap_chan_put(chan);
2051 		return;
2052 	}
2053 
2054 	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
2055 	l2cap_chan_unlock(chan);
2056 	l2cap_chan_put(chan);
2057 }
2058 
/* Transmit all queued PDUs in streaming mode.  Streaming mode has no
 * acknowledgements or retransmission: each frame is packed, optionally
 * given an FCS, and sent exactly once.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	/* Hold off while a channel move is in progress */
	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Streaming mode never acknowledges, so reqseq stays 0 */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* FCS is computed after the control field is in place */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
2097 
/* Transmit new (not previously sent) I-frames in ERTM mode, starting
 * at tx_send_head, for as long as the remote transmit window has room
 * and the channel stays in the XMIT state.
 *
 * Returns the number of frames sent, 0 when sending is not currently
 * possible, or -ENOTCONN if the channel is not connected.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer reported busy (RNR): hold transmission until it recovers */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	/* Hold off while a channel move is in progress */
	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggy-back the F-bit if one is owed to the peer */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame also acknowledges what we received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the original skb stays on tx_q for
		 * possible retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
2167 
/* Retransmit every sequence number queued on retrans_list.  Gives up
 * and clears the list if the retry limit is exceeded (the channel is
 * then disconnected) or an skb copy/clone fails.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		/* The retry count lives in the queued skb's cb; the rest of
		 * the control is updated on a stack copy and written into the
		 * outgoing buffer below.
		 */
		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the acknowledgement and F-bit for this resend */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
						L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2248 
/* Queue a single frame (control->reqseq) for retransmission and run
 * the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2257 
/* Retransmit all unacked frames, starting at the frame whose txseq is
 * control->reqseq and stopping before tx_send_head (the first unsent
 * frame).
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* A poll (P=1) obliges us to answer with the F-bit set */
	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the starting point: the frame matching reqseq, or
		 * tx_send_head if reqseq is not queued.
		 */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything from there up to the first unsent frame */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2291 
/* Acknowledge received I-frames.  Sends RNR when we are locally busy,
 * an immediate RR once the unacked backlog reaches 3/4 of the ack
 * window, and otherwise defers the acknowledgement to the ack timer.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the peer to stop with RNR */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;	/* threshold = 3 * ack_win */
		threshold >>= 2;		/* threshold = ack_win * 3/4 */

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Otherwise batch: send the ack when the timer fires */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2341 
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into @skb itself, anything beyond spills into continuation
 * fragments (each at most conn->mtu bytes) chained on skb's frag_list.
 *
 * Returns the number of bytes consumed, or a negative error.  Each
 * fragment is linked into @skb before being filled, so on error the
 * caller's kfree_skb(skb) also frees all fragments allocated so far.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (!copy_from_iter_full(skb_put(*frag, count), count,
				   &msg->msg_iter))
			return -EFAULT;

		sent += count;
		len  -= count;

		/* Account the fragment's bytes in the parent skb totals */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
2385 
/* Build a connectionless PDU: basic L2CAP header, then the PSM, then
 * the payload copied from @msg (overflow fragments chained by
 * l2cap_skbuff_fromiovec).  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	/* The head skb holds at most one HCI MTU worth of data */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2417 
/* Build a basic-mode (B-frame) PDU: L2CAP header followed by the
 * payload copied from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	/* The head skb holds at most one HCI MTU worth of data */
	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2447 
/* Build an ERTM/streaming I-frame PDU: L2CAP header, zeroed control
 * field (filled in at transmit time), optional SDU length (only on SAR
 * start frames, when @sdulen is non-zero), then the payload.  Space
 * for the FCS is accounted in @hlen but appended by the sender.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2501 
/* Segment an SDU from @msg into I-frame PDUs queued on @seg_queue,
 * tagging each with the appropriate SAR value (UNSEGMENTED, or
 * START/CONTINUE/END).  Returns 0 on success or a negative error (the
 * queue is purged on failure).
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		/* Fits in a single PDU: no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First PDU of a segmented SDU carries the total length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START frame carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2568 
/* Build an LE flow-control (K-frame) PDU: L2CAP header, optional SDU
 * length (first PDU of a segmented SDU, when @sdulen is non-zero),
 * then the payload copied from @msg.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2611 
/* Segment an SDU for LE credit-based flow control into PDUs queued on
 * @seg_queue.  The first PDU carries the total SDU length; subsequent
 * PDUs reclaim those L2CAP_SDULEN_SIZE bytes for payload.  Returns 0
 * or a negative error (the queue is purged on failure).
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		/* After the first PDU the SDU length field is dropped,
		 * freeing its bytes for payload in later PDUs.
		 */
		if (sdu_len) {
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2647 
l2cap_le_flowctl_send(struct l2cap_chan * chan)2648 static void l2cap_le_flowctl_send(struct l2cap_chan *chan)
2649 {
2650 	int sent = 0;
2651 
2652 	BT_DBG("chan %p", chan);
2653 
2654 	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2655 		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2656 		chan->tx_credits--;
2657 		sent++;
2658 	}
2659 
2660 	BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits,
2661 	       skb_queue_len(&chan->tx_q));
2662 }
2663 
/* Send @len bytes from @msg on @chan, dispatching on the channel mode
 * (connectionless, LE/extended flow control, basic, ERTM, streaming).
 * Returns the number of bytes accepted or a negative error
 * (-ENOTCONN, -EMSGSIZE, -EBADFD, or an allocation/copy error).
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel may have been torn down during segmentation */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		l2cap_le_flowctl_send(chan);

		/* Out of credits: ask the socket layer to stop feeding us */
		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
2767 
/* Send an SREJ S-frame for every sequence number missing between
 * expected_tx_seq and @txseq that is not already buffered in srej_q,
 * recording each requested seq on srej_list.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Skip frames already received out of order */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2790 
l2cap_send_srej_tail(struct l2cap_chan * chan)2791 static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2792 {
2793 	struct l2cap_ctrl control;
2794 
2795 	BT_DBG("chan %p", chan);
2796 
2797 	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2798 		return;
2799 
2800 	memset(&control, 0, sizeof(control));
2801 	control.sframe = 1;
2802 	control.super = L2CAP_SUPER_SREJ;
2803 	control.reqseq = chan->srej_list.tail;
2804 	l2cap_send_sframe(chan, &control);
2805 }
2806 
/* Re-send SREJ S-frames for every outstanding sequence number, popping
 * each from srej_list and appending it back, which rotates the list.
 * Stops at @txseq, at an empty list, or after one full rotation.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Pop + append rotates the list without losing entries */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2832 
/* Process an incoming acknowledgement: free every frame in tx_q whose
 * txseq lies between expected_ack_seq and @reqseq, and stop the
 * retransmission timer once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing to do if nothing is pending or the ack is a duplicate */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2864 
/* Abandon the SREJ_SENT receive state: drop the out-of-order frames
 * collected so far and resume normal reception at buffer_seq.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2874 
/* ERTM transmit state machine, XMIT state: the channel may transmit
 * I-frames freely (subject to the remote window).
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue the new segments and transmit immediately */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* l2cap_send_ack() sends RNR while LOCAL_BUSY is set */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* We told the peer to stop (RNR); poll it with
			 * RR(P=1) and wait for the F-bit response.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Poll the peer and enter WAIT_F until it answers */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timer fired: poll for the peer's status */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2946 
/* ERTM transmit state machine, WAIT_F state: a poll (P=1) S-frame has
 * been sent and we are waiting for the peer's F=1 response.  New data
 * is queued but not transmitted until the poll is answered.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR went out while busy; re-poll the peer with
			 * RR(P=1) now that we can receive again.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		fallthrough;

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			/* Poll answered: stop monitoring and resume XMIT */
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			/* Format fixed: was the malformed "0x2.2%x" */
			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
		}
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_EV_MONITOR_TO:
		/* Keep re-polling until max_tx is exhausted (0 = no limit) */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
		} else {
			l2cap_send_disconn_req(chan, ECONNABORTED);
		}
		break;
	default:
		break;
	}
}
3023 
/* Dispatch an ERTM transmit event to the handler for the channel's
 * current transmit state; events arriving in any other state are
 * silently ignored.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	if (chan->tx_state == L2CAP_TX_STATE_XMIT)
		l2cap_tx_state_xmit(chan, control, skbs, event);
	else if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
		l2cap_tx_state_wait_f(chan, control, skbs, event);
}
3042 
/* Feed a received reqseq (and F-bit) into the transmit state machine. */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
3049 
/* Feed only a received F-bit into the transmit state machine. */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
3056 
3057 /* Copy frame to all raw sockets on that connection */
l2cap_raw_recv(struct l2cap_conn * conn,struct sk_buff * skb)3058 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
3059 {
3060 	struct sk_buff *nskb;
3061 	struct l2cap_chan *chan;
3062 
3063 	BT_DBG("conn %p", conn);
3064 
3065 	mutex_lock(&conn->chan_lock);
3066 
3067 	list_for_each_entry(chan, &conn->chan_l, list) {
3068 		if (chan->chan_type != L2CAP_CHAN_RAW)
3069 			continue;
3070 
3071 		/* Don't send frame to the channel it came from */
3072 		if (bt_cb(skb)->l2cap.chan == chan)
3073 			continue;
3074 
3075 		nskb = skb_clone(skb, GFP_KERNEL);
3076 		if (!nskb)
3077 			continue;
3078 		if (chan->ops->recv(chan, nskb))
3079 			kfree_skb(nskb);
3080 	}
3081 
3082 	mutex_unlock(&conn->chan_lock);
3083 }
3084 
3085 /* ---- L2CAP signalling commands ---- */
/* Allocate and build a signalling command skb: L2CAP header plus
 * command header plus @dlen bytes of @data, with data beyond the first
 * conn->mtu bytes chained as continuation fragments on frag_list.
 * Returns NULL if the MTU cannot hold the headers or on allocation
 * failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links carry signalling on a dedicated fixed channel */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the head skb with as much payload as fits */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		skb_put_data(skb, data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		skb_put_data(*frag, data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and every fragment chained so far */
	kfree_skb(skb);
	return NULL;
}
3151 
/* Decode the configuration option at *ptr and advance *ptr past it.
 *
 * The option type and payload length are reported via @type/@olen.
 * Fixed-size payloads (1, 2, 4 bytes) are returned by value in *val;
 * any other length returns a pointer to the in-place payload cast into
 * *val, so the caller must copy it before reusing the buffer.
 *
 * Returns the total bytes consumed (option header + payload).
 * NOTE(review): opt->len comes straight off the wire with no bounds
 * check here — callers are expected to validate, e.g. by breaking out
 * when their running remaining-length goes negative.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length payload: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
3185 
/* Append one configuration option to the output buffer at *ptr.
 *
 * @size is the space left in the buffer; an option that does not fit
 * is silently dropped (the PDU is then simply sent without it).
 * Payloads of 1, 2 and 4 bytes are written from @val directly; any
 * other length treats @val as a pointer to @len payload bytes.
 * On success *ptr is advanced past the written option.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

	if (size < L2CAP_CONF_OPT_SIZE + len)
		return;

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length payload: @val is a pointer */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
3218 
/* Append an Extended Flow Specification option for @chan.
 *
 * ERTM channels advertise their locally configured service settings;
 * Streaming channels use a fixed best-effort spec with zero access
 * latency and flush timeout.  Any other mode emits no option.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs, size);
}
3249 
/* Delayed-work handler for the ERTM acknowledgment timer.
 *
 * If received I-frames are still unacknowledged when the timer fires,
 * send an RR/RNR supervisory frame so the peer gets its ack without
 * waiting for piggybacking.  Drops the channel reference that was
 * taken when the timer was armed.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	/* Distance between what we've buffered and what we've acked */
	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3269 
/* Reset the channel's sequence and SDU-reassembly state and, for ERTM
 * mode, allocate the SREJ and retransmission sequence lists sized to
 * the negotiated TX windows.
 *
 * Returns 0 on success (always for non-ERTM modes) or a negative errno
 * from the sequence-list allocation; on a partial failure the already
 * allocated srej_list is freed.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Start on the BR/EDR controller with no AMP move in progress */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3310 
/* Pick the channel mode to use: keep the requested ERTM/Streaming mode
 * when the remote's feature mask supports it, otherwise fall back to
 * Basic mode.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	if ((mode == L2CAP_MODE_STREAMING || mode == L2CAP_MODE_ERTM) &&
	    l2cap_mode_supported(mode, remote_feat_mask))
		return mode;

	return L2CAP_MODE_BASIC;
}
3323 
__l2cap_ews_supported(struct l2cap_conn * conn)3324 static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
3325 {
3326 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3327 		(conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
3328 }
3329 
__l2cap_efs_supported(struct l2cap_conn * conn)3330 static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
3331 {
3332 	return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3333 		(conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
3334 }
3335 
/* Fill in the RFC retransmission and monitor timeouts for @chan.
 *
 * AMP channels derive both from the controller's best-effort flush
 * timeout; BR/EDR channels use the spec default values.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout.  The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement.  ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit field range */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3373 
l2cap_txwin_setup(struct l2cap_chan * chan)3374 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3375 {
3376 	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
3377 	    __l2cap_ews_supported(chan->conn)) {
3378 		/* use extended control field */
3379 		set_bit(FLAG_EXT_CTRL, &chan->flags);
3380 		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3381 	} else {
3382 		chan->tx_win = min_t(u16, chan->tx_win,
3383 				     L2CAP_DEFAULT_TX_WINDOW);
3384 		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3385 	}
3386 	chan->ack_win = chan->tx_win;
3387 }
3388 
l2cap_mtu_auto(struct l2cap_chan * chan)3389 static void l2cap_mtu_auto(struct l2cap_chan *chan)
3390 {
3391 	struct hci_conn *conn = chan->conn->hcon;
3392 
3393 	chan->imtu = L2CAP_DEFAULT_MIN_MTU;
3394 
3395 	/* The 2-DH1 packet has between 2 and 56 information bytes
3396 	 * (including the 2-byte payload header)
3397 	 */
3398 	if (!(conn->pkt_type & HCI_2DH1))
3399 		chan->imtu = 54;
3400 
3401 	/* The 3-DH1 packet has between 2 and 85 information bytes
3402 	 * (including the 2-byte payload header)
3403 	 */
3404 	if (!(conn->pkt_type & HCI_3DH1))
3405 		chan->imtu = 83;
3406 
3407 	/* The 2-DH3 packet has between 2 and 369 information bytes
3408 	 * (including the 2-byte payload header)
3409 	 */
3410 	if (!(conn->pkt_type & HCI_2DH3))
3411 		chan->imtu = 367;
3412 
3413 	/* The 3-DH3 packet has between 2 and 554 information bytes
3414 	 * (including the 2-byte payload header)
3415 	 */
3416 	if (!(conn->pkt_type & HCI_3DH3))
3417 		chan->imtu = 552;
3418 
3419 	/* The 2-DH5 packet has between 2 and 681 information bytes
3420 	 * (including the 2-byte payload header)
3421 	 */
3422 	if (!(conn->pkt_type & HCI_2DH5))
3423 		chan->imtu = 679;
3424 
3425 	/* The 3-DH5 packet has between 2 and 1023 information bytes
3426 	 * (including the 2-byte payload header)
3427 	 */
3428 	if (!(conn->pkt_type & HCI_3DH5))
3429 		chan->imtu = 1021;
3430 }
3431 
/* Build an outgoing Configure Request for @chan into @data.
 *
 * On the very first exchange the channel mode is (re)selected against
 * the remote feature mask; later requests keep the chosen mode.
 * Depending on the final mode, MTU, RFC, FCS, EFS and extended window
 * size options are appended, each bounded by the remaining buffer
 * space (endptr - ptr).
 *
 * Returns the number of bytes written (request header + options).
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	void *endptr = data + data_size;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection only happens before any config traffic */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* "State 2" devices keep their configured mode as-is */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		fallthrough;
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU) {
		/* imtu == 0 means: derive it from the link packet types */
		if (!chan->imtu)
			l2cap_mtu_auto(chan);
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
				   endptr - ptr);
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Only send an explicit Basic RFC when the peer knows
		 * about the other modes at all.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.max_transmit    = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* Largest PDU that still fits the connection MTU after
		 * extended header, SDU length and FCS overhead.
		 */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		/* Extended control field: send the real window in EWS */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan, endptr - ptr);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs, endptr - ptr);
			}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3557 
/* Parse the peer's accumulated Configure Request (chan->conf_req,
 * chan->conf_len) and build our Configure Response into @data.
 *
 * Options with an unexpected length are skipped; unknown non-hint
 * options are echoed back and the result set to L2CAP_CONF_UNKNOWN.
 * The channel mode is re-negotiated on the first pass, and for
 * ERTM/Streaming modes the remote's RFC/EFS parameters are captured
 * into the channel.
 *
 * Returns the length of the response written, or -ECONNREFUSED when
 * no agreement is possible (mode/EFS conflict that cannot be retried).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *endptr = data + data_size;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
		/* Option claimed more bytes than remain: stop parsing */
		if (len < 0)
			break;

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			remote_efs = 1;
			memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			/* EWS requires A2MP support on our side */
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;
			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;
			/* Echo the unknown option type back to the peer */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		/* State 2 devices cannot change their required mode */
		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		/* Mode still disputed after a full round: give up */
		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc, endptr - ptr);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			/* EWS (if received) supersedes the RFC window */
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp the peer's PDU size to what fits our MTU */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc, endptr - ptr);

			if (remote_efs &&
			    test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs, endptr - ptr);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0);

	return ptr - data;
}
3783 
/* Parse a Configure Response from the peer (@rsp, @len) and build the
 * follow-up Configure Request into @data, adopting the values the
 * remote proposed where acceptable.
 *
 * *result may be downgraded to L2CAP_CONF_UNACCEPT (MTU below the
 * allowed minimum).  Options with unexpected lengths are ignored.
 *
 * Returns the length of the new request, or -ECONNREFUSED when the
 * response forces a mode we cannot accept.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
				void *data, size_t size, u16 *result)
{
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	void *endptr = data + size;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;

	BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		/* Option claimed more bytes than remain: stop parsing */
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_MTU:
			if (olen != 2)
				break;
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				chan->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				chan->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
					   endptr - ptr);
			break;

		case L2CAP_CONF_FLUSH_TO:
			if (olen != 2)
				break;
			chan->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
					   chan->flush_to, endptr - ptr);
			break;

		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			/* State 2 devices refuse any other mode */
			if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
			    rfc.mode != chan->mode)
				return -ECONNREFUSED;
			chan->fcs = 0;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc, endptr - ptr);
			break;

		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			chan->ack_win = min_t(u16, val, chan->ack_win);
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win, endptr - ptr);
			break;

		case L2CAP_CONF_EFS:
			if (olen != sizeof(efs))
				break;
			memcpy(&efs, (void *)val, olen);
			/* Incompatible service type cannot be retried */
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype)
				return -ECONNREFUSED;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
					   (unsigned long) &efs, endptr - ptr);
			break;

		case L2CAP_CONF_FCS:
			if (olen != 1)
				break;
			if (*result == L2CAP_CONF_PENDING)
				if (val == L2CAP_FCS_NONE)
					set_bit(CONF_RECV_NO_FCS,
						&chan->conf_state);
			break;
		}
	}

	/* A Basic-mode channel cannot be talked into another mode */
	if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
		return -ECONNREFUSED;

	chan->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
			if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
				chan->ack_win = min_t(u16, chan->ack_win,
						      rfc.txwin_size);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->local_msdu = le16_to_cpu(efs.msdu);
				chan->local_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
				chan->local_flush_to =
					le32_to_cpu(efs.flush_to);
			}
			break;

		case L2CAP_MODE_STREAMING:
			chan->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid   = cpu_to_le16(chan->dcid);
	req->flags  = cpu_to_le16(0);

	return ptr - data;
}
3901 
/* Build a minimal Configure Response (no options) with the given
 * @result and @flags.  Returns the number of bytes written, i.e. just
 * the response header since ptr is never advanced past rsp->data.
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
				u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(flags);

	return ptr - data;
}
3916 
/* Send the deferred LE Credit Based Connection Response for a channel
 * that has now been accepted, advertising our MTU, MPS and initial
 * RX credits.  Uses the ident saved from the original request.
 */
void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p", chan);

	rsp.dcid    = cpu_to_le16(chan->scid);
	rsp.mtu     = cpu_to_le16(chan->imtu);
	rsp.mps     = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
3933 
/* Channel-list callback: tally the readiness of one channel sharing a
 * deferred ECRED request.  *result counts channels still pending
 * accept; it becomes -ECONNREFUSED when any channel was refused.
 */
static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data)
{
	int *result = data;

	/* Stop once a refusal was recorded, or skip channels that
	 * carry their own pending request.
	 */
	if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	if (chan->state == BT_CONNECT2) {
		/* If channel still pending accept add to result */
		(*result)++;
	} else if (chan->state != BT_CONNECTED) {
		/* If not connected or pending accept it has been refused */
		*result = -ECONNREFUSED;
	}
}
3954 
/* Scratch area for building a deferred Enhanced Credit Based
 * connection response: the wire-format PDU (response header plus
 * room for up to L2CAP_ECRED_MAX_CID destination CIDs appended after
 * it) and a count of CIDs filled in so far.
 */
struct l2cap_ecred_rsp_data {
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 scid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	int count;
};
3962 
/* Channel-list callback: fold one channel into the deferred ECRED
 * connection response being built in @data.
 *
 * Channels that sent their own request are skipped.  On a successful
 * overall result the channel's source CID is appended to the
 * response; on failure the channel is torn down instead.
 */
static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data)
{
	struct l2cap_ecred_rsp_data *rsp = data;

	if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
		return;

	/* Reset ident so only one response is sent */
	chan->ident = 0;

	/* Include all channels pending with the same ident */
	if (!rsp->pdu.rsp.result)
		rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid);
	else
		l2cap_chan_del(chan, ECONNRESET);
}
3979 
/* Send the deferred response for an Enhanced Credit Based Connection
 * Request that may have created several channels under one ident.
 *
 * First verify the state of every channel sharing the ident: while
 * any is still pending acceptance the response stays deferred; if any
 * was refused, the whole request is answered with
 * L2CAP_CR_LE_AUTHORIZATION.  Then a single response covering all the
 * channels is built and sent.
 */
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_ecred_rsp_data data;
	u16 id = chan->ident;
	int result = 0;

	/* ident 0 means the response was already sent */
	if (!id)
		return;

	BT_DBG("chan %p id %d", chan, id);

	memset(&data, 0, sizeof(data));

	data.pdu.rsp.mtu     = cpu_to_le16(chan->imtu);
	data.pdu.rsp.mps     = cpu_to_le16(chan->mps);
	data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
	data.pdu.rsp.result  = cpu_to_le16(L2CAP_CR_LE_SUCCESS);

	/* Verify that all channels are ready */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result);

	if (result > 0)
		return;

	if (result < 0)
		data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION);

	/* Build response */
	__l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data);

	l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP,
		       sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)),
		       &data.pdu);
}
4015 
/* Send the deferred Connection Response (or Create Channel Response
 * when the channel sits on an AMP controller) after the channel has
 * been accepted, then start configuration by sending our first
 * Configure Request unless one was already sent.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];
	u8 rsp_code;

	rsp.scid   = cpu_to_le16(chan->dcid);
	rsp.dcid   = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	/* AMP channels answer with a Create Channel Response instead */
	if (chan->hs_hcon)
		rsp_code = L2CAP_CREATE_CHAN_RSP;
	else
		rsp_code = L2CAP_CONN_RSP;

	BT_DBG("chan %p rsp_code %u", chan, rsp_code);

	l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);

	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
		       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
	chan->num_conf_req++;
}
4044 
/* Extract the final RFC (and extended window size) parameters from a
 * successful Configure Response and apply them to @chan.  Only
 * meaningful for ERTM/Streaming channels; other modes return early.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
		/* Option claimed more bytes than remain: stop parsing */
		if (len < 0)
			break;

		switch (type) {
		case L2CAP_CONF_RFC:
			if (olen != sizeof(rfc))
				break;
			memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			if (olen != 2)
				break;
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* EWS takes precedence when the extended control field
		 * is in use; otherwise the RFC window applies.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps    = le16_to_cpu(rfc.max_pdu_size);
	}
}
4100 
/* Handle an incoming Command Reject.
 *
 * Only "command not understood" rejects are acted on: when one
 * answers our outstanding Information Request, stop the info timer,
 * mark feature discovery as done and proceed with starting the
 * pending channels.  Returns 0, or -EPROTO for a truncated PDU.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
		return 0;

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
	    cmd->ident == conn->info_ident) {
		cancel_delayed_work(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
4125 
/* Handle an incoming L2CAP Connect Request (or, via
 * l2cap_create_channel_req(), an AMP Create Channel Request) and send
 * the response with opcode @rsp_code.
 *
 * Returns the newly created channel, or NULL when the request was
 * rejected (unknown PSM, security block, invalid or in-use source CID,
 * or channel allocation failure).
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for valid dynamic CID range (as per Erratum 3253) */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		goto response;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		goto response;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	/* __l2cap_chan_add() allocated our local CID */
	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Userspace must authorize the connection */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security elevation still in progress */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Remote features unknown yet; keep the connection pending
		 * until the information exchange below completes.
		 */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid   = cpu_to_le16(scid);
	rsp.dcid   = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Start the feature-mask information exchange if it has not been
	 * done for this connection yet.
	 */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	/* On immediate success, kick off configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	return chan;
}
4268 
/* Handle an L2CAP Connect Request: notify the management interface of
 * the new ACL connection (once) and delegate to l2cap_connect().
 */
static int l2cap_connect_req(struct l2cap_conn *conn,
			     struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	/* The request must carry at least the fixed request fields */
	if (cmd_len < sizeof(struct l2cap_conn_req))
		return -EPROTO;

	/* Report the device as connected to mgmt exactly once */
	hci_dev_lock(hdev);
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
		mgmt_device_connected(hdev, hcon, 0, NULL, 0);
	hci_dev_unlock(hdev);

	l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
4287 
/* Handle an L2CAP Connect Response (or AMP Create Channel Response).
 *
 * Looks up the local channel by scid — or, when the peer has not echoed
 * an scid, by the command ident — then either starts configuration
 * (success), marks the connection pending, or deletes the channel (any
 * other result code).
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	dcid   = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	/* On success the peer must have allocated the dcid from the
	 * dynamic CID range.
	 */
	if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	/* Take a reference, bailing out if the channel is already on its
	 * way to being freed.
	 */
	chan = l2cap_chan_hold_unless_zero(chan);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Reject a dcid already in use on this connection */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send our configure request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req, sizeof(req)), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code is a refusal */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
4376 
set_default_fcs(struct l2cap_chan * chan)4377 static inline void set_default_fcs(struct l2cap_chan *chan)
4378 {
4379 	/* FCS is enabled only in ERTM or streaming mode, if one or both
4380 	 * sides request it.
4381 	 */
4382 	if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
4383 		chan->fcs = L2CAP_FCS_NONE;
4384 	else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
4385 		chan->fcs = L2CAP_FCS_CRC16;
4386 }
4387 
static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
				    u8 ident, u16 flags)
{
	struct l2cap_conn *conn = chan->conn;
	int len;

	BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
	       flags);

	/* Local configuration is finished once this response goes out */
	clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
	set_bit(CONF_OUTPUT_DONE, &chan->conf_state);

	/* Build a successful configure response into @data and send it */
	len = l2cap_build_conf_rsp(chan, data, L2CAP_CONF_SUCCESS, flags);
	l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP, len, data);
}
4403 
/* Send an "invalid CID" command reject carrying the offending
 * source/destination CID pair.
 */
static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
				   u16 scid, u16 dcid)
{
	struct l2cap_cmd_rej_cid rej;

	rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
	/* Use cpu_to_le16() for consistency with the rest of this file
	 * (the original mixed in the low-level __cpu_to_le16() variant;
	 * both expand to the same conversion).
	 */
	rej.scid = cpu_to_le16(scid);
	rej.dcid = cpu_to_le16(dcid);

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
4415 
/* Handle an L2CAP Configure Request for one of our channels.
 *
 * Option data may be split across several requests (continuation flag);
 * partial data is accumulated in chan->conf_req until the final request
 * arrives and is then parsed and answered in one go.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	int len, err = 0;

	if (cmd_len < sizeof(*req))
		return -EPROTO;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
		return 0;
	}

	/* Configuration is only legal while the channel is being set up */
	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
	    chan->state != BT_CONNECTED) {
		cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
				       chan->dcid);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
			       l2cap_build_conf_rsp(chan, rsp,
			       L2CAP_CONF_SUCCESS, flags), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
	if (len < 0) {
		/* Unparseable options: tear the channel down */
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto unlock;
	}

	chan->ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP)
		chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	/* Both directions configured: the channel is ready */
	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);

		goto unlock;
	}

	/* Send our own configure request if we have not done so yet */
	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
		chan->num_conf_req++;
	}

	/* Got Conf Rsp PENDING from remote side and assume we sent
	   Conf Rsp PENDING in the code above */
	if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
	    test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {

		/* check compatibility */

		/* Send rsp for BR/EDR channel */
		if (!chan->hs_hcon)
			l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
		else
			chan->ident = cmd->ident;
	}

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4527 
/* Handle an L2CAP Configure Response for one of our channels.
 *
 * Success completes our half of the negotiation; PENDING defers it
 * (possibly waiting on an AMP logical link); UNACCEPT triggers a bounded
 * renegotiation; anything else aborts the channel.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct l2cap_chan *chan;
	int len = cmd_len - sizeof(*rsp);
	int err = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid   = __le16_to_cpu(rsp->scid);
	flags  = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
	       result, len);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		/* Pick up negotiated RFC parameters from the response */
		l2cap_conf_rfc_get(chan, rsp->data, len);
		clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
		break;

	case L2CAP_CONF_PENDING:
		set_bit(CONF_REM_CONF_PEND, &chan->conf_state);

		if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
			char buf[64];

			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   buf, sizeof(buf), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			if (!chan->hs_hcon) {
				/* BR/EDR: answer immediately */
				l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
							0);
			} else {
				/* AMP: wait for the logical link; the
				 * response goes out from the logical-link
				 * completion path.
				 */
				if (l2cap_check_efs(chan)) {
					amp_create_logical_link(chan);
					chan->ident = cmd->ident;
				}
			}
		}
		goto done;

	case L2CAP_CONF_UNACCEPT:
		/* Renegotiate, but only a bounded number of times */
		if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(chan, rsp->data, len,
						   req, sizeof(req), &result);
			if (len < 0) {
				l2cap_send_disconn_req(chan, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
				       L2CAP_CONF_REQ, len, req);
			chan->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		fallthrough;

	default:
		/* Rejected (or renegotiation exhausted): abort the channel */
		l2cap_chan_set_err(chan, ECONNRESET);

		__set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto done;
	}

	if (flags & L2CAP_CONF_FLAG_CONTINUATION)
		goto done;

	set_bit(CONF_INPUT_DONE, &chan->conf_state);

	/* Both directions configured: bring the channel up */
	if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
		set_default_fcs(chan);

		if (chan->mode == L2CAP_MODE_ERTM ||
		    chan->mode == L2CAP_MODE_STREAMING)
			err = l2cap_ertm_init(chan);

		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
	return err;
}
4641 
/* Handle an L2CAP Disconnect Request: acknowledge it, then shut down,
 * delete and close the channel.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid names our local channel, so look up by dcid */
	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Drop the channel lock before taking conn->chan_lock, then
	 * re-take both for l2cap_chan_del() — presumably the required
	 * lock order is conn->chan_lock before the channel lock;
	 * NOTE(review): confirm against l2cap_chan_del() locking rules.
	 */
	l2cap_chan_unlock(chan);
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNRESET);
	mutex_unlock(&conn->chan_lock);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4684 
/* Handle an L2CAP Disconnect Response: finish tearing down a channel we
 * previously asked the peer to disconnect (state BT_DISCONN).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		return 0;
	}

	/* Ignore stray responses for channels not awaiting disconnect */
	if (chan->state != BT_DISCONN) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return 0;
	}

	/* Drop the channel lock before taking conn->chan_lock, then
	 * re-take both for l2cap_chan_del() — presumably the required
	 * lock order is conn->chan_lock before the channel lock;
	 * NOTE(review): confirm against l2cap_chan_del() locking rules.
	 */
	l2cap_chan_unlock(chan);
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, 0);
	mutex_unlock(&conn->chan_lock);

	chan->ops->close(chan);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
4725 
/* Handle an L2CAP Information Request: answer feature-mask and
 * fixed-channel queries, reject everything else as not supported.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	switch (type) {
	case L2CAP_IT_FEAT_MASK: {
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* Advertise ERTM/streaming/FCS unless ERTM is disabled */
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
				| L2CAP_FEAT_FCS;
		/* Extended flow spec / window only with A2MP support */
		if (conn->local_fixed_chan & L2CAP_FC_A2MP)
			feat_mask |= L2CAP_FEAT_EXT_FLOW
				| L2CAP_FEAT_EXT_WINDOW;

		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	case L2CAP_IT_FIXED_CHAN: {
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

		rsp->type   = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		/* First octet is our fixed-channel map, rest is zero */
		rsp->data[0] = conn->local_fixed_chan;
		memset(rsp->data + 1, 0, 7);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
			       buf);
		break;
	}
	default: {
		struct l2cap_info_rsp rsp;

		rsp.type   = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
			       &rsp);
		break;
	}
	}

	return 0;
}
4776 
/* Handle an L2CAP Information Response to our pending information
 * request, record the peer's capabilities, and resume connection setup.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type   = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: give up on discovery and start anyway */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Follow up with a fixed-channel query when supported,
		 * otherwise discovery is finished.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		/* Discovery complete; record the peer's fixed channels */
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4839 
/* Handle an AMP Create Channel Request.
 *
 * amp_id 0 (AMP_ID_BREDR) falls back to an ordinary BR/EDR connect;
 * otherwise the given AMP controller is validated and the channel is
 * associated with its high-speed link. Invalid controllers are answered
 * with L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	/* Only valid when we advertise A2MP support */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP physical link to the peer must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4916 
static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_move_chan_req req = {
		.icid = cpu_to_le16(chan->scid),
		.dest_amp_id = dest_amp_id,
	};

	BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);

	/* Remember the ident so the matching response can be paired up */
	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
		       &req);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}
4935 
static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
{
	struct l2cap_move_chan_rsp rsp = {
		.icid = cpu_to_le16(chan->dcid),
		.result = cpu_to_le16(result),
	};

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	/* Answer using the ident of the move request being responded to */
	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
		       sizeof(rsp), &rsp);
}
4948 
static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_move_chan_cfm cfm = {
		.icid = cpu_to_le16(chan->scid),
		.result = cpu_to_le16(result),
	};

	BT_DBG("chan %p, result 0x%4.4x", chan, result);

	/* A confirm expects a response, so allocate a fresh ident */
	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm),
		       &cfm);

	__set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
}
4965 
static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
{
	struct l2cap_move_chan_cfm cfm = {
		.icid = cpu_to_le16(icid),
		.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED),
	};

	BT_DBG("conn %p, icid 0x%4.4x", conn, icid);

	/* No channel context here: confirm "unconfirmed" by ICID alone */
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
		       sizeof(cfm), &cfm);
}
4978 
static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
					 u16 icid)
{
	struct l2cap_move_chan_cfm_rsp rsp = {
		.icid = cpu_to_le16(icid),
	};

	BT_DBG("icid 0x%4.4x", icid);

	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
}
4989 
/* Drop the channel's references to its high-speed (AMP) logical link.
 * The actual link teardown is still a placeholder.
 */
static void __release_logical_link(struct l2cap_chan *chan)
{
	chan->hs_hchan = NULL;
	chan->hs_hcon = NULL;

	/* Placeholder - release the logical link */
}
4997 
l2cap_logical_fail(struct l2cap_chan * chan)4998 static void l2cap_logical_fail(struct l2cap_chan *chan)
4999 {
5000 	/* Logical link setup failed */
5001 	if (chan->state != BT_CONNECTED) {
5002 		/* Create channel failure, disconnect */
5003 		l2cap_send_disconn_req(chan, ECONNRESET);
5004 		return;
5005 	}
5006 
5007 	switch (chan->move_role) {
5008 	case L2CAP_MOVE_ROLE_RESPONDER:
5009 		l2cap_move_done(chan);
5010 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
5011 		break;
5012 	case L2CAP_MOVE_ROLE_INITIATOR:
5013 		if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
5014 		    chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
5015 			/* Remote has only sent pending or
5016 			 * success responses, clean up
5017 			 */
5018 			l2cap_move_done(chan);
5019 		}
5020 
5021 		/* Other amp move states imply that the move
5022 		 * has already aborted
5023 		 */
5024 		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5025 		break;
5026 	}
5027 }
5028 
/* Finish creating a channel over an AMP controller once its logical
 * link is up: send the deferred EFS configure response and, if the
 * remote side's configuration is already done, bring the channel up.
 */
static void l2cap_logical_finish_create(struct l2cap_chan *chan,
					struct hci_chan *hchan)
{
	struct l2cap_conf_rsp rsp;	/* scratch buffer for the response */

	chan->hs_hchan = hchan;
	chan->hs_hcon->l2cap_data = chan->conn;

	/* chan->ident was saved when the configure response was deferred */
	l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		int err;

		set_default_fcs(chan);

		err = l2cap_ertm_init(chan);
		if (err < 0)
			l2cap_send_disconn_req(chan, -err);
		else
			l2cap_chan_ready(chan);
	}
}
5051 
/* Advance the channel-move state machine after the AMP logical link for
 * a move operation has come up.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			/* Defer until local receive path is unblocked */
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
5085 
5086 /* Call with chan locked */
l2cap_logical_cfm(struct l2cap_chan * chan,struct hci_chan * hchan,u8 status)5087 void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
5088 		       u8 status)
5089 {
5090 	BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
5091 
5092 	if (status) {
5093 		l2cap_logical_fail(chan);
5094 		__release_logical_link(chan);
5095 		return;
5096 	}
5097 
5098 	if (chan->state != BT_CONNECTED) {
5099 		/* Ignore logical link if channel is on BR/EDR */
5100 		if (chan->local_amp_id != AMP_ID_BREDR)
5101 			l2cap_logical_finish_create(chan, hchan);
5102 	} else {
5103 		l2cap_logical_finish_move(chan, hchan);
5104 	}
5105 }
5106 
l2cap_move_start(struct l2cap_chan * chan)5107 void l2cap_move_start(struct l2cap_chan *chan)
5108 {
5109 	BT_DBG("chan %p", chan);
5110 
5111 	if (chan->local_amp_id == AMP_ID_BREDR) {
5112 		if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
5113 			return;
5114 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5115 		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5116 		/* Placeholder - start physical link setup */
5117 	} else {
5118 		chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
5119 		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5120 		chan->move_id = 0;
5121 		l2cap_move_setup(chan);
5122 		l2cap_send_move_chan_req(chan, 0);
5123 	}
5124 }
5125 
/* Continue channel creation once the AMP physical link attempt has a
 * @result: for an outgoing channel, send the create-channel (or fall
 * back to BR/EDR connect) request; for an incoming one, answer the
 * peer's pending create-channel request and start configuration.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* No FCS over the high-speed link */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		/* chan->ident still holds the peer's request ident */
		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Accepted: move on to configuration */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
			chan->num_conf_req++;
		}
	}
}
5177 
/* Initiator side: record the move target and ask the peer to move. */
static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
				   u8 remote_amp_id)
{
	/* Suspend data flow and prepare the channel for the move. */
	l2cap_move_setup(chan);

	chan->move_state = L2CAP_MOVE_WAIT_RSP;
	chan->move_id = local_amp_id;

	l2cap_send_move_chan_req(chan, remote_amp_id);
}
5187 
l2cap_do_move_respond(struct l2cap_chan * chan,int result)5188 static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
5189 {
5190 	struct hci_chan *hchan = NULL;
5191 
5192 	/* Placeholder - get hci_chan for logical link */
5193 
5194 	if (hchan) {
5195 		if (hchan->state == BT_CONNECTED) {
5196 			/* Logical link is ready to go */
5197 			chan->hs_hcon = hchan->conn;
5198 			chan->hs_hcon->l2cap_data = chan->conn;
5199 			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5200 			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
5201 
5202 			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5203 		} else {
5204 			/* Wait for logical link to be ready */
5205 			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5206 		}
5207 	} else {
5208 		/* Logical link not available */
5209 		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
5210 	}
5211 }
5212 
l2cap_do_move_cancel(struct l2cap_chan * chan,int result)5213 static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
5214 {
5215 	if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
5216 		u8 rsp_result;
5217 		if (result == -EINVAL)
5218 			rsp_result = L2CAP_MR_BAD_ID;
5219 		else
5220 			rsp_result = L2CAP_MR_NOT_ALLOWED;
5221 
5222 		l2cap_send_move_chan_rsp(chan, rsp_result);
5223 	}
5224 
5225 	chan->move_role = L2CAP_MOVE_ROLE_NONE;
5226 	chan->move_state = L2CAP_MOVE_STABLE;
5227 
5228 	/* Restart data transmission */
5229 	l2cap_ertm_send(chan);
5230 }
5231 
5232 /* Invoke with locked chan */
__l2cap_physical_cfm(struct l2cap_chan * chan,int result)5233 void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
5234 {
5235 	u8 local_amp_id = chan->local_amp_id;
5236 	u8 remote_amp_id = chan->remote_amp_id;
5237 
5238 	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
5239 	       chan, result, local_amp_id, remote_amp_id);
5240 
5241 	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
5242 		return;
5243 
5244 	if (chan->state != BT_CONNECTED) {
5245 		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
5246 	} else if (result != L2CAP_MR_SUCCESS) {
5247 		l2cap_do_move_cancel(chan, result);
5248 	} else {
5249 		switch (chan->move_role) {
5250 		case L2CAP_MOVE_ROLE_INITIATOR:
5251 			l2cap_do_move_initiate(chan, local_amp_id,
5252 					       remote_amp_id);
5253 			break;
5254 		case L2CAP_MOVE_ROLE_RESPONDER:
5255 			l2cap_do_move_respond(chan, result);
5256 			break;
5257 		default:
5258 			l2cap_do_move_cancel(chan, result);
5259 			break;
5260 		}
5261 	}
5262 }
5263 
/* Handle an incoming L2CAP Move Channel Request: validate the request,
 * detect collisions with a move we initiated ourselves, and either
 * start acting as move responder or reject the request.  Always sends
 * a Move Channel Response.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Moves are only meaningful when A2MP is supported locally */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	/* NOTE(review): l2cap_get_chan_by_dcid() appears to return the
	 * channel locked and referenced; the unlock/put at the end of
	 * this function pairs with that lookup.
	 */
	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown channel: reject by icid alone */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	/* Remember the ident so later responses can be matched */
	chan->ident = cmd->ident;

	/* Only dynamic ERTM/streaming channels not pinned to BR/EDR
	 * may be moved.
	 */
	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	/* Moving to the controller we are already on is an error */
	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* A non-BR/EDR destination must be an AMP controller that is up */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision.  Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	/* Accept: become the move responder */
	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5361 
/* Handle a successful or pending Move Channel Response on the
 * initiator side, advancing the channel's move state machine.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	/* NOTE(review): l2cap_get_chan_by_scid() appears to return the
	 * channel locked and referenced; see unlock/put at the end.
	 */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		/* Unknown channel: confirm by icid so the peer can clean up */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		/* Peer needs more time: arm the extended response timer */
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		/* NOTE(review): hchan is always NULL until the lookup
		 * above is implemented, so the branches below it are
		 * currently unreachable.
		 */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5452 
/* Handle a failed Move Channel Response.  As initiator, either switch
 * to the responder role on a collision or cancel the move; in all
 * cases answer with an unconfirmed Move Channel Confirmation.
 */
static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
			    u16 result)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_ident(conn, ident);
	if (!chan) {
		/* Could not locate channel, icid is best guess */
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);

	if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
		bool collision = (result == L2CAP_MR_COLLISION);

		if (collision) {
			/* Lost the collision: peer's move proceeds,
			 * we become the responder.
			 */
			chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
		} else {
			/* Cleanup - cancel move */
			chan->move_id = chan->local_amp_id;
			l2cap_move_done(chan);
		}
	}

	l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
5482 
/* Handle an L2CAP Move Channel Response and route it to the success
 * or failure path of the move state machine.
 */
static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
				  struct l2cap_cmd_hdr *cmd,
				  u16 cmd_len, void *data)
{
	struct l2cap_move_chan_rsp *rsp = data;
	u16 icid, result;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	switch (result) {
	case L2CAP_MR_SUCCESS:
	case L2CAP_MR_PEND:
		l2cap_move_continue(conn, icid, result);
		break;
	default:
		l2cap_move_fail(conn, cmd->ident, icid, result);
		break;
	}

	return 0;
}
5505 
/* Handle an L2CAP Move Channel Confirmation: commit or roll back the
 * move and always acknowledge with a Confirmation Response.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result != L2CAP_MC_CONFIRMED) {
			/* Unconfirmed: stay on the current controller */
			chan->move_id = chan->local_amp_id;
		} else {
			/* Confirmed: commit to the destination controller */
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5548 
/* Handle an L2CAP Move Channel Confirmation Response, the final
 * message of a channel move, and commit the move if we were waiting
 * for this response.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	/* NOTE(review): l2cap_get_chan_by_scid() appears to return the
	 * channel locked and referenced; see unlock/put below.
	 */
	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		/* Commit to the destination controller */
		chan->local_amp_id = chan->move_id;

		/* Back on BR/EDR: drop the now-unused AMP logical link */
		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5584 
/* Handle an LE Connection Parameter Update Request: validate the
 * proposed parameters, always respond, and apply the update when the
 * parameters are acceptable.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	/* Only the central may be asked to update the parameters */
	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	/* Validate the requested range before deciding the answer */
	err = hci_check_conn_params(min, max, latency, to_multiplier);
	rsp.result = err ? cpu_to_le16(L2CAP_CONN_PARAM_REJECTED) :
			   cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		/* Apply the parameters and let mgmt decide whether the
		 * new values should be persisted.
		 */
		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);
	}

	return 0;
}
5634 
/* Handle an LE Credit Based Connection Response for a channel we
 * initiated.  On success the channel becomes ready; on an
 * authentication/encryption failure the security level is raised and
 * the connect is retried; any other result tears the channel down.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid    = __le16_to_cpu(rsp->dcid);
	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	/* A success response must carry sane values: MTU/MPS of at
	 * least 23 and a DCID within the LE dynamic range.
	 */
	if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 ||
					   dcid < L2CAP_CID_DYN_START ||
					   dcid > L2CAP_CID_LE_DYN_END))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	/* Match the pending channel by the request's ident */
	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_LE_SUCCESS:
		/* Refuse a dcid that is already in use on this conn */
		if (__l2cap_get_chan_by_dcid(conn, dcid)) {
			err = -EBADSLT;
			break;
		}

		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_LE_AUTHENTICATION:
	case L2CAP_CR_LE_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		/* Raise the channel security one level above the link */
		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5721 
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Returns 0 or a negative error; the caller uses a non-zero return to
 * send a Command Reject.  NOTE(review): the return values of most
 * *_rsp handlers are discarded here — presumably because a reject
 * should not be sent in reply to a response; confirm against callers.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		/* Connect and Create Channel responses share a handler */
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5801 
/* Handle an incoming LE Credit Based Connection Request: validate the
 * PSM and parameters, find a listening channel, check security, create
 * the new channel, and either defer to the socket owner or respond
 * immediately.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);
	psm  = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum allowed LE MTU/MPS */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		chan = NULL;
		goto response;
	}

	/* Lock order: conn->chan_lock, then the parent channel */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_LE_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_LE_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	/* Spawn the child channel from the listening one */
	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_LE_NO_MEM;
		goto response_unlock;
	}

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm  = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;

	__l2cap_chan_add(conn, chan);

	l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits));

	/* Values echoed back in the response below */
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_LE_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	/* Deferred setup: the response is sent later by the owner */
	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid    = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result  = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5940 
/* Handle an LE Flow Control Credit packet: add the peer's new credits
 * to the channel and resume any transmission that was blocked waiting
 * for credits.  A peer that overflows the credit count is disconnected.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid	= __le16_to_cpu(pkt->cid);
	credits	= __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	/* NOTE(review): l2cap_get_chan_by_dcid() appears to return the
	 * channel locked and referenced; see unlock/put below.
	 */
	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	/* The running total must never exceed LE_FLOWCTL_MAX_CREDITS */
	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		goto unlock;
	}

	chan->tx_credits += credits;

	/* Resume sending */
	l2cap_le_flowctl_send(chan);

	if (chan->tx_credits)
		chan->ops->resume(chan);

unlock:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return 0;
}
5987 
/* Handle an L2CAP Enhanced Credit Based Connection Request, which may
 * ask for up to L2CAP_ECRED_MAX_CID channels at once.  Each requested
 * SCID is handled independently; refused SCIDs get a zero dcid in the
 * response while the single shared result code records the last error.
 */
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_req *req = (void *) data;
	struct {
		struct l2cap_ecred_conn_rsp rsp;
		__le16 dcid[L2CAP_ECRED_MAX_CID];
	} __packed pdu;
	struct l2cap_chan *chan, *pchan;
	u16 mtu, mps;
	__le16 psm;
	u8 result, len = 0;
	int i, num_scid;
	bool defer = false;

	if (!enable_ecred)
		return -EINVAL;

	/* Zero the response PDU before any validation failure can jump
	 * to the response label: the full rsp header (mtu/mps/credits)
	 * is transmitted even on error, so it must never carry
	 * uninitialized stack data.  (Previously the memset happened
	 * after several error gotos, leaking stack bytes to the peer.)
	 */
	memset(&pdu, 0, sizeof(pdu));

	/* The payload is the fixed header plus a whole number of SCIDs */
	if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);

	if (num_scid > ARRAY_SIZE(pdu.dcid)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto response;
	}

	mtu  = __le16_to_cpu(req->mtu);
	mps  = __le16_to_cpu(req->mps);

	if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_CR_LE_UNACCEPT_PARAMS;
		goto response;
	}

	psm  = req->psm;

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A
	 * page 1059:
	 *
	 * Valid range: 0x0001-0x00ff
	 *
	 * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges
	 */
	if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_LE_BAD_PSM;
		goto response;
	}

	/* Lock order: conn->chan_lock, then the parent channel */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_LE_AUTHENTICATION;
		goto unlock;
	}

	result = L2CAP_CR_LE_SUCCESS;

	/* Each SCID gets its own channel; a failure for one SCID leaves
	 * its dcid entry zeroed and updates the shared result code, but
	 * does not stop processing of the remaining SCIDs.
	 */
	for (i = 0; i < num_scid; i++) {
		u16 scid = __le16_to_cpu(req->scid[i]);

		BT_DBG("scid[%d] 0x%4.4x", i, scid);

		pdu.dcid[i] = 0x0000;
		len += sizeof(*pdu.dcid);

		/* Check for valid dynamic CID range */
		if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
			result = L2CAP_CR_LE_INVALID_SCID;
			continue;
		}

		/* Check if we already have channel with that dcid */
		if (__l2cap_get_chan_by_dcid(conn, scid)) {
			result = L2CAP_CR_LE_SCID_IN_USE;
			continue;
		}

		chan = pchan->ops->new_connection(pchan);
		if (!chan) {
			result = L2CAP_CR_LE_NO_MEM;
			continue;
		}

		bacpy(&chan->src, &conn->hcon->src);
		bacpy(&chan->dst, &conn->hcon->dst);
		chan->src_type = bdaddr_src_type(conn->hcon);
		chan->dst_type = bdaddr_dst_type(conn->hcon);
		chan->psm  = psm;
		chan->dcid = scid;
		chan->omtu = mtu;
		chan->remote_mps = mps;

		__l2cap_chan_add(conn, chan);

		l2cap_ecred_init(chan, __le16_to_cpu(req->credits));

		/* Init response: mtu/mps/credits are shared by every
		 * channel in the response, so fill them in once.
		 */
		if (!pdu.rsp.credits) {
			pdu.rsp.mtu = cpu_to_le16(chan->imtu);
			pdu.rsp.mps = cpu_to_le16(chan->mps);
			pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
		}

		pdu.dcid[i] = cpu_to_le16(chan->scid);

		__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

		chan->ident = cmd->ident;
		chan->mode = L2CAP_MODE_EXT_FLOWCTL;

		if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
			l2cap_state_change(chan, BT_CONNECT2);
			defer = true;
			chan->ops->defer(chan);
		} else {
			l2cap_chan_ready(chan);
		}
	}

unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

response:
	pdu.rsp.result = cpu_to_le16(result);

	/* With deferred setup the response is sent later by the owner */
	if (defer)
		return 0;

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
		       sizeof(pdu.rsp) + len, &pdu);

	return 0;
}
6143 
/* Handle an L2CAP Enhanced Credit Based Connection Response.  Walks
 * every channel created by the matching request (same ident) and
 * applies its per-channel dcid plus the shared mtu/mps/credits/result.
 */
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_ecred_conn_rsp *rsp = (void *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 mtu, mps, credits, result;
	struct l2cap_chan *chan, *tmp;
	int err = 0, sec_level;
	int i = 0;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	mtu     = __le16_to_cpu(rsp->mtu);
	mps     = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result  = __le16_to_cpu(rsp->result);

	BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
	       result);

	mutex_lock(&conn->chan_lock);

	/* From here on, cmd_len counts the remaining dcid bytes */
	cmd_len -= sizeof(*rsp);

	/* _safe iteration: l2cap_chan_del() removes entries as we go */
	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		u16 dcid;

		/* Only channels pending on this exact request */
		if (chan->ident != cmd->ident ||
		    chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
		    chan->state == BT_CONNECTED)
			continue;

		l2cap_chan_lock(chan);

		/* Check that there is a dcid for each pending channel */
		if (cmd_len < sizeof(dcid)) {
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			continue;
		}

		dcid = __le16_to_cpu(rsp->dcid[i++]);
		cmd_len -= sizeof(u16);

		BT_DBG("dcid[%d] 0x%4.4x", i, dcid);

		/* Check if dcid is already in use */
		if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
			/* If a device receives a
			 * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
			 * already-assigned Destination CID, then both the
			 * original channel and the new channel shall be
			 * immediately discarded and not used.
			 */
			l2cap_chan_del(chan, ECONNREFUSED);
			l2cap_chan_unlock(chan);
			chan = __l2cap_get_chan_by_dcid(conn, dcid);
			l2cap_chan_lock(chan);
			l2cap_chan_del(chan, ECONNRESET);
			l2cap_chan_unlock(chan);
			continue;
		}

		switch (result) {
		case L2CAP_CR_LE_AUTHENTICATION:
		case L2CAP_CR_LE_ENCRYPTION:
			/* If we already have MITM protection we can't do
			 * anything.
			 */
			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			/* Raise security one level above the link level */
			sec_level = hcon->sec_level + 1;
			if (chan->sec_level < sec_level)
				chan->sec_level = sec_level;

			/* We'll need to send a new Connect Request */
			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);

			smp_conn_security(hcon, chan->sec_level);
			break;

		case L2CAP_CR_LE_BAD_PSM:
			l2cap_chan_del(chan, ECONNREFUSED);
			break;

		default:
			/* If dcid was not set it means channels was refused */
			if (!dcid) {
				l2cap_chan_del(chan, ECONNREFUSED);
				break;
			}

			chan->ident = 0;
			chan->dcid = dcid;
			chan->omtu = mtu;
			chan->remote_mps = mps;
			chan->tx_credits = credits;
			l2cap_chan_ready(chan);
			break;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	return err;
}
6257 
/* Handle L2CAP_ECRED_RECONF_REQ: the peer wants to change MTU/MPS on one
 * or more enhanced credit-based channels, identified by the trailing
 * SCID list.  Always answers with an L2CAP_ECRED_RECONF_RSP carrying the
 * result, except for a malformed (zero) SCID, which is treated as a
 * protocol error.
 *
 * Fix: the original length check read
 *	cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)
 * where '%' binds tighter than '-', so the second operand collapsed to
 * plain cmd_len (sizeof(*req) % sizeof(u16) == 0) and every well-formed
 * request was rejected with L2CAP_CR_LE_INVALID_PARAMS.  Parenthesizing
 * restores the intended "payload is header plus whole u16 SCIDs" test.
 */
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_ecred_reconf_req *req = (void *) data;
	struct l2cap_ecred_reconf_rsp rsp;
	u16 mtu, mps, result;
	struct l2cap_chan *chan;
	int i, num_scid;

	if (!enable_ecred)
		return -EINVAL;

	/* Payload must hold the fixed header plus a whole number of
	 * 16-bit SCIDs.
	 */
	if (cmd_len < sizeof(*req) ||
	    (cmd_len - sizeof(*req)) % sizeof(u16)) {
		result = L2CAP_CR_LE_INVALID_PARAMS;
		goto respond;
	}

	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);

	BT_DBG("mtu %u mps %u", mtu, mps);

	if (mtu < L2CAP_ECRED_MIN_MTU) {
		result = L2CAP_RECONF_INVALID_MTU;
		goto respond;
	}

	if (mps < L2CAP_ECRED_MIN_MPS) {
		result = L2CAP_RECONF_INVALID_MPS;
		goto respond;
	}

	cmd_len -= sizeof(*req);
	num_scid = cmd_len / sizeof(u16);
	result = L2CAP_RECONF_SUCCESS;

	for (i = 0; i < num_scid; i++) {
		u16 scid;

		scid = __le16_to_cpu(req->scid[i]);
		if (!scid)
			return -EPROTO;

		/* The peer's source CID is our destination CID */
		chan = __l2cap_get_chan_by_dcid(conn, scid);
		if (!chan)
			continue;

		/* If the MTU value is decreased for any of the included
		 * channels, then the receiver shall disconnect all
		 * included channels.
		 */
		if (chan->omtu > mtu) {
			BT_ERR("chan %p decreased MTU %u -> %u", chan,
			       chan->omtu, mtu);
			result = L2CAP_RECONF_INVALID_MTU;
		}

		chan->omtu = mtu;
		chan->remote_mps = mps;
	}

respond:
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
		       &rsp);

	return 0;
}
6328 
/* Handle L2CAP_ECRED_RECONF_RSP: the peer's answer to our reconfigure
 * request.  Per the Core spec the Credit Based Reconfigure Response PDU
 * carries only a 16-bit Result, so parse it with the reconf response
 * structure.  The previous cast to the larger l2cap_ecred_conn_rsp made
 * the length check demand a full connection-response payload and read
 * the result from the wrong offset, so valid responses were rejected
 * with -EPROTO.  Also print the host-order result rather than the raw
 * little-endian field.  On failure, every channel still pending under
 * this ident is torn down.
 */
static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					 u8 *data)
{
	struct l2cap_chan *chan, *tmp;
	struct l2cap_ecred_reconf_rsp *rsp = (void *) data;
	u16 result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	result = __le16_to_cpu(rsp->result);

	BT_DBG("result 0x%4.4x", result);

	/* Success requires no further action */
	if (!result)
		return 0;

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		if (chan->ident != cmd->ident)
			continue;

		l2cap_chan_del(chan, ECONNRESET);
	}

	return 0;
}
6356 
/* Handle L2CAP_COMMAND_REJ on an LE link: the peer did not understand
 * one of our signaling commands.  A channel still waiting on that
 * command ident can never complete, so fail it with ECONNREFUSED.
 * The reject payload itself is only length-checked, not interpreted.
 */
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
	struct l2cap_chan *chan;

	if (cmd_len < sizeof(*rej))
		return -EPROTO;

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan)
		goto done;

	l2cap_chan_lock(chan);
	l2cap_chan_del(chan, ECONNREFUSED);
	l2cap_chan_unlock(chan);

done:
	mutex_unlock(&conn->chan_lock);
	return 0;
}
6381 
/* Dispatch one LE signaling command to its handler.  Handlers whose
 * failure cannot be acted upon have their return value ignored; an
 * unknown opcode yields -EINVAL so the caller emits a Command Reject.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	/* LE credit based connections */
	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	/* Enhanced credit based connections */
	case L2CAP_ECRED_CONN_REQ:
		err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_CONN_RSP:
		err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_REQ:
		err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECRED_RECONF_RSP:
		err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
		break;

	/* Connection parameter updates */
	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		/* Nothing to do on the response */
		break;

	/* Teardown and rejection */
	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
6444 
/* Process one PDU received on the LE signaling channel.  Unlike BR/EDR,
 * an LE signaling PDU carries exactly one command, so its length must
 * account for the entire remaining skb.  Malformed packets are dropped;
 * a handler error is answered with a Command Reject.  The skb is
 * consumed on all paths.
 */
static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
					struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	u16 len;
	int err;

	if (hcon->type != LE_LINK)
		goto drop;

	if (skb->len < L2CAP_CMD_HDR_SIZE)
		goto drop;

	cmd = (void *) skb->data;
	skb_pull(skb, L2CAP_CMD_HDR_SIZE);

	len = le16_to_cpu(cmd->len);

	BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);

	/* One command per PDU: length must match exactly; ident 0 is
	 * reserved and therefore invalid.
	 */
	if (len != skb->len || !cmd->ident) {
		BT_DBG("corrupted command");
		goto drop;
	}

	err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
	if (err) {
		struct l2cap_cmd_rej_unk rej;

		/* NOTE(review): message text is misleading - err here is a
		 * handler failure, not necessarily a link type problem.
		 */
		BT_ERR("Wrong link type (%d)", err);

		rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
			       sizeof(rej), &rej);
	}

drop:
	kfree_skb(skb);
}
6485 
/* Process a BR/EDR signaling channel PDU, which may contain several
 * concatenated commands.  Each command is validated and dispatched in
 * turn; a truncated command aborts the walk, and a handler error is
 * answered with a Command Reject.  The skb is consumed on all paths.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_cmd_hdr *cmd;
	int err;

	/* Let raw sockets observe the signaling traffic first */
	l2cap_raw_recv(conn, skb);

	if (hcon->type != ACL_LINK)
		goto drop;

	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		len = le16_to_cpu(cmd->len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		/* Stop on a command claiming more data than remains, or
		 * carrying the reserved ident 0.
		 */
		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		/* Advance to the next command in the same PDU */
		skb_pull(skb, len);
	}

drop:
	kfree_skb(skb);
}
6531 
l2cap_check_fcs(struct l2cap_chan * chan,struct sk_buff * skb)6532 static int l2cap_check_fcs(struct l2cap_chan *chan,  struct sk_buff *skb)
6533 {
6534 	u16 our_fcs, rcv_fcs;
6535 	int hdr_size;
6536 
6537 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
6538 		hdr_size = L2CAP_EXT_HDR_SIZE;
6539 	else
6540 		hdr_size = L2CAP_ENH_HDR_SIZE;
6541 
6542 	if (chan->fcs == L2CAP_FCS_CRC16) {
6543 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
6544 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
6545 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
6546 
6547 		if (our_fcs != rcv_fcs)
6548 			return -EBADMSG;
6549 	}
6550 	return 0;
6551 }
6552 
/* Answer a poll (P=1) from the remote: acknowledge with F=1 via RNR when
 * locally busy, otherwise via pending I-frames, falling back to an RR
 * S-frame if no outgoing frame carried the F-bit.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* Remote just left its busy state: restart the retransmission
	 * timer if frames are still awaiting acknowledgment.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
6586 
/* Append new_frag to skb's frag_list and update the aggregate SDU
 * accounting.  *last_frag caches the list tail so appends stay O(1);
 * for the first fragment it points at the head skb itself.
 */
static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
			    struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	 * skb->data_len reflects only data in fragments
	 */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	/* Make the head skb describe the whole reassembled SDU */
	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
6605 
/* Reassemble SDUs from incoming I-frames according to the SAR
 * (segmentation and reassembly) field, handing complete SDUs to
 * chan->ops->recv(), which takes ownership of the skb.
 *
 * Returns 0 when the frame was consumed or stored.  Error paths work by
 * leaving err set (it starts at -EINVAL) and breaking out of the
 * switch; the trailing error block then frees the frame (if still
 * owned) and any partially assembled SDU.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* No reassembly may already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* The start segment carries the total SDU length first */
		if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
			break;

		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start segment must not already hold the full SDU */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* Ownership transferred to the reassembly state */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Reaching/exceeding the announced length here is an
		 * error: the SDU must be completed by a SAR_END frame.
		 */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* Final size must match the length from the start segment */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* kfree_skb(NULL) is a no-op, so both frees are safe */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
6690 
/* Resegment queued outgoing data after an MTU change (e.g. following a
 * channel move).  Not implemented yet; reports success unconditionally.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	/* Placeholder */
	return 0;
}
6696 
l2cap_chan_busy(struct l2cap_chan * chan,int busy)6697 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
6698 {
6699 	u8 event;
6700 
6701 	if (chan->mode != L2CAP_MODE_ERTM)
6702 		return;
6703 
6704 	event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
6705 	l2cap_tx(chan, NULL, NULL, event);
6706 }
6707 
/* Drain the SREJ hold queue once missing frames have arrived: feed
 * sequential frames into reassembly until a remaining gap (or local
 * busy) stops us, and return to normal RECV state with an ack when the
 * queue is empty.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap: the next in-order frame has not arrived yet */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		/* All gaps filled - resume normal reception */
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
6741 
/* Handle a received SREJ S-frame: the peer requests retransmission of
 * the single I-frame with sequence number control->reqseq.  Invalid or
 * unavailable sequence numbers, or exceeding the retry limit, tear the
 * connection down; otherwise the frame is retransmitted with the
 * appropriate ERTM F/P-bit bookkeeping.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would select a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		/* Poll set: our retransmission must carry the F-bit */
		l2cap_pass_to_tx(chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this F-bit answers
			 * the SREJ we already acted on for this reqseq.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6799 
/* Handle a received REJ S-frame: the peer reports a sequence gap and
 * requests retransmission of every I-frame from control->reqseq onward.
 * Invalid reqseq or retry-limit exhaustion disconnects the channel.
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* reqseq == next_tx_seq would reject a frame never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this F-bit doesn't answer a REJ we
		 * already acted upon.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6836 
/* Classify an incoming I-frame's tx sequence number relative to the
 * receive window and any outstanding SREJ state, returning one of the
 * L2CAP_TXSEQ_* dispositions that drive the rx state machines.
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	/* txseq before expected_tx_seq (modulo the window) was already
	 * received once.
	 */
	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6922 
/* ERTM rx state machine, RECV (normal) state: process an incoming
 * I-frame or S-frame event.  Frames stored in a queue or consumed by
 * reassembly set skb_in_use; any skb not taken over is freed at the
 * end.  Returns 0 or a negative error from reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	struct l2cap_ctrl local_control;
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			skb_in_use = true;

			/* l2cap_reassemble_sdu may free skb, hence invalidate
			 * control, so make a copy in advance to use it after
			 * l2cap_reassemble_sdu returns and to avoid the race
			 * condition, for example:
			 *
			 * The current thread calls:
			 *   l2cap_reassemble_sdu
			 *     chan->ops->recv == l2cap_sock_recv_cb
			 *       __sock_queue_rcv_skb
			 * Another thread calls:
			 *   bt_sock_recvmsg
			 *     skb_recv_datagram
			 *     skb_free_datagram
			 * Then the current thread tries to access control, but
			 * it was freed by skb_free_datagram.
			 */
			local_control = *control;
			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (local_control.final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					local_control.final = 0;
					l2cap_retransmit_all(chan, &local_control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Already received; only the ack info is useful */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			/* Remote left busy: restart retransmission timing */
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7074 
/* ERTM rx state machine, SREJ_SENT state: SREJs are outstanding, so
 * out-of-order frames are stored in srej_q until the gaps are filled.
 * Frames taken into the queue set skb_in_use; any skb not taken over is
 * freed at the end.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived: drop the entry
			 * and try to drain the hold queue.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			/* Re-issue the SREJ for the list tail with F=1 */
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			/* Plain RNR: just acknowledge what we have */
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
7217 
/* Complete a channel move: return to normal RECV state, adopt the MTU
 * of the link now carrying the channel (the AMP controller's block MTU
 * when a high-speed link is in use, otherwise the BR/EDR ACL MTU), then
 * resegment any pending data for the new MTU.
 */
static int l2cap_finish_move(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->rx_state = L2CAP_RX_STATE_RECV;

	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	return l2cap_resegment(chan);
}
7231 
/* ERTM rx state machine, WAIT_P state (after a channel move): only an
 * S-frame with the P-bit set is acceptable.  Rewind the transmit side
 * to the peer's reqseq, finish the move, answer the poll, and hand the
 * event on to the RECV-state handler.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* I-frames are not expected while waiting for the poll */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
7269 
/* ERTM rx state machine, WAIT_F state (after a channel move): only a
 * frame with the F-bit set is acceptable.  Rewind the transmit side to
 * the peer's reqseq, adopt the MTU of the link now carrying the channel,
 * resegment, and process the frame in RECV state.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* High-speed link uses the AMP block MTU, else the ACL MTU */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
7307 
__valid_reqseq(struct l2cap_chan * chan,u16 reqseq)7308 static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
7309 {
7310 	/* Make sure reqseq is for a packet that has been sent but not acked */
7311 	u16 unacked;
7312 
7313 	unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
7314 	return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
7315 }
7316 
/* Feed one validated event into the ERTM RX state machine.  A frame
 * carrying an out-of-window reqseq is a fatal protocol error and
 * triggers a disconnect request instead.
 */
static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff *skb, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
	       control, skb, event, chan->rx_state);

	if (!__valid_reqseq(chan, control->reqseq)) {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
		       control->reqseq, chan->next_tx_seq,
		       chan->expected_ack_seq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return 0;
	}

	/* Dispatch on the current receive state */
	switch (chan->rx_state) {
	case L2CAP_RX_STATE_RECV:
		err = l2cap_rx_state_recv(chan, control, skb, event);
		break;
	case L2CAP_RX_STATE_SREJ_SENT:
		err = l2cap_rx_state_srej_sent(chan, control, skb, event);
		break;
	case L2CAP_RX_STATE_WAIT_P:
		err = l2cap_rx_state_wait_p(chan, control, skb, event);
		break;
	case L2CAP_RX_STATE_WAIT_F:
		err = l2cap_rx_state_wait_f(chan, control, skb, event);
		break;
	default:
		/* shut it down */
		break;
	}

	return err;
}
7353 
/* Receive path for streaming mode: frames are never retransmitted, so
 * an out-of-sequence frame simply resets reassembly state and is
 * discarded.  Always returns 0.
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	/* l2cap_reassemble_sdu may free skb, hence invalidate control, so store
	 * the txseq field in advance to use it after l2cap_reassemble_sdu
	 * returns and to avoid the race condition, for example:
	 *
	 * The current thread calls:
	 *   l2cap_reassemble_sdu
	 *     chan->ops->recv == l2cap_sock_recv_cb
	 *       __sock_queue_rcv_skb
	 * Another thread calls:
	 *   bt_sock_recvmsg
	 *     skb_recv_datagram
	 *     skb_free_datagram
	 * Then the current thread tries to access control, but it was freed by
	 * skb_free_datagram.
	 */
	u16 txseq = control->txseq;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) {
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Unexpected txseq: drop any partially reassembled SDU
		 * along with this fragment.
		 */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize expectations to the frame just processed */
	chan->last_acked_seq = txseq;
	chan->expected_tx_seq = __next_seq(chan, txseq);

	return 0;
}
7405 
/* Entry point for data frames on an ERTM or streaming mode channel.
 * Unpacks the control field, validates FCS, length and F/P bits, then
 * routes I-frames into the RX state machine (or the streaming path)
 * and S-frames into the state machine via the event table.
 * Consumes skb on every path; always returns 0.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* The SDU length header of a start fragment is not payload */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* Payload may not exceed the negotiated MPS */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	/* Optional per-channel filter may veto the frame */
	if (chan->ops->filter) {
		if (chan->ops->filter(chan, skb))
			goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the S-frame function (super) to an RX event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* S-frames carry no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
7498 
l2cap_chan_le_send_credits(struct l2cap_chan * chan)7499 static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
7500 {
7501 	struct l2cap_conn *conn = chan->conn;
7502 	struct l2cap_le_credits pkt;
7503 	u16 return_credits;
7504 
7505 	return_credits = (chan->imtu / chan->mps) + 1;
7506 
7507 	if (chan->rx_credits >= return_credits)
7508 		return;
7509 
7510 	return_credits -= chan->rx_credits;
7511 
7512 	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
7513 
7514 	chan->rx_credits += return_credits;
7515 
7516 	pkt.cid     = cpu_to_le16(chan->scid);
7517 	pkt.credits = cpu_to_le16(return_credits);
7518 
7519 	chan->ident = l2cap_get_ident(conn);
7520 
7521 	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
7522 }
7523 
l2cap_ecred_recv(struct l2cap_chan * chan,struct sk_buff * skb)7524 static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
7525 {
7526 	int err;
7527 
7528 	BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
7529 
7530 	/* Wait recv to confirm reception before updating the credits */
7531 	err = chan->ops->recv(chan, skb);
7532 
7533 	/* Update credits whenever an SDU is received */
7534 	l2cap_chan_le_send_credits(chan);
7535 
7536 	return err;
7537 }
7538 
/* Receive one PDU on an LE or enhanced credit based flow control
 * channel: account for the spent credit, parse the SDU length header
 * on a first PDU, and reassemble fragments into chan->sdu.  Takes
 * ownership of skb.  Returns a negative errno only for the initial
 * credit/PDU-size violations; after that, errors are handled (and the
 * skb freed) internally and 0 is returned -- see the comment at the
 * end.
 */
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* The peer must hold a credit for every PDU it sends */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Update if remote had run out of credits, this should only happens
	 * if the remote is not using the entire MPS.
	 */
	if (!chan->rx_credits)
		l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		u16 sdu_len;

		/* First PDU of an SDU begins with the SDU length field */
		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Complete SDU in a single PDU: deliver it right away */
		if (skb->len == sdu_len)
			return l2cap_ecred_recv(chan, skb);

		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		/* Detect if remote is not able to use the selected MPS */
		if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
			u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;

			/* Adjust the number of credits */
			BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
			chan->mps = mps_len;
			l2cap_chan_le_send_credits(chan);
		}

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb now belongs to the partially reassembled SDU */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			/* Ownership passed to the recv callback */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		/* kfree_skb() tolerates NULL, so this frees whichever
		 * of skb / chan->sdu is still owned here.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
7642 
/* Dispatch an incoming data frame to the channel identified by cid,
 * according to the channel's operating mode.  The channel reference
 * and lock obtained here (or in the A2MP branch) are released at
 * done:.  Consumes skb on every path.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	/* On success the channel comes back referenced and locked
	 * (matched by the unlock/put at done: below).
	 */
	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			/* Create the A2MP channel on first use */
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			l2cap_chan_hold(chan);
			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
	case L2CAP_MODE_EXT_FLOWCTL:
		/* Credit based modes: negative return means the skb was
		 * rejected before being consumed.
		 */
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* A zero return means recv took ownership of the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* l2cap_data_rcv() consumes the skb unconditionally */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
7719 
/* Handle a frame on the connectionless data channel: deliver it to a
 * channel listening on the given PSM, if any.  Consumes skb.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	/* Connectionless data is only handled on BR/EDR ACL links */
	if (hcon->type != ACL_LINK)
		goto free_skb;

	/* Returns a referenced channel; the reference is dropped on
	 * every exit path below.
	 */
	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* A zero return means recv took ownership of the skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
7756 
/* Parse the basic L2CAP header of a complete frame and route it to the
 * signaling, connectionless or data channel handlers.  Frames that
 * arrive before the HCI link is fully connected are queued on
 * conn->pending_rx for later processing.  Consumes skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	/* lh keeps pointing at the header bytes after the pull, so
	 * reading cid/len below is still valid.
	 */
	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The header length must match the remaining payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a PSM before the payload */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
7811 
process_pending_rx(struct work_struct * work)7812 static void process_pending_rx(struct work_struct *work)
7813 {
7814 	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7815 					       pending_rx_work);
7816 	struct sk_buff *skb;
7817 
7818 	BT_DBG("");
7819 
7820 	while ((skb = skb_dequeue(&conn->pending_rx)))
7821 		l2cap_recv_frame(conn, skb);
7822 }
7823 
/* Look up or create the L2CAP connection state for an HCI connection.
 * Returns the existing conn if one is already attached, otherwise
 * allocates and initializes a new one together with its HCI channel.
 * Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* The conn holds its own reference on the hci_conn */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	switch (hcon->type) {
	case LE_LINK:
		/* Use the LE-specific MTU when the controller reports one */
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		fallthrough;
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	/* Advertise the A2MP fixed channel only when HS is enabled */
	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	/* Advertise SMP over BR/EDR when secure connections are usable
	 * (or SMP over BR/EDR is force-enabled).
	 */
	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	/* Default disconnect reason until something more specific is set */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7890 
is_valid_psm(u16 psm,u8 dst_type)7891 static bool is_valid_psm(u16 psm, u8 dst_type) {
7892 	if (!psm)
7893 		return false;
7894 
7895 	if (bdaddr_type_is_le(dst_type))
7896 		return (psm <= 0x00ff);
7897 
7898 	/* PSM must be odd and lsb of upper byte must be 0 */
7899 	return ((psm & 0x0101) == 0x0001);
7900 }
7901 
/* Iteration context for l2cap_chan_by_pid(): used to count channels
 * on a connection that share the connecting channel's owner PID/PSM.
 */
struct l2cap_chan_data {
	struct l2cap_chan *chan;	/* channel initiating the connect */
	struct pid *pid;		/* owner PID to match against */
	int count;			/* running count of matches */
};
7907 
l2cap_chan_by_pid(struct l2cap_chan * chan,void * data)7908 static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data)
7909 {
7910 	struct l2cap_chan_data *d = data;
7911 	struct pid *pid;
7912 
7913 	if (chan == d->chan)
7914 		return;
7915 
7916 	if (!test_bit(FLAG_DEFER_SETUP, &chan->flags))
7917 		return;
7918 
7919 	pid = chan->ops->get_peer_pid(chan);
7920 
7921 	/* Only count deferred channels with the same PID/PSM */
7922 	if (d->pid != pid || chan->psm != d->chan->psm || chan->ident ||
7923 	    chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT)
7924 		return;
7925 
7926 	d->count++;
7927 }
7928 
/* Initiate an outgoing L2CAP channel connection to @dst, creating the
 * underlying ACL or LE link if one does not exist yet.
 *
 * @chan:     channel to connect; must be in BT_OPEN or BT_BOUND state
 * @psm:      PSM (little endian) for connection-oriented channels
 * @cid:      fixed destination CID, or 0 when connecting by PSM
 * @dst:      remote device address
 * @dst_type: BDADDR_* address type of @dst
 *
 * Returns 0 when the connection has been started (or is already in
 * progress) and a negative errno otherwise.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Only raw channels may connect without a valid PSM or CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	/* Reject modes that are unsupported or administratively disabled */
	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		fallthrough;
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising, connect directly; otherwise use the
		 * scan-based connection establishment.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			hcon = hci_connect_le(hdev, dst, dst_type,
					      chan->sec_level,
					      HCI_LE_CONN_TIMEOUT,
					      HCI_ROLE_SLAVE, NULL);
		else
			hcon = hci_connect_le_scan(hdev, dst, dst_type,
						   chan->sec_level,
						   HCI_LE_CONN_TIMEOUT,
						   CONN_REASON_L2CAP_CHAN);

	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
				       CONN_REASON_L2CAP_CHAN);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
		struct l2cap_chan_data data;

		data.chan = chan;
		data.pid = chan->ops->get_peer_pid(chan);
		data.count = 1;

		l2cap_chan_list(conn, l2cap_chan_by_pid, &data);

		/* Check if there isn't too many channels being connected */
		if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
			hci_conn_drop(hcon);
			err = -EPROTO;
			goto done;
		}
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* A fixed CID must not already be in use on this connection */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, kick off the L2CAP-level setup */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
8114 EXPORT_SYMBOL_GPL(l2cap_chan_connect);
8115 
l2cap_ecred_reconfigure(struct l2cap_chan * chan)8116 static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
8117 {
8118 	struct l2cap_conn *conn = chan->conn;
8119 	struct {
8120 		struct l2cap_ecred_reconf_req req;
8121 		__le16 scid;
8122 	} pdu;
8123 
8124 	pdu.req.mtu = cpu_to_le16(chan->imtu);
8125 	pdu.req.mps = cpu_to_le16(chan->mps);
8126 	pdu.scid    = cpu_to_le16(chan->scid);
8127 
8128 	chan->ident = l2cap_get_ident(conn);
8129 
8130 	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
8131 		       sizeof(pdu), &pdu);
8132 }
8133 
/* Request a new receive MTU on an enhanced credit based channel via
 * the ECRED reconfigure procedure.  The MTU may only grow: returns
 * -EINVAL for attempts to shrink it, 0 otherwise.
 */
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	if (chan->imtu > mtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;

	l2cap_ecred_reconfigure(chan);

	return 0;
}
8147 
8148 /* ---- L2CAP interface with lower layer (HCI) ---- */
8149 
/* HCI callback: an incoming connection from bdaddr is pending.  Scan
 * the global channel list for listeners bound to this adapter's
 * address (or to any address) and combine their link modes into the
 * accept/role-switch mask returned to the HCI layer.  Exact-address
 * listeners take precedence over wildcard ones.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	int exact = 0, lm1 = 0, lm2 = 0;
	struct l2cap_chan *c;

	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);

	/* Find listening sockets and check their link_mode */
	read_lock(&chan_list_lock);
	list_for_each_entry(c, &chan_list, global_l) {
		if (c->state != BT_LISTEN)
			continue;

		if (!bacmp(&c->src, &hdev->bdaddr)) {
			/* Bound exactly to this adapter's address */
			lm1 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm1 |= HCI_LM_MASTER;
			exact++;
		} else if (!bacmp(&c->src, BDADDR_ANY)) {
			/* Wildcard listener; used only without exact match */
			lm2 |= HCI_LM_ACCEPT;
			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
				lm2 |= HCI_LM_MASTER;
		}
	}
	read_unlock(&chan_list_lock);

	return exact ? lm1 : lm2;
}
8178 
8179 /* Find the next fixed channel in BT_LISTEN state, continue iteration
8180  * from an existing channel in the list or from the beginning of the
8181  * global list (by passing NULL as first parameter).
8182  */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume right after the given channel, or start from the head */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		/* Source must match the local address or be a wildcard */
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Take a reference for the caller; may return NULL if
		 * the channel's refcount already dropped to zero.
		 */
		c = l2cap_chan_hold_unless_zero(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
8214 
/* HCI callback: an ACL or LE connection attempt has completed.  On
 * failure, the L2CAP state for the link is torn down; on success, the
 * l2cap_conn is (created and) readied and every listening fixed
 * channel is offered a new child channel on this connection.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Fetch the next listener before dropping our reference */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
8275 
l2cap_disconn_ind(struct hci_conn * hcon)8276 int l2cap_disconn_ind(struct hci_conn *hcon)
8277 {
8278 	struct l2cap_conn *conn = hcon->l2cap_data;
8279 
8280 	BT_DBG("hcon %p", hcon);
8281 
8282 	if (!conn)
8283 		return HCI_ERROR_REMOTE_USER_TERM;
8284 	return conn->disc_reason;
8285 }
8286 
/* HCI callback: the ACL/LE link went down; tear down all L2CAP state
 * attached to it.
 */
static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	/* Only ACL and LE links carry L2CAP */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p reason %d", hcon, reason);

	l2cap_conn_del(hcon, bt_to_errno(reason));
}
8296 
/* React to an encryption change on a connection-oriented channel.
 * Medium security channels tolerate a temporary loss of encryption
 * (an encryption timer is armed / disarmed); high/FIPS security
 * channels are closed as soon as encryption drops.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return;

	switch (chan->sec_level) {
	case BT_SECURITY_MEDIUM:
		if (encrypt == 0x00)
			__set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
		else
			__clear_chan_timer(chan);
		break;
	case BT_SECURITY_HIGH:
	case BT_SECURITY_FIPS:
		if (encrypt == 0x00)
			l2cap_chan_close(chan, ECONNREFUSED);
		break;
	default:
		break;
	}
}
8313 
/* HCI callback: the authentication/encryption state of the link has
 * changed.  Walk every channel on the connection and, depending on its
 * state, resume it, start the connect procedure, or answer a pending
 * incoming connect request according to the new security status.
 */
static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return;

	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
		       state_to_string(chan->state));

		/* The A2MP channel is exempt from this security handling */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		/* Encryption succeeded: record the achieved level */
		if (!status && encrypt)
			chan->sec_level = hcon->sec_level;

		if (!__l2cap_no_conn_pending(chan)) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (!status && (chan->state == BT_CONNECTED ||
				chan->state == BT_CONFIG)) {
			chan->ops->resume(chan);
			l2cap_check_encryption(chan, encrypt);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security done: start the connect; otherwise arm
			 * the disconnect timer.
			 */
			if (!status && l2cap_check_enc_key_size(hcon))
				l2cap_start_connection(chan);
			else
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
		} else if (chan->state == BT_CONNECT2 &&
			   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
			     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status && l2cap_check_enc_key_size(hcon)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Let userspace authorize first */
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					chan->ops->defer(chan);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				/* Security failed: refuse and disconnect */
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid   = cpu_to_le16(chan->dcid);
			rsp.dcid   = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* On success, immediately follow up with our
			 * configuration request if not yet sent.
			 */
			if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
			    res == L2CAP_CR_SUCCESS) {
				char buf[128];
				set_bit(CONF_REQ_SENT, &chan->conf_state);
				l2cap_send_cmd(conn, l2cap_get_ident(conn),
					       L2CAP_CONF_REQ,
					       l2cap_build_conf_req(chan, buf, sizeof(buf)),
					       buf);
				chan->num_conf_req++;
			}
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
8405 
/* HCI callback: an ACL data packet arrived for @hcon.
 * @flags: ACL packet boundary flags (ACL_START/ACL_CONT/...).
 *
 * Reassembles fragmented L2CAP PDUs. A start fragment carries the
 * Basic L2CAP header whose length field gives the total frame size;
 * continuation fragments are appended to conn->rx_skb until the
 * remaining byte count (conn->rx_len) reaches zero. Consumes @skb on
 * every path: either it is handed whole to l2cap_recv_frame(), or it
 * is freed at the drop label — note the switch intentionally falls
 * through to drop after a fragment's data has been copied out.
 */
void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_hdr *hdr;
	int len;

	/* For AMP controller do not create l2cap conn */
	if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	switch (flags) {
	case ACL_START:
	case ACL_START_NO_FLUSH:
	case ACL_COMPLETE:
		/* A new start frame while a reassembly is in progress
		 * means the previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
			       skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len = len - skb->len;
		break;

	case ACL_CONT:
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start frame */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment overruns the length announced in the header:
		 * abandon the whole reassembly.
		 */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
			       skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
					  skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received. l2cap_recv_frame
			 * takes ownership of the skb so set the global
			 * rx_skb pointer to NULL first.
			 */
			struct sk_buff *rx_skb = conn->rx_skb;
			conn->rx_skb = NULL;
			l2cap_recv_frame(conn, rx_skb);
		}
		break;
	}

	/* Reached by goto on errors and by falling out of the switch
	 * after the fragment's payload has been copied: in both cases
	 * the incoming skb itself is no longer needed.
	 */
drop:
	kfree_skb(skb);
}
8509 
/* Callbacks registered with the HCI core: connection setup/teardown
 * and security-change notifications are routed into L2CAP here.
 */
static struct hci_cb l2cap_cb = {
	.name		= "L2CAP",
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
};
8516 
l2cap_debugfs_show(struct seq_file * f,void * p)8517 static int l2cap_debugfs_show(struct seq_file *f, void *p)
8518 {
8519 	struct l2cap_chan *c;
8520 
8521 	read_lock(&chan_list_lock);
8522 
8523 	list_for_each_entry(c, &chan_list, global_l) {
8524 		seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
8525 			   &c->src, c->src_type, &c->dst, c->dst_type,
8526 			   c->state, __le16_to_cpu(c->psm),
8527 			   c->scid, c->dcid, c->imtu, c->omtu,
8528 			   c->sec_level, c->mode);
8529 	}
8530 
8531 	read_unlock(&chan_list_lock);
8532 
8533 	return 0;
8534 }
8535 
/* Generates l2cap_debugfs_fops wrapping l2cap_debugfs_show */
DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs);

/* debugfs dentry for the "l2cap" file; removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
8539 
l2cap_init(void)8540 int __init l2cap_init(void)
8541 {
8542 	int err;
8543 
8544 	err = l2cap_init_sockets();
8545 	if (err < 0)
8546 		return err;
8547 
8548 	hci_register_cb(&l2cap_cb);
8549 
8550 	if (IS_ERR_OR_NULL(bt_debugfs))
8551 		return 0;
8552 
8553 	l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
8554 					    NULL, &l2cap_debugfs_fops);
8555 
8556 	return 0;
8557 }
8558 
/* Tear down the L2CAP layer in reverse order of l2cap_init(). */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);
	hci_unregister_cb(&l2cap_cb);
	l2cap_cleanup_sockets();
}
8565 
/* Runtime-tunable module parameters (mode 0644: root-writable via
 * /sys/module parameters).
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");
8571